code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def is_visit_primitive(obj):
    """Return True if properly visiting the object yields only the object itself."""
    from .base import visit
    # Strings and bytes are special-cased everywhere below: only a
    # single-character string/bytes value visits to itself.
    if isinstance(obj, STR) or isinstance(obj, bytes):
        return len(obj) == 1
    if isinstance(obj, tuple(PRIMITIVE_TYPES)):
        return True
    if isinstance(obj, CONTAINERS):
        return False
    # Unknown type: actually visit it (capped at two results) and compare.
    return list(visit(obj, max_enum=2)) == [obj]
|
Returns true if properly visiting the object returns only the object itself.
|
def _handle_inotify_event(self, wd):
    """Handle a series of events coming-in from inotify.

    Generator: reads raw bytes from the inotify file descriptor ``wd``,
    appends them to the internal buffer, and yields one
    ``(header, type_names, watch_path, filename)`` tuple per complete
    event.  An incomplete trailing event stays buffered for a later call.
    """
    b = os.read(wd, 1024)
    if not b:
        return
    self.__buffer += b
    while 1:
        length = len(self.__buffer)
        if length < _STRUCT_HEADER_LENGTH:
            _LOGGER.debug("Not enough bytes for a header.")
            return
        # We have, at least, a whole-header in the buffer.
        peek_slice = self.__buffer[:_STRUCT_HEADER_LENGTH]
        header_raw = struct.unpack(
            _HEADER_STRUCT_FORMAT,
            peek_slice)
        header = _INOTIFY_EVENT(*header_raw)
        type_names = self._get_event_names(header.mask)
        _LOGGER.debug("Events received in stream: {}".format(type_names))
        # Full event size: fixed-size header plus variable-length name part.
        event_length = (_STRUCT_HEADER_LENGTH + header.len)
        if length < event_length:
            # Name part has not fully arrived yet; wait for the next read.
            return
        filename = self.__buffer[_STRUCT_HEADER_LENGTH:event_length]
        # Our filename is 16-byte aligned and right-padded with NULs.
        filename_bytes = filename.rstrip(b'\0')
        # Consume this event from the front of the buffer.
        self.__buffer = self.__buffer[event_length:]
        # Map the kernel watch descriptor back to the watched path;
        # unknown descriptors (already-removed watches) are skipped.
        path = self.__watches_r.get(header.wd)
        if path is not None:
            filename_unicode = filename_bytes.decode('utf8')
            yield (header, type_names, path, filename_unicode)
        buffer_length = len(self.__buffer)
        if buffer_length < _STRUCT_HEADER_LENGTH:
            break
|
Handle a series of events coming-in from inotify.
|
def part(self):
    """Retrieve the part that holds this Property.

    :returns: The :class:`Part` associated to this property
    :raises APIError: if the `Part` is not found
    """
    json_data = self._json_data
    return self._client.part(pk=json_data['part'], category=json_data['category'])
|
Retrieve the part that holds this Property.
:returns: The :class:`Part` associated to this property
:raises APIError: if the `Part` is not found
|
def _readResponse(self):
"""
Yield each row of response untill !done is received.
:throws TrapError: If one !trap is received.
:throws MultiTrapError: If > 1 !trap is received.
"""
traps = []
reply_word = None
while reply_word != '!done':
reply_word, words = self._readSentence()
if reply_word == '!trap':
traps.append(TrapError(**words))
elif reply_word in ('!re', '!done') and words:
yield words
if len(traps) > 1:
raise MultiTrapError(*traps)
elif len(traps) == 1:
raise traps[0]
|
Yield each row of the response until !done is received.
:throws TrapError: If one !trap is received.
:throws MultiTrapError: If > 1 !trap is received.
|
def responds(self):
    """
    :returns: The frequency with which the user associated with this profile
        responds to messages, or None when the profile shows a
        'contacted' notice instead.
    """
    text = self._contacted_xpb.get_text_(self.profile_tree).lower()
    if 'contacted' in text:
        return None
    return text.strip().replace('replies ', '')
|
:returns: The frequency with which the user associated with this profile
responds to messages.
|
def save(self, save_json=True, save_xml=True):
    """
    Save the metadata json and/or xml to a file or DB.

    :param save_json: flag to save json
    :type save_json: bool
    :param save_xml: flag to save xml
    :type save_xml: bool
    """
    if not self.layer_is_file_based:
        # Non file-based layers persist straight to the database.
        self.write_to_db(save_json, save_xml)
        return
    if save_json:
        self.write_to_file(self.json_uri)
    if save_xml:
        self.write_to_file(self.xml_uri)
|
Saves the metadata json and/or xml to a file or DB.
:param save_json: flag to save json
:type save_json: bool
:param save_xml: flag to save xml
:type save_xml: bool
|
def reset_env(exclude=None):
    """Remove WANDB_ environment variables; used in Jupyter notebooks.

    :param exclude: optional iterable of variable names to keep.
        (Fixed: the previous mutable default ``exclude=[]`` was shared
        between calls.)
    :returns: True if wandb was inited and variables were cleared,
        False otherwise.
    """
    if not os.getenv(env.INITED):
        return False
    keep = set(exclude or ())
    # Snapshot the keys first: os.environ must not change while iterated.
    wandb_keys = [key for key in os.environ.keys()
                  if key.startswith('WANDB_') and key not in keep]
    for key in wandb_keys:
        del os.environ[key]
    return True
|
Remove environment variables, used in Jupyter notebooks
|
def str_replace(x, pat, repl, n=-1, flags=0, regex=False):
    """Replace occurrences of a pattern/regex in a column with some other string.

    :param str pattern: string or a regex pattern
    :param str replace: a replacement string
    :param int n: number of replacements to be made from the start. If -1 make all replacements.
    :param int flags: ??
    :param bool regex: If True, ...?
    :returns: an expression containing the string replacements.

    Example:

    >>> import vaex
    >>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
    >>> df = vaex.from_arrays(text=text)
    >>> df.text.str.replace(pat='et', repl='__')
    Expression = str_replace(text, pat='et', repl='__')
    Length: 5 dtype: str (expression)
    ---------------------------------
    0    Som__hing
    1  very pr__ty
    2    is coming
    3          our
    4         way.
    """
    # Delegate to the native string-sequence implementation, then wrap the
    # result back into an arrow-backed column.
    replaced = _to_string_sequence(x).replace(pat, repl, n, flags, regex)
    return column.ColumnStringArrow(replaced.bytes, replaced.indices,
                                    replaced.length, replaced.offset,
                                    string_sequence=replaced)
|
Replace occurrences of a pattern/regex in a column with some other string.
:param str pattern: string or a regex pattern
:param str replace: a replacement string
:param int n: number of replacements to be made from the start. If -1 make all replacements.
:param int flags: ??
:param bool regex: If True, ...?
:returns: an expression containing the string replacements.
Example:
>>> import vaex
>>> text = ['Something', 'very pretty', 'is coming', 'our', 'way.']
>>> df = vaex.from_arrays(text=text)
>>> df
# text
0 Something
1 very pretty
2 is coming
3 our
4 way.
>>> df.text.str.replace(pat='et', repl='__')
Expression = str_replace(text, pat='et', repl='__')
Length: 5 dtype: str (expression)
---------------------------------
0 Som__hing
1 very pr__ty
2 is coming
3 our
4 way.
|
def updatePassword(self,
                   user,
                   currentPassword,
                   newPassword):
    """Change the password of a user.

    :param user: name of the user whose password is changed.
    :param currentPassword: the user's current password.
    :param newPassword: the password to set.
    :returns: whatever the API POST returns.
    """
    payload = {
        'user': user,
        'currentPassword': currentPassword,
        'newPassword': newPassword,
    }
    return self.__post('/api/updatePassword', data=payload)
|
Change the password of a user.
|
def _loadData(self, data):
    """Load attribute values from Plex XML response.

    :param data: XML element whose children each describe one setting.
    """
    self._data = data
    for elem in data:
        # Renamed from `id`, which shadowed the builtin of the same name.
        setting_id = utils.lowerFirst(elem.attrib['id'])
        if setting_id in self._settings:
            # Known setting: refresh it in place instead of recreating it.
            self._settings[setting_id]._loadData(elem)
            continue
        self._settings[setting_id] = Setting(self._server, elem, self._initpath)
|
Load attribute values from Plex XML response.
|
def ghuser_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Link to a GitHub user.

    Returns 2 part tuple containing list of nodes to insert into the
    document and a list of system messages. Both are allowed to be
    empty.

    :param name: The role name used in the document.
    :param rawtext: The entire markup snippet, with role.
    :param text: The text marked with the role.
    :param lineno: The line number where rawtext appears in the input.
    :param inliner: The inliner instance that called us.
    :param options: Directive options for customization.
    :param content: The directive content for customization.
    """
    # Fixed: options/content previously used mutable default arguments
    # ({} and []), which are shared across calls.
    options = {} if options is None else options
    content = [] if content is None else content
    ref = 'https://www.github.com/' + text
    node = nodes.reference(rawtext, text, refuri=ref, **options)
    return [node], []
|
Link to a GitHub user.
Returns 2 part tuple containing list of nodes to insert into the
document and a list of system messages. Both are allowed to be
empty.
:param name: The role name used in the document.
:param rawtext: The entire markup snippet, with role.
:param text: The text marked with the role.
:param lineno: The line number where rawtext appears in the input.
:param inliner: The inliner instance that called us.
:param options: Directive options for customization.
:param content: The directive content for customization.
|
def OnShowFindReplace(self, event):
    """Open the find & replace dialog for the grid.

    :param event: menu/toolbar event that triggered the handler (unused).
    """
    # Default search direction is downwards.
    data = wx.FindReplaceData(wx.FR_DOWN)
    dlg = wx.FindReplaceDialog(self.grid, data, "Find & Replace",
                               wx.FR_REPLACEDIALOG)
    # Save a reference to data on the dialog -- presumably so it outlives
    # this handler while the dialog is shown; confirm against wx docs.
    dlg.data = data
    dlg.Show(True)
|
Calls the find-replace dialog
|
def set_content(self, data):
    """
    Extract the content from the provider data.

    :param data: contains the data from the provider
    :type data: dict
    :rtype: string
    """
    # Try the candidate fields from richest to poorest.
    for field in ('content', 'summary_detail'):
        content = self._get_content(data, field)
        if content != '':
            return content
    # Last resort: a truthy 'description' entry; otherwise empty string.
    return data.get('description') or ''
|
handle the content from the data
:param data: contains the data from the provider
:type data: dict
:rtype: string
|
def SetSerializersProfiler(self, serializers_profiler):
    """Sets the serializers profiler.

    Args:
      serializers_profiler (SerializersProfiler): serializers profiler.
    """
    self._serializers_profiler = serializers_profiler
    if not self._storage_file:
        return
    # Keep the underlying storage file in sync with this writer.
    self._storage_file.SetSerializersProfiler(serializers_profiler)
|
Sets the serializers profiler.
Args:
serializers_profiler (SerializersProfiler): serializers profiler.
|
def _filtdim(items, shape, dim, nsl):
    """Return items, shape filtered by a dimension slice.

    :param items: flat list of entries of the multi-dimensional array.
    :param shape: tuple of (start, stop) index pairs, one per dimension.
    :param dim: index of the dimension being sliced.
    :param nsl: normalized selector: an int, a slice, or (otherwise) a
        function-array ("farray") mux selector.
    :returns: (newitems, newshape) after applying the selector.
    """
    # Dimension sizes with the per-dimension start offsets removed.
    normshape = tuple(stop - start for start, stop in shape)
    nsl_type = type(nsl)
    newitems = list()
    # Number of groups
    num = reduce(operator.mul, normshape[:dim+1])
    # Size of each group
    size = len(items) // num
    # Size of the dimension
    n = normshape[dim]
    if nsl_type is int:
        # Integer selection: keep every group whose index along `dim`
        # equals nsl.
        for i in range(num):
            if i % n == nsl:
                newitems += items[size*i:size*(i+1)]
        # Collapse dimension
        newshape = shape[:dim] + shape[dim+1:]
    elif nsl_type is slice:
        # Slice selection: keep groups whose index falls in [start, stop).
        for i in range(num):
            if nsl.start <= (i % n) < nsl.stop:
                newitems += items[size*i:size*(i+1)]
        # Reshape dimension
        offset = shape[dim][0]
        redim = (offset + nsl.start, offset + nsl.stop)
        newshape = shape[:dim] + (redim, ) + shape[dim+1:]
    # farray
    else:
        # The selector must have enough bits to address all n positions.
        if nsl.size < clog2(n):
            fstr = "expected dim {} select to have >= {} bits, got {}"
            raise ValueError(fstr.format(dim, clog2(n), nsl.size))
        # Bucket items by their position along `dim`, then mux each
        # aligned tuple: AND each candidate with its select term and OR
        # the results together.
        groups = [list() for _ in range(n)]
        for i in range(num):
            groups[i % n] += items[size*i:size*(i+1)]
        for muxins in zip(*groups):
            it = boolfunc.iter_terms(nsl._items)
            xs = [reduce(operator.and_, (muxin, ) + next(it))
                  for muxin in muxins]
            newitems.append(reduce(operator.or_, xs))
        # Collapse dimension
        newshape = shape[:dim] + shape[dim+1:]
    return newitems, newshape
|
Return items, shape filtered by a dimension slice.
|
def put_encryption_materials(self, cache_key, encryption_materials, plaintext_length, entry_hints=None):
    """No-op put: there is no cache, so nothing is actually stored.

    :param bytes cache_key: Identifier for entries in cache
    :param encryption_materials: Encryption materials to add to cache
    :type encryption_materials: aws_encryption_sdk.materials_managers.EncryptionMaterials
    :param int plaintext_length: Length of plaintext associated with this request to the cache
    :param entry_hints: Metadata to associate with entry (optional)
    :type entry_hints: aws_encryption_sdk.caches.CryptoCacheEntryHints
    :rtype: aws_encryption_sdk.caches.CryptoMaterialsCacheEntry
    """
    # Wrap the materials in a throwaway entry and hand it straight back.
    entry = CryptoMaterialsCacheEntry(cache_key=cache_key, value=encryption_materials)
    return entry
|
Does not add encryption materials to the cache since there is no cache to which to add them.
:param bytes cache_key: Identifier for entries in cache
:param encryption_materials: Encryption materials to add to cache
:type encryption_materials: aws_encryption_sdk.materials_managers.EncryptionMaterials
:param int plaintext_length: Length of plaintext associated with this request to the cache
:param entry_hints: Metadata to associate with entry (optional)
:type entry_hints: aws_encryption_sdk.caches.CryptoCacheEntryHints
:rtype: aws_encryption_sdk.caches.CryptoMaterialsCacheEntry
|
def _process_genes(self, limit=None):
    """
    This table provides the ZFIN gene id, the SO type of the gene,
    the gene symbol, and the NCBI Gene ID.

    Triples created:
    <gene id> a class
    <gene id> rdfs:label gene_symbol
    <gene id> equivalent class <ncbi_gene_id>

    :param limit: maximum number of rows to add to the graph (None = all)
    :return: None
    """
    LOG.info("Processing genes")
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    model = Model(graph)
    line_counter = 0
    raw = '/'.join((self.rawdir, self.files['gene']['file']))
    geno = Genotype(graph)
    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1
            (gene_id, gene_so_id, gene_symbol, ncbi_gene_id
             # , empty # till next time
             ) = row
            # In test mode only the curated test gene ids are processed.
            if self.test_mode and gene_id not in self.test_ids['gene']:
                continue
            gene_id = 'ZFIN:' + gene_id.strip()
            ncbi_gene_id = 'NCBIGene:' + ncbi_gene_id.strip()
            # Remember the symbol so later passes can label this id.
            self.id_label_map[gene_id] = gene_symbol
            # Past the row limit the label map is still filled (above),
            # but nothing further is added to the graph.
            if not self.test_mode and limit is not None and line_counter > limit:
                pass
            else:
                geno.addGene(gene_id, gene_symbol)
                model.addEquivalentClass(gene_id, ncbi_gene_id)
    LOG.info("Done with genes")
    return
|
This table provides the ZFIN gene id, the SO type of the gene,
the gene symbol, and the NCBI Gene ID.
Triples created:
<gene id> a class
<gene id> rdfs:label gene_symbol
<gene id> equivalent class <ncbi_gene_id>
:param limit:
:return:
|
def setup(app):
    """Entry point used when this module is loaded as a Sphinx extension.

    Registers the config value, the markdown source parser and the
    ``mdinclude`` directive on the Sphinx application.

    :param app: the Sphinx application object.
    """
    global _is_sphinx
    # Record that we are running under Sphinx (consulted elsewhere).
    _is_sphinx = True
    app.add_config_value('no_underscore_emphasis', False, 'env')
    app.add_source_parser('.md', M2RParser)
    app.add_directive('mdinclude', MdInclude)
|
When used as a Sphinx extension.
|
def add_handler(  # noqa: F811
    self, fd: Union[int, _Selectable], handler: Callable[..., None], events: int
) -> None:
    """Registers the given handler to receive the given events for ``fd``.

    The ``fd`` argument may either be an integer file descriptor or
    a file-like object with a ``fileno()`` and ``close()`` method.

    The ``events`` argument is a bitwise or of the constants
    ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.

    When an event occurs, ``handler(fd, events)`` will be run.

    .. versionchanged:: 4.0
       Added the ability to pass file-like objects in addition to
       raw file descriptors.
    """
    # Abstract method: concrete IOLoop implementations must override it.
    raise NotImplementedError()
|
Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` and ``close()`` method.
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
|
def get_self_host(request_data):
    """
    Returns the current host.

    :param request_data: The request as a dict
    :type: dict
    :return: The current host, with any trailing port number removed
    :rtype: string
    """
    if 'http_host' in request_data:
        current_host = request_data['http_host']
    elif 'server_name' in request_data:
        current_host = request_data['server_name']
    else:
        raise Exception('No hostname defined')
    if ':' in current_host:
        # Strip the port only when the trailing component really is a
        # port number.  (Bug fixes: the old code split on the FIRST ':'
        # and kept only the first segment, mangling multi-colon hosts,
        # and validated the port with float(), which accepted non-port
        # strings such as 'nan' or '1e3'.)
        host, _, possible_port = current_host.rpartition(':')
        if possible_port.isdigit():
            current_host = host
    return current_host
|
Returns the current host.
:param request_data: The request as a dict
:type: dict
:return: The current host
:rtype: string
|
def list_provincies(self, gewest=2):
    '''
    List all `provincies` in a `gewest`.

    :param gewest: The :class:`Gewest` for which the `provincies` \
        are wanted, or its integer id.
    :param integer sort: What field to sort on.
    :rtype: A :class:`list` of :class:`Provincie`.
    '''
    # Accept either a Gewest instance or a plain id.
    gewest_id = getattr(gewest, 'id', gewest)

    def creator():
        return [
            Provincie(p[0], p[1], Gewest(p[2]))
            for p in self.provincies if p[2] == gewest_id
        ]

    if self.caches['permanent'].is_configured:
        cache_key = 'ListProvinciesByGewestId#%s' % gewest_id
        provincies = self.caches['permanent'].get_or_create(cache_key, creator)
    else:
        provincies = creator()
    for provincie in provincies:
        provincie.set_gateway(self)
    return provincies
|
List all `provincies` in a `gewest`.
:param gewest: The :class:`Gewest` for which the \
`provincies` are wanted.
:param integer sort: What field to sort on.
:rtype: A :class:`list` of :class:`Provincie`.
|
def match(self, objects: List[Any]) -> bool:
    """
    Return True if the list of objects matches the expression.
    """
    encoded = self._make_string(objects)
    return self._compiled_expression.match(encoded) is not None
|
Return True if the list of objects matches the expression.
|
def rmdir_p(self):
    """ Like :meth:`rmdir`, but does not raise an exception if the
    directory is not empty or does not exist. """
    try:
        self.rmdir()
    except OSError as e:
        # ENOTEMPTY/EEXIST: directory not empty (EEXIST is the legacy
        # errno some platforms report for that case).  ENOENT: directory
        # does not exist -- previously re-raised even though the
        # docstring promises it is suppressed.
        if e.errno not in (errno.ENOTEMPTY, errno.EEXIST, errno.ENOENT):
            raise
    return self
|
Like :meth:`rmdir`, but does not raise an exception if the
directory is not empty or does not exist.
|
def make_back_notes(self, body):
    """
    Append PLoS back-matter notes to ``body`` as styled divs.

    The notes element in PLoS articles can be employed for posting notices
    of corrections or adjustments in proof. The <notes> element has a very
    diverse content model, but PLoS practice appears to be fairly
    consistent: a single <sec> containing a <title> and a <p>
    """
    for notes in self.article.root.xpath('./back/notes'):
        # Work on a copy so the source tree is left untouched.
        section = deepcopy(notes.find('sec'))
        section.tag = 'div'
        section.attrib['class'] = 'back-notes'
        body.append(section)
|
The notes element in PLoS articles can be employed for posting notices
of corrections or adjustments in proof. The <notes> element has a very
diverse content model, but PLoS practice appears to be fairly
consistent: a single <sec> containing a <title> and a <p>
|
def detect(self):
    """Detect and return the IP address."""
    # 'commands' is the Python 2 predecessor of subprocess.getoutput.
    if PY3:  # py23
        import subprocess  # noqa: S404 @UnresolvedImport pylint: disable=import-error
    else:
        import commands as subprocess  # @UnresolvedImport pylint: disable=import-error
    try:
        ip_address = subprocess.getoutput(self.opts_command)  # noqa: S605
    except Exception:
        # Best effort: any failure to run the command yields None.
        ip_address = None
    self.set_current_value(ip_address)
    return ip_address
|
Detect and return the IP address.
|
def error_msg_wx(msg, parent=None):
    """
    Signal an error condition -- in a GUI, popup a error dialog
    """
    dialog = wx.MessageDialog(
        parent=parent,
        message=msg,
        caption='Matplotlib backend_wx error',
        style=wx.OK | wx.CENTRE,
    )
    dialog.ShowModal()
    # Destroy explicitly; wx top-level windows are not garbage collected.
    dialog.Destroy()
    return None
|
Signal an error condition -- in a GUI, popup a error dialog
|
def hsepd_pdf(sigma1, sigma2, xi, beta,
              sim=None, obs=None, node=None, skip_nan=False):
    """Calculate the probability densities based on the
    heteroskedastic skewed exponential power distribution.

    For convenience, the required parameters of the probability density
    function as well as the simulated and observed values are stored
    in a dictionary:

    >>> import numpy
    >>> from hydpy import round_
    >>> from hydpy import hsepd_pdf
    >>> general = {'sigma1': 0.2,
    ...            'sigma2': 0.0,
    ...            'xi': 1.0,
    ...            'beta': 0.0,
    ...            'sim': numpy.arange(10.0, 41.0),
    ...            'obs': numpy.full(31, 25.0)}

    The following test function allows the variation of one parameter
    and prints some and plots all of probability density values
    corresponding to different simulated values:

    >>> def test(**kwargs):
    ...     from matplotlib import pyplot
    ...     special = general.copy()
    ...     name, values = list(kwargs.items())[0]
    ...     results = numpy.zeros((len(general['sim']), len(values)+1))
    ...     results[:, 0] = general['sim']
    ...     for jdx, value in enumerate(values):
    ...         special[name] = value
    ...         results[:, jdx+1] = hsepd_pdf(**special)
    ...         pyplot.plot(results[:, 0], results[:, jdx+1],
    ...                     label='%s=%.1f' % (name, value))
    ...     pyplot.legend()
    ...     for idx, result in enumerate(results):
    ...         if not (idx % 5):
    ...             round_(result)

    When varying parameter `beta`, the resulting probabilities correspond
    to the Laplace distribution (1.0), normal distribution (0.0), and the
    uniform distribution (-1.0), respectively. Note that we use -0.99
    instead of -1.0 for approximating the uniform distribution to prevent
    from running into numerical problems, which are not solved yet:

    >>> test(beta=[1.0, 0.0, -0.99])
    10.0, 0.002032, 0.000886, 0.0
    15.0, 0.008359, 0.010798, 0.0
    20.0, 0.034382, 0.048394, 0.057739
    25.0, 0.141421, 0.079788, 0.057739
    30.0, 0.034382, 0.048394, 0.057739
    35.0, 0.008359, 0.010798, 0.0
    40.0, 0.002032, 0.000886, 0.0

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()

    When varying parameter `xi`, the resulting density is negatively
    skewed (0.2), symmetric (1.0), and positively skewed (5.0),
    respectively:

    >>> test(xi=[0.2, 1.0, 5.0])
    10.0, 0.0, 0.000886, 0.003175
    15.0, 0.0, 0.010798, 0.012957
    20.0, 0.092845, 0.048394, 0.036341
    25.0, 0.070063, 0.079788, 0.070063
    30.0, 0.036341, 0.048394, 0.092845
    35.0, 0.012957, 0.010798, 0.0
    40.0, 0.003175, 0.000886, 0.0

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()

    In the above examples, the actual `sigma` (5.0) is calculated by
    multiplying `sigma1` (0.2) with the mean simulated value (25.0),
    internally. This can be done for modelling homoscedastic errors.
    Instead, `sigma2` is multiplied with the individual simulated values
    to account for heteroscedastic errors. With increasing values of
    `sigma2`, the resulting densities are modified as follows:

    >>> test(sigma2=[0.0, 0.1, 0.2])
    10.0, 0.000886, 0.002921, 0.005737
    15.0, 0.010798, 0.018795, 0.022831
    20.0, 0.048394, 0.044159, 0.037988
    25.0, 0.079788, 0.053192, 0.039894
    30.0, 0.048394, 0.04102, 0.032708
    35.0, 0.010798, 0.023493, 0.023493
    40.0, 0.000886, 0.011053, 0.015771

    .. testsetup::

        >>> from matplotlib import pyplot
        >>> pyplot.close()
    """
    sim, obs = prepare_arrays(sim, obs, node, skip_nan)
    # Per-timestep sigma; per the docstring, sigma1 acts homoscedastically
    # (via the mean of sim) while sigma2 scales with the individual
    # simulated values (computed in the project helper _pars_h).
    sigmas = _pars_h(sigma1, sigma2, sim)
    mu_xi, sigma_xi, w_beta, c_beta = _pars_sepd(xi, beta)
    x, mu = obs, sim
    # Standardised residuals.
    a = (x-mu)/sigmas
    a_xi = numpy.empty(a.shape)
    idxs = mu_xi+sigma_xi*a < 0.
    # Apply the skewness parameter `xi` separately on each side of the mode.
    a_xi[idxs] = numpy.absolute(xi*(mu_xi+sigma_xi*a[idxs]))
    a_xi[~idxs] = numpy.absolute(1./xi*(mu_xi+sigma_xi*a[~idxs]))
    # SEP density, rescaled by sigmas to undo the standardisation.
    ps = (2.*sigma_xi/(xi+1./xi)*w_beta *
          numpy.exp(-c_beta*a_xi**(2./(1.+beta))))/sigmas
    return ps
|
Calculate the probability densities based on the
heteroskedastic skewed exponential power distribution.
For convenience, the required parameters of the probability density
function as well as the simulated and observed values are stored
in a dictionary:
>>> import numpy
>>> from hydpy import round_
>>> from hydpy import hsepd_pdf
>>> general = {'sigma1': 0.2,
... 'sigma2': 0.0,
... 'xi': 1.0,
... 'beta': 0.0,
... 'sim': numpy.arange(10.0, 41.0),
... 'obs': numpy.full(31, 25.0)}
The following test function allows the variation of one parameter
and prints some and plots all of probability density values
corresponding to different simulated values:
>>> def test(**kwargs):
... from matplotlib import pyplot
... special = general.copy()
... name, values = list(kwargs.items())[0]
... results = numpy.zeros((len(general['sim']), len(values)+1))
... results[:, 0] = general['sim']
... for jdx, value in enumerate(values):
... special[name] = value
... results[:, jdx+1] = hsepd_pdf(**special)
... pyplot.plot(results[:, 0], results[:, jdx+1],
... label='%s=%.1f' % (name, value))
... pyplot.legend()
... for idx, result in enumerate(results):
... if not (idx % 5):
... round_(result)
When varying parameter `beta`, the resulting probabilities correspond
to the Laplace distribution (1.0), normal distribution (0.0), and the
uniform distribution (-1.0), respectively. Note that we use -0.99
instead of -1.0 for approximating the uniform distribution to prevent
from running into numerical problems, which are not solved yet:
>>> test(beta=[1.0, 0.0, -0.99])
10.0, 0.002032, 0.000886, 0.0
15.0, 0.008359, 0.010798, 0.0
20.0, 0.034382, 0.048394, 0.057739
25.0, 0.141421, 0.079788, 0.057739
30.0, 0.034382, 0.048394, 0.057739
35.0, 0.008359, 0.010798, 0.0
40.0, 0.002032, 0.000886, 0.0
.. testsetup::
>>> from matplotlib import pyplot
>>> pyplot.close()
When varying parameter `xi`, the resulting density is negatively
skewed (0.2), symmetric (1.0), and positively skewed (5.0),
respectively:
>>> test(xi=[0.2, 1.0, 5.0])
10.0, 0.0, 0.000886, 0.003175
15.0, 0.0, 0.010798, 0.012957
20.0, 0.092845, 0.048394, 0.036341
25.0, 0.070063, 0.079788, 0.070063
30.0, 0.036341, 0.048394, 0.092845
35.0, 0.012957, 0.010798, 0.0
40.0, 0.003175, 0.000886, 0.0
.. testsetup::
>>> from matplotlib import pyplot
>>> pyplot.close()
In the above examples, the actual `sigma` (5.0) is calculated by
multiplying `sigma1` (0.2) with the mean simulated value (25.0),
internally. This can be done for modelling homoscedastic errors.
Instead, `sigma2` is multiplied with the individual simulated values
to account for heteroscedastic errors. With increasing values of
`sigma2`, the resulting densities are modified as follows:
>>> test(sigma2=[0.0, 0.1, 0.2])
10.0, 0.000886, 0.002921, 0.005737
15.0, 0.010798, 0.018795, 0.022831
20.0, 0.048394, 0.044159, 0.037988
25.0, 0.079788, 0.053192, 0.039894
30.0, 0.048394, 0.04102, 0.032708
35.0, 0.010798, 0.023493, 0.023493
40.0, 0.000886, 0.011053, 0.015771
.. testsetup::
>>> from matplotlib import pyplot
>>> pyplot.close()
|
def from_country(cls, country):
    """Retrieve the first datacenter id associated to a country.

    Returns None when no datacenter is located in that country.
    """
    first_id_by_country = {}
    for datacenter in cls.list({'sort_by': 'id ASC'}):
        # Keep only the first (lowest id) datacenter seen per country.
        first_id_by_country.setdefault(datacenter['country'], datacenter['id'])
    return first_id_by_country.get(country)
|
Retrieve the first datacenter id associated to a country.
|
def get_info(self):
    '''
    Get information about the counter.

    .. note::
        GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes
        if this is called after sampling data.
    '''
    if not self.info:
        ci = win32pdh.GetCounterInfo(self.handle, 0)
        # Flat fields come first; ci[7] is the nested full-path breakdown
        # and ci[8] the explain text.
        info = dict(zip(
            ('type', 'version', 'scale', 'default_scale', 'user_data',
             'query_user_data', 'full_path'),
            ci[:7]))
        info.update(zip(
            ('machine_name', 'object_name', 'instance_name',
             'parent_instance', 'instance_index', 'counter_name'),
            ci[7]))
        info['explain_text'] = ci[8]
        self.info = info
    return self.info
|
Get information about the counter
.. note::
GetCounterInfo sometimes crashes in the wrapper code. Fewer crashes
if this is called after sampling data.
|
def analyze(problem, Y, M=4, print_to_console=False, seed=None):
    """Performs the Fourier Amplitude Sensitivity Test (FAST) on model outputs.

    Returns a dictionary with keys 'S1' and 'ST', where each entry is a list of
    size D (the number of parameters) containing the indices in the same order
    as the parameter file.

    Parameters
    ----------
    problem : dict
        The problem definition
    Y : numpy.array
        A NumPy array containing the model outputs
    M : int
        The interference parameter, i.e., the number of harmonics to sum in
        the Fourier series decomposition (default 4)
    print_to_console : bool
        Print results directly to console (default False)
    seed : int
        Seed for numpy's random generator (optional)

    References
    ----------
    .. [1] Cukier, R. I., C. M. Fortuin, K. E. Shuler, A. G. Petschek, and J. H.
           Schaibly (1973). "Study of the sensitivity of coupled reaction
           systems to uncertainties in rate coefficients." J. Chem. Phys.,
           59(8):3873-3878, doi:10.1063/1.1680571.
    .. [2] Saltelli, A., S. Tarantola, and K. P.-S. Chan (1999). "A
           Quantitative Model-Independent Method for Global Sensitivity
           Analysis of Model Output." Technometrics, 41(1):39-56,
           doi:10.1080/00401706.1999.10485594.

    Examples
    --------
    >>> X = fast_sampler.sample(problem, 1000)
    >>> Y = Ishigami.evaluate(X)
    >>> Si = fast.analyze(problem, Y, print_to_console=False)
    """
    if seed:
        np.random.seed(seed)
    D = problem['num_vars']
    # Each of the D parameters must contribute the same number N of samples.
    if Y.size % (D) == 0:
        N = int(Y.size / D)
    else:
        # NOTE(review): print + exit() on bad input; raising a ValueError
        # would be friendlier to library users.
        print("""
        Error: Number of samples in model output file must be a multiple of D,
        where D is the number of parameters in your parameter file.
        """)
        exit()
    # Recreate the vector omega used in the sampling
    omega = np.zeros([D])
    omega[0] = math.floor((N - 1) / (2 * M))
    m = math.floor(omega[0] / (2 * M))
    if m >= (D - 1):
        # Enough spectral room: distinct frequency for every other factor.
        omega[1:] = np.floor(np.linspace(1, m, D - 1))
    else:
        # Otherwise the complementary frequencies must be reused (cycled).
        omega[1:] = np.arange(D - 1) % m + 1
    # Calculate and Output the First and Total Order Values
    if print_to_console:
        print("Parameter First Total")
    Si = ResultDict((k, [None] * D) for k in ['S1', 'ST'])
    Si['names'] = problem['names']
    for i in range(D):
        # Block of N outputs generated while parameter i carried omega[0].
        l = np.arange(i * N, (i + 1) * N)
        Si['S1'][i] = compute_first_order(Y[l], N, M, omega[0])
        Si['ST'][i] = compute_total_order(Y[l], N, omega[0])
        if print_to_console:
            print("%s %f %f" %
                  (problem['names'][i], Si['S1'][i], Si['ST'][i]))
    return Si
|
Performs the Fourier Amplitude Sensitivity Test (FAST) on model outputs.
Returns a dictionary with keys 'S1' and 'ST', where each entry is a list of
size D (the number of parameters) containing the indices in the same order
as the parameter file.
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
M : int
The interference parameter, i.e., the number of harmonics to sum in
the Fourier series decomposition (default 4)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Cukier, R. I., C. M. Fortuin, K. E. Shuler, A. G. Petschek, and J. H.
Schaibly (1973). "Study of the sensitivity of coupled reaction
systems to uncertainties in rate coefficients." J. Chem. Phys.,
59(8):3873-3878, doi:10.1063/1.1680571.
.. [2] Saltelli, A., S. Tarantola, and K. P.-S. Chan (1999). "A
Quantitative Model-Independent Method for Global Sensitivity
Analysis of Model Output." Technometrics, 41(1):39-56,
doi:10.1080/00401706.1999.10485594.
Examples
--------
>>> X = fast_sampler.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = fast.analyze(problem, Y, print_to_console=False)
|
def global_matches(self, text):
    """Compute matches when text is a simple name.

    Return a list of all keywords, built-in functions and names currently
    defined in self.namespace or self.global_namespace that match.
    """
    matches = []
    candidates = (keyword.kwlist,
                  __builtin__.__dict__.keys(),
                  self.namespace.keys(),
                  self.global_namespace.keys())
    for lst in candidates:
        # '__builtins__' is an implementation detail; never offer it.
        matches.extend(word for word in lst
                       if word[:len(text)] == text and word != "__builtins__")
    return matches
|
Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace or self.global_namespace that match.
|
def compute(self, runner_results, setup=False, poll=False, ignore_errors=False):
    ''' walk through all results and increment stats '''
    for (host, value) in runner_results.get('contacted', {}).iteritems():
        has_failed = bool(value.get('failed')) or value.get('rc', 0) != 0
        if has_failed and not ignore_errors:
            self._increment('failures', host)
        elif value.get('skipped'):
            self._increment('skipped', host)
        elif value.get('changed'):
            # Setup and polling passes do not count towards 'changed'.
            if not setup and not poll:
                self._increment('changed', host)
            self._increment('ok', host)
        elif not poll or value.get('finished'):
            # Plain success; polled tasks only count once finished.
            self._increment('ok', host)
    # Hosts that could not be contacted at all.
    for (host, value) in runner_results.get('dark', {}).iteritems():
        self._increment('dark', host)
|
walk through all results and increment stats
|
def debug_variable_node_render(self, context):
    """
    Like DebugVariableNode.render, but doesn't catch UnicodeDecodeError.
    """
    try:
        output = self.filter_expression.resolve(context)
        # Apply timezone and localization settings from the context.
        output = template_localtime(output, use_tz=context.use_tz)
        output = localize(output, use_l10n=context.use_l10n)
        output = force_text(output)
    except Exception as e:
        # Tag the exception with the template source (once) so the debug
        # page can point at the offending node, then re-raise unchanged.
        if not hasattr(e, 'django_template_source'):
            e.django_template_source = self.source
        raise
    # Escape unless the value is already marked safe; EscapeData forces it.
    if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):  # nopep8
        return escape(output)
    else:
        return output
|
Like DebugVariableNode.render, but doesn't catch UnicodeDecodeError.
|
def fan_speed(self, speed: int = None) -> bool:
    """Adjust fan speed.

    Pass 1, 2 or 3 to set an explicit speed, or no argument to cycle to the
    next speed (wrapping from 3 back to 1). The device must already be in
    manual mode; otherwise it is switched to manual first and no speed change
    is submitted on this call.

    :param speed: desired speed level (1-3) or None to cycle
    :returns: True when the API confirms the change, False otherwise
    """
    body = helpers.req_body(self.manager, 'devicestatus')
    body['uuid'] = self.uuid
    head = helpers.req_headers(self.manager)
    if self.details.get('mode') != 'manual':
        # Speed can only be set in manual mode - switch first.
        self.mode_toggle('manual')
    else:
        # Bug fix: the current level is needed by both branches below;
        # previously it was only assigned in the explicit-speed branch,
        # causing a NameError when cycling (speed is None).
        level = int(self.details.get('level'))
        if speed is not None:
            if speed == level:
                return False
            if speed in (1, 2, 3):
                body['level'] = speed
            else:
                # Invalid explicit speed - nothing sensible to send.
                return False
        else:
            # Cycle to the next level, wrapping 3 -> 1.
            body['level'] = 1 if level + 1 > 3 else int(level + 1)
    if 'level' not in body:
        # No speed change was prepared (e.g. mode had to be toggled first);
        # previously the API was still called and a KeyError could follow.
        return False
    r, _ = helpers.call_api('/131airPurifier/v1/device/updateSpeed',
                            'put', json=body, headers=head)
    if r is not None and helpers.check_response(r, 'airpur_status'):
        self.details['level'] = body['level']
        return True
    return False
|
Adjust Fan Speed by Specifying 1,2,3 as argument or cycle
through speeds increasing by one
|
def get_connection(self, command_name, *keys, **options):
    "Get a connection from the pool"
    self._checkpid()
    try:
        conn = self._available_connections.pop()
    except IndexError:
        conn = self.make_connection()
    self._in_use_connections.add(conn)
    try:
        # A pooled connection must be connected and ready to accept a
        # command. If it is not (unread data left behind, or the socket
        # was closed), reconnect once and re-verify before handing it out.
        conn.connect()
        if not conn.is_ready_for_command():
            conn.disconnect()
            conn.connect()
            if not conn.is_ready_for_command():
                raise ConnectionError('Connection not ready')
    except:  # noqa: E722
        # Return the connection to the pool so it is not leaked, then
        # propagate whatever went wrong.
        self.release(conn)
        raise
    return conn
|
Get a connection from the pool
|
def headerData(self, section, orientation, role):
    """Get the Header for the columns in the table

    Required by view, see :qtdoc:`subclassing<qabstractitemmodel.subclassing>`

    :param section: column of header to return
    :type section: int
    """
    # Only horizontal headers in display role have text; everything else
    # falls through to an implicit None, as Qt expects.
    if role != QtCore.Qt.DisplayRole:
        return None
    if orientation == QtCore.Qt.Horizontal:
        return self.headers[section]
|
Get the Header for the columns in the table
Required by view, see :qtdoc:`subclassing<qabstractitemmodel.subclassing>`
:param section: column of header to return
:type section: int
|
def get_total_size_trans(self, entries):
    """
    Returns the total size of a collection of entries - transferred.

    NOTE: use with har file generated with chrome-har-capturer

    :param entries: ``list`` of entries to calculate the total size of.
    """
    # Only positive transfer sizes count; negative values (e.g. -1 used by
    # some capturers for "unknown") are skipped.
    return sum(entry['response']['_transferSize']
               for entry in entries
               if entry['response']['_transferSize'] > 0)
|
Returns the total size of a collection of entries - transferred.
NOTE: use with har file generated with chrome-har-capturer
:param entries: ``list`` of entries to calculate the total size of.
|
def version_router(self, request, response, api_version=None, versions=None, not_found=None, **kwargs):
    """Intelligently routes a request to the correct handler based on the version being requested

    :param request: the incoming request object
    :param response: the response object the handler should populate
    :param api_version: explicit version hint passed through to the handler
    :param versions: mapping of int version -> handler; the ``None`` key is
        the fallback handler used when no specific version matches
    :param not_found: handler used when neither a version match nor a
        ``None`` fallback exists
    """
    # Avoid the shared mutable-default-argument pitfall ({} as a default);
    # passing None and creating a fresh dict is behaviorally equivalent.
    if versions is None:
        versions = {}
    request_version = self.determine_version(request, api_version)
    if request_version:
        request_version = int(request_version)
    versions.get(request_version or False, versions.get(None, not_found))(request, response,
                                                                          api_version=api_version,
                                                                          **kwargs)
|
Intelligently routes a request to the correct handler based on the version being requested
|
def is_valid(self, request_data, request_id=None, raise_exceptions=False):
    """
    Validates the response object.

    :param request_data: Request Data
    :type request_data: dict

    :param request_id: Optional argument. The ID of the AuthNRequest sent by this SP to the IdP
    :type request_id: string

    :param raise_exceptions: Whether to return false on failure or raise an exception
    :type raise_exceptions: Boolean

    :returns: True if the SAML Response is valid, False if not
    :rtype: bool
    """
    self.__error = None
    try:
        # Checks SAML version
        if self.document.get('Version', None) != '2.0':
            raise OneLogin_Saml2_ValidationError(
                'Unsupported SAML version',
                OneLogin_Saml2_ValidationError.UNSUPPORTED_SAML_VERSION
            )
        # Checks that ID exists
        if self.document.get('ID', None) is None:
            raise OneLogin_Saml2_ValidationError(
                'Missing ID attribute on SAML Response',
                OneLogin_Saml2_ValidationError.MISSING_ID
            )
        # Checks that the response has the SUCCESS status
        self.check_status()
        # Checks that the response only has one assertion
        if not self.validate_num_assertions():
            raise OneLogin_Saml2_ValidationError(
                'SAML Response must contain 1 assertion',
                OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_ASSERTIONS
            )
        idp_data = self.__settings.get_idp_data()
        idp_entity_id = idp_data.get('entityId', '')
        sp_data = self.__settings.get_sp_data()
        sp_entity_id = sp_data.get('entityId', '')
        # Determine which elements (Response and/or Assertion) carry a signature
        signed_elements = self.process_signed_elements()
        has_signed_response = '{%s}Response' % OneLogin_Saml2_Constants.NS_SAMLP in signed_elements
        has_signed_assertion = '{%s}Assertion' % OneLogin_Saml2_Constants.NS_SAML in signed_elements
        # The bulk of the checks below only run in strict mode
        if self.__settings.is_strict():
            no_valid_xml_msg = 'Invalid SAML Response. Not match the saml-schema-protocol-2.0.xsd'
            res = OneLogin_Saml2_Utils.validate_xml(
                tostring(self.document),
                'saml-schema-protocol-2.0.xsd',
                self.__settings.is_debug_active()
            )
            if not isinstance(res, Document):
                raise OneLogin_Saml2_ValidationError(
                    no_valid_xml_msg,
                    OneLogin_Saml2_ValidationError.INVALID_XML_FORMAT
                )
            # If encrypted, check also the decrypted document
            if self.encrypted:
                res = OneLogin_Saml2_Utils.validate_xml(
                    tostring(self.decrypted_document),
                    'saml-schema-protocol-2.0.xsd',
                    self.__settings.is_debug_active()
                )
                if not isinstance(res, Document):
                    raise OneLogin_Saml2_ValidationError(
                        no_valid_xml_msg,
                        OneLogin_Saml2_ValidationError.INVALID_XML_FORMAT
                    )
            security = self.__settings.get_security_data()
            current_url = OneLogin_Saml2_Utils.get_self_url_no_query(request_data)
            # InResponseTo handling: optionally reject unsolicited responses
            in_response_to = self.document.get('InResponseTo', None)
            if request_id is None and in_response_to is not None and security.get('rejectUnsolicitedResponsesWithInResponseTo', False):
                raise OneLogin_Saml2_ValidationError(
                    'The Response has an InResponseTo attribute: %s while no InResponseTo was expected' % in_response_to,
                    OneLogin_Saml2_ValidationError.WRONG_INRESPONSETO
                )
            # Check if the InResponseTo of the Response matchs the ID of the AuthNRequest (requestId) if provided
            if request_id is not None and in_response_to != request_id:
                raise OneLogin_Saml2_ValidationError(
                    'The InResponseTo of the Response: %s, does not match the ID of the AuthNRequest sent by the SP: %s' % (in_response_to, request_id),
                    OneLogin_Saml2_ValidationError.WRONG_INRESPONSETO
                )
            # Enforce encryption requirements configured on the SP
            if not self.encrypted and security.get('wantAssertionsEncrypted', False):
                raise OneLogin_Saml2_ValidationError(
                    'The assertion of the Response is not encrypted and the SP require it',
                    OneLogin_Saml2_ValidationError.NO_ENCRYPTED_ASSERTION
                )
            if security.get('wantNameIdEncrypted', False):
                encrypted_nameid_nodes = self.__query_assertion('/saml:Subject/saml:EncryptedID/xenc:EncryptedData')
                if len(encrypted_nameid_nodes) != 1:
                    raise OneLogin_Saml2_ValidationError(
                        'The NameID of the Response is not encrypted and the SP require it',
                        OneLogin_Saml2_ValidationError.NO_ENCRYPTED_NAMEID
                    )
            # Checks that a Conditions element exists
            if not self.check_one_condition():
                raise OneLogin_Saml2_ValidationError(
                    'The Assertion must include a Conditions element',
                    OneLogin_Saml2_ValidationError.MISSING_CONDITIONS
                )
            # Validates Assertion timestamps
            self.validate_timestamps(raise_exceptions=True)
            # Checks that an AuthnStatement element exists and is unique
            if not self.check_one_authnstatement():
                raise OneLogin_Saml2_ValidationError(
                    'The Assertion must include an AuthnStatement element',
                    OneLogin_Saml2_ValidationError.WRONG_NUMBER_OF_AUTHSTATEMENTS
                )
            # Checks that the response has all of the AuthnContexts that we provided in the request.
            # Only check if failOnAuthnContextMismatch is true and requestedAuthnContext is set to a list.
            requested_authn_contexts = security.get('requestedAuthnContext', True)
            if security.get('failOnAuthnContextMismatch', False) and requested_authn_contexts and requested_authn_contexts is not True:
                authn_contexts = self.get_authn_contexts()
                unmatched_contexts = set(requested_authn_contexts).difference(authn_contexts)
                if unmatched_contexts:
                    raise OneLogin_Saml2_ValidationError(
                        'The AuthnContext "%s" didn\'t include requested context "%s"' % (', '.join(authn_contexts), ', '.join(unmatched_contexts)),
                        OneLogin_Saml2_ValidationError.AUTHN_CONTEXT_MISMATCH
                    )
            # Checks that there is at least one AttributeStatement if required
            attribute_statement_nodes = self.__query_assertion('/saml:AttributeStatement')
            if security.get('wantAttributeStatement', True) and not attribute_statement_nodes:
                raise OneLogin_Saml2_ValidationError(
                    'There is no AttributeStatement on the Response',
                    OneLogin_Saml2_ValidationError.NO_ATTRIBUTESTATEMENT
                )
            # Encrypted attributes are not supported by this SP
            encrypted_attributes_nodes = self.__query_assertion('/saml:AttributeStatement/saml:EncryptedAttribute')
            if encrypted_attributes_nodes:
                raise OneLogin_Saml2_ValidationError(
                    'There is an EncryptedAttribute in the Response and this SP not support them',
                    OneLogin_Saml2_ValidationError.ENCRYPTED_ATTRIBUTES
                )
            # Checks destination
            destination = self.document.get('Destination', None)
            if destination:
                if not destination.startswith(current_url):
                    # TODO: Review if following lines are required, since we can control the
                    # request_data
                    # current_url_routed = OneLogin_Saml2_Utils.get_self_routed_url_no_query(request_data)
                    # if not destination.startswith(current_url_routed):
                    raise OneLogin_Saml2_ValidationError(
                        'The response was received at %s instead of %s' % (current_url, destination),
                        OneLogin_Saml2_ValidationError.WRONG_DESTINATION
                    )
            elif destination == '':
                raise OneLogin_Saml2_ValidationError(
                    'The response has an empty Destination value',
                    OneLogin_Saml2_ValidationError.EMPTY_DESTINATION
                )
            # Checks audience
            valid_audiences = self.get_audiences()
            if valid_audiences and sp_entity_id not in valid_audiences:
                raise OneLogin_Saml2_ValidationError(
                    '%s is not a valid audience for this Response' % sp_entity_id,
                    OneLogin_Saml2_ValidationError.WRONG_AUDIENCE
                )
            # Checks the issuers
            issuers = self.get_issuers()
            for issuer in issuers:
                if issuer is None or issuer != idp_entity_id:
                    raise OneLogin_Saml2_ValidationError(
                        'Invalid issuer in the Assertion/Response (expected %(idpEntityId)s, got %(issuer)s)' %
                        {
                            'idpEntityId': idp_entity_id,
                            'issuer': issuer
                        },
                        OneLogin_Saml2_ValidationError.WRONG_ISSUER
                    )
            # Checks the session Expiration
            session_expiration = self.get_session_not_on_or_after()
            if session_expiration and session_expiration <= OneLogin_Saml2_Utils.now():
                raise OneLogin_Saml2_ValidationError(
                    'The attributes have expired, based on the SessionNotOnOrAfter of the AttributeStatement of this Response',
                    OneLogin_Saml2_ValidationError.SESSION_EXPIRED
                )
            # Checks the SubjectConfirmation, at least one SubjectConfirmation must be valid
            any_subject_confirmation = False
            subject_confirmation_nodes = self.__query_assertion('/saml:Subject/saml:SubjectConfirmation')
            for scn in subject_confirmation_nodes:
                # Only the bearer confirmation method is accepted
                method = scn.get('Method', None)
                if method and method != OneLogin_Saml2_Constants.CM_BEARER:
                    continue
                sc_data = scn.find('saml:SubjectConfirmationData', namespaces=OneLogin_Saml2_Constants.NSMAP)
                if sc_data is None:
                    continue
                else:
                    # InResponseTo, Recipient, NotOnOrAfter and NotBefore of the
                    # SubjectConfirmationData must all be acceptable for this
                    # confirmation to count as valid
                    irt = sc_data.get('InResponseTo', None)
                    if (in_response_to is None and irt is not None and
                            security.get('rejectUnsolicitedResponsesWithInResponseTo', False)) or \
                            in_response_to and irt and irt != in_response_to:
                        continue
                    recipient = sc_data.get('Recipient', None)
                    if recipient and current_url not in recipient:
                        continue
                    nooa = sc_data.get('NotOnOrAfter', None)
                    if nooa:
                        parsed_nooa = OneLogin_Saml2_Utils.parse_SAML_to_time(nooa)
                        if parsed_nooa <= OneLogin_Saml2_Utils.now():
                            continue
                    nb = sc_data.get('NotBefore', None)
                    if nb:
                        parsed_nb = OneLogin_Saml2_Utils.parse_SAML_to_time(nb)
                        if parsed_nb > OneLogin_Saml2_Utils.now():
                            continue
                    if nooa:
                        self.valid_scd_not_on_or_after = OneLogin_Saml2_Utils.parse_SAML_to_time(nooa)
                    any_subject_confirmation = True
                    break
            if not any_subject_confirmation:
                raise OneLogin_Saml2_ValidationError(
                    'A valid SubjectConfirmation was not found on this Response',
                    OneLogin_Saml2_ValidationError.WRONG_SUBJECTCONFIRMATION
                )
            # Enforce the signature requirements configured on the SP
            if security.get('wantAssertionsSigned', False) and not has_signed_assertion:
                raise OneLogin_Saml2_ValidationError(
                    'The Assertion of the Response is not signed and the SP require it',
                    OneLogin_Saml2_ValidationError.NO_SIGNED_ASSERTION
                )
            if security.get('wantMessagesSigned', False) and not has_signed_response:
                raise OneLogin_Saml2_ValidationError(
                    'The Message of the Response is not signed and the SP require it',
                    OneLogin_Saml2_ValidationError.NO_SIGNED_MESSAGE
                )
        # At least one signature (Response or Assertion) is always required
        if not signed_elements or (not has_signed_response and not has_signed_assertion):
            raise OneLogin_Saml2_ValidationError(
                'No Signature found. SAML Response rejected',
                OneLogin_Saml2_ValidationError.NO_SIGNATURE_FOUND
            )
        else:
            cert = idp_data.get('x509cert', None)
            fingerprint = idp_data.get('certFingerprint', None)
            fingerprintalg = idp_data.get('certFingerprintAlgorithm', None)
            multicerts = None
            if 'x509certMulti' in idp_data and 'signing' in idp_data['x509certMulti'] and idp_data['x509certMulti']['signing']:
                multicerts = idp_data['x509certMulti']['signing']
            # If find a Signature on the Response, validates it checking the original response
            if has_signed_response and not OneLogin_Saml2_Utils.validate_sign(self.document, cert, fingerprint, fingerprintalg, xpath=OneLogin_Saml2_Utils.RESPONSE_SIGNATURE_XPATH, multicerts=multicerts, raise_exceptions=False):
                raise OneLogin_Saml2_ValidationError(
                    'Signature validation failed. SAML Response rejected',
                    OneLogin_Saml2_ValidationError.INVALID_SIGNATURE
                )
            # When encryption was used, the assertion lives in the decrypted document
            document_check_assertion = self.decrypted_document if self.encrypted else self.document
            if has_signed_assertion and not OneLogin_Saml2_Utils.validate_sign(document_check_assertion, cert, fingerprint, fingerprintalg, xpath=OneLogin_Saml2_Utils.ASSERTION_SIGNATURE_XPATH, multicerts=multicerts, raise_exceptions=False):
                raise OneLogin_Saml2_ValidationError(
                    'Signature validation failed. SAML Response rejected',
                    OneLogin_Saml2_ValidationError.INVALID_SIGNATURE
                )
        return True
    except Exception as err:
        # Remember the failure reason; either re-raise or report False
        self.__error = err.__str__()
        debug = self.__settings.is_debug_active()
        if debug:
            print(err.__str__())
        if raise_exceptions:
            raise err
        return False
|
Validates the response object.
:param request_data: Request Data
:type request_data: dict
:param request_id: Optional argument. The ID of the AuthNRequest sent by this SP to the IdP
:type request_id: string
:param raise_exceptions: Whether to return false on failure or raise an exception
:type raise_exceptions: Boolean
:returns: True if the SAML Response is valid, False if not
:rtype: bool
|
def mk_set_headers(self, data, columns):
    """Determine column widths and build the header format string."""
    columns = tuple(columns)
    widths = []
    for key in columns:
        longest_value = max(len(str(row.get(key, ''))) for row in data)
        # A column must be at least as wide as its header label.
        widths.append(max(longest_value, len(self._get_name(key))))
    return self.mk_fmt(*widths)
|
figure out sizes and create header fmt
|
def _root(path, root):
    '''
    Relocate an absolute path to a new root directory.
    '''
    # An empty/None root means "no relocation".
    if not root:
        return path
    return os.path.join(root, os.path.relpath(path, os.path.sep))
|
Relocate an absolute path to a new root directory.
|
def __generate_cluster_centers(self, width):
    """!
    @brief Generates centers (means in statistical term) for clusters.

    @param[in] width (list): Width of generated clusters.

    @return (list) Generated centers in line with normal distribution.

    """
    # Space cluster means apart by a multiple of the widest cluster so
    # that clusters do not (usually) overlap.
    default_offset = max(width) * 4.0
    return [
        [random.gauss(index * default_offset, width[index] / 2.0)
         for _ in range(self.__dimension)]
        for index in range(self.__amount_clusters)
    ]
|
!
@brief Generates centers (means in statistical term) for clusters.
@param[in] width (list): Width of generated clusters.
@return (list) Generated centers in line with normal distribution.
|
def generate_id(self):
    """Return a fresh identifier.

    Repeatable mode yields deterministic 'autobaked-N' ids (useful for
    reproducible output); otherwise a random UUID4 string is produced.
    """
    if not self.use_repeatable_ids:
        return str(uuid4())
    self.repeatable_id_counter += 1
    return 'autobaked-{}'.format(self.repeatable_id_counter)
|
Generate a fresh id
|
def _find_keep_files(root, keep):
    '''
    Compile a list of valid keep files (and directories).
    Used by _clean_dir()
    '''
    real_keep = {root}
    if isinstance(keep, list):
        for name in keep:
            # Only absolute paths are honored.
            if not os.path.isabs(name):
                continue
            current = os.path.normcase(os.path.abspath(name))
            real_keep.add(current)
            # Also keep every ancestor directory up to the drive root so
            # the cleanup never removes a parent of a kept path.
            while True:
                current = os.path.abspath(os.path.dirname(current))
                real_keep.add(current)
                _, tail = os.path.splitdrive(current)
                if not tail.lstrip(os.sep):
                    break
    return real_keep
|
Compile a list of valid keep files (and directories).
Used by _clean_dir()
|
def value(self):
    """Trading volume series, in lots (thousands of shares).

    :rtype: list
    """
    # Raw values are in shares; convert to lots (1 lot = 1000 shares).
    return [round(price / 1000, 3) for price in self.__serial_price(1)]
|
成交量序列(張)
:rtype: list
|
def either(self):
    """Transform pattern into an equivalent, with only top-level Either."""
    # The result is not strictly equivalent but "narrower" - good enough
    # for reasoning about list arguments.
    if not hasattr(self, 'children'):
        return Either(Required(self))
    result = []
    queue = [[self]]
    # Breadth-first expansion: repeatedly pull a group apart until it
    # contains only leaf patterns, forking the group on each Either.
    while queue:
        group = queue.pop(0)
        kinds = [type(child) for child in group]
        if Either in kinds:
            node = [child for child in group if type(child) is Either][0]
            group.pop(group.index(node))
            for alternative in node.children:
                queue.append([alternative] + group)
        elif Required in kinds:
            node = [child for child in group if type(child) is Required][0]
            group.pop(group.index(node))
            queue.append(list(node.children) + group)
        elif Optional in kinds:
            node = [child for child in group if type(child) is Optional][0]
            group.pop(group.index(node))
            queue.append(list(node.children) + group)
        elif OneOrMore in kinds:
            node = [child for child in group if type(child) is OneOrMore][0]
            group.pop(group.index(node))
            # Approximate "one or more" by doubling the children.
            queue.append(list(node.children) * 2 + group)
        else:
            result.append(group)
    return Either(*[Required(*grp) for grp in result])
|
Transform pattern into an equivalent, with only top-level Either.
|
def f_delete_links(self, iterator_of_links, remove_from_trajectory=False):
    """Deletes several links from the hard disk.

    Links can be passed as a string ``'groupA.groupB.linkA'``
    or as a tuple containing the node from which the link should be removed and the
    name of the link ``(groupWithLink, 'linkA')``.

    :param iterator_of_links: iterable of link specifications (strings or tuples)
    :param remove_from_trajectory: if True, also remove the links from the
        in-memory trajectory after deleting them from disk
    """
    to_delete_links = []
    group_link_pairs = []
    # Normalize every specification into (DELETE_LINK, full_name) for the
    # storage service plus a (group, link) pair for in-memory removal.
    for elem in iterator_of_links:
        if isinstance(elem, str):
            split_names = elem.split('.')
            parent_name = '.'.join(split_names[:-1])
            link = split_names[-1]
            parent_node = self.f_get(parent_name) if parent_name != '' else self
            link_name = parent_node.v_full_name + '.' + link if parent_name != '' else link
            to_delete_links.append((pypetconstants.DELETE_LINK, link_name))
            group_link_pairs.append((parent_node, link))
        else:
            link_name = elem[0].v_full_name + '.' + elem[1]
            to_delete_links.append((pypetconstants.DELETE_LINK, link_name))
            group_link_pairs.append(elem)
    try:
        self._storage_service.store(pypetconstants.LIST, to_delete_links,
                                    trajectory_name=self.v_name)
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are not intercepted; the error is still logged and re-raised.
        self._logger.error('Could not remove `%s` from the trajectory. Maybe the'
                           ' item(s) was/were never stored to disk.' % str(to_delete_links))
        raise
    if remove_from_trajectory:
        for group, link in group_link_pairs:
            group.f_remove_link(link)
|
Deletes several links from the hard disk.
Links can be passed as a string ``'groupA.groupB.linkA'``
or as a tuple containing the node from which the link should be removed and the
name of the link ``(groupWithLink, 'linkA')``.
|
def check_labels(self):
    """Verify that every collected label has been declared."""
    for label in self.labels:
        self.check_is_declared(label.name, label.lineno, CLASS.label)
|
Checks if all the labels has been declared
|
def supports(cls, template_file=None):
    """
    :return: Whether the engine can process given template file or not.
    """
    if not anytemplate.compat.IS_PYTHON_3:
        return super(Engine, cls).supports(template_file=template_file)
    # Engine was never ported to Python 3: demote its priority and refuse.
    cls._priority = 99
    return False
|
:return: Whether the engine can process given template file or not.
|
def threshold_monitor_hidden_threshold_monitor_sfp_policy_area_threshold_high_threshold(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
sfp = ET.SubElement(threshold_monitor, "sfp")
policy = ET.SubElement(sfp, "policy")
policy_name_key = ET.SubElement(policy, "policy_name")
policy_name_key.text = kwargs.pop('policy_name')
area = ET.SubElement(policy, "area")
type_key = ET.SubElement(area, "type")
type_key.text = kwargs.pop('type')
area_value_key = ET.SubElement(area, "area_value")
area_value_key.text = kwargs.pop('area_value')
threshold = ET.SubElement(area, "threshold")
high_threshold = ET.SubElement(threshold, "high-threshold")
high_threshold.text = kwargs.pop('high_threshold')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def log_normalize(a, axis=None):
    """Normalizes the input array so that the exponent of the sum is 1.

    Parameters
    ----------
    a : array
        Non-normalized input data.

    axis : int
        Dimension along which normalization is performed, or None to
        normalize over all elements.

    Notes
    -----
    Modifies the input **inplace**.
    """
    with np.errstate(under="ignore"):
        # keepdims=True makes the subtraction broadcast correctly for any
        # axis. The previous ``a_lse[:, np.newaxis]`` crashed for axis=None
        # (scalar result) and broadcast incorrectly for axis=0.
        a_lse = logsumexp(a, axis=axis, keepdims=True)
    a -= a_lse
|
Normalizes the input array so that the exponent of the sum is 1.
Parameters
----------
a : array
Non-normalized input data.
axis : int
Dimension along which normalization is performed.
Notes
-----
Modifies the input **inplace**.
|
def _save_model(self, steps=0):
    """
    Saves current model to checkpoint folder.

    :param steps: Current number of steps in training process.
    :param saver: Tensorflow saver for session.
    """
    # Delegate saving to every registered trainer.
    for trainer in self.trainers.values():
        trainer.save_model()
    self.logger.info('Saved Model')
|
Saves current model to checkpoint folder.
:param steps: Current number of steps in training process.
:param saver: Tensorflow saver for session.
|
def compress_table(condition, tbl, axis=None, out=None, blen=None, storage=None,
                   create='table', **kwargs):
    """Return selected rows of a table."""
    # setup and argument validation
    if axis is not None and axis != 0:
        raise NotImplementedError('only axis 0 is supported')
    if out is not None:
        # argument is only there for numpy API compatibility
        raise NotImplementedError('out argument is not supported')
    storage = _util.get_storage(storage)
    names, columns = _util.check_table_like(tbl)
    blen = _util.get_blen_table(tbl, blen)
    _util.check_equal_length(columns[0], condition)
    length = len(columns[0])
    nnz = count_nonzero(condition)
    # iterate over blocks of rows, appending only the selected ones
    result = None
    for begin in range(0, length, blen):
        stop = min(begin + blen, length)
        cond_block = condition[begin:stop]
        # don't access any data unless we have to
        if not np.any(cond_block):
            continue
        selected = [np.compress(cond_block, col[begin:stop], axis=0)
                    for col in columns]
        if result is None:
            result = getattr(storage, create)(selected, names=names,
                                              expectedlen=nnz, **kwargs)
        else:
            result.append(selected)
    return result
|
Return selected rows of a table.
|
def read_plain_int64(file_obj, count):
    """Read `count` 64-bit ints using the plain encoding."""
    # Little-endian signed 64-bit integers, 8 bytes each.
    raw = file_obj.read(8 * count)
    return struct.unpack("<{}q".format(count), raw)
|
Read `count` 64-bit ints using the plain encoding.
|
def _is_duplicate_record(self, rtype, name, content):
    """Check if DNS entry already exists."""
    matching = self._list_records(rtype, name, content)
    if len(matching) >= 1:
        # Log and signal the caller to skip the redundant change.
        LOGGER.info('Duplicate record %s %s %s, NOOP', rtype, name, content)
        return True
    return False
|
Check if DNS entry already exists.
|
def estimate_parameters(self, max_dist_kb, size_bin_kb, display_graph):
    """
    estimation by least square optimization of Rippe parameters on the
    experimental data

    :param max_dist_kb: maximum genomic distance (kb) taken into account
    :param size_bin_kb: width (kb) of each genomic-distance bin
    :param display_graph: if True, plot observed vs fitted contact curves
    """
    logger.info("estimation of the parameters of the model")
    self.bins = np.arange(
        size_bin_kb, max_dist_kb + size_bin_kb, size_bin_kb
    )
    self.mean_contacts = np.zeros_like(self.bins, dtype=np.float32)
    self.dict_collect = dict()
    self.gpu_vect_frags.copy_from_gpu()
    # Background (trans) contact level, used as a pseudo-count below.
    epsi = self.mean_value_trans
    for k in self.bins:
        self.dict_collect[k] = []
    # Collect intra-chromosomal contact counts per distance bin, sampling
    # the first 2000 fragments of the sparse contact matrix.
    for i in range(0, 2000):
        # print "frag i = ", i
        start = self.sparse_matrix.indptr[i]
        end = self.sparse_matrix.indptr[i + 1]
        id_j = self.sparse_matrix.indices[start:end]
        data = self.sparse_matrix.data[start:end]
        info_i = self.np_sub_frags_2_frags[i]
        init_id_fi = info_i[0]
        # pos_i = self.S_o_A_frags["pos"][init_id_fi]
        id_c_i = self.S_o_A_frags["id_c"][init_id_fi]
        s_i = (
            self.S_o_A_frags["start_bp"][init_id_fi] / 1000.0
            + self.np_sub_frags_2_frags[i][1]
        )
        len_kb_c_i = self.S_o_A_frags["l_cont_bp"][init_id_fi] / 1000
        local_bins = np.arange(
            size_bin_kb,
            min(len_kb_c_i, max_dist_kb) + size_bin_kb,
            size_bin_kb,
        )
        local_storage = np.zeros_like(local_bins, dtype=np.int32)
        for fj, dj in zip(id_j, data):
            info_j = self.np_sub_frags_2_frags[fj]
            init_id_fj = info_j[0]
            id_c_j = self.S_o_A_frags["id_c"][init_id_fj]
            # Only cis (same-contig) contacts contribute to the fit.
            if id_c_i == id_c_j:
                # pos_j = self.S_o_A_frags["pos"][init_id_fj]
                s_j = (
                    self.S_o_A_frags["start_bp"][init_id_fj] / 1000.0
                    + self.np_sub_frags_2_frags[fj][1]
                )
                d = np.abs(s_i - s_j)
                if d < max_dist_kb:
                    # NOTE(review): the bin index relies on integer division
                    # semantics (Python 2); under Python 3 this is a float
                    # index -- confirm the intended interpreter.
                    id_bin = d / size_bin_kb
                    local_storage[id_bin] += dj
                    # self.dict_collect[self.bins[id_bin]].append(dj)
        # we have to add also the zeros
        for my_bin, val in zip(local_bins, local_storage):
            # print "bin = ", bin
            self.dict_collect[my_bin].append(val)
    for id_bin in range(0, len(self.bins)):
        k = self.bins[id_bin]
        self.mean_contacts[id_bin] = np.mean(self.dict_collect[k])
    # Mark empty/zero bins as NaN and add the trans pseudo-count elsewhere.
    for id_bin in range(0, len(self.bins)):
        k = self.bins[id_bin]
        tmp = np.mean(self.dict_collect[k])
        if np.isnan(tmp) or tmp == 0:
            # if np.isnan(tmp):
            # if np.isnan(tmp):
            # print "removing nan"
            self.mean_contacts[id_bin] = np.nan
        else:
            self.mean_contacts[id_bin] = tmp + epsi
    # Drop the NaN bins before fitting the model.
    self.mean_contacts_upd = []
    self.bins_upd = []
    for count, ele in zip(self.mean_contacts, self.bins):
        if not np.isnan(count):
            self.bins_upd.append(ele)
            self.mean_contacts_upd.append(count)
    self.bins_upd = np.array(self.bins_upd)
    self.mean_contacts_upd = np.array(self.mean_contacts_upd)
    # self.mean_contacts_upd =
    # ndi.filters.gaussian_filter1d(self.mean_contacts_upd,
    #                               sigma=len(self.bins_upd) / 5.)
    # Least-squares fit of the Rippe model parameters on the binned curve.
    p, self.y_estim = nuis.estimate_param_hic(
        self.mean_contacts_upd, self.bins_upd
    )
    ##########################################
    fit_param = p.x
    ##########################################
    logger.info("mean value trans = {}".format(self.mean_value_trans))
    ##########################################
    # Distance at which the cis contact frequency drops to the trans level.
    estim_max_dist = nuis.estimate_max_dist_intra(
        fit_param, self.mean_value_trans
    )
    logger.info("max distance cis/trans = {}".format(estim_max_dist))
    ##########################################
    self.param_simu = self.setup_model_parameters(
        fit_param, estim_max_dist
    )
    # Push the fitted parameters to the GPU for the simulation kernels.
    self.gpu_param_simu = cuda.mem_alloc(self.param_simu.nbytes)
    self.gpu_param_simu_test = cuda.mem_alloc(self.param_simu.nbytes)
    cuda.memcpy_htod(self.gpu_param_simu, self.param_simu)
    if display_graph:
        plt.loglog(self.bins_upd, self.mean_contacts_upd, "-*b")
        plt.loglog(self.bins_upd, self.y_estim, "-*r")
        plt.xlabel("genomic distance (kb)")
        plt.ylabel("frequency of contact")
        plt.legend(["obs", "fit"])
        plt.show()
|
estimation by least square optimization of Rippe parameters on the
experimental data
:param max_dist_kb:
:param size_bin_kb:
|
def reverse_mapping(mapping):
    """
    For every key, value pair, return the mapping for the
    equivalent value, key pair

    >>> reverse_mapping({'a': 'b'}) == {'b': 'a'}
    True
    >>> reverse_mapping({}) == {}
    True
    """
    # A dict comprehension also handles the empty mapping, which the
    # previous ``zip(*mapping.items())`` implementation crashed on
    # (ValueError: not enough values to unpack).
    return {value: key for key, value in mapping.items()}
|
For every key, value pair, return the mapping for the
equivalent value, key pair
>>> reverse_mapping({'a': 'b'}) == {'b': 'a'}
True
|
def set_itunes_element(self):
    """Set each of the itunes elements."""
    # Invoke every per-field setter in turn.
    for setter in (
        self.set_itunes_author_name,
        self.set_itunes_block,
        self.set_itunes_closed_captioned,
        self.set_itunes_duration,
        self.set_itunes_explicit,
        self.set_itune_image,
        self.set_itunes_order,
        self.set_itunes_subtitle,
        self.set_itunes_summary,
    ):
        setter()
|
Set each of the itunes elements.
|
def install_plugin(self, dir, entry_script=None):
    """
    Install *Vim* plugin.

    :param string dir: the root directory contains *Vim* script
    :param string entry_script: path to the initializing script
    """
    self.runtimepath.append(dir)
    if entry_script is None:
        return
    # Source the plugin's entry script once the runtimepath is updated.
    self.command('runtime! {0}'.format(entry_script), False)
|
Install *Vim* plugin.
:param string dir: the root directory contains *Vim* script
:param string entry_script: path to the initializing script
|
def active_brokers(self):
    """Set of brokers that are not inactive or decommissioned."""
    # dict.values() behaves identically on Python 2 and 3 here, so the
    # six.itervalues compatibility shim is unnecessary.
    return {
        broker for broker in self.brokers.values()
        if not broker.inactive and not broker.decommissioned
    }
|
Set of brokers that are not inactive or decommissioned.
|
def is_equal(self, other):
    """
    If two intervals are the same
    """
    coerced = IntervalCell.coerce(other)
    # Two intervals match when both endpoints agree.
    return coerced.low == self.low and coerced.high == self.high
|
If two intervals are the same
|
def read_data(self, **kwargs):
    """
    get the data from the service

    as the pocket service does not have any date
    in its API linked to the note,
    add the triggered date to the dict data
    thus the service will be triggered when data will be found

    :param kwargs: contain keyword args : trigger_id at least
    :type kwargs: dict

    :rtype: list
    """
    trigger_id = kwargs.get('trigger_id')
    # Tag the request with this service's model before delegating upward.
    kwargs['model_name'] = 'Tumblr'
    kwargs['app_label'] = 'th_tumblr'
    data = list()
    super(ServiceTumblr, self).read_data(**kwargs)
    cache.set('th_tumblr_' + str(trigger_id), data)
    return data
|
get the data from the service
as the pocket service does not have any date
in its API linked to the note,
add the triggered date to the dict data
thus the service will be triggered when data will be found
:param kwargs: contain keyword args : trigger_id at least
:type kwargs: dict
:rtype: list
|
def honeypot_exempt(view_func):
    """
    Mark view as exempt from honeypot validation
    """
    # Same approach as django's csrf_exempt: wrap the view, tag the wrapper,
    # then copy the original view's metadata onto it.
    def _wrapped(*args, **kwargs):
        return view_func(*args, **kwargs)
    _wrapped.honeypot_exempt = True
    return wraps(view_func, assigned=available_attrs(view_func))(_wrapped)
|
Mark view as exempt from honeypot validation
|
def get_feature_state_for_scope(self, feature_id, user_scope, scope_name, scope_value):
    """GetFeatureStateForScope.

    [Preview API] Get the state of the specified feature for the given named scope

    :param str feature_id: Contribution id of the feature
    :param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
    :param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team")
    :param str scope_value: Value of the scope (e.g. the project or team id)
    :rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>`
    """
    # Serialize each supplied argument into the route; skip None values.
    route_values = {}
    for route_key, arg_name, arg_value in (
        ('featureId', 'feature_id', feature_id),
        ('userScope', 'user_scope', user_scope),
        ('scopeName', 'scope_name', scope_name),
        ('scopeValue', 'scope_value', scope_value),
    ):
        if arg_value is not None:
            route_values[route_key] = self._serialize.url(arg_name, arg_value, 'str')
    response = self._send(http_method='GET',
                          location_id='dd291e43-aa9f-4cee-8465-a93c78e414a4',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('ContributedFeatureState', response)
|
GetFeatureStateForScope.
[Preview API] Get the state of the specified feature for the given named scope
:param str feature_id: Contribution id of the feature
:param str user_scope: User-Scope at which to get the value. Should be "me" for the current user or "host" for all users.
:param str scope_name: Scope at which to get the feature setting for (e.g. "project" or "team")
:param str scope_value: Value of the scope (e.g. the project or team id)
:rtype: :class:`<ContributedFeatureState> <azure.devops.v5_0.feature_management.models.ContributedFeatureState>`
|
def is_child_of_bin(self, id_, bin_id):
    """Tests if a bin is a direct child of another.
    arg: id (osid.id.Id): an ``Id``
    arg: bin_id (osid.id.Id): the ``Id`` of a bin
    return: (boolean) - ``true`` if the ``id`` is a child of
            ``bin_id,`` ``false`` otherwise
    raise: NotFound - ``bin_id`` is not found
    raise: NullArgument - ``id`` or ``bin_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    *implementation notes*: If ``id`` not found return ``false``.
    """
    # Implemented from template for
    # osid.resource.BinHierarchySession.is_child_of_bin
    # Prefer the pluggable catalog session when one is configured,
    # otherwise fall back to the raw hierarchy session.
    if self._catalog_session is None:
        return self._hierarchy_session.is_child(id_=bin_id, child_id=id_)
    return self._catalog_session.is_child_of_catalog(id_=id_, catalog_id=bin_id)
|
Tests if a bin is a direct child of another.
arg: id (osid.id.Id): an ``Id``
arg: bin_id (osid.id.Id): the ``Id`` of a bin
return: (boolean) - ``true`` if the ``id`` is a child of
``bin_id,`` ``false`` otherwise
raise: NotFound - ``bin_id`` is not found
raise: NullArgument - ``id`` or ``bin_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
|
def create_translation_field(translated_field, language):
    """
    Given the original model field and a language, build and return the
    corresponding per-language translation Field instance.
    """
    field_cls = translated_field.__class__
    cls_name = field_cls.__name__
    if not isinstance(translated_field, tuple(SUPPORTED_FIELDS.keys())):
        raise ImproperlyConfigured("%s is not supported by Linguist." % cls_name)
    # Build the translation field class and its extra constructor kwargs
    # from the original field's class.
    return field_factory(field_cls)(
        translated_field=translated_field,
        language=language,
        **get_translation_class_kwargs(field_cls)
    )
|
Takes the original field, a given language, a decider model and return a
Field class for model.
|
def deepish_copy(org):
    """Fast deep-ish copy for dictionaries of simple python types.
    Thanks to Gregg Lind:
    http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
    """
    out = dict.fromkeys(org)
    for key, value in org.items():
        if isinstance(value, dict):
            # Recurse into nested dicts so their contents are copied too.
            out[key] = deepish_copy(value)
            continue
        try:
            out[key] = value.copy()   # dicts, sets
        except AttributeError:
            try:
                out[key] = value[:]   # lists, tuples, strings, unicode
            except TypeError:
                out[key] = value      # ints and other scalars
    return out
|
Improved speed deep copy for dictionaries of simple python types.
Thanks to Gregg Lind:
http://writeonly.wordpress.com/2009/05/07/deepcopy-is-a-pig-for-simple-data/
|
def _generate_token(self, length=32):
    '''
    _generate_token - build a random alphanumeric string of ``length``
    characters (internal helper).
    '''
    alphabet = ascii_letters + digits
    return ''.join(choice(alphabet) for _ in range(length))
|
_generate_token - internal function for generating randomized alphanumberic
strings of a given length
|
def bulk_exports(self):
    """
    :returns: Version bulk_exports of preview
    :rtype: twilio.rest.preview.bulk_exports.BulkExports
    """
    # Lazily construct the BulkExports version on first access and cache it.
    cached = self._bulk_exports
    if cached is None:
        cached = self._bulk_exports = BulkExports(self)
    return cached
|
:returns: Version bulk_exports of preview
:rtype: twilio.rest.preview.bulk_exports.BulkExports
|
def unmajority(p, a, b, c):
    """Apply the un-majority (UNMAJ) gate to qubits a, b, c.

    Reverses a previously applied majority gate -- presumably one half of
    a ripple-carry adder step; confirm against the calling circuit.

    :param p: circuit/program object exposing ``ccx`` and ``cx`` gate methods
    :param a: qubit operand (control of the ccx, target of the middle cx)
    :param b: qubit operand (control of the ccx, target of the last cx)
    :param c: qubit operand (target of the ccx)
    """
    # Gate order matters: these three gates do not commute.
    p.ccx(a, b, c)
    p.cx(c, a)
    p.cx(a, b)
|
Unmajority gate.
|
def to_xml(self, opts = defaultdict(lambda: None)):
    '''
    Generate the XML cartridge description from the current settings.

    Note: ``opts`` is accepted for interface compatibility but is not
    read by this method.

    :raises InvalidLTIConfigError: when launch_url or secure_launch_url
        is not set.
    :return: the serialized XML document with an XML declaration prepended.
    '''
    if not self.launch_url or not self.secure_launch_url:
        raise InvalidLTIConfigError('Invalid LTI configuration')
    root = etree.Element('cartridge_basiclti_link', attrib = {
        '{%s}%s' %(NSMAP['xsi'], 'schemaLocation'): 'http://www.imsglobal.org/xsd/imslticc_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticc_v1p0.xsd http://www.imsglobal.org/xsd/imsbasiclti_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imsbasiclti_v1p0p1.xsd http://www.imsglobal.org/xsd/imslticm_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticm_v1p0.xsd http://www.imsglobal.org/xsd/imslticp_v1p0 http://www.imsglobal.org/xsd/lti/ltiv1p0/imslticp_v1p0.xsd',
        'xmlns': 'http://www.imsglobal.org/xsd/imslticc_v1p0'
    }, nsmap = NSMAP)
    for key in ['title', 'description', 'launch_url', 'secure_launch_url']:
        option = etree.SubElement(root, '{%s}%s' %(NSMAP['blti'], key))
        option.text = getattr(self, key)
    vendor_keys = ['name', 'code', 'description', 'url']
    # BUG FIX: this previously read ``any('vendor_' + key for key in
    # vendor_keys)``, which tested the truthiness of the literal string
    # 'vendor_...' and was therefore always True, emitting an empty
    # <vendor> node even when no vendor data was set. Check the actual
    # attribute values instead.
    if any(getattr(self, 'vendor_' + key) is not None
           for key in vendor_keys) or self.vendor_contact_email:
        vendor_node = etree.SubElement(root, '{%s}%s'
                %(NSMAP['blti'], 'vendor'))
        for key in vendor_keys:
            if getattr(self, 'vendor_' + key) is not None:
                v_node = etree.SubElement(vendor_node,
                        '{%s}%s' %(NSMAP['lticp'], key))
                v_node.text = getattr(self, 'vendor_' + key)
        if getattr(self, 'vendor_contact_email'):
            v_node = etree.SubElement(vendor_node,
                    '{%s}%s' %(NSMAP['lticp'], 'contact'))
            c_name = etree.SubElement(v_node,
                    '{%s}%s' %(NSMAP['lticp'], 'name'))
            c_name.text = self.vendor_contact_name
            c_email = etree.SubElement(v_node,
                    '{%s}%s' %(NSMAP['lticp'], 'email'))
            c_email.text = self.vendor_contact_email
    # Custom params
    if len(self.custom_params) != 0:
        custom_node = etree.SubElement(root, '{%s}%s' %(NSMAP['blti'],
            'custom'))
        for (key, val) in sorted(self.custom_params.items()):
            c_node = etree.SubElement(custom_node, '{%s}%s'
                    %(NSMAP['lticm'], 'property'))
            c_node.set('name', key)
            c_node.text = val
    # Extension params
    if len(self.extensions) != 0:
        for (key, params) in sorted(self.extensions.items()):
            extension_node = etree.SubElement(root, '{%s}%s' %(NSMAP['blti'],
                'extensions'), platform = key)
            self.recursive_options(extension_node,params)
    if getattr(self, 'cartridge_bundle'):
        identifierref = etree.SubElement(root, 'cartridge_bundle',
                identifierref = self.cartridge_bundle)
    if getattr(self, 'cartridge_icon'):
        identifierref = etree.SubElement(root, 'cartridge_icon',
                identifierref = self.cartridge_icon)
    return '<?xml version="1.0" encoding="UTF-8"?>' + etree.tostring(root)
|
Generate XML from the current settings.
|
def git_list_refs(repo_dir):
    """List references available in the local repo with commit ids.
    This is similar to ls-remote, but shows the *local* refs.
    Return format:
    .. code-block:: python
        {<ref1>: <commit_hash1>,
         <ref2>: <commit_hash2>,
         ...,
         <refN>: <commit_hashN>,
        }
    """
    raw = execute_git_command(['git', 'show-ref', '--dereference', '--head'],
                              repo_dir=repo_dir)
    refs = {}
    for line in raw.splitlines():
        line = line.strip()
        if not line:
            continue
        # show-ref prints "<commit_hash> <ref>" per line.
        commit_hash, ref = line.split(None, 1)
        refs[ref] = commit_hash
    return refs
|
List references available in the local repo with commit ids.
This is similar to ls-remote, but shows the *local* refs.
Return format:
.. code-block:: python
{<ref1>: <commit_hash1>,
<ref2>: <commit_hash2>,
...,
<refN>: <commit_hashN>,
}
|
def iter(self, count=0, func=sum):
    '''Endless generator of dice rolls.
    :param count: [0] Return list of ``count`` sums
    :param func: [sum] Apply func to list of individual die rolls func([])
    '''
    while True:
        result = self.roll(count, func)
        yield result
|
Iterator of infinite dice rolls.
:param count: [0] Return list of ``count`` sums
:param func: [sum] Apply func to list of individual die rolls func([])
|
def add(self, field, data_type=None, nullable=True, metadata=None):
    """
    Construct a StructType by adding new elements to it to define the schema. The method accepts
    either:
    a) A single parameter which is a StructField object.
    b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
       metadata(optional). The data_type parameter may be either a String or a
       DataType object.
    >>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
    >>> struct2 = StructType([StructField("f1", StringType(), True), \\
    ...     StructField("f2", StringType(), True, None)])
    >>> struct1 == struct2
    True
    >>> struct1 = StructType().add(StructField("f1", StringType(), True))
    >>> struct2 = StructType([StructField("f1", StringType(), True)])
    >>> struct1 == struct2
    True
    >>> struct1 = StructType().add("f1", "string", True)
    >>> struct2 = StructType([StructField("f1", StringType(), True)])
    >>> struct1 == struct2
    True
    :param field: Either the name of the field or a StructField object
    :param data_type: If present, the DataType of the StructField to create
    :param nullable: Whether the field to add should be nullable (default True)
    :param metadata: Any additional metadata (default None)
    :return: a new updated StructType
    """
    if isinstance(field, StructField):
        # Case (a): a pre-built StructField.
        self.fields.append(field)
        self.names.append(field.name)
    else:
        # Case (b): a field name plus a DataType (or its string form).
        if isinstance(field, str) and data_type is None:
            raise ValueError("Must specify DataType if passing name of struct_field to create.")
        dtype = (_parse_datatype_json_value(data_type)
                 if isinstance(data_type, str) else data_type)
        self.fields.append(StructField(field, dtype, nullable, metadata))
        self.names.append(field)
    # Refresh the cached per-field conversion bookkeeping used by
    # fromInternal/toInternal.
    self._needConversion = [f.needConversion() for f in self]
    self._needSerializeAnyField = any(self._needConversion)
    return self
|
Construct a StructType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a StructField object.
b) Between 2 and 4 parameters as (name, data_type, nullable (optional),
metadata(optional). The data_type parameter may be either a String or a
DataType object.
>>> struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
>>> struct2 = StructType([StructField("f1", StringType(), True), \\
... StructField("f2", StringType(), True, None)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add(StructField("f1", StringType(), True))
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
>>> struct1 = StructType().add("f1", "string", True)
>>> struct2 = StructType([StructField("f1", StringType(), True)])
>>> struct1 == struct2
True
:param field: Either the name of the field or a StructField object
:param data_type: If present, the DataType of the StructField to create
:param nullable: Whether the field to add should be nullable (default True)
:param metadata: Any additional metadata (default None)
:return: a new updated StructType
|
def _scheduleUpgrade(self,
                     ev_data: UpgradeLogData,
                     failTimeout) -> None:
    """
    Schedules node upgrade to a newer version
    :param ev_data: upgrade event parameters
    :param failTimeout: timeout handed to the upgrade agent call
    """
    logger.info(
        "{}'s upgrader processing upgrade for version {}={}"
        .format(self, ev_data.pkg_name, ev_data.version))
    now = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
    self._notifier.sendMessageUponNodeUpgradeScheduled(
        "Upgrade of package {} on node '{}' to version {} "
        "has been scheduled on {}"
        .format(ev_data.pkg_name, self.nodeName,
                ev_data.version, ev_data.when))
    self._actionLog.append_scheduled(ev_data)
    # Delay until the scheduled time; fire immediately if it has passed.
    delay = (ev_data.when - now).total_seconds() if now < ev_data.when else 0
    self.scheduledAction = ev_data
    self._schedule(partial(self._callUpgradeAgent, ev_data, failTimeout),
                   delay)
|
Schedules node upgrade to a newer version
:param ev_data: upgrade event parameters
|
def clean_gff(gff, cleaned, add_chr=False, chroms_to_ignore=None,
              featuretypes_to_ignore=None):
    """
    Cleans a GFF file by removing features on unwanted chromosomes and of
    unwanted featuretypes. Optionally adds "chr" to chrom names.
    """
    logger.info("Cleaning GFF")
    skip_chroms = set(chroms_to_ignore or [])
    skip_types = set(featuretypes_to_ignore or [])
    with open(cleaned, 'w') as fout:
        for feature in gffutils.iterators.DataIterator(gff):
            # Prefix must be applied before the chromosome filter so the
            # ignore list matches the renamed chromosome.
            if add_chr:
                feature.chrom = "chr" + feature.chrom
            if feature.chrom in skip_chroms or feature.featuretype in skip_types:
                continue
            fout.write(str(feature) + '\n')
    return cleaned
|
Cleans a GFF file by removing features on unwanted chromosomes and of
unwanted featuretypes. Optionally adds "chr" to chrom names.
|
def get_params(url, ignore_empty=False):
    """
    Parse the given ``url`` and return its query parameters. Can optionally
    drop parameters whose value is empty. Handles parameters-only urls such
    as ``q=banana&peel=false``.
    :param str url: url to parse
    :param bool ignore_empty: ignore empty value parameter or not
    :return: dictionary of params and their values (digit values become int)
    :rtype: dict
    """
    # BUG FIX: the previous implementation set the start index to 0 when
    # no '?' was present and then sliced ``[start + 1:]``, silently
    # dropping the first character of parameter-only urls.  str.find
    # returns -1 when '?' is absent, so the slice below keeps the whole
    # string in that case.
    params_string = url[url.find('?') + 1:]
    params_dict = {}
    for pair in params_string.split('&'):
        if not pair:
            continue
        # Split on the first '=' only so values may themselves contain '='
        # (the old unconditional 2-way unpack raised ValueError on those).
        param, _, value = pair.partition('=')
        if not value and ignore_empty:
            continue
        params_dict[param] = int(value) if value.isdigit() else value
    return params_dict
|
Static method that parses a given `url` and retrieves `url`'s parameters. Could also ignore empty value parameters.
Handles parameters-only urls as `q=banana&peel=false`.
:param str url: url to parse
:param bool ignore_empty: ignore empty value parameter or not
:return: dictionary of params and their values
:rtype: dict
|
def add_term_facet(self, *args, **kwargs):
    """Build a TermFacet from the given arguments and append it."""
    facet = TermFacet(*args, **kwargs)
    self.facets.append(facet)
|
Add a term factory facet
|
def wait_for_servers(session, servers):
    """Wait for the servers to be ready.
    Note(msimonin): we don't garantee the SSH connection to be ready.
    """
    nclient = nova.Client(NOVA_VERSION, session=session,
                          region_name=os.environ['OS_REGION_NAME'])
    while True:
        # Re-classify every server on each polling pass.
        deployed, undeployed = [], []
        for server in servers:
            current = nclient.servers.get(server.id)
            if current.addresses != {} and current.status == 'ACTIVE':
                deployed.append(server)
            if current.status == 'ERROR':
                undeployed.append(server)
        logger.info("[nova]: Polling the Deployment")
        logger.info("[nova]: %s deployed servers" % len(deployed))
        logger.info("[nova]: %s undeployed servers" % len(undeployed))
        if len(deployed) + len(undeployed) >= len(servers):
            return deployed, undeployed
        time.sleep(3)
|
Wait for the servers to be ready.
Note(msimonin): we don't garantee the SSH connection to be ready.
|
def to_html(self):
    """Render a Paragraph MessageElement as html.
    :returns: The html representation, or None when the paragraph has no text
    """
    if self.text is None:
        return None
    return '<p%s>%s%s</p>' % (
        self.html_attributes(), self.html_icon(), self.text.to_html())
|
Render a Paragraph MessageElement as html
:returns: The html representation of the Paragraph MessageElement
|
def _migrate_db_pre010(self, dbname, newslab):
    '''
    Check for any pre-010 entries in 'dbname' in my slab and migrate those to the new slab.
    Once complete, drop the database from me with the name 'dbname'
    Returns (bool): True if a migration occurred, else False
    '''
    # Per-database marker recording that this migration already completed.
    donekey = f'migrdone:{dbname}'
    if self.metadict.get(donekey, False):
        # NOTE(review): this bare return yields None rather than the bool
        # the docstring promises -- confirm callers only test truthiness.
        return
    if not self.layrslab.dbexists(dbname):
        # Nothing to migrate; remember that so future calls short-circuit.
        self.metadict.set(donekey, True)
        return False
    oldslab = self.layrslab
    olddb = oldslab.initdb(dbname)
    entries = oldslab.stat(olddb)['entries']
    if not entries:
        # Database exists but is empty: mark done without copying anything.
        self.metadict.set(donekey, True)
        return False
    if newslab.dbexists(dbname):
        # A previous run died partway through: discard the partial copy so
        # the migration restarts from a clean destination.
        logger.warning('Incomplete migration detected. Dropping new splices to restart.')
        newslab.dropdb(dbname)
        logger.info('New splice dropping complete.')
    logger.info('Pre-010 %s migration starting. Total rows: %d...', dbname, entries)
    def progfunc(count):
        # Progress callback invoked by copydb while rows are copied.
        logger.info('Progress %d/%d (%2.2f%%)', count, entries, count / entries * 100)
    oldslab.copydb(olddb, newslab, destdbname=dbname, progresscb=progfunc)
    logger.info('Pre-010 %s migration copying done. Deleting from old location...', dbname)
    oldslab.dropdb(dbname)
    logger.info('Pre-010 %s migration completed.', dbname)
    # Only mark done after the old database has been fully dropped.
    self.metadict.set(donekey, True)
    return True
|
Check for any pre-010 entries in 'dbname' in my slab and migrate those to the new slab.
Once complete, drop the database from me with the name 'dbname'
Returns (bool): True if a migration occurred, else False
|
def get_version_info():
    """
    Return astropy and photutils versions.
    Returns
    -------
    result : str
        The astropy and photutils versions.
    """
    # Import lazily and alias directly, avoiding the intermediate rebinds.
    from astropy import __version__ as astropy_version
    from photutils import __version__ as photutils_version
    return 'astropy: {0}, photutils: {1}'.format(astropy_version,
                                                 photutils_version)
|
Return astropy and photutils versions.
Returns
-------
result : str
The astropy and photutils versions.
|
def datetime_to_ns(then):
    """Transform a :any:`datetime.datetime` into a NationStates-style
    string.
    For example "6 days ago", "105 minutes ago", etc.

    :param then: a naive UTC datetime in the past
    :return: human-readable elapsed-time string
    """
    if then == datetime(1970, 1, 1, 0, 0):
        # The Unix epoch is used as a sentinel for "since forever".
        return 'Antiquity'
    now = datetime.utcnow()
    delta = now - then
    seconds = delta.total_seconds()
    # There's gotta be a better way to do this...
    years, seconds = divmod(seconds, 60*60*24*365)
    days, seconds = divmod(seconds, 60*60*24)
    hours, seconds = divmod(seconds, 60*60)
    minutes, seconds = divmod(seconds, 60)
    years = int(years)
    days = int(days)
    hours = int(hours)
    minutes = int(minutes)
    seconds = round(seconds)
    if years > 1:
        if days > 1:
            return f'{years} years {days} days ago'
        elif days == 1:
            # BUG FIX: the next two returns were plain string literals
            # missing the f-prefix and produced the raw '{years} ...' text.
            return f'{years} years 1 day ago'
        return f'{years} years ago'
    if years == 1:
        if days > 1:
            return f'1 year {days} days ago'
        elif days == 1:
            return '1 year 1 day ago'
        return '1 year ago'
    if days > 3:
        return f'{days} days ago'
    if days > 1:
        if hours > 1:
            return f'{days} days {hours} hours ago'
        elif hours == 1:
            return f'{days} days 1 hour ago'
        return f'{days} days ago'
    if days == 1:
        if hours > 1:
            return f'1 day {hours} hours ago'
        elif hours == 1:
            return '1 day 1 hour ago'
        return '1 day ago'
    if hours > 1:
        return f'{hours} hours ago'
    if hours == 1:
        # NationStates reports 1-2 hours as minutes, e.g. "105 minutes ago".
        return f'{minutes + 60} minutes ago'
    if minutes > 1:
        return f'{minutes} minutes ago'
    if minutes == 1:
        return '1 minute ago'
    return 'Seconds ago'
|
Transform a :any:`datetime.datetime` into a NationStates-style
string.
For example "6 days ago", "105 minutes ago", etc.
|
def _apply_dvs_config(config_spec, config_dict):
    '''
    Copies the recognised values of the config dict dictionary onto a
    config spec (vim.VMwareDVSConfigSpec)
    '''
    name = config_dict.get('name')
    if name:
        config_spec.name = name
    contact_email = config_dict.get('contact_email')
    contact_name = config_dict.get('contact_name')
    if contact_email or contact_name:
        # Create the contact sub-object on demand before filling it.
        if not config_spec.contact:
            config_spec.contact = vim.DVSContactInfo()
        config_spec.contact.contact = contact_email
        config_spec.contact.name = contact_name
    description = config_dict.get('description')
    if description:
        config_spec.description = description
    max_mtu = config_dict.get('max_mtu')
    if max_mtu:
        config_spec.maxMtu = max_mtu
    lacp_api_version = config_dict.get('lacp_api_version')
    if lacp_api_version:
        config_spec.lacpApiVersion = lacp_api_version
    nrc_version = config_dict.get('network_resource_control_version')
    if nrc_version:
        config_spec.networkResourceControlVersion = nrc_version
    uplink_names = config_dict.get('uplink_names')
    if uplink_names:
        # Replace the policy object unless it is already the right type.
        if not config_spec.uplinkPortPolicy or \
           not isinstance(config_spec.uplinkPortPolicy,
                          vim.DVSNameArrayUplinkPortPolicy):
            config_spec.uplinkPortPolicy = \
                vim.DVSNameArrayUplinkPortPolicy()
        config_spec.uplinkPortPolicy.uplinkPortName = uplink_names
|
Applies the values of the config dict dictionary to a config spec
(vim.VMwareDVSConfigSpec)
|
def validate_enum_attribute(fully_qualified_name: str, spec: Dict[str, Any], attribute: str,
                            candidates: Set[Union[str, int, float]]) -> Optional[InvalidValueError]:
    """ Validate that ``spec[attribute]``, when present, is one of the allowed candidates. """
    if attribute in spec and spec[attribute] not in candidates:
        return InvalidValueError(fully_qualified_name, spec, attribute, candidates)
    return None
|
Validates to ensure that the value of an attribute lies within an allowed set of candidates
|
def _qmed_from_pot_records(self):
    """
    Return QMED estimate based on peaks-over-threshold (POT) records.
    Methodology source: FEH, Vol. 3, pp. 77-78
    :return: QMED in m³/s
    :rtype: float
    :raises InsufficientDataError: if the catchment has no POT dataset or
        no complete years of records
    """
    pot_dataset = self.catchment.pot_dataset
    if not pot_dataset:
        raise InsufficientDataError("POT dataset must be set for catchment {} to estimate QMED from POT data."
                                    .format(self.catchment.id))
    # Only complete years of record are usable for the estimate.
    complete_year_records, length = self._complete_pot_years(pot_dataset)
    if length < 1:
        raise InsufficientDataError("Insufficient POT flow records available for catchment {}."
                                    .format(self.catchment.id))
    # Fractional rank of the median annual maximum within the POT series;
    # coefficients follow the FEH methodology cited above.
    position = 0.790715789 * length + 0.539684211
    i = floor(position)
    # Interpolation weight between the i-th and (i+1)-th largest flows
    # (1-based ranks; flows[i - 1] and flows[i] below).
    w = 1 + i - position  # This is equivalent to table 12.1!
    flows = [record.flow for record in complete_year_records]
    # Rank flows from largest to smallest, then linearly interpolate.
    flows.sort(reverse=True)
    return w * flows[i - 1] + (1 - w) * flows[i]
|
Return QMED estimate based on peaks-over-threshold (POT) records.
Methodology source: FEH, Vol. 3, pp. 77-78
:return: QMED in m³/s
:rtype: float
|
def _vowelinstem(self, stem):
    """vowelinstem(stem) is TRUE <=> stem contains a vowel"""
    # A vowel is any position that _cons() does not classify as a consonant.
    return any(not self._cons(stem, i) for i in range(len(stem)))
|
vowelinstem(stem) is TRUE <=> stem contains a vowel
|
def _write_adminfile(kwargs):
    '''
    Create a temporary adminfile based on the keyword arguments passed to
    pkg.install.
    '''
    # Keys in output order, paired with their default values.
    defaults = (
        ('email', ''),
        ('instance', 'quit'),
        ('partial', 'nocheck'),
        ('runlevel', 'nocheck'),
        ('idepend', 'nocheck'),
        ('rdepend', 'nocheck'),
        ('space', 'nocheck'),
        ('setuid', 'nocheck'),
        ('conflict', 'nocheck'),
        ('action', 'nocheck'),
        ('basedir', 'default'),
    )
    # Make tempfile to hold the adminfile contents.
    adminfile = salt.utils.files.mkstemp(prefix="salt-")
    with salt.utils.files.fopen(adminfile, 'w') as fp_:
        for key, default in defaults:
            line = '{0}={1}\n'.format(key, kwargs.get(key, default))
            fp_.write(salt.utils.stringutils.to_str(line))
    return adminfile
|
Create a temporary adminfile based on the keyword arguments passed to
pkg.install.
|
def job_get_log(object_id, input_params=None, always_retry=False, **kwargs):
    """
    Invokes the /job-xxxx/getLog API method.
    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2FgetLog
    """
    # BUG FIX: the default was a mutable ``{}`` shared across all calls;
    # use None and substitute a fresh dict per call instead.
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/getLog' % object_id, input_params, always_retry=always_retry, **kwargs)
|
Invokes the /job-xxxx/getLog API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fjob-xxxx%2FgetLog
|
def visit_Dict(self, node: AST, dfltChaining: bool = True) -> str:
    """Return the source representation of `node`s key/value pairs."""
    pairs = []
    for key, value in zip(node.keys, node.values):
        pairs.append(f"{self.visit(key)}: {self.visit(value)}")
    return "{" + ", ".join(pairs) + "}"
|
Return dict representation of `node`s elements.
|
def locality_preserving_projections(self, coordinates, num_dims=None):
    '''Locality Preserving Projections (LPP, linearized Laplacian Eigenmaps).

    :param coordinates: (n, d) array-like of sample coordinates -- assumes
        rows are samples; confirm against callers.
    :param num_dims: number of projection vectors to return (default: all)
    :return: result of the null-space solve on the transformed Laplacian
    '''
    X = np.atleast_2d(coordinates) # n x d
    L = self.laplacian(normed=True) # n x n
    # Whitening factor from the SVD of the (d x d) Gram matrix X'X.
    u,s,_ = np.linalg.svd(X.T.dot(X))
    Fplus = np.linalg.pinv(u * np.sqrt(s)) # d x d
    n, d = X.shape
    # Pick the multiplication order that keeps intermediates smallest.
    if n >= d: # optimized order: F(X'LX)F'
        T = Fplus.dot(X.T.dot(L.dot(X))).dot(Fplus.T)
    else: # optimized order: (FX')L(XF')
        T = Fplus.dot(X.T).dot(L.dot(X.dot(Fplus.T)))
    # Symmetrize to guard against floating-point asymmetry before the solve.
    L = 0.5*(T+T.T)
    return _null_space(L, num_vecs=num_dims, overwrite=True)
|
Locality Preserving Projections (LPP, linearized Laplacian Eigenmaps).
|
def _init_go2ntpresent(go_ntsets, go_all, gosubdag):
    """Mark all GO IDs with an X if present in the user GO list."""
    ntobj = namedtuple('NtPresent', " ".join(nt.hdr for nt in go_ntsets))
    go2ntpresent = {}
    # Mark each source GO ID per input set: 'X' when present, '.' otherwise.
    for goid in go_all:
        marks = ['X' if goid in nt.go_set else '.' for nt in go_ntsets]
        go2ntpresent[goid] = ntobj._make(marks)
    # Every remaining GO ancestor gets an all-absent row.
    ancestors = set(gosubdag.go2obj).difference(go2ntpresent)
    assert not ancestors.intersection(go_all)
    absent_row = ['.' for _ in range(len(go_ntsets))]
    for goid in ancestors:
        go2ntpresent[goid] = ntobj._make(absent_row)
    return go2ntpresent
|
Mark all GO IDs with an X if present in the user GO list.
|
def _create_data_files_directory(symlink=False):
    """Install data_files in the /etc directory."""
    here = os.path.abspath(os.path.dirname(__file__))
    etc_kytos = os.path.join(BASE_ENV, ETC_KYTOS)
    if not os.path.exists(etc_kytos):
        os.makedirs(etc_kytos)
    src = os.path.join(here, KYTOS_SKEL_PATH)
    dst = os.path.join(BASE_ENV, KYTOS_SKEL_PATH)
    if not os.path.exists(dst):
        # Fresh install: either link to the skeleton or copy it wholesale.
        if symlink:
            os.symlink(src, dst)
        else:
            shutil.copytree(src, dst)
    elif not os.listdir(dst):
        # Destination exists but is empty, so repopulate it.  copytree
        # refuses to write into an existing directory; remove it first.
        os.rmdir(dst)
        shutil.copytree(src, dst)
|
Install data_files in the /etc directory.
|
def get_jids():
    '''
    Return a list of all job ids
    '''
    with _get_serv(ret=None, commit=True) as cur:
        cur.execute('''SELECT jid, load
                    FROM jids''')
        # Build {jid: formatted instance} from each (jid, load) row.
        return {
            jid: salt.utils.jid.format_jid_instance(
                jid, salt.utils.json.loads(load))
            for jid, load in cur.fetchall()
        }
|
Return a list of all job ids
|
def AddBlob(self, blob_hash, length, chunk_number):
    """Add another blob to this image using its hash.

    Args:
      blob_hash: hash object exposing ``AsBytes()``; must be exactly
        ``self._HASH_SIZE`` bytes long.
      length: number of bytes this blob contributes to the logical size.
      chunk_number: index of the chunk this blob backs.

    Raises:
      ValueError: if ``blob_hash`` has the wrong length.
    """
    if len(blob_hash.AsBytes()) != self._HASH_SIZE:
        raise ValueError("Hash '%s' doesn't have correct length (%d)." %
                         (blob_hash, self._HASH_SIZE))
    # If we're adding a new blob, we should increase the size. If we're just
    # updating an existing blob, the size should stay the same.
    # That is, if we read the index at the right offset and no hash is there, we
    # must not have seen this blob before, so we say we're adding a new one and
    # increase in size.
    if not self.ChunkExists(chunk_number):
        # We say that we've increased in size by the size of the blob,
        # but really we only store its hash in the AFF4SparseImage.
        self.size += length
        self._dirty = True
    # Keep track of the biggest chunk_number we've seen so far.
    if chunk_number > self.last_chunk:
        self.last_chunk = chunk_number
        self._dirty = True
    index_urn = self.urn.Add(self.CHUNK_ID_TEMPLATE % chunk_number)
    # TODO(amoser): This opens a subobject for each AddBlob call :/
    with aff4.FACTORY.Create(
            index_urn, aff4.AFF4MemoryStream, token=self.token) as fd:
        fd.write(blob_hash.AsBytes())
    # Drop any cached copy of this chunk so the next read sees the new blob.
    if chunk_number in self.chunk_cache:
        self.chunk_cache.Pop(chunk_number)
|
Add another blob to this image using its hash.
|
def change_tz(cal, new_timezone, default, utc_only=False, utc_tz=icalendar.utc):
    """
    Change the timezone of the specified component.
    Args:
        cal (Component): the component to change
        new_timezone (tzinfo): the timezone to change to
        default (tzinfo): a timezone to assume if the dtstart or dtend in cal
            doesn't have an existing timezone
        utc_only (bool): only convert dates that are in utc
        utc_tz (tzinfo): the tzinfo to compare to for UTC when processing
            utc_only=True
    """
    for vevent in getattr(cal, 'vevent_list', []):
        # Adjust both the start and end timestamps of each event.
        for attr in ('dtstart', 'dtend'):
            node = getattr(vevent, attr, None)
            if not node:
                continue
            value = node.value
            if not isinstance(value, datetime):
                continue
            if utc_only and value.tzinfo != utc_tz:
                continue
            if value.tzinfo is None:
                value = value.replace(tzinfo=default)
            node.value = value.astimezone(new_timezone)
|
Change the timezone of the specified component.
Args:
cal (Component): the component to change
new_timezone (tzinfo): the timezone to change to
default (tzinfo): a timezone to assume if the dtstart or dtend in cal
doesn't have an existing timezone
utc_only (bool): only convert dates that are in utc
utc_tz (tzinfo): the tzinfo to compare to for UTC when processing
utc_only=True
|
def determine_band_channel(kal_out):
    """Return (band, channel, target frequency) parsed from kal output.

    Scans ``kal_out`` for a line like
    ``Using GSM-900 channel 30 (945.0MHz)`` and returns the band, channel
    and frequency (parentheses stripped) as strings.  Returns
    ``(None, '', '')`` when no such line is found.
    """
    band = ""
    channel = ""
    tgt_freq = ""
    # The original wrapped this scan in a ``while band == ""`` loop, but
    # the body always terminated it (band found, or set to None below),
    # so a single pass is equivalent and clearer.
    for line in kal_out.splitlines():
        if "Using " in line and " channel " in line:
            tokens = line.split()
            band = str(tokens[1])
            channel = str(tokens[3])
            tgt_freq = str(tokens[4]).replace("(", "").replace(")", "")
    if band == "":
        band = None
    return (band, channel, tgt_freq)
|
Return band, channel, target frequency from kal output.
|
def rolling_window(array, axis, window, center, fill_value):
    """
    Make an ndarray with a rolling window of axis-th dimension.
    The rolling dimension will be placed at the last dimension.
    """
    # Dispatch to the dask-backed implementation for dask arrays and to
    # the numpy implementation for everything else.
    if isinstance(array, dask_array_type):
        impl = dask_array_ops.rolling_window
    else:
        impl = nputils.rolling_window
    return impl(array, axis, window, center, fill_value)
|
Make an ndarray with a rolling window of axis-th dimension.
The rolling dimension will be placed at the last dimension.
|
def _ensure_programmer_executable():
    """Locate the lpc21isp binary and mark it executable."""
    # mode=os.F_OK deliberately accepts a non-executable match, since
    # fixing the permission bits is the whole point of this function.
    updater = shutil.which('lpc21isp', mode=os.F_OK)
    # updater may be None if lpc21isp is missing; os.chmod then raises,
    # and we let that exception bubble up so the failure surfaces when
    # we try to program the smoothie.
    os.chmod(updater, 0o777)
|
Find the lpc21isp executable and ensure it is executable
|
def tarball_files(work_dir, tar_name, uuid=None, files=None):
    """
    Tars a group of files together into a gzipped tarball.

    work_dir: str Current working directory
    tar_name: str Name of tarball
    uuid: str Optional UUID used to prefix archive member names
    files: list(str) Filenames (relative to work_dir) to place in the tarball
    """
    # BUG FIX: with the default files=None the loop below raised
    # TypeError; treat a missing list as "no files".
    files = files or []
    with tarfile.open(os.path.join(work_dir, tar_name), 'w:gz') as f_out:
        for fname in files:
            # Prefix the member name with the uuid when one was supplied.
            arcname = uuid + '.' + fname if uuid else fname
            f_out.add(os.path.join(work_dir, fname), arcname=arcname)
|
Tars a group of files together into a tarball
work_dir: str Current Working Directory
tar_name: str Name of tarball
uuid: str UUID to stamp files with
files: str(s) List of filenames to place in the tarball from working directory
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.