code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def getFiledAgainst(self, filedagainst_name, projectarea_id=None,
                    projectarea_name=None, archived=False):
    """Look up a :class:`rtcclient.models.FiledAgainst` by its name.

    :param filedagainst_name: the filedagainst name
    :param projectarea_id: the :class:`rtcclient.project_area.ProjectArea`
        id
    :param projectarea_name: the project area name
    :param archived: (default is False) whether the filedagainst is
        archived
    :return: the :class:`rtcclient.models.FiledAgainst` object
    :rtype: rtcclient.models.FiledAgainst
    :raises exception.BadValue: if the name is empty or not a string
    :raises exception.NotFound: if no matching filedagainst exists
    """
    self.log.debug("Try to get <FiledAgainst %s>", filedagainst_name)
    # Reject anything that is not a non-empty string.
    valid_name = (isinstance(filedagainst_name, six.string_types)
                  and bool(filedagainst_name))
    if not valid_name:
        excp_msg = "Please specify a valid FiledAgainst name"
        self.log.error(excp_msg)
        raise exception.BadValue(excp_msg)

    matches = self._getFiledAgainsts(projectarea_id=projectarea_id,
                                     projectarea_name=projectarea_name,
                                     archived=archived,
                                     filedagainst_name=filedagainst_name)
    if matches is None:
        error_msg = "No FiledAgainst named %s" % filedagainst_name
        self.log.error(error_msg)
        raise exception.NotFound(error_msg)

    filedagainst = matches[0]
    self.log.info("Find <FiledAgainst %s>", filedagainst)
    return filedagainst
|
Get :class:`rtcclient.models.FiledAgainst` object by its name
:param filedagainst_name: the filedagainst name
:param projectarea_id: the :class:`rtcclient.project_area.ProjectArea`
id
:param projectarea_name: the project area name
:param archived: (default is False) whether the filedagainst is
archived
:return: the :class:`rtcclient.models.FiledAgainst` object
:rtype: rtcclient.models.FiledAgainst
|
def prepare_encoder(inputs, hparams, attention_type="local_1d"):
  """Prepare encoder input for images: embed channels, add position signals,
  and reshape according to the attention type."""
  x = prepare_image(inputs, hparams, name="enc_channels")
  # Add position signals.
  x = add_pos_signals(x, hparams, "enc_pos")
  shape = common_layers.shape_list(x)
  if attention_type == "local_1d":
    # Flatten the two spatial dimensions into one sequence axis.
    seq_len = shape[1] * shape[2]
    x = tf.reshape(x, [shape[0], seq_len, hparams.hidden_size])
    x.set_shape([None, None, hparams.hidden_size])
  elif attention_type == "local_2d":
    x.set_shape([None, None, None, hparams.hidden_size])
  return x
|
Prepare encoder for images.
|
def x_lower_limit(self, limit=None):
    """Get or set the value at which the x-axis should start.

    Called with no argument, returns the stored lower limit if one was
    set, otherwise zero — or the smallest x value when the data goes
    negative.

    :param limit: If given, the chart's x_lower_limit will be set to this.
    :raises TypeError: if ``limit`` is not numeric.
    :raises ValueError: if you try to make the lower limit larger than\
    the upper limit."""
    if limit is not None:
        # Setter path: validate, then store.
        if not is_numeric(limit):
            raise TypeError(
                "lower x limit must be numeric, not '%s'" % str(limit)
            )
        if limit >= self.largest_x():
            raise ValueError(
                "lower x limit must be less than upper limit (%s), not %s" % (
                    str(self.largest_x()), str(limit)
                )
            )
        self._x_lower_limit = limit
        return
    # Getter path: an explicitly-set limit wins.
    if self._x_lower_limit is not None:
        return self._x_lower_limit
    smallest = self.smallest_x()
    if smallest >= 0:
        return 0
    if smallest == self.largest_x():
        # Degenerate (single-valued) data: pad by one so the axis has width.
        return int(smallest - 1)
    return smallest
|
Returns or sets (if a value is provided) the value at which the
x-axis should start. By default this is zero (unless there are negative
values).
:param limit: If given, the chart's x_lower_limit will be set to this.
:raises ValueError: if you try to make the lower limit larger than the\
upper limit.
|
def element(element, name, default=None):
    """
    Return the text of a child element, or a default if it's not defined.

    :param element: The XML Element object
    :type element: etree._Element
    :param name: The name of the element to evaluate
    :type name: str
    :param default: The default value to return if the element is not defined
    """
    child = element.find(name)
    if child is None:
        return default
    return child.text
|
Returns the value of an element, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param name: The name of the element to evaluate
:type name: str
:param default: The default value to return if the element is not defined
|
def _next_regular(target):
"""
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
"""
if target <= 6:
return target
# Quickly check if it's already a power of 2
if not (target & (target - 1)):
return target
match = float('inf') # Anything found will be smaller
p5 = 1
while p5 < target:
p35 = p5
while p35 < target:
# Ceiling integer division, avoiding conversion to float
# (quotient = ceil(target / p35))
quotient = -(-target // p35)
# Quickly find next power of 2 >= quotient
try:
p2 = 2 ** ((quotient - 1).bit_length())
except AttributeError:
# Fallback for Python <2.7
p2 = 2 ** _bit_length_26(quotient - 1)
N = p2 * p35
if N == target:
return N
elif N < match:
match = N
p35 *= 3
if p35 == target:
return p35
if p35 < match:
match = p35
p5 *= 5
if p5 == target:
return p5
if p5 < match:
match = p5
return match
|
Find the next regular number greater than or equal to target.
Regular numbers are composites of the prime factors 2, 3, and 5.
Also known as 5-smooth numbers or Hamming numbers, these are the optimal
size for inputs to FFTPACK.
Target must be a positive integer.
|
def _get_result_paths(self, data):
    """Return a dict of ResultPath objects representing all possible output.

    The single output is the assignment file given by the '-o' parameter.
    """
    out_fp = str(self.Parameters['-o'].Value).strip('"')
    if not os.path.isabs(out_fp):
        # Non-absolute paths are expressed relative to the working directory.
        out_fp = os.path.relpath(out_fp, self.WorkingDir)
    return {'Assignments': ResultPath(out_fp, IsWritten=True)}
|
Return a dict of ResultPath objects representing all possible output
|
def cutadaptit_pairs(data, sample):
    """
    Applies trim & filters to pairs, including adapter detection. If we have
    barcode information then we use it to trim reversecut+bcode+adapter from
    reverse read, if not then we have to apply a more general cut to make sure
    we remove the barcode, this uses wildcards and so will have more false
    positives that trim a little extra from the ends of reads. Should we add
    a warning about this when filter_adapters=2 and no barcodes?

    :param data: assembly object holding paramsdict, _hackersonly settings,
        barcodes, and output directories (data.dirs.edits).
    :param sample: sample object; inputs are read from sample.files.concat
        and barcodes (when present) from sample.barcode /
        data.barcodes[sample.name].
    :return: cutadapt's combined stdout/stderr output (bytes), parsed by
        the caller outside of the engine.
    """
    LOGGER.debug("Entering cutadaptit_pairs - {}".format(sample.name))
    sname = sample.name

    ## applied to read pairs
    #trim_r1 = str(data.paramsdict["edit_cutsites"][0])
    #trim_r2 = str(data.paramsdict["edit_cutsites"][1])
    finput_r1 = sample.files.concat[0][0]
    finput_r2 = sample.files.concat[0][1]

    ## Get adapter sequences. This is very important. For the forward adapter
    ## we don't care all that much about getting the sequence just before the
    ## Illumina adapter, b/c it will either be random (in RAD), or the reverse
    ## cut site of cut1 or cut2 (gbs or ddrad). Either way, we can still trim it
    ## off later in step7 with trim overhang if we want. And it should be invar-
    ## iable unless the cut site has an ambiguous char. The reverse adapter is
    ## super important, however b/c it can contain the inline barcode and
    ## revcomp cut site. We def want to trim out the barcode, and ideally the
    ## cut site too to be safe. Problem is we don't always know the barcode if
    ## users demultiplexed their data elsewhere. So, if barcode is missing we
    ## do a very fuzzy match before the adapter and trim it out.

    ## this just got more complicated now that we allow merging technical
    ## replicates in step 1 since a single sample might have multiple barcodes
    ## associated with it and so we need to search for multiple adapter+barcode
    ## combinations.
    ## We will assume that if they are 'linking_barcodes()' here then there are
    ## no technical replicates in the barcodes file. If there ARE technical
    ## replicates, then they should run step1 so they are merged, in which case
    ## the sample specific barcodes will be saved to each Sample under its
    ## .barcode attribute as a list.
    if not data.barcodes:
        ## try linking barcodes again in case user just added a barcodes path
        ## after receiving the warning. We assume no technical replicates here.
        try:
            data._link_barcodes()
        except Exception as inst:
            LOGGER.warning(" error adding barcodes info: %s", inst)

    ## barcodes are present meaning they were parsed to the samples in step 1.
    if data.barcodes:
        try:
            ## forward adapter: revcomp of cut2 followed by the p3 adapter
            adapter1 = fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] \
                       + data._hackersonly["p3_adapter"]
            if isinstance(sample.barcode, list):
                bcode = fullcomp(sample.barcode[0])[::-1]
            elif isinstance(data.barcodes[sample.name], list):
                ## NOTE: here [::-1] is applied before fullcomp rather than
                ## after; for a pure per-base complement the two orders give
                ## the same reverse-complement result.
                bcode = fullcomp(data.barcodes[sample.name][0][::-1])
            else:
                bcode = fullcomp(data.barcodes[sample.name])[::-1]
            ## add full adapter (-revcompcut-revcompbcode-adapter)
            adapter2 = fullcomp(data.paramsdict["restriction_overhang"][0])[::-1] \
                       + bcode \
                       + data._hackersonly["p5_adapter"]
        except KeyError as inst:
            msg = """
    Sample name does not exist in the barcode file. The name in the barcode file
    for each sample must exactly equal the raw file name for the sample minus
    `_R1`. So for example a sample called WatDo_PipPrep_R1_100.fq.gz must
    be referenced in the barcode file as WatDo_PipPrep_100. The name in your
    barcode file for this sample must match: {}
    """.format(sample.name)
            LOGGER.error(msg)
            raise IPyradWarningExit(msg)
    else:
        print(NO_BARS_GBS_WARNING)
        #adapter1 = fullcomp(data.paramsdict["restriction_overhang"][1])[::-1]+\
        #           data._hackersonly["p3_adapter"]
        #adapter2 = "XXX"
        adapter1 = data._hackersonly["p3_adapter"]
        adapter2 = fullcomp(data._hackersonly["p5_adapter"])

    ## parse trim_reads
    ## (the chained assignment aliases a single list, but every name below is
    ## only ever rebound, never mutated in place, so the aliasing is harmless)
    trim5r1 = trim5r2 = trim3r1 = trim3r2 = []
    if data.paramsdict.get("trim_reads"):
        trimlen = data.paramsdict.get("trim_reads")

        ## trim 5' end
        if trimlen[0]:
            trim5r1 = ["-u", str(trimlen[0])]
        ## trim 3' end of R1: negative trims from the end, positive caps length
        if trimlen[1] < 0:
            trim3r1 = ["-u", str(trimlen[1])]
        if trimlen[1] > 0:
            trim3r1 = ["--length", str(trimlen[1])]
        ## legacy support for trimlen = 0,0 default
        if len(trimlen) > 2:
            if trimlen[2]:
                trim5r2 = ["-U", str(trimlen[2])]
        if len(trimlen) > 3:
            if trimlen[3]:
                if trimlen[3] < 0:
                    trim3r2 = ["-U", str(trimlen[3])]
                if trimlen[3] > 0:
                    trim3r2 = ["--length", str(trimlen[3])]
    else:
        ## legacy support
        trimlen = data.paramsdict.get("edit_cutsites")
        trim5r1 = ["-u", str(trimlen[0])]
        trim5r2 = ["-U", str(trimlen[1])]

    ## testing new 'trim_reads' setting
    cmdf1 = ["cutadapt"]
    if trim5r1:
        cmdf1 += trim5r1
    if trim3r1:
        cmdf1 += trim3r1
    if trim5r2:
        cmdf1 += trim5r2
    if trim3r2:
        cmdf1 += trim3r2
    cmdf1 += ["--trim-n",
              "--max-n", str(data.paramsdict["max_low_qual_bases"]),
              "--minimum-length", str(data.paramsdict["filter_min_trim_len"]),
              "-o", OPJ(data.dirs.edits, sname+".trimmed_R1_.fastq.gz"),
              "-p", OPJ(data.dirs.edits, sname+".trimmed_R2_.fastq.gz"),
              finput_r1,
              finput_r2]

    ## additional args
    if int(data.paramsdict["filter_adapters"]) < 2:
        ## add a dummy adapter to let cutadapt know we are not using legacy-mode
        cmdf1.insert(1, "XXX")
        cmdf1.insert(1, "-A")
    if int(data.paramsdict["filter_adapters"]):
        ## inserts are in reverse order: the final command line reads
        ## "--quality-base <offset> -q 20,20"
        cmdf1.insert(1, "20,20")
        cmdf1.insert(1, "-q")
        cmdf1.insert(1, str(data.paramsdict["phred_Qscore_offset"]))
        cmdf1.insert(1, "--quality-base")
    if int(data.paramsdict["filter_adapters"]) > 1:
        ## if technical replicates then add other copies
        if isinstance(sample.barcode, list):
            for extrabar in sample.barcode[1:]:
                data._hackersonly["p5_adapters_extra"] += \
                    fullcomp(data.paramsdict["restriction_overhang"][0])[::-1] + \
                    fullcomp(extrabar)[::-1] + \
                    data._hackersonly["p5_adapter"]
                ## NOTE(review): this second block appends p3 components to
                ## "p5_adapters_extra" — verify it was not intended to extend
                ## "p3_adapters_extra" instead.
                data._hackersonly["p5_adapters_extra"] += \
                    fullcomp(data.paramsdict["restriction_overhang"][1])[::-1] + \
                    data._hackersonly["p3_adapter"]
        ## first enter extra cuts
        zcut1 = list(set(data._hackersonly["p3_adapters_extra"]))[::-1]
        zcut2 = list(set(data._hackersonly["p5_adapters_extra"]))[::-1]
        for ecut1, ecut2 in zip(zcut1, zcut2):
            cmdf1.insert(1, ecut1)
            cmdf1.insert(1, "-a")
            cmdf1.insert(1, ecut2)
            cmdf1.insert(1, "-A")
        ## then put the main cut first
        cmdf1.insert(1, adapter1)
        cmdf1.insert(1, '-a')
        cmdf1.insert(1, adapter2)
        cmdf1.insert(1, '-A')

    ## do modifications to read1 and write to tmp file
    LOGGER.debug(" ".join(cmdf1))
    #sys.exit()
    try:
        proc1 = sps.Popen(cmdf1, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
        res1 = proc1.communicate()[0]
    except KeyboardInterrupt:
        proc1.kill()
        LOGGER.info("this is where I want it to interrupt")
        raise KeyboardInterrupt()

    ## raise errors if found
    if proc1.returncode:
        raise IPyradWarningExit(" error [returncode={}]: {}\n{}"\
                                .format(proc1.returncode, " ".join(cmdf1), res1))

    LOGGER.debug("Exiting cutadaptit_pairs - {}".format(sname))
    ## return results string to be parsed outside of engine
    return res1
|
Applies trim & filters to pairs, including adapter detection. If we have
barcode information then we use it to trim reversecut+bcode+adapter from
reverse read, if not then we have to apply a more general cut to make sure
we remove the barcode, this uses wildcards and so will have more false
positives that trim a little extra from the ends of reads. Should we add
a warning about this when filter_adapters=2 and no barcodes?
|
def simplex_connect(self, solution_g):
    '''
    API:
        simplex_connect(self, solution_g)
    Description:
        At this point we assume that the solution does not have a cycle.
        We check if all the nodes are connected, if not we add an arc to
        solution_g that does not create a cycle and return True. Otherwise
        we do nothing and return False.
    Pre:
        (1) We assume there is no cycle in the solution.
    Input:
        solution_g: current spanning tree solution instance.
    Post:
        (1) solution_g is updated. An arc that does not create a cycle is
        added.
        (2) 'component' attribute of nodes are changed.
    Return:
        Returns True if an arc is added, returns False otherwise.
    '''
    nl = solution_g.get_node_list()
    current = nl[0]
    # Search from the first node; pred covers every node reachable from it.
    pred = solution_g.simplex_search(current, current)
    separated = list(pred.keys())
    for n in nl:
        # NOTE(review): 'component' is compared against the root node's
        # name — assumes simplex_search labels components by the name of
        # the search root; confirm.
        if solution_g.get_node(n).get_attr('component') != current:
            # find an arc from n to separated (try both orientations, since
            # edge_attr keys are ordered pairs)
            for m in separated:
                if (n,m) in self.edge_attr:
                    solution_g.add_edge(n,m)
                    return True
                elif (m,n) in self.edge_attr:
                    solution_g.add_edge(m,n)
                    return True
    return False
|
API:
simplex_connect(self, solution_g)
Description:
At this point we assume that the solution does not have a cycle.
We check if all the nodes are connected, if not we add an arc to
solution_g that does not create a cycle and return True. Otherwise
we do nothing and return False.
Pre:
(1) We assume there is no cycle in the solution.
Input:
solution_g: current spanning tree solution instance.
Post:
(1) solution_g is updated. An arc that does not create a cycle is
added.
(2) 'component' attribute of nodes are changed.
Return:
Returns True if an arc is added, returns False otherwise.
|
def get_revisions(page, page_num=1):
    """
    Returns paginated queryset of PageRevision instances for
    specified Page instance, excluding the current (latest) revision.

    :param page: the page instance.
    :param page_num: the pagination page number.
    :rtype: django.db.models.query.QuerySet.
    """
    revisions = page.revisions.order_by('-created_at')
    current = page.get_latest_revision()
    if current:
        # Bug fix: QuerySets are immutable — exclude() returns a NEW
        # queryset, so its result must be reassigned. The original call
        # discarded it, leaving the current revision in the listing.
        revisions = revisions.exclude(id=current.id)
    paginator = Paginator(revisions, 5)
    try:
        revisions = paginator.page(page_num)
    except PageNotAnInteger:
        # Non-integer page numbers fall back to the first page.
        revisions = paginator.page(1)
    except EmptyPage:
        # Out-of-range page numbers fall back to the last page.
        revisions = paginator.page(paginator.num_pages)
    return revisions
|
Returns paginated queryset of PageRevision instances for
specified Page instance.
:param page: the page instance.
:param page_num: the pagination page number.
:rtype: django.db.models.query.QuerySet.
|
def tag_add(self, item, tag):
    """
    Append ``tag`` to the tag tuple of ``item``.

    :param item: item identifier
    :type item: str
    :param tag: tag name
    :type tag: str
    """
    current = self.item(item, "tags")
    updated = current + (tag,)
    self.item(item, tags=updated)
|
Add tag to the tags of item.
:param item: item identifier
:type item: str
:param tag: tag name
:type tag: str
|
def save_params(self, fname):
    """Saves model parameters to file.

    Both argument and auxiliary parameters are copied to CPU context and
    stored under 'arg:'/'aux:'-prefixed keys.

    Parameters
    ----------
    fname : str
        Path to output param file.

    Examples
    --------
    >>> # An example of saving module parameters.
    >>> mod.save_params('myfile')
    """
    arg_params, aux_params = self.get_params()
    save_dict = {}
    for name, value in arg_params.items():
        save_dict['arg:%s' % name] = value.as_in_context(cpu())
    for name, value in aux_params.items():
        save_dict['aux:%s' % name] = value.as_in_context(cpu())
    ndarray.save(fname, save_dict)
|
Saves model parameters to file.
Parameters
----------
fname : str
Path to output param file.
Examples
--------
>>> # An example of saving module parameters.
>>> mod.save_params('myfile')
|
def DbGetProperty(self, argin):
    """ Get free object property

    :param argin: Str[0] = Object name
                  Str[1..n] = Property name(s)
    :type: tango.DevVarStringArray
    :return: flattened reply: object name, property number, then for each
             property its name, value count and value(s)
    :rtype: tango.DevVarStringArray """
    self._log.debug("In DbGetProperty()")
    object_name, property_names = argin[0], argin[1:]
    return self.db.get_property(object_name, property_names)
|
Get free object property
:param argin: Str[0] = Object name
Str[1] = Property name
Str[n] = Property name
:type: tango.DevVarStringArray
:return: Str[0] = Object name
Str[1] = Property number
Str[2] = Property name
Str[3] = Property value number (array case)
Str[4] = Property value 1
Str[n] = Property value n (array case)
Str[n + 1] = Property name
Str[n + 2] = Property value number (array case)
Str[n + 3] = Property value 1
Str[n + m] = Property value m
:rtype: tango.DevVarStringArray
|
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
"""
if self._context is None:
self._context = PhoneNumberContext(self._version, phone_number=self._solution['phone_number'], )
return self._context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: PhoneNumberContext for this PhoneNumberInstance
:rtype: twilio.rest.lookups.v1.phone_number.PhoneNumberContext
|
def _add_to_typedef(self, typedef_curr, line, lnum):
"""Add new fields to the current typedef."""
mtch = re.match(r'^(\S+):\s*(\S.*)$', line)
if mtch:
field_name = mtch.group(1)
field_value = mtch.group(2).split('!')[0].rstrip()
if field_name == "id":
self._chk_none(typedef_curr.id, lnum)
typedef_curr.id = field_value
elif field_name == "name":
self._chk_none(typedef_curr.name, lnum)
typedef_curr.name = field_value
elif field_name == "transitive_over":
typedef_curr.transitive_over.append(field_value)
elif field_name == "inverse_of":
self._chk_none(typedef_curr.inverse_of, lnum)
typedef_curr.inverse_of = field_value
# Note: there are other tags that aren't imported here.
else:
self._die("UNEXPECTED FIELD CONTENT: {L}\n".format(L=line), lnum)
|
Add new fields to the current typedef.
|
def rand_unicode(min_char=MIN_UNICHR, max_char=MAX_UNICHR, min_len=MIN_STRLEN,
                 max_len=MAX_STRLEN, **kwargs):
    '''For values in the unicode range, regardless of Python version.
    '''
    # syn.five.unichr papers over the py2/py3 chr difference.
    from syn.five import unichr
    raw = rand_str(min_char, max_char, min_len, max_len, unichr)
    return unicode(raw)
|
For values in the unicode range, regardless of Python version.
|
def create(cls, name, protocol_number, protocol_agent=None, comment=None):
    """
    Create the IP Service

    :param str name: name of ip-service
    :param int protocol_number: ip proto number for this service
    :param str,ProtocolAgent protocol_agent: optional protocol agent for
        this service
    :param str comment: optional comment
    :raises CreateElementFailed: failure creating element with reason
    :return: instance with meta
    :rtype: IPService
    """
    payload = {
        'name': name,
        'protocol_number': protocol_number,
        'protocol_agent_ref': element_resolver(protocol_agent) or None,
        'comment': comment,
    }
    return ElementCreator(cls, payload)
|
Create the IP Service
:param str name: name of ip-service
:param int protocol_number: ip proto number for this service
:param str,ProtocolAgent protocol_agent: optional protocol agent for
this service
:param str comment: optional comment
:raises CreateElementFailed: failure creating element with reason
:return: instance with meta
:rtype: IPService
|
def errprt(op, lenout, inlist):
    """
    Retrieve or set the list of error message items to be output when an
    error is detected.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/errprt_c.html

    :param op: The operation, "GET" or "SET".
    :type op: str
    :param lenout: Length of list for output.
    :type lenout: int
    :param inlist: Specification of error messages to be output.
    :type inlist: list of str.
    :return: A list of error message items.
    :rtype: list of str.
    """
    lenout = ctypes.c_int(lenout)
    op = stypes.stringToCharP(op)
    # inlist doubles as an in/out buffer: it is read on "SET" and
    # overwritten with the current list on "GET".
    inlist = ctypes.create_string_buffer(str.encode(inlist), lenout.value)
    inlistptr = ctypes.c_char_p(ctypes.addressof(inlist))
    # Bug fix: this wrapper previously called errdev_c (the error *device*
    # routine); errprt_c is the CSPICE routine that gets/sets the error
    # message *items*, matching this function's name and doc link.
    libspice.errprt_c(op, lenout, inlistptr)
    return stypes.toPythonString(inlistptr)
|
Retrieve or set the list of error message items to be output when an
error is detected.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/errprt_c.html
:param op: The operation, "GET" or "SET".
:type op: str
:param lenout: Length of list for output.
:type lenout: int
:param inlist: Specification of error messages to be output.
:type inlist: list of str.
:return: A list of error message items.
:rtype: list of str.
|
def refocus(self, distance, method="helmholtz", h5file=None, h5mode="a"):
    """Compute a numerically refocused QPImage

    Parameters
    ----------
    distance: float
        Focusing distance [m]
    method: str
        Refocusing method, one of ["helmholtz","fresnel"]
    h5file: str, h5py.Group, h5py.File, or None
        A path to an hdf5 data file where the QPImage is cached.
        If set to `None` (default), all data will be handled in
        memory using the "core" driver of the :mod:`h5py`'s
        :class:`h5py:File` class. If the file does not exist,
        it is created. If the file already exists, it is opened
        with the file mode defined by `hdf5_mode`. If this is
        an instance of h5py.Group or h5py.File, then this will
        be used to internally store all data.
    h5mode: str
        Valid file modes are (only applies if `h5file` is a path)

        - "r": Readonly, file must exist
        - "r+": Read/write, file must exist
        - "w": Create file, truncate if exists
        - "w-" or "x": Create file, fail if exists
        - "a": Read/write if exists, create otherwise (default)

    Returns
    -------
    qpi: qpimage.QPImage
        Refocused phase and amplitude data

    See Also
    --------
    :mod:`nrefocus`: library used for numerical focusing
    """
    # Propagate the complex field; distance and resolution are given to
    # nrefocus in pixel units, hence the divisions by "pixel size".
    field2 = nrefocus.refocus(field=self.field,
                              d=distance/self["pixel size"],
                              nm=self["medium index"],
                              res=self["wavelength"]/self["pixel size"],
                              method=method
                              )
    if "identifier" in self:
        ident = self["identifier"]
    else:
        ident = ""
    # NOTE(review): assumes self.meta returns a copy; if it is a live
    # reference, the assignment below also mutates this image's own
    # metadata — confirm.
    meta_data = self.meta
    # Tag the result, e.g. "<old-id>@h1.23400e-02m"
    # (method initial + refocusing distance in meters).
    meta_data["identifier"] = "{}@{}{:.5e}m".format(ident,
                                                    method[0],
                                                    distance)
    qpi2 = QPImage(data=field2,
                   which_data="field",
                   meta_data=meta_data,
                   h5file=h5file,
                   h5mode=h5mode)
    return qpi2
|
Compute a numerically refocused QPImage
Parameters
----------
distance: float
Focusing distance [m]
method: str
Refocusing method, one of ["helmholtz","fresnel"]
h5file: str, h5py.Group, h5py.File, or None
A path to an hdf5 data file where the QPImage is cached.
If set to `None` (default), all data will be handled in
memory using the "core" driver of the :mod:`h5py`'s
:class:`h5py:File` class. If the file does not exist,
it is created. If the file already exists, it is opened
with the file mode defined by `hdf5_mode`. If this is
an instance of h5py.Group or h5py.File, then this will
be used to internally store all data.
h5mode: str
Valid file modes are (only applies if `h5file` is a path)
- "r": Readonly, file must exist
- "r+": Read/write, file must exist
- "w": Create file, truncate if exists
- "w-" or "x": Create file, fail if exists
- "a": Read/write if exists, create otherwise (default)
Returns
-------
qpi: qpimage.QPImage
Refocused phase and amplitude data
See Also
--------
:mod:`nrefocus`: library used for numerical focusing
|
def setGroupIcon(cls, groupName, icon):
    """
    Sets the group icon for the wizard plugin to the given icon.

    :param groupName | <str>
           icon      | <str>
    """
    # Lazily create the class-level mapping on first use.
    if cls._groupIcons is None:
        cls._groupIcons = {}
    key = nativestring(groupName)
    cls._groupIcons[key] = icon
|
Sets the group icon for the wizard plugin to the inputted icon.
:param groupName | <str>
icon | <str>
|
def multivariate_neg_logposterior(self,beta):
    """ Returns negative log posterior, for a model with a covariance matrix

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent_variables

    Returns
    ----------
    Negative log posterior
    """
    # Start from the negative log-likelihood, then subtract log-priors.
    post = self.neg_loglik(beta)
    for k in range(0,self.z_no):
        if self.latent_variables.z_list[k].prior.covariance_prior is True:
            # A covariance prior is evaluated once on the full covariance
            # matrix built from beta, then the loop stops (note the break).
            # NOTE(review): latent variables after the first covariance
            # prior contribute no individual prior terms — presumably
            # intentional (the covariance prior covers them); confirm.
            post += -self.latent_variables.z_list[k].prior.logpdf(self.custom_covariance(beta))
            break
        else:
            # Scalar prior on the k-th untransformed latent variable.
            post += -self.latent_variables.z_list[k].prior.logpdf(beta[k])
    return post
|
Returns negative log posterior, for a model with a covariance matrix
Parameters
----------
beta : np.array
Contains untransformed starting values for latent_variables
Returns
----------
Negative log posterior
|
def set_owner(self):
    """Parse the <itunes:owner> tag and store the owner's name and email.

    Either attribute falls back to None when the tag (or the owner
    element itself) is missing.
    """
    owner = self.soup.find('itunes:owner')
    try:
        name_tag = owner.find('itunes:name')
        self.owner_name = name_tag.string
    except AttributeError:
        # owner or the name tag is absent
        self.owner_name = None
    try:
        email_tag = owner.find('itunes:email')
        self.owner_email = email_tag.string
    except AttributeError:
        # owner or the email tag is absent
        self.owner_email = None
|
Parses owner name and email then sets value
|
def inFocus(self):
    """Set GUI on-top flag"""
    # Preserve existing window flags and OR in the stay-on-top hint.
    flags = self.window.flags()
    flags |= QtCore.Qt.WindowStaysOnTopHint
    self.window.setFlags(flags)
|
Set GUI on-top flag
|
def import_event_definition_elements(diagram_graph, element, event_definitions):
    """
    Helper function that attaches event definition elements (which define
    special types of events) to the corresponding event node.

    :param diagram_graph: NetworkX graph representing a BPMN process diagram,
    :param element: object representing a BPMN XML event element,
    :param event_definitions: list of event definition tag names that belong
        to the given event.
    """
    element_id = element.getAttribute(consts.Consts.id)
    collected = []
    for definition_type in event_definitions:
        # Each descriptor records the definition's id and its type name.
        for definition_node in element.getElementsByTagNameNS("*", definition_type):
            collected.append({
                consts.Consts.id: definition_node.getAttribute(consts.Consts.id),
                consts.Consts.definition_type: definition_type,
            })
    diagram_graph.node[element_id][consts.Consts.event_definitions] = collected
|
Helper function that adds event definition elements (which define special types of events) to the corresponding events.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param element: object representing a BPMN XML event element,
:param event_definitions: list of event definitions, that belongs to given event.
|
def attach_to_fbo(self):
    """Attach the texture to a bound FBO object, for rendering to texture.

    Attaches mipmap level 0 of this texture (``self.id``) at
    ``self.attachment_point`` of the currently bound framebuffer.
    """
    # self.target0 is presumably the texture target (e.g. GL_TEXTURE_2D) — confirm.
    gl.glFramebufferTexture2DEXT(gl.GL_FRAMEBUFFER_EXT, self.attachment_point, self.target0, self.id, 0)
|
Attach the texture to a bound FBO object, for rendering to texture.
|
def prep_fastq_inputs(in_files, data):
    """Prepare bgzipped fastq inputs.

    Dispatches on input type: a single BAM or CRAM is converted to bgzipped
    fastq; one or two already-prepared gzip fastqs are symlinked; anything
    else is bgzipped in parallel. More than two files are first combined
    into read-pair groups.

    :param in_files: list of input file paths (fastq/bam/cram).
    :param data: sample dictionary providing "dirs", "config" and "rgnames".
    :return: whatever the selected helper produces (bgzipped fastq outputs).
    """
    if len(in_files) == 1 and _is_bam_input(in_files):
        out = _bgzip_from_bam(in_files[0], data["dirs"], data)
    elif len(in_files) == 1 and _is_cram_input(in_files):
        out = _bgzip_from_cram(in_files[0], data["dirs"], data)
    elif len(in_files) in [1, 2] and _ready_gzip_fastq(in_files, data):
        # Inputs are already in the required format; just link them in place.
        out = _symlink_in_files(in_files, data)
    else:
        if len(in_files) > 2:
            fpairs = fastq.combine_pairs(in_files)
            # All groups must be uniformly single-end or uniformly paired.
            pair_types = set([len(xs) for xs in fpairs])
            assert len(pair_types) == 1
            fpairs.sort(key=lambda x: os.path.basename(x[0]))
            # Regroup as [[all read-1 files], [all read-2 files]].
            organized = [[xs[0] for xs in fpairs]]
            if len(fpairs[0]) > 1:
                organized.append([xs[1] for xs in fpairs])
            in_files = organized
        # One local job per input group, splitting available cores evenly.
        parallel = {"type": "local", "num_jobs": len(in_files),
                    "cores_per_job": max(1, data["config"]["algorithm"]["num_cores"] // len(in_files))}
        inputs = [{"in_file": x, "read_num": i, "dirs": data["dirs"], "config": data["config"],
                   "is_cwl": "cwl_keys" in data,
                   "rgnames": data["rgnames"]}
                  for i, x in enumerate(in_files) if x]
        out = run_multicore(_bgzip_from_fastq_parallel, [[d] for d in inputs], data["config"], parallel)
    return out
|
Prepare bgzipped fastq inputs
|
def _bbox(nodes):
"""Get the bounding box for set of points.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
Returns:
Tuple[float, float, float, float]: The left, right,
bottom and top bounds for the box.
"""
left, bottom = np.min(nodes, axis=1)
right, top = np.max(nodes, axis=1)
return left, right, bottom, top
|
Get the bounding box for set of points.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
Args:
nodes (numpy.ndarray): A set of points.
Returns:
Tuple[float, float, float, float]: The left, right,
bottom and top bounds for the box.
|
def _create_related(self, obj, related, subfield_dict):
    """
    create DB objects related to a base object (recursively)

    obj: a base object to create related objects for
    related: dict mapping field names to lists of related objects
    subfield_dict: maps each field name to (Subtype, reverse_id_field,
        sub-subfield dict) — where to get the next layer of subfields
    """
    for field, items in related.items():
        subobjects = []
        all_subrelated = []
        Subtype, reverse_id_field, subsubdict = subfield_dict[field]
        for order, item in enumerate(items):
            # pull off 'subrelated' (things that are related to this obj)
            subrelated = {}
            for subfield in subsubdict:
                subrelated[subfield] = item.pop(subfield)
            if field in self.preserve_order:
                item['order'] = order
            # Link the child back to its parent row.
            item[reverse_id_field] = obj.id
            try:
                subobjects.append(Subtype(**item))
                all_subrelated.append(subrelated)
            except Exception as e:
                raise DataImportError('{} while importing {} as {}'.format(e, item, Subtype))
        # add all subobjects at once (really great for actions & votes)
        try:
            Subtype.objects.bulk_create(subobjects)
        except Exception as e:
            raise DataImportError('{} while importing {} as {}'.format(e, subobjects, Subtype))
        # after import the subobjects, import their subsubobjects
        for subobj, subrel in zip(subobjects, all_subrelated):
            self._create_related(subobj, subrel, subsubdict)
|
create DB objects related to a base object
obj: a base object to create related
related: dict mapping field names to lists of related objects
subfield_list: where to get the next layer of subfields
|
def get_dates_file(path):
    """Parse a dates file where each line holds "<date> <probability>".

    Returns a list of (converted_date, probability) tuples.
    """
    results = []
    with open(path) as handle:
        for raw_line in handle:
            fields = raw_line.split(" ")
            results.append((convert_time_string(fields[0]), float(fields[1])))
    return results
|
parse dates file of dates and probability of choosing
|
def where_equals(self, field_name, value, exact=False):
    """
    To get all the document that equal to the value in the given field_name

    @param str field_name: The field name in the index you want to query.
    @param value: The value will be the fields value you want to query
    @param bool exact: If True getting exact match of the query
    @return: self, so calls can be chained
    """
    if field_name is None:
        raise ValueError("None field_name is invalid")
    field_name = Query.escape_if_needed(field_name)
    self._add_operator_if_needed()
    token = "equals"
    # A pending negation (set by a prior call) flips this token and
    # is consumed here.
    if self.negate:
        self.negate = False
        token = "not_equals"
    self.last_equality = {field_name: value}
    # The raw value is registered as a query parameter; the token keeps
    # only the parameter's name.
    token = _Token(field_name=field_name, value=self.add_query_parameter(value), token=token, exact=exact)
    token.write = self.rql_where_write(token)
    self._where_tokens.append(token)
    return self
|
To get all the document that equal to the value in the given field_name
@param str field_name: The field name in the index you want to query.
@param value: The value will be the fields value you want to query
@param bool exact: If True getting exact match of the query
|
def delete_repository(self, namespace, repository):
    """Issue ``DELETE /v1/repositories/(namespace)/(repository)/``."""
    params = {'namespace': namespace, 'repository': repository}
    return self._http_call(self.REPO, delete, **params)
|
DELETE /v1/repositories/(namespace)/(repository)/
|
def image_predict(self, X):
    """
    Predict a class label for every pixel of every input image.

    :param X: Array of images to be classified.
    :type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands]
    :return: raster classification map
    :rtype: numpy array, [n_samples, n_pixels_y, n_pixels_x]
    """
    flat_pixels = self.extract_pixels(X)
    flat_predictions = self.classifier.predict(flat_pixels)
    n_images, height, width = X.shape[0], X.shape[1], X.shape[2]
    # Fold the per-pixel predictions back into image shape.
    return flat_predictions.reshape(n_images, height, width)
|
Predicts class label for the entire image.
:param X: Array of images to be classified.
:type X: numpy array, shape = [n_images, n_pixels_y, n_pixels_x, n_bands]
:return: raster classification map
:rtype: numpy array, [n_samples, n_pixels_y, n_pixels_x]
|
def get_next_redirect_url(request, redirect_field_name="next"):
    """
    Return the explicitly requested redirect URL, or None.

    The URL is read from the request parameter named by
    ``redirect_field_name`` and discarded unless the adapter
    considers it safe.
    """
    candidate = get_request_param(request, redirect_field_name)
    if get_adapter(request).is_safe_url(candidate):
        return candidate
    return None
|
Returns the next URL to redirect to, if it was explicitly passed
via the request.
|
def load_servers_from_env(self, filter=None, dynamic=None):
    '''Load the name servers environment variable and parse each server in
    the list.

    @param filter Restrict the parsed objects to only those in this
                  path. For example, setting filter to [['/',
                  'localhost', 'host.cxt', 'comp1.rtc']] will
                  prevent 'comp2.rtc' in the same naming context
                  from being parsed. Defaults to no filtering.
    @param dynamic Override the tree-wide dynamic setting. If not provided,
                   the value given when the tree was created will be used.
    '''
    # The previous default of [] was a shared mutable default argument;
    # normalise None to a fresh empty list here instead.
    if filter is None:
        filter = []
    if dynamic is None:
        dynamic = self._dynamic
    if NAMESERVERS_ENV_VAR in os.environ:
        # Drop empty entries produced by stray ';' separators.
        servers = [s for s in os.environ[NAMESERVERS_ENV_VAR].split(';')
                   if s]
        self._parse_name_servers(servers, filter, dynamic)
|
Load the name servers environment variable and parse each server in
the list.
@param filter Restrict the parsed objects to only those in this
path. For example, setting filter to [['/',
'localhost', 'host.cxt', 'comp1.rtc']] will
prevent 'comp2.rtc' in the same naming context
from being parsed.
@param dynamic Override the tree-wide dynamic setting. If not provided,
the value given when the tree was created will be used.
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'entities') and self.entities is not None:
_dict['entities'] = [x._to_dict() for x in self.entities]
return _dict
|
Return a json dictionary representing this model.
|
def diamond_search_output_basename(self, out_path):
    '''Path prefix for the diamond search output (diamond itself appends the .daa part).'''
    filename = "%s_diamond_search" % self.basename
    return os.path.join(self.outdir, out_path, filename)
|
Does not include the .daa part that diamond creates
|
def find():
    """Locate the configuration file, if any.

    The config directory is resolved in order of preference:

    1. the directory named inside a ``.configconfig`` file in the CWD,
    2. a ``config/`` sub-directory of the CWD,
    3. the CWD itself.

    Within that directory the first existing file among archan.yml,
    archan.yaml, .archan.yml and .archan.yaml is returned.

    :return: the path of the config file found, or None.
    """
    names = ('archan.yml', 'archan.yaml', '.archan.yml', '.archan.yaml')
    current_dir = os.getcwd()
    configconfig_file = os.path.join(current_dir, '.configconfig')
    default_config_dir = os.path.join(current_dir, 'config')
    if os.path.isfile(configconfig_file):
        logger.debug('Reading %s to get config folder path',
                     configconfig_file)
        with open(configconfig_file) as stream:
            # Strip whitespace from the file contents *before* joining.
            # Previously .strip() was applied to the joined path, which
            # left whitespace from the file in the middle of the path.
            config_dir = os.path.join(current_dir, stream.read().strip())
    elif os.path.isdir(default_config_dir):
        config_dir = default_config_dir
    else:
        config_dir = current_dir
    logger.debug('Config folder = %s', config_dir)
    for name in names:
        config_file = os.path.join(config_dir, name)
        logger.debug('Searching for config file at %s', config_file)
        if os.path.isfile(config_file):
            logger.debug('Found %s', config_file)
            return config_file
    logger.debug('No config file found')
    return None
|
Find the configuration file if any.
|
def _get_key_redis_key(bank, key):
    '''
    Return the Redis key given the bank name and the key name.
    '''
    opts = _get_redis_keys_opts()
    prefix = opts['key_prefix']
    separator = opts['separator']
    return '{0}{1}{2}/{3}'.format(prefix, separator, bank, key)
|
Return the Redis key given the bank name and the key name.
|
def general_setting(key, default=None, expected_type=None, qsettings=None):
    """Read a value from QSettings.

    :param key: Unique key for the setting.
    :type key: basestring

    :param default: Value returned when the key is missing or on error.
    :type default: basestring, None, boolean, int, float

    :param expected_type: The type of object expected.
    :type expected_type: type

    :param qsettings: A custom QSettings instance; when omitted the
        default one is used.
    :type qsettings: qgis.PyQt.QtCore.QSettings

    :returns: The value stored under the key.
    :rtype: object

    Note:
        PyQt's QSettings.value, unlike the Qt C++ API, can be told the
        expected type.
        See: http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value
    """
    settings = qsettings if qsettings is not None else QSettings()
    try:
        if isinstance(expected_type, type):
            return settings.value(key, default, type=expected_type)
        return settings.value(key, default)
    except TypeError as e:
        LOGGER.debug('exception %s' % e)
        LOGGER.debug('%s %s %s' % (key, default, expected_type))
        # Fall back to an untyped read on conversion failure.
        return settings.value(key, default)
|
Helper function to get a value from settings.
:param key: Unique key for setting.
:type key: basestring
:param default: The default value in case of the key is not found or there
is an error.
:type default: basestring, None, boolean, int, float
:param expected_type: The type of object expected.
:type expected_type: type
:param qsettings: A custom QSettings to use. If it's not defined, it will
use the default one.
:type qsettings: qgis.PyQt.QtCore.QSettings
:returns: The value of the key in the setting.
:rtype: object
Note:
The API for QSettings to get a value is different for PyQt and Qt C++.
In PyQt we can specify the expected type.
See: http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value
|
def to_html(self,
            protocol='http',
            d3_url=None,
            d3_scale_chromatic_url=None,
            html_base=None):
    '''Render the scatterplot as a standalone HTML document.

    Parameters
    ----------
    protocol : str
        'http' or 'https' for including external urls
    d3_url : str
        None by default. The url (or path) of
        d3, to be inserted into <script src="..."/>
        By default, this is `DEFAULT_D3_URL` declared in `ScatterplotStructure`.
    d3_scale_chromatic_url : str
        None by default.
        URL of d3_scale_chromatic_url, to be inserted into <script src="..."/>
        By default, this is `DEFAULT_D3_SCALE_CHROMATIC` declared in `ScatterplotStructure`.
    html_base : str
        None by default. HTML of semiotic square to be inserted above plot.

    Returns
    -------
    str, the html file representation
    '''
    d3_url_struct = D3URLs(d3_url, d3_scale_chromatic_url)
    ExternalJSUtilts.ensure_valid_protocol(protocol)
    # All javascript (packaged libs, data, and the build call) is inlined.
    javascript_to_insert = '\n'.join([
        PackedDataUtils.full_content_of_javascript_files(),
        self.scatterplot_structure._visualization_data.to_javascript(),
        self.scatterplot_structure.get_js_to_call_build_scatterplot()
    ])
    html_template = (PackedDataUtils.full_content_of_default_html_template()
                     if html_base is None
                     else self._format_html_base(html_base))
    html_content = (
        html_template
        .replace('<!-- INSERT SCRIPT -->', javascript_to_insert, 1)
        .replace('<!--D3URL-->', d3_url_struct.get_d3_url(), 1)
        .replace('<!--D3SCALECHROMATIC-->',
                 d3_url_struct.get_d3_scale_chromatic_url())
    )
    # No extra libraries are currently injected even when the save-SVG
    # button is enabled; the attribute is still read to keep behavior
    # identical (it would raise if missing).
    extra_libs = ''
    if self.scatterplot_structure._save_svg_button:
        extra_libs = ''
    html_content = (html_content
                    .replace('<!-- EXTRA LIBS -->', extra_libs, 1)
                    .replace('http://', protocol + '://'))
    return html_content
|
Parameters
----------
protocol : str
'http' or 'https' for including external urls
d3_url, str
None by default. The url (or path) of
d3, to be inserted into <script src="..."/>
By default, this is `DEFAULT_D3_URL` declared in `ScatterplotStructure`.
d3_scale_chromatic_url : str
None by default.
URL of d3_scale_chromatic_url, to be inserted into <script src="..."/>
By default, this is `DEFAULT_D3_SCALE_CHROMATIC` declared in `ScatterplotStructure`.
html_base : str
None by default. HTML of semiotic square to be inserted above plot.
Returns
-------
str, the html file representation
|
def _compute_and_transfer_to_final_run(self, process_name, start_timeperiod, end_timeperiod, job_record):
    """Compute a new unit_of_work and move the job into STATE_FINAL_RUN.

    Shares the _fuzzy_ DuplicateKeyError handling used by
    _compute_and_transfer_to_progress.
    """
    source = context.process_context[process_name].source
    # NOTE(review): the highest key feeds the start id and the lowest the
    # end id -- looks inverted; confirm against the datastore contract.
    first_id = self.ds.highest_primary_key(source, start_timeperiod, end_timeperiod)
    last_id = self.ds.lowest_primary_key(source, start_timeperiod, end_timeperiod)
    uow, transfer_to_final = self.insert_and_publish_uow(job_record, first_id, last_id)
    self.update_job(job_record, uow, job.STATE_FINAL_RUN)
    if transfer_to_final:
        self._process_state_final_run(job_record)
|
method computes new unit_of_work and transfers the job to STATE_FINAL_RUN
it also shares _fuzzy_ DuplicateKeyError logic from _compute_and_transfer_to_progress method
|
def extract_subnetworks(
    partition_file,
    network_file,
    output_dir,
    max_cores=DEFAULT_MAX_CORES,
    max_size_matrix=DEFAULT_MAX_SIZE_MATRIX,
    saturation_threshold=DEFAULT_SATURATION_THRESHOLD,
):
    """Extract bin subnetworks from the main network

    Identify bins, extract subnets, draws the adjacency matrices,
    saves it all in a specified output directory.

    Parameters
    ----------
    partition_file : file, str or pathlib.Path
        The file containing, for each chunk, the communities it was
        assigned to at each iteration.
    network_file : file, str or pathlib.Path
        The file containing the network in sparse (edge list) format
    output_dir : str or pathlib.Path
        The output directory to write the subnetworks into.
    max_cores : int, optional
        The maximum number of bins to extract. Default is 100.
    max_size_matrix : int, optional
        When rendering contact maps for each bin, the maximum size for the
        matrix. Default is 2000.
    saturation_threshold : float, optional
        When rendering contact maps for each bin, the percentile value over
        which the color map should be saturated. Default is 80.
    """
    logger.info("Loading partition...")
    # Column 1 of the partition file holds the community (bin) label
    # assigned to each chunk.
    data_chunks = np.loadtxt(partition_file, usecols=(1,), dtype=np.int32)
    logger.info("Loading network...")
    network = np.loadtxt(network_file, dtype=np.int32)
    cores = data_chunks
    # Relabel both edge endpoints with the bin id of the chunk they
    # belong to.
    core_network = np.copy(network)
    core_network[:, 0] = cores[network[:, 0]]
    core_network[:, 1] = cores[network[:, 1]]
    # Bin ids are taken to run up to the maximum label found.
    n = np.amax(cores) + 1
    def extract(network_to_keep, filename):
        # Save the selected edges, shifting node ids down by one.
        # NOTE(review): presumably converting 1-based ids to 0-based --
        # confirm against how the network file is generated.
        subnetwork = np.copy(network[network_to_keep])
        subnetwork[:, 0] -= 1
        subnetwork[:, 1] -= 1
        np.savetxt(filename, subnetwork, fmt="%i")
        return subnetwork
    def draw(subnetwork, filename):
        # Render the subnetwork as a binned, normalized contact map.
        try:
            # Numpy array format
            row = subnetwork[:, 0]
            col = subnetwork[:, 1]
            data = subnetwork[:, 2]
        except TypeError:
            # Scipy sparse format
            row = subnetwork.row
            col = subnetwork.col
            data = subnetwork.data
        # Dense ranks compact node ids into consecutive matrix indices;
        # ranking the concatenation keeps row/col index spaces consistent.
        row_indices = stats.rankdata(
            np.concatenate((row, col)), method="dense"
        )
        col_indices = stats.rankdata(
            np.concatenate((col, row)), method="dense"
        )
        # Duplicate the weights so the matrix is filled symmetrically.
        data = np.concatenate((data, data))
        unique_row = np.unique(row)
        unique_col = np.unique(col)
        size = len(np.unique(np.concatenate((unique_row, unique_col)))) + 1
        try:
            sparse_subnet = sparse.coo_matrix(
                (data, (row_indices, col_indices)), shape=(size, size)
            )
            # Subsample large matrices so they fit within max_size_matrix.
            binning_factor = (size // max_size_matrix) + 1
            binned_subnet = hcs.bin_sparse(
                sparse_subnet, subsampling_factor=binning_factor
            )
            dense_subnet = binned_subnet.todense()
            # Drop the diagonal before normalizing so self-contacts don't
            # dominate the color scale.
            diagonal = np.diag(np.diag(dense_subnet))
            normed_subnet = hcs.normalize_dense(dense_subnet - diagonal)
            vmax = np.percentile(normed_subnet, saturation_threshold)
            spaceless_pdf_plot_maker(normed_subnet, filename, vmax=vmax)
        except MemoryError:
            logger.warning(
                "Warning, couldn't save matrix due to memory issues"
            )
    def extract_and_draw(network_to_keep, filename_text, filename_image):
        subnetwork = extract(network_to_keep, filename=filename_text)
        draw(subnetwork, filename=filename_image)
    # Extract and draw subnetworks for chosen cores and draw 2D arrays
    global_network_indices_list = []
    for i in range(1, n):
        if i > max_cores:
            break
        network_to_keep_1 = core_network[:, 0] == i
        network_to_keep_2 = core_network[:, 1] == i
        # NOTE(review): `*` on boolean masks is a logical AND, so only
        # edges with *both* endpoints in bin i are kept -- confirm that an
        # OR (either endpoint) was not intended.
        network_to_keep = network_to_keep_1 * network_to_keep_2
        nonzero_indices, = np.nonzero(network_to_keep)
        global_network_indices_list += nonzero_indices.tolist()
        subnetwork_file = os.path.join(
            output_dir, "subnetwork_core_{}.dat".format(i)
        )
        image_name = os.path.join(output_dir, "core_{}.eps".format(i))
        extract_and_draw(
            network_to_keep=network_to_keep,
            filename_text=subnetwork_file,
            filename_image=image_name,
        )
|
Extract bin subnetworks from the main network
Identify bins, extract subnets, draws the adjacency matrices,
saves it all in a specified output directory.
Parameters
----------
partition_file : file, str or pathlib.Path
The file containing, for each chunk, the communities it was
assigned to at each iteration.
network_file : file, str or pathlib.Path
The file containing the network in sparse (edge list) format
output_dir : str or pathlib.Path
The output directory to write the subnetworks into.
max_cores : int, optional
The maximum number of bins to extract. Default is 100.
max_size_matrix : int, optional
When rendering contact maps for each bin, the maximum size for the
matrix. Default is 2000.
saturation_threshold : float, optional
When rendering contact maps for each bin, the percentile value over
which the color map should be saturated. Default is 80.
|
def prompt(msg, default=NO_DEFAULT, validate=None):
    """Repeatedly ask the user for input until it is acceptable.

    An empty response yields ``default`` (or re-prompts when no default
    was supplied); a non-empty response must pass ``validate`` if given.
    """
    while True:
        answer = input(msg + " ").strip()
        if answer:
            if validate is None or validate(answer):
                return answer
        elif default is not NO_DEFAULT:
            return default
|
Prompt user for input
|
def _get_client_fqdn(self, client_info_contents):
    """Extracts a GRR client's ID and FQDN from its client_info.yaml file.

    Args:
      client_info_contents: The contents of the client_info.yaml file.

    Returns:
      A (str, str) tuple representing client ID and client FQDN.
    """
    info = yaml.safe_load(client_info_contents)
    client_id = info['client_id'].split('/')[1]
    return client_id, info['system_info']['fqdn']
|
Extracts a GRR client's FQDN from its client_info.yaml file.
Args:
client_info_contents: The contents of the client_info.yaml file.
Returns:
A (str, str) tuple representing client ID and client FQDN.
|
def present(name,
            object_name,
            object_type,
            defprivileges=None,
            grant_option=None,
            prepend='public',
            maintenance_db=None,
            user=None,
            db_password=None,
            db_host=None,
            db_port=None,
            db_user=None):
    '''
    Grant the requested default privilege(s) on the specified object to a role

    name
        Name of the role to which default privileges should be granted

    object_name
        Name of the object on which the grant is to be performed.
        'ALL' may be used for objects of type 'table' or 'sequence'.

    object_type
        The object type, which can be one of the following:

        - table
        - sequence
        - schema
        - group
        - function

        View permissions should specify `object_type: table`.

    defprivileges
        List of default privileges to grant, from the list below:

        - INSERT
        - CREATE
        - TRUNCATE
        - CONNECT
        - TRIGGER
        - SELECT
        - USAGE
        - TEMPORARY
        - UPDATE
        - EXECUTE
        - REFERENCES
        - DELETE
        - ALL

        :note: defprivileges should not be set when granting group membership

    grant_option
        If grant_option is set to True, the recipient of the privilege can
        in turn grant it to others

    prepend
        Table and Sequence object types live under a schema so this should be
        provided if the object is not under the default `public` schema

    maintenance_db
        The name of the database in which the language is to be installed

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default
    '''
    ret = {
        'name': name,
        'changes': {},
        'result': True,
        'comment': 'The requested default privilege(s) are already set',
    }

    joined_defprivs = ','.join(defprivileges) if defprivileges else None
    call_kwargs = {
        'defprivileges': joined_defprivs,
        'grant_option': grant_option,
        'prepend': prepend,
        'maintenance_db': maintenance_db,
        'runas': user,
        'host': db_host,
        'user': db_user,
        'port': db_port,
        'password': db_password,
    }

    # Nothing to do when the privileges are already in place.
    if __salt__['postgres.has_default_privileges'](
            name, object_name, object_type, **call_kwargs):
        return ret

    # Group grants are reported by the group (object) name, not a list.
    _defprivs = object_name if object_type == 'group' else joined_defprivs

    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = ('The default privilege(s): {0} are'
                          ' set to be granted to {1}').format(_defprivs, name)
        return ret

    if __salt__['postgres.default_privileges_grant'](
            name, object_name, object_type, **call_kwargs):
        ret['comment'] = ('The default privilege(s): {0} have '
                          'been granted to {1}').format(_defprivs, name)
        ret['changes'][name] = 'Present'
    else:
        ret['comment'] = ('Failed to grant default privilege(s):'
                          ' {0} to {1}').format(_defprivs, name)
        ret['result'] = False

    return ret
|
Grant the requested privilege(s) on the specified object to a role
name
Name of the role to which privileges should be granted
object_name
Name of the object on which the grant is to be performed.
'ALL' may be used for objects of type 'table' or 'sequence'.
object_type
The object type, which can be one of the following:
- table
- sequence
- schema
- group
- function
View permissions should specify `object_type: table`.
privileges
List of privileges to grant, from the list below:
- INSERT
- CREATE
- TRUNCATE
- CONNECT
- TRIGGER
- SELECT
- USAGE
- TEMPORARY
- UPDATE
- EXECUTE
- REFERENCES
- DELETE
- ALL
:note: privileges should not be set when granting group membership
grant_option
If grant_option is set to True, the recipient of the privilege can
in turn grant it to others
prepend
Table and Sequence object types live under a schema so this should be
provided if the object is not under the default `public` schema
maintenance_db
The name of the database in which the language is to be installed
user
System user all operations should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
|
def evaluate(self, num_eval_batches=None):
    """Run one round of evaluation, return loss and accuracy.

    Restores the latest checkpoint, runs the metric-update ops over
    `num_eval_batches` batches (cached on the first call so later rounds
    reuse identical data), writes a summary, and returns the resolved
    metric values.

    :param num_eval_batches: number of batches to evaluate; falls back to
        ``self.num_eval_batches`` when not given.
    :returns: the evaluated ``metric_values`` tensors.
    """
    num_eval_batches = num_eval_batches or self.num_eval_batches
    # Build a fresh eval graph each round; variables are restored from
    # the checkpoint below rather than shared with the training graph.
    with tf.Graph().as_default() as graph:
        self.tensors = self.model.build_eval_graph(self.eval_data_paths,
                                                   self.batch_size)
        self.summary = tf.summary.merge_all()
        self.saver = tf.train.Saver()
    self.summary_writer = tf.summary.FileWriter(self.output_path)
    # summary_op/global_step are disabled so the Supervisor does not run
    # its own background services during evaluation.
    self.sv = tf.train.Supervisor(
        graph=graph,
        logdir=self.output_path,
        summary_op=None,
        global_step=None,
        saver=self.saver)
    last_checkpoint = tf.train.latest_checkpoint(self.checkpoint_path)
    with self.sv.managed_session(master='', start_standard_services=False) as session:
        self.sv.saver.restore(session, last_checkpoint)
        # Cache example batches once; subsequent evaluate() calls feed
        # the same cached batches for comparable metrics across rounds.
        if not self.batch_of_examples:
            self.sv.start_queue_runners(session)
            for i in range(num_eval_batches):
                self.batch_of_examples.append(session.run(self.tensors.examples))
        for i in range(num_eval_batches):
            session.run(self.tensors.metric_updates,
                        {self.tensors.examples: self.batch_of_examples[i]})
        metric_values = session.run(self.tensors.metric_values)
        global_step = tf.train.global_step(session, self.tensors.global_step)
        summary = session.run(self.summary)
        self.summary_writer.add_summary(summary, global_step)
        self.summary_writer.flush()
        return metric_values
|
Run one round of evaluation, return loss and accuracy.
|
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
    """
    Construct Panel from dict of DataFrame objects.

    Parameters
    ----------
    data : dict
        {field : DataFrame}
    intersect : boolean
        Intersect indexes of input DataFrames
    orient : {'items', 'minor'}, default 'items'
        The "orientation" of the data. If the keys of the passed dict
        should be the items of the result panel, pass 'items'
        (default). Otherwise if the columns of the values of the passed
        DataFrame objects should be the items (which in the case of
        mixed-dtype data you should do), instead pass 'minor'
    dtype : dtype, default None
        Data type to force, otherwise infer

    Returns
    -------
    Panel
    """
    from collections import defaultdict

    orient = orient.lower()
    if orient == 'minor':
        # Transpose the mapping: each DataFrame column becomes an item.
        transposed = defaultdict(OrderedDict)
        for col, frame in data.items():
            for item, series in frame.items():
                transposed[item][col] = series
        data = transposed
    elif orient != 'items':  # pragma: no cover
        raise ValueError('Orientation must be one of {items, minor}.')

    d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
    ks = list(d['data'].keys())
    # Only sort the item labels when the input has no defined ordering.
    if not isinstance(d['data'], OrderedDict):
        ks = sorted(ks)
    d[cls._info_axis_name] = Index(ks)
    return cls(**d)
|
Construct Panel from dict of DataFrame objects.
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
|
def enable_console_debug_logging():  # pragma no cover (Function useful only outside test environment)
    """
    This function sets up a very simple logging configuration (log everything
    on standard output) that is useful for troubleshooting.

    Safe to call more than once: a StreamHandler is only attached when none
    is present yet, so repeated calls do not duplicate log lines.
    """
    logger = logging.getLogger("github")
    logger.setLevel(logging.DEBUG)
    if not any(isinstance(h, logging.StreamHandler) for h in logger.handlers):
        logger.addHandler(logging.StreamHandler())
|
This function sets up a very simple logging configuration (log everything on standard output) that is useful for troubleshooting.
|
def add_parser_arguments(parser, args, group=None, prefix=DATA_PREFIX):
    """
    Populate parser arguments whose values can later be retrieved with
    `extract_arguments`.

    `args` is a dict mapping strings to dicts. The keys are used as keys
    in the returned data; the values are passed as kwargs to
    `parser.add_argument`. A special `arg` value, when present, is used
    as the argument name; otherwise one is derived from the key.
    If `group` is a string, it becomes a group header in help output.
    """
    target = parser.add_argument_group(group) if group else parser
    for arg, kwargs in iteritems(args):
        option = kwargs.pop('arg', arg.replace('_', '-'))
        kwargs.setdefault('metavar', arg.upper())
        # Prefix the destination so extract_arguments can find it later.
        kwargs['dest'] = prefix + kwargs.get('dest', arg)
        target.add_argument('--' + option, **kwargs)
|
Helper method that populates parser arguments. The argument values can
be later retrieved with `extract_arguments` method.
The `args` argument to this method should be a dict with strings as
keys and dicts as values. The keys will be used as keys in returned
data. Their values will be passed as kwargs to `parser.add_argument`.
There is special value `arg` that will be used as argument name if
present, otherwise a name will be generated based on the key.
If `group` is a string, it will be used as group header in help output.
|
def get_compiler(compiler, **compiler_attrs):
    """Return a compiler object, creating and customizing one if needed.

    `compiler` may be None or a compiler name (a new compiler is built),
    or an existing compiler object (used as-is). Any extra keyword
    arguments are set as attributes on the result.
    """
    if compiler is None or isinstance(compiler, str):
        cc = ccompiler.new_compiler(compiler=compiler, verbose=0)
        customize_compiler(cc)
        if cc.compiler_type == 'mingw32':
            customize_mingw(cc)
    else:
        cc = compiler

    customize_gcc(cc)
    for attr_name, attr_value in compiler_attrs.items():
        setattr(cc, attr_name, attr_value)
    return cc
|
get and customize a compiler
|
def _getPattern(self, ipattern, done=None):
    """Parses sort pattern.

    :ipattern: A pattern to parse. May be None (no pattern), True
        (default pattern), or a comma-separated string of tokens.
    :done: If :ipattern: refers to done|undone,
        use this to indicate proper state.

    :returns: A pattern suitable for Model.modify.
    """
    if ipattern is None:
        return None
    if ipattern is True:
        # Default patterns: a done-state triple when `done` is given,
        # otherwise sort on level 0.
        if done is not None:
            return ([(None, None, done)], {})
        # REMEMBER: This False is for sort reverse!
        return ([(0, False)], {})
    # '-' suffix on a token means reverse sort order.
    def _getReverse(pm):
        return pm == '-'
    # Parse a numeric level index, mapping bad input to a domain error.
    def _getIndex(k):
        try:
            return int(k)
        except ValueError:
            raise InvalidPatternError(k, "Invalid level number")
    # Parse a "field=value" (or bare "value") done-state pattern element.
    def _getDone(p):
        v = p.split('=')
        if len(v) == 2:
            try:
                return (Model.indexes[v[0]], v[1], done)
            except KeyError:
                raise InvalidPatternError(v[0], 'Invalid field name')
        return (None, v[0], done)
    ipattern1 = list()   # global (un-keyed) pattern entries
    ipattern2 = dict()   # per-level pattern entries, keyed by level index
    for s in ipattern.split(','):
        if done is not None:
            v = done
        else:
            v = _getReverse(s[-1])
        k = s.split(':')
        if len(k) == 1:
            # Token without a level prefix.
            if done is not None:
                ipattern1.append(_getDone(k[0]))
                continue
            ko = k[0][:-1]
            try:
                if len(k[0]) == 1:
                    k = 0
                else:
                    k = Model.indexes[ko]
            except KeyError:
                # Not a field name: interpret it as a level number; the
                # entry then goes into the per-level dict below.
                k = _getIndex(k[0][:-1])
            else:
                ipattern1.append((k, v))
                continue
            v = (0, v)
        elif len(k) == 2:
            # "level:field" token.
            try:
                if done is not None:
                    v = _getDone(k[1])
                else:
                    v = (Model.indexes[k[1][:-1]], v)
                k = _getIndex(k[0])
            except KeyError:
                raise InvalidPatternError(k[1][:-1], 'Invalid field name')
        else:
            raise InvalidPatternError(s, 'Unrecognized token in')
        ipattern2.setdefault(k, []).append(v)
    return (ipattern1, ipattern2)
|
Parses sort pattern.
:ipattern: A pattern to parse.
:done: If :ipattern: refers to done|undone,
use this to indicate proper state.
:returns: A pattern suitable for Model.modify.
|
def _find_listeners():
    """Yield the addresses of GPIB listeners found on the board.
    """
    for addr in range(31):
        try:
            if gpib.listener(BOARD, addr) and gpib.ask(BOARD, 1) != addr:
                yield addr
        except gpib.GpibError as err:
            logger.debug("GPIB error in _find_listeners(): %s", repr(err))
|
Find GPIB listeners.
|
def depth(args):
    """
    %prog depth DP.tsv

    Plot read depths across all TREDs.
    """
    import seaborn as sns

    p = OptionParser(depth.__doc__)
    opts, args, iopts = p.set_image_options(args, figsize="14x14")

    if len(args) != 1:
        sys.exit(not p.print_help())

    tsvfile, = args
    # One panel per read-depth category, laid out on a 2x2 grid.
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(ncols=2, nrows=2,
                                                 figsize=(iopts.w, iopts.h))
    plt.tight_layout(pad=6)

    data = pd.read_csv(tsvfile, sep="\t", low_memory=False)
    ids, treds = read_treds()
    for (dp, ax, title) in zip(("FDP", "PDP", "RDP", "PEDP"),
                               (ax1, ax2, ax3, ax4),
                               ("Spanning reads", "Partial reads",
                                "Repeat-only reads", "Paired-end reads")):
        logging.debug("Build {}".format(title))
        # Construct related data structure
        xd = []   # (tred, dp)
        mdp = []  # (tred, median_dp)
        for tred, motif in zip(treds["abbreviation"], treds["motif"]):
            if tred in ignore:
                logging.debug("Ignore {}".format(tred))
                continue
            # Abbreviate long motifs for tick labels.
            if len(motif) > 4:
                if "/" in motif:  # CTG/CAG
                    motif = motif.split("/")[0]
                else:
                    motif = motif[:4] + ".."
            xtred = "{} {}".format(tred, motif)
            # Negative depths mark missing values; drop them.
            md = [x for x in data[tred + '.' + dp] if x >= 0]

            subsample = 10000 if dp == "RDP" else 1000
            md = sample(md, subsample)
            pmd = [x for x in md if x > 0]
            median = np.median(pmd) if pmd else 0
            mdp.append((xtred, median))
            for d in md:
                xd.append((xtred, d))

        # Determine order
        mdp.sort(key=lambda x: x[1])
        order, mdp = zip(*mdp)

        # OK, now plot
        xt, xd = zip(*xd)
        sns.boxplot(xt, xd, ax=ax, order=order, fliersize=2)
        xticklabels = ax.get_xticklabels()
        ax.set_xticklabels(xticklabels, rotation=45, ha="right")
        ax.set_title("Number of {} per locus".format(title), size=18)
        ylim = 30 if dp == "RDP" else 100
        ax.set_ylim(0, ylim)
        yticklabels = [int(x) for x in ax.get_yticks()]
        ax.set_yticklabels(yticklabels, family='Helvetica', size=14)

    # Overlay A-D panel labels on a full-figure axis.
    root = fig.add_axes([0, 0, 1, 1])
    pad = .04
    panel_labels(root, ((pad, 1 - pad, "A"), (1 / 2. + pad / 2, 1 - pad, "B"),
                        (pad, .5 - pad / 2, "C"), (1 / 2. + pad / 2, .5 - pad / 2, "D")))
    normalize_axes(root)

    image_name = "depth." + iopts.format
    savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
%prog depth DP.tsv
Plot read depths across all TREDs.
|
def RFC3156_micalg_from_algo(hash_algo):
    """
    Converts a GPGME hash algorithm name to one conforming to RFC3156.

    GPGME returns hash algorithm names such as "SHA256", but RFC3156 says that
    programs need to use names such as "pgp-sha256" instead.

    :param str hash_algo: GPGME hash_algo
    :returns: the lowercase name of the algorithm with "pgp-" prepended
    :rtype: str
    :raises GPGProblem: if GPGME does not recognise the algorithm
    """
    # hash_algo will be something like SHA256, but we need pgp-sha256.
    algo = gpg.core.hash_algo_name(hash_algo)
    if algo is None:
        # Report the *input* value: `algo` is always None in this branch,
        # so the old message never named the offending algorithm.
        raise GPGProblem('Unknown hash algorithm {}'.format(hash_algo),
                         code=GPGCode.INVALID_HASH_ALGORITHM)
    return 'pgp-' + algo.lower()
|
Converts a GPGME hash algorithm name to one conforming to RFC3156.
GPGME returns hash algorithm names such as "SHA256", but RFC3156 says that
programs need to use names such as "pgp-sha256" instead.
:param str hash_algo: GPGME hash_algo
:returns: the lowercase name of the algorithm with "pgp-" prepended
:rtype: str
|
def erase_up(self):  # <ESC>[1J
    '''Erase the screen from the current line up to the top of the
    screen.'''
    self.erase_start_of_line()
    self.fill_region(self.cur_r - 1, 1, 1, self.cols)
|
Erases the screen from the current line up to the top of the
screen.
|
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
|
Rest helpers
|
def __update_action(self, revision):
    """Apply a revision's patch to its master document.

    :param dict revision: The revision dictionary
    :raises RevisionNotFoundException: when no master document was updated
    """
    patch = revision.get("patch")
    # The patch must not carry its own _id into the master document.
    if patch.get("_id"):
        del patch["_id"]
    storeable = self.__make_storeable_patch_patchable(patch)
    update_response = yield self.collection.patch(revision.get("master_id"), storeable)
    if update_response.get("n") == 0:
        raise RevisionNotFoundException()
|
Update a master document and revision history document
:param dict revision: The revision dictionary
|
def spline_fit_magseries(times, mags, errs, period,
                         knotfraction=0.01,
                         maxknots=30,
                         sigclip=30.0,
                         plotfit=False,
                         ignoreinitfail=False,
                         magsarefluxes=False,
                         verbose=True):
    '''This fits a univariate cubic spline to the phased light curve.
    This fit may be better than the Fourier fit for sharply variable objects,
    like EBs, so can be used to distinguish them from other types of variables.
    Parameters
    ----------
    times,mags,errs : np.array
        The input mag/flux time-series to fit a spline to.
    period : float
        The period to use for the spline fit.
    knotfraction : float
        The knot fraction is the number of internal knots to use for the
        spline. A value of 0.01 (or 1%) of the total number of non-nan
        observations appears to work quite well, without over-fitting. maxknots
        controls the maximum number of knots that will be allowed.
    maxknots : int
        The maximum number of knots that will be used even if `knotfraction`
        gives a value to use larger than `maxknots`. This helps dealing with
        over-fitting to short time-scale variations.
    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed using
        the number provided as the sigma-multiplier to cut out from the input
        time-series.
        If a list of two ints/floats is provided, the function will perform an
        'asymmetric' sigma-clip. The first element in this list is the sigma
        value to use for fainter flux/mag values; the second element in this
        list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly set.
        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.
    magsarefluxes : bool
        If True, will treat the input values of `mags` as fluxes for purposes of
        plotting the fit and sig-clipping.
    plotfit : str or False
        If this is a string, this function will make a plot for the fit to the
        mag/flux time-series and writes the plot to the path specified here.
    ignoreinitfail : bool
        If this is True, ignores the initial failure to find a set of optimized
        Fourier parameters using the global optimization function and proceeds
        to do a least-squares fit anyway.
    verbose : bool
        If True, will indicate progress and warn of any problems.
    Returns
    -------
    dict
        This function returns a dict containing the model fit parameters, the
        minimized chi-sq value and the reduced chi-sq value. The form of this
        dict is mostly standardized across all functions in this module::
        {
            'fittype':'spline',
            'fitinfo':{
                'nknots': the number of knots used for the fit
                'fitmags': the model fit mags,
                'fitepoch': the epoch of minimum light for the fit,
            },
            'fitchisq': the minimized value of the fit's chi-sq,
            'fitredchisq':the reduced chi-sq value,
            'fitplotfile': the output fit plot if fitplot is not None,
            'magseries':{
                'times':input times in phase order of the model,
                'phase':the phases of the model mags,
                'mags':input mags/fluxes in the phase order of the model,
                'errs':errs in the phase order of the model,
                'magsarefluxes':input value of magsarefluxes kwarg
            }
        }
    '''
    # this is required to fit the spline correctly
    if errs is None:
        errs = npfull_like(mags, 0.005)
    # sigclip the magnitude time series
    stimes, smags, serrs = sigclip_magseries(times, mags, errs,
                                             sigclip=sigclip,
                                             magsarefluxes=magsarefluxes)
    # get rid of zero errs (they would blow up the 1/err spline weights below)
    nzind = npnonzero(serrs)
    stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
    # phase the mag series
    phase, pmags, perrs, ptimes, mintime = (
        get_phased_quantities(stimes, smags, serrs, period)
    )
    # number of internal knots: knotfraction of the observations, capped at
    # maxknots (default 30)
    nobs = len(phase)
    nknots = int(npfloor(knotfraction*nobs))
    nknots = maxknots if nknots > maxknots else nknots
    # NOTE(review): knots are placed over the *unfiltered* phase array; the
    # strictly-increasing filter below runs afterwards -- confirm the knots
    # always fall inside the filtered domain required by LSQUnivariateSpline.
    splineknots = nplinspace(phase[0] + 0.01,
                             phase[-1] - 0.01,
                             num=nknots)
    # NOTE: newer scipy needs x to be strictly increasing. this means we should
    # filter out anything that doesn't have np.diff(phase) > 0.0
    # FIXME: this needs to be tested
    phase_diffs_ind = npdiff(phase) > 0.0
    incphase_ind = npconcatenate((nparray([True]), phase_diffs_ind))
    phase, pmags, perrs = (phase[incphase_ind],
                           pmags[incphase_ind],
                           perrs[incphase_ind])
    # generate and fit the spline (weights are inverse errors)
    spl = LSQUnivariateSpline(phase, pmags, t=splineknots, w=1.0/perrs)
    # calculate the spline fit to the actual phases, the chisq and red-chisq
    fitmags = spl(phase)
    fitchisq = npsum(
        ((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
    )
    fitredchisq = fitchisq/(len(pmags) - nknots - 1)
    if verbose:
        LOGINFO(
            'spline fit done. nknots = %s, '
            'chisq = %.5f, reduced chisq = %.5f' %
            (nknots, fitchisq, fitredchisq)
        )
    # figure out the time of light curve minimum (i.e. the fit epoch)
    # this is when the fit mag is maximum (i.e. the faintest)
    # or if magsarefluxes = True, then this is when fit flux is minimum
    if not magsarefluxes:
        fitmagminind = npwhere(fitmags == npmax(fitmags))
    else:
        fitmagminind = npwhere(fitmags == npmin(fitmags))
    if len(fitmagminind[0]) > 1:
        # several equal extrema: keep only the first one as the epoch
        fitmagminind = (fitmagminind[0][0],)
    magseriesepoch = ptimes[fitmagminind]
    # assemble the returndict
    returndict = {
        'fittype':'spline',
        'fitinfo':{
            'nknots':nknots,
            'fitmags':fitmags,
            'fitepoch':magseriesepoch
        },
        'fitchisq':fitchisq,
        'fitredchisq':fitredchisq,
        'fitplotfile':None,
        'magseries':{
            'times':ptimes,
            'phase':phase,
            'mags':pmags,
            'errs':perrs,
            'magsarefluxes':magsarefluxes
        },
    }
    # make the fit plot if required
    if plotfit and isinstance(plotfit, str):
        make_fit_plot(phase, pmags, perrs, fitmags,
                      period, mintime, magseriesepoch,
                      plotfit,
                      magsarefluxes=magsarefluxes)
        returndict['fitplotfile'] = plotfit
    return returndict
|
This fits a univariate cubic spline to the phased light curve.
This fit may be better than the Fourier fit for sharply variable objects,
like EBs, so can be used to distinguish them from other types of variables.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a spline to.
period : float
The period to use for the spline fit.
knotfraction : float
The knot fraction is the number of internal knots to use for the
spline. A value of 0.01 (or 1%) of the total number of non-nan
observations appears to work quite well, without over-fitting. maxknots
controls the maximum number of knots that will be allowed.
maxknots : int
The maximum number of knots that will be used even if `knotfraction`
gives a value to use larger than `maxknots`. This helps dealing with
over-fitting to short time-scale variations.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
ignoreinitfail : bool
If this is True, ignores the initial failure to find a set of optimized
Fourier parameters using the global optimization function and proceeds
to do a least-squares fit anyway.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'spline',
'fitinfo':{
'nknots': the number of knots used for the fit
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
|
def cublasDspr2(handle, uplo, n, alpha, x, incx, y, incy, AP):
    """
    Rank-2 operation on a real symmetric matrix in packed storage.

    Thin ctypes wrapper around ``cublasDspr2_v2``; raises via
    ``cublasCheckStatus`` on a non-success status.
    """
    fill_mode = _CUBLAS_FILL_MODE[uplo]
    alpha_ref = ctypes.byref(ctypes.c_double(alpha))
    status = _libcublas.cublasDspr2_v2(handle, fill_mode, n, alpha_ref,
                                       int(x), incx, int(y), incy, int(AP))
    cublasCheckStatus(status)
|
Rank-2 operation on real symmetric-packed matrix.
|
def get_object(self, queryset=None):
    """
    Return the single object this view displays.

    Same as rest_framework.generics.GenericAPIView, but:
    - Failed assertions instead of deprecations
    """
    assert queryset is None, "Passing a queryset is disabled"
    # Base queryset with view-level filtering applied.
    candidates = self.filter_queryset(self.get_queryset())
    # Perform the lookup filtering.
    lookup_key = self.lookup_url_kwarg or self.lookup_field
    lookup_value = self.kwargs.get(lookup_key, None)
    assert lookup_value is not None, "Other lookup methods are disabled"
    obj = self.get_object_or_404(candidates, **{self.lookup_field: lookup_value})
    # May raise a permission denied
    self.check_object_permissions(self.request, obj)
    return obj
|
Return the object the view is displaying.
Same as rest_framework.generics.GenericAPIView, but:
- Failed assertions instead of deprecations
|
def dateint_to_datetime(dateint):
    """Convert a dateint into a naive datetime at the start of that day.

    Arguments
    ---------
    dateint : int
        An integer depicting a specific calendaric day; e.g. 20161225.

    Returns
    -------
    datetime.datetime
        A timezone-unaware datetime object representing the start of the
        given day (0 hours, 0 minutes, ...) in the local timezone.
    """
    digits = str(dateint)
    if len(digits) != 8:
        raise ValueError(
            'Dateints must have exactly 8 digits; the first four representing '
            'the year, the next two the months, and the last two the days.')
    year, month, day = decompose_dateint(dateint)
    return datetime(year=year, month=month, day=day)
|
Converts the given dateint to a datetime object, in local timezone.
Arguments
---------
dateint : int
    An integer object depicting a specific calendaric day; e.g. 20161225.
Returns
-------
datetime.datetime
A timezone-unaware datetime object representing the start of the given
day (so at 0 hours, 0 minutes, etc...) in the local timezone.
|
def master_ref(self):
    """
    Filters the current DataFrame to only contain those rows whose reference is master.

    >>> master_df = refs_df.master_ref

    :rtype: ReferencesDataFrame
    """
    # NOTE(review): a second, unreachable `return self.ref('refs/heads/master')`
    # used to follow this return and has been removed as dead code; confirm
    # getMaster() is the intended implementation.
    return ReferencesDataFrame(self._engine_dataframe.getMaster(),
                               self._session, self._implicits)
|
Filters the current DataFrame to only contain those rows whose reference is master.
>>> master_df = refs_df.master_ref
:rtype: ReferencesDataFrame
|
def _upgrade_schema(engine):
    """
    Ensure the database schema is up to date with the codebase.

    :param engine: SQLAlchemy engine of the underlying database.
    """
    inspector = reflection.Inspector.from_engine(engine)
    with engine.connect() as conn:
        # Upgrade 1. Add task_id column and index to tasks
        if 'task_id' not in [x['name'] for x in inspector.get_columns('tasks')]:
            logger.warning('Upgrading DbTaskHistory schema: Adding tasks.task_id')
            conn.execute('ALTER TABLE tasks ADD COLUMN task_id VARCHAR(200)')
            conn.execute('CREATE INDEX ix_task_id ON tasks (task_id)')
        # Upgrade 2. Alter value column to be TEXT, note that this is idempotent so no if-guard
        if 'mysql' in engine.dialect.name:
            conn.execute('ALTER TABLE task_parameters MODIFY COLUMN value TEXT')
        elif 'oracle' in engine.dialect.name:
            conn.execute('ALTER TABLE task_parameters MODIFY value TEXT')
        elif 'mssql' in engine.dialect.name:
            conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TEXT')
        elif 'postgresql' in engine.dialect.name:
            # Only alter when the column isn't already TEXT (the ALTER itself
            # is not idempotent on postgres).
            if str([x for x in inspector.get_columns('task_parameters')
                    if x['name'] == 'value'][0]['type']) != 'TEXT':
                conn.execute('ALTER TABLE task_parameters ALTER COLUMN value TYPE TEXT')
        elif 'sqlite' in engine.dialect.name:
            # SQLite does not support changing column types. A database file will need
            # to be used to pickup this migration change.
            for i in conn.execute('PRAGMA table_info(task_parameters);').fetchall():
                if i['name'] == 'value' and i['type'] != 'TEXT':
                    logger.warning(
                        'SQLite can not change column types. Please use a new database '
                        'to pickup column type changes.'
                    )
        else:
            logger.warning(
                # BUG FIX: message previously misspelled "SQLAlcheny".
                'SQLAlchemy dialect {} could not be migrated to the TEXT type'.format(
                    engine.dialect
                )
            )
|
Ensure the database schema is up to date with the codebase.
:param engine: SQLAlchemy engine of the underlying database.
|
def all_pairs_normalized_distances_reference(X):
    """
    Reference implementation of normalized all-pairs distance, used
    for testing the more efficient implementation above for equivalence.

    Entry (i, j) is the mean squared difference over the features observed
    in both rows; pairs sharing no observed feature keep +inf.
    """
    n_samples, n_cols = X.shape
    D = np.full((n_samples, n_samples), np.inf, dtype="float32")
    for i in range(n_samples):
        deltas = X - X[i, :].reshape((1, n_cols))
        n_missing_per_row = np.isnan(deltas).sum(axis=1)
        observed = n_missing_per_row < n_cols
        D[i, observed] = np.nanmean(deltas[observed, :] ** 2, axis=1)
    return D
|
Reference implementation of normalized all-pairs distance, used
for testing the more efficient implementation above for equivalence.
|
def do_lmfit(data, params, B=None, errs=None, dojac=True):
    """
    Fit the model to the data
    data may contain 'flagged' or 'masked' data with the value of np.NaN
    Parameters
    ----------
    data : 2d-array
        Image data
    params : lmfit.Parameters
        Initial model guess.
    B : 2d-array
        B matrix to be used in residual calculations.
        Default = None.
    errs : 1d-array
    dojac : bool
        If true then an analytic jacobian will be passed to the fitting routine.
    Returns
    -------
    result : ?
        lmfit.minimize result.
    params : lmfit.Params
        Fitted model.
    See Also
    --------
    :func:`AegeanTools.fitting.lmfit_jacobian`
    """
    # copy the params so as not to change the initial conditions
    # in case we want to use them elsewhere
    params = copy.deepcopy(params)
    data = np.array(data)
    # coordinates of the finite (non-NaN) pixels; only these are fitted
    mask = np.where(np.isfinite(data))
    def residual(params, **kwargs):
        """
        The residual function required by lmfit
        Parameters
        ----------
        params: lmfit.Params
            The parameters of the model being fit
        Returns
        -------
        result : numpy.ndarray
            Model - Data
        """
        f = ntwodgaussian_lmfit(params) # A function describing the model
        model = f(*mask) # The actual model
        if B is None:
            return model - data[mask]
        else:
            # project the residual through B before minimisation
            return (model - data[mask]).dot(B)
    # NOTE(review): residual() accepts **kwargs, so the x/y/B/errs passed via
    # `kws` below are swallowed; it closes over `mask` and `B` instead, and
    # `errs` is never used inside the residual -- confirm this is intended.
    if dojac:
        result = lmfit.minimize(residual, params, kws={'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs}, Dfun=lmfit_jacobian)
    else:
        result = lmfit.minimize(residual, params, kws={'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs})
    # Remake the residual so that it is once again (model - data)
    if B is not None:
        result.residual = result.residual.dot(inv(B))
    return result, params
|
Fit the model to the data
data may contain 'flagged' or 'masked' data with the value of np.NaN
Parameters
----------
data : 2d-array
Image data
params : lmfit.Parameters
Initial model guess.
B : 2d-array
B matrix to be used in residual calculations.
Default = None.
errs : 1d-array
dojac : bool
If true then an analytic jacobian will be passed to the fitting routine.
Returns
-------
result : ?
lmfit.minimize result.
params : lmfit.Params
Fitted model.
See Also
--------
:func:`AegeanTools.fitting.lmfit_jacobian`
|
def _get_containing_contigs(self, hits_dict):
    '''Given dictionary of nucmer hits (made by self._load_nucmer_hits()), returns a dictionary.
       key=contig name. Value = set of contigs that contain the key.'''
    containing = {}
    for qry_name, hits in hits_dict.items():
        containers = self._containing_contigs(hits)
        # only record queries that are contained in at least one contig
        if containers:
            containing[qry_name] = containers
    return containing
|
Given dictionary of nucmer hits (made by self._load_nucmer_hits()), returns a dictionary.
key=contig name. Value = set of contigs that contain the key.
|
def print_attrs(data_file, node_name='/', which='user', compress=False):
    """Print the HDF5 attributes for `node_name`.

    Parameters:
        data_file (pytables HDF5 file object): the data file to print
        node_name (string): name of the path inside the file to be printed.
            Can be either a group or a leaf-node. Default: '/', the root node.
        which (string): Valid values are 'user' for user-defined attributes,
            'sys' for pytables-specific attributes and 'all' to print both
            groups of attributes. Default 'user'.
        compress (bool): if True displays at most a line for each attribute.
            Default False.
    """
    node = data_file.get_node(node_name)
    print('List of attributes for:\n %s\n' % node)
    # BUG FIX: `which` was documented but previously ignored; forward it to
    # pytables' AttributeSet._f_list(), which accepts 'user', 'sys' or 'all'.
    for attr in node._v_attrs._f_list(which):
        print('\t%s' % attr)
        attr_content = repr(node._v_attrs[attr])
        if compress:
            # keep only the first line of the repr
            attr_content = attr_content.split('\n')[0]
        print("\t %s" % attr_content)
|
Print the HDF5 attributes for `node_name`.
Parameters:
data_file (pytables HDF5 file object): the data file to print
node_name (string): name of the path inside the file to be printed.
Can be either a group or a leaf-node. Default: '/', the root node.
which (string): Valid values are 'user' for user-defined attributes,
'sys' for pytables-specific attributes and 'all' to print both
groups of attributes. Default 'user'.
compress (bool): if True displays at most a line for each attribute.
Default False.
|
def EMAIL_REQUIRED(self):
    """
    The user is required to hand over an e-mail address when signing up
    """
    # imported lazily to avoid a circular import at module load time
    from allauth.account import app_settings as account_settings
    fallback = account_settings.EMAIL_REQUIRED
    return self._setting("EMAIL_REQUIRED", fallback)
|
The user is required to hand over an e-mail address when signing up
|
def plugins(self):
    """
    :returns: [(plugin_name, plugin_package, plugin_config), ...]
    :rtype: list of tuple
    """
    # lazily computed and cached on first access
    if not self._plugins:
        collected = []
        for name, cfg in self.validated.items():
            # skip base-schema sections and disabled plugins
            if name in self.base_schema.keys() or not cfg['enabled']:
                continue
            collected.append((name, cfg['package'], cfg))
        self._plugins = collected
    return self._plugins
|
:returns: [(plugin_name, plugin_package, plugin_config), ...]
:rtype: list of tuple
|
def send_UDP_message(self, message):
    """Send UDP message.

    Returns 0 when tracking is enabled (even if sending failed),
    -1 when tracking is disabled.
    """
    if not self.tracking_enabled:
        return -1
    try:
        proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT,
                             self.sock_timeout, message)
        self.procs.append(proc)
    except Exception as e:
        # best-effort: tracking failures must never break the caller
        logger.debug("Usage tracking failed: {}".format(e))
    return 0
|
Send UDP message.
|
def render(self, dt):
    '''Draw all bodies in the world.'''
    for frozen_frame in self._frozen:
        for frozen_body in frozen_frame:
            self.draw_body(frozen_body)
    for live_body in self.world.bodies:
        self.draw_body(live_body)
    if hasattr(self.world, 'markers'):
        # draw line between anchor1 and anchor2 for marker joints.
        window.glColor4f(0.9, 0.1, 0.1, 0.9)
        window.glLineWidth(3)
        for joint in self.world.markers.joints.values():
            window.glBegin(window.GL_LINES)
            window.glVertex3f(*joint.getAnchor())
            window.glVertex3f(*joint.getAnchor2())
            window.glEnd()
|
Draw all bodies in the world.
|
def find_table_links(self):
    """
    When given a url, this function will find all the available table names
    for that EPA dataset.
    """
    page_source = urlopen(self.model_url).read()
    document = lh.fromstring(page_source)
    # each image-map area links to one table
    hrefs = [area.attrib['href'] for area in document.cssselect('map area')]
    return self._inception_table_links(hrefs)
|
When given a url, this function will find all the available table names
for that EPA dataset.
|
def ref(self, tickers, flds, ovrds=None):
    """
    Make a reference data request, get tickers and fields, return long
    pandas DataFrame with columns [ticker, field, value]

    Parameters
    ----------
    tickers: {list, string}
        String or list of strings corresponding to tickers
    flds: {list, string}
        String or list of strings corresponding to FLDS
    ovrds: list of tuples
        List of tuples where each tuple corresponds to the override
        field and value

    Example
    -------
    >>> import pdblp
    >>> con = pdblp.BCon()
    >>> con.start()
    >>> con.ref("CL1 Comdty", ["FUT_GEN_MONTH"])

    Notes
    -----
    This returns reference data which has singleton values. In raw format
    the messages passed back contain data of the form
    fieldData = {
        FUT_GEN_MONTH = "FGHJKMNQUVXZ"
    }
    """
    ovrds = ovrds or []
    logger = _get_logger(self.debug)
    # normalize single strings into one-element lists
    if type(tickers) is not list:
        tickers = [tickers]
    if type(flds) is not list:
        flds = [flds]
    request = self._create_req('ReferenceDataRequest', tickers, flds,
                               ovrds, [])
    logger.info('Sending Request:\n{}'.format(request))
    self._session.sendRequest(request, identity=self._identity)
    parsed = self._parse_ref(flds)
    frame = pd.DataFrame(parsed)
    frame.columns = ['ticker', 'field', 'value']
    return frame
|
Make a reference data request, get tickers and fields, return long
pandas DataFrame with columns [ticker, field, value]
Parameters
----------
tickers: {list, string}
String or list of strings corresponding to tickers
flds: {list, string}
String or list of strings corresponding to FLDS
ovrds: list of tuples
List of tuples where each tuple corresponds to the override
field and value
Example
-------
>>> import pdblp
>>> con = pdblp.BCon()
>>> con.start()
>>> con.ref("CL1 Comdty", ["FUT_GEN_MONTH"])
Notes
-----
This returns reference data which has singleton values. In raw format
the messages passed back contain data of the form
fieldData = {
FUT_GEN_MONTH = "FGHJKMNQUVXZ"
}
|
def ot_validate(nexson, **kwargs):
    """Returns three objects:
    an annotation dict (NexSON formatted),
    the validation_log object created when NexSON validation was performed, and
    the object of class NexSON which was created from nexson. This object may
    alias parts of the nexson dict that is passed in as an argument.
    Currently the only kwargs used is 'max_num_trees_per_study'
    """
    # stub function for hooking into NexSON validation
    codes_to_skip = [NexsonWarningCodes.UNVALIDATED_ANNOTATION]  # pylint: disable=E1101
    v_log, adaptor = validate_nexson(nexson, codes_to_skip, **kwargs)
    annotation = v_log.prepare_annotation(
        author_name='api.opentreeoflife.org/validate',
        description='Open Tree NexSON validation')
    return annotation, v_log, adaptor
|
Returns three objects:
an annotation dict (NexSON formatted),
the validation_log object created when NexSON validation was performed, and
the object of class NexSON which was created from nexson. This object may
alias parts of the nexson dict that is passed in as an argument.
Currently the only kwargs used is 'max_num_trees_per_study'
|
def save_file(self, filename, text):
    """Save the given text under the given condition filename and the
    current path.
    If the current directory is not defined explicitly, the directory
    name is constructed with the actual simulation end date. If
    such an directory does not exist, it is created immediately.
    """
    # Remember the current default directory name so it can be restored
    # in the finally-clause below, whatever happens.
    _defaultdir = self.DEFAULTDIR
    try:
        if not filename.endswith('.py'):
            filename += '.py'
        try:
            # Temporarily point DEFAULTDIR at an `init_<sim end date>`
            # name so `currentpath` (read below) resolves to the
            # date-based directory.
            # NOTE(review): assigning self.DEFAULTDIR may create an
            # instance attribute shadowing a class attribute -- confirm.
            self.DEFAULTDIR = (
                'init_' + hydpy.pub.timegrids.sim.lastdate.to_string('os'))
        except AttributeError:
            # No simulation timegrid available; keep the existing default.
            pass
        path = os.path.join(self.currentpath, filename)
        with open(path, 'w', encoding="utf-8") as file_:
            file_.write(text)
    except BaseException:
        objecttools.augment_excmessage(
            'While trying to write the conditions file `%s`'
            % filename)
    finally:
        self.DEFAULTDIR = _defaultdir
|
Save the given text under the given condition filename and the
current path.
If the current directory is not defined explicitly, the directory
name is constructed with the actual simulation end date. If
such an directory does not exist, it is created immediately.
|
def close(self):
    """
    Close and release the current usb device.

    :return: None
    """
    # dispose_resources() is the closest thing pyusb offers to a close();
    # calling it may not be absolutely necessary, but it is safe.
    if self._dev is None:
        return
    usb.util.dispose_resources(self._dev)
    self._dev = None
|
Close and release the current usb device.
:return: None
|
def save(self, savefile):
    """Do the TTS API request and write result to file.

    Args:
        savefile (string): The path and file name to save the ``mp3`` to.

    Raises:
        :class:`gTTSError`: When there's an error with the API request.
    """
    with open(str(savefile), 'wb') as fp:
        self.write_to_fp(fp)
    log.debug("Saved to %s", savefile)
|
Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
|
def get(self, path_or_index, default=None):
    """
    Get details about a given result

    :param path_or_index: The path (or index) of the result to fetch.
    :param default: If the given result does not exist, return this value
        instead
    :return: A tuple of `(error, value)`. If the entry does not exist
        then `(err, default)` is returned, where `err` is the actual error
        which occurred.
        You can use :meth:`couchbase.exceptions.CouchbaseError.rc_to_exctype`
        to convert the error code to a proper exception class
    :raise: :exc:`IndexError` or :exc:`KeyError` if `path_or_index`
        is not an initially requested path. This is a programming error
        as opposed to a constraint error where the path is not found.
    """
    err, value = self._resolve(path_or_index)
    if err:
        value = default
    return err, value
|
Get details about a given result
:param path_or_index: The path (or index) of the result to fetch.
:param default: If the given result does not exist, return this value
instead
:return: A tuple of `(error, value)`. If the entry does not exist
then `(err, default)` is returned, where `err` is the actual error
which occurred.
You can use :meth:`couchbase.exceptions.CouchbaseError.rc_to_exctype`
to convert the error code to a proper exception class
:raise: :exc:`IndexError` or :exc:`KeyError` if `path_or_index`
is not an initially requested path. This is a programming error
as opposed to a constraint error where the path is not found.
|
def analyze(fqdn, result, argl, argd):
    """Analyzes the result from calling the method with the specified FQDN.

    Args:
        fqdn (str): full-qualified name of the method that was called.
        result: result of calling the method with `fqdn`.
        argl (tuple): positional arguments passed to the method call.
        argd (dict): keyword arguments passed to the method call.
    """
    package = fqdn.split('.', 1)[0]
    # lazily load the analysis handlers for this package
    if package not in _methods:
        _load_methods(package)
    handlers = _methods[package]
    if handlers is not None and fqdn in handlers:
        return handlers[fqdn](fqdn, result, *argl, **argd)
|
Analyzes the result from calling the method with the specified FQDN.
Args:
fqdn (str): full-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call.
|
def expand_hostdef(self, hostdef):
    """
    Expand a host definition (e.g. "foo[001:010].bar.com") into separate
    hostnames. Supports zero-padding, numbered ranges and alphabetical
    ranges. Multiple patterns in a host definition are also supported.
    Returns a list of the fully expanded hostnames. Ports are also removed
    from hostnames as a bonus (e.g. "foo.bar.com:8022" -> "foo.bar.com")
    """
    try:
        pending = [hostdef]
        expanded = []
        # Expand one [start:end] pattern per pass; hosts that still contain
        # a pattern are pushed back onto the pending list.
        while pending:
            candidate = pending.pop(0)
            if '[' not in candidate:
                expanded.append(candidate)
                continue
            # E.g. foo[0:3].bar.com -> head="foo", pattern="0:3",
            # tail=".bar.com"
            head, remainder = candidate.split('[', 1)
            pattern, tail = remainder.split(']', 1)
            start, end = pattern.split(':')
            # A leading zero in the start bound requests zero-padding to
            # the start bound's width.
            pad_width = False
            if start.startswith('0') and len(start) > 0:
                pad_width = len(start)
            try:
                for number in range(int(start), int(end) + 1):
                    if pad_width:
                        label = str(number).zfill(pad_width)
                    else:
                        label = number
                    new_host = '{0}{1}{2}'.format(head, label, tail)
                    if '[' in new_host:
                        pending.append(new_host)
                    else:
                        expanded.append(new_host)
            except ValueError:
                # Non-numeric bounds: expand as a character range instead.
                for code in range(ord(start), ord(end) + 1):
                    new_host = '{0}{1}{2}'.format(head, chr(code), tail)
                    if '[' in new_host:
                        pending.append(new_host)
                    else:
                        expanded.append(new_host)
        # Strip port numbers off and return
        return [host_name.split(':')[0] for host_name in expanded]
    except Exception as e:
        self.log.warning("Couldn't parse host definition '{0}': {1}".format(hostdef, e))
        return []
|
Expand a host definition (e.g. "foo[001:010].bar.com") into separate
hostnames. Supports zero-padding, numbered ranges and alphabetical
ranges. Multiple patterns in a host definition are also supported.
Returns a list of the fully expanded hostnames. Ports are also removed
from hostnames as a bonus (e.g. "foo.bar.com:8022" -> "foo.bar.com")
|
def norm(self, order=2):
    """Find the vector norm, with the given order, of the values."""
    total = 0
    for val in abs(self).values():
        total += val ** order
    return total ** (1 / order)
|
Find the vector norm, with the given order, of the values
|
def add_pool(arg, opts, shell_opts):
    """ Add a pool.
    """
    pool = Pool()
    pool.name = opts.get('name')
    pool.description = opts.get('description')
    pool.default_type = opts.get('default-type')
    pool.ipv4_default_prefix_length = opts.get('ipv4_default_prefix_length')
    pool.ipv6_default_prefix_length = opts.get('ipv6_default_prefix_length')
    if 'tags' in opts:
        # tags arrive as a single backslash-escaped CSV string
        tag_names = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
        pool.tags = {}
        for tag_name in tag_names:
            tag = Tag()
            tag.name = tag_name
            pool.tags[tag_name] = tag
    for avp in opts.get('extra-attribute', []):
        try:
            key, value = avp.split('=', 1)
        except ValueError:
            print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr)
            return
        pool.avps[key] = value
    try:
        pool.save()
    except pynipap.NipapError as exc:
        print("Could not add pool to NIPAP: %s" % str(exc), file=sys.stderr)
        sys.exit(1)
    print("Pool '%s' created." % (pool.name))
|
Add a pool.
|
def cmd_center(self, args):
    '''control center of view'''
    if len(args) < 3:
        # usage hint when lat/lon are missing
        print("map center LAT LON")
        return
    latitude = float(args[1])
    longitude = float(args[2])
    self.map.set_center(latitude, longitude)
|
control center of view
|
def search(connect_spec, base, scope='subtree', filterstr='(objectClass=*)',
           attrlist=None, attrsonly=0):
    '''Search an LDAP database.

    :param connect_spec:
        See the documentation for the ``connect_spec`` parameter for
        :py:func:`connect`.
    :param base:
        Distinguished name of the entry at which to start the search.
    :param scope:
        One of the following:
        * ``'subtree'``
          Search the base and all of its descendants.
        * ``'base'``
          Search only the base itself.
        * ``'onelevel'``
          Search only the base's immediate children.
    :param filterstr:
        String representation of the filter to apply in the search.
    :param attrlist:
        Limit the returned attributes to those in the specified list.
        If ``None``, all attributes of each entry are returned.
    :param attrsonly:
        If non-zero, don't return any attribute values.
    :returns:
        a dict of results. The dict is empty if there are no results.
        The dict maps each returned entry's distinguished name to a
        dict that maps each of the matching attribute names to a list
        of its values.

    CLI example:

    .. code-block:: bash

        salt '*' ldap3.search "{
            'url': 'ldaps://ldap.example.com/',
            'bind': {
                'method': 'simple',
                'dn': 'cn=admin,dc=example,dc=com',
                'password': 'secret',
            },
        }" "base='dc=example,dc=com'"
    '''
    conn = connect(connect_spec)
    # Map 'subtree'/'base'/'onelevel' onto the ldap module's SCOPE_* constants.
    ldap_scope = getattr(ldap, 'SCOPE_' + scope.upper())
    try:
        entries = conn.c.search_s(base, ldap_scope, filterstr, attrlist, attrsonly)
    except ldap.NO_SUCH_OBJECT:
        # Missing base is treated as "no results", not an error.
        entries = []
    except ldap.LDAPError as e:
        _convert_exception(e)
    return dict(entries)
|
Search an LDAP database.
:param connect_spec:
See the documentation for the ``connect_spec`` parameter for
:py:func:`connect`.
:param base:
Distinguished name of the entry at which to start the search.
:param scope:
One of the following:
* ``'subtree'``
Search the base and all of its descendants.
* ``'base'``
Search only the base itself.
* ``'onelevel'``
Search only the base's immediate children.
:param filterstr:
String representation of the filter to apply in the search.
:param attrlist:
Limit the returned attributes to those in the specified list.
If ``None``, all attributes of each entry are returned.
:param attrsonly:
If non-zero, don't return any attribute values.
:returns:
a dict of results. The dict is empty if there are no results.
The dict maps each returned entry's distinguished name to a
dict that maps each of the matching attribute names to a list
of its values.
CLI example:
.. code-block:: bash
salt '*' ldap3.search "{
'url': 'ldaps://ldap.example.com/',
'bind': {
'method': 'simple',
'dn': 'cn=admin,dc=example,dc=com',
'password': 'secret',
},
}" "base='dc=example,dc=com'"
|
def get_active_sessions(self):
    """
    Retrieves the active, unexpired sessions.

    :Returns:
        A generator of :class:`~mwsessions.Session`
    """
    # Each entry is (last_timestamp, index, events); only the events are needed.
    for _timestamp, _index, events in self.recently_active:
        latest_event = events[-1]
        yield Session(latest_event.user, unpack_events(events))
|
Retrieves the active, unexpired sessions.
:Returns:
A generator of :class:`~mwsessions.Session`
|
def make_dict(name, words, *args, **kwargs):
    """
    make_dict(name, words, *args, **kwargs) -> mapping subclass

    Takes a sequence of words (or a pre-built Czech HashInfo) and returns a
    mapping subclass called `name` (used a dict) that employs the use of the
    minimal perfect hash.

    This mapping subclass has guaranteed O(1) worst-case lookups, additions,
    and deletions, however is slower than dict() in practice.

    >>> months = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
    >>> MyDict = make_dict('MyDict', months)
    >>> d = MyDict(dec=21, feb=None, may='hello')
    >>> d['jul'] = False
    >>> d
    MyDict([('feb', None), ('may', 'hello'), ('jul', False), ('dec', 21)])
    >>> del d['may']
    >>> del d['apr']
    Traceback (most recent call last):
    ...
    KeyError: 'apr'
    >>> len(d)
    3
    """
    builder = CzechHashBuilder(words, *args, **kwargs)
    # Give the generated class a docstring that records its provenance.
    class_doc = """
    Dictionary-like object that uses minimal perfect hashing, perserving
    original order. This class was generated by `%s.%s(%r, ...)`.
    """ % (__name__, make_dict.__name__, name)
    return create_dict_subclass(name, builder.hash_function, builder.words, class_doc)
|
make_dict(name, words, *args, **kwargs) -> mapping subclass
Takes a sequence of words (or a pre-built Czech HashInfo) and returns a
mapping subclass called `name` (used a dict) that employs the use of the
minimal perfect hash.
This mapping subclass has guaranteed O(1) worst-case lookups, additions,
and deletions, however is slower than dict() in practice.
>>> months = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
>>> MyDict = make_dict('MyDict', months)
>>> d = MyDict(dec=21, feb=None, may='hello')
>>> d['jul'] = False
>>> d
MyDict([('feb', None), ('may', 'hello'), ('jul', False), ('dec', 21)])
>>> del d['may']
>>> del d['apr']
Traceback (most recent call last):
...
KeyError: 'apr'
>>> len(d)
3
|
def parse_date(date, default=None):
    """ Parse a valid date """
    # An empty string falls back to the default, if one was supplied.
    if date == "":
        if default is not None:
            return default
        raise Exception("Unknown format for " + date)
    known_formats = (
        "%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M", "%Y-%m-%d %H", "%Y-%m-%d",
        "%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M", "%d/%m/%Y %H", "%d/%m/%Y",
    )
    # Try each accepted format in order; first successful parse wins.
    for fmt in known_formats:
        try:
            return datetime.strptime(date, fmt)
        except ValueError:
            continue
    raise Exception("Unknown format for " + date)
|
Parse a valid date
|
def find_by_id(self, attachment, params=None, **options):
    """Returns the full record for a single attachment.

    Parameters
    ----------
    attachment : {Id} Globally unique identifier for the attachment.
    [params] : {Object} Parameters for the request
    """
    # A mutable default argument ({}) is shared across calls; use None as the
    # default and build a fresh dict per call instead.
    if params is None:
        params = {}
    path = "/attachments/%s" % (attachment)
    return self.client.get(path, params, **options)
|
Returns the full record for a single attachment.
Parameters
----------
attachment : {Id} Globally unique identifier for the attachment.
[params] : {Object} Parameters for the request
|
def random_matrix(rows, cols, mean=0, std=1, sparsity=0, radius=0, diagonal=0, rng=None):
    '''Create a matrix of randomly-initialized weights.

    Parameters
    ----------
    rows : int
        Number of rows of the weight matrix -- equivalently, the number of
        "input" units that the weight matrix connects.
    cols : int
        Number of columns of the weight matrix -- equivalently, the number
        of "output" units that the weight matrix connects.
    mean : float, optional
        Draw initial weight values from a normal with this mean. Defaults to 0.
    std : float, optional
        Draw initial weight values from a normal with this standard deviation.
        Defaults to 1.
    sparsity : float in (0, 1), optional
        If given, ensure that the given fraction of the weight matrix is
        set to zero. Defaults to 0, meaning all weights are nonzero.
    radius : float, optional
        If given, rescale the initial weights to have this spectral radius.
        No scaling is performed by default.
    diagonal : float, optional
        If nonzero, create a matrix containing all zeros except for this value
        along the diagonal. If nonzero, other arguments (except for rows and
        cols) will be ignored.
    rng : :class:`numpy.random.RandomState` or int, optional
        A random number generator, or an integer seed for a random number
        generator. If not provided, the random number generator will be created
        with an automatically chosen seed.

    Returns
    -------
    matrix : numpy array
        An array containing random values. These often represent the weights
        connecting each "input" unit to each "output" unit in a layer.
    '''
    if rng is None or isinstance(rng, int):
        rng = np.random.RandomState(rng)
    weights = mean + std * rng.randn(rows, cols)
    if 0 < sparsity < 1:
        # Zero out roughly `sparsity` of the entries, but force the leading
        # square diagonal to stay nonzero.
        keep = rng.binomial(n=1, p=1 - sparsity, size=(rows, cols)).astype(bool)
        k = min(rows, cols)
        keep[:k, :k] |= np.eye(k).astype(bool)
        weights *= keep
    if radius > 0:
        # Rescale the singular values so the largest equals `radius`,
        # giving the matrix the requested spectral radius.
        u, s, vT = np.linalg.svd(weights, full_matrices=False)
        weights = u.dot(np.diag(radius * s / abs(s[0]))).dot(vT)
    if diagonal != 0:
        # Diagonal-only matrix; the random values above are discarded.
        weights = diagonal * np.eye(max(rows, cols))[:rows, :cols]
    return weights.astype(FLOAT)
|
Create a matrix of randomly-initialized weights.
Parameters
----------
rows : int
Number of rows of the weight matrix -- equivalently, the number of
"input" units that the weight matrix connects.
cols : int
Number of columns of the weight matrix -- equivalently, the number
of "output" units that the weight matrix connects.
mean : float, optional
Draw initial weight values from a normal with this mean. Defaults to 0.
std : float, optional
Draw initial weight values from a normal with this standard deviation.
Defaults to 1.
sparsity : float in (0, 1), optional
If given, ensure that the given fraction of the weight matrix is
set to zero. Defaults to 0, meaning all weights are nonzero.
radius : float, optional
If given, rescale the initial weights to have this spectral radius.
No scaling is performed by default.
diagonal : float, optional
If nonzero, create a matrix containing all zeros except for this value
along the diagonal. If nonzero, other arguments (except for rows and
cols) will be ignored.
rng : :class:`numpy.random.RandomState` or int, optional
A random number generator, or an integer seed for a random number
generator. If not provided, the random number generator will be created
with an automatically chosen seed.
Returns
-------
matrix : numpy array
An array containing random values. These often represent the weights
connecting each "input" unit to each "output" unit in a layer.
|
def open_config(self, type="shared"):
    """
    Opens the configuration of the currently connected device

    Args:
        :type: The type of configuration you want to open. Any string can be provided, however the standard supported options are: **exclusive**, **private**, and **shared**. The default mode is **shared**.

    Examples:

    .. code-block:: python

        #Open shared config
        from pyJunosManager import JunosDevice

        dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
        dev.open()
        dev.open_config()
        dev.close_config()
        dev.close()

        #Open private config
        from pyJunosManager import JunosDevice

        dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
        dev.open()
        dev.open_config("private")
        dev.close_config()
        dev.close()
    """
    # NOTE: the `type` parameter shadows the builtin, but it is part of the
    # public keyword interface, so the name is kept.
    try:
        # Attempt to open a configuration of the requested type.
        self.dev.rpc("<open-configuration><{0}/></open-configuration>".format(type))
    except Exception as err:
        # Best-effort: the original `print err` was Python-2-only syntax and a
        # SyntaxError under Python 3; report the failure without raising so
        # callers are not interrupted (preserving the original intent).
        print(err)
|
Opens the configuration of the currently connected device
Args:
:type: The type of configuration you want to open. Any string can be provided, however the standard supported options are: **exclusive**, **private**, and **shared**. The default mode is **shared**.
Examples:
.. code-block:: python
#Open shared config
from pyJunosManager import JunosDevice
dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
dev.open()
dev.open_config()
dev.close_config()
dev.close()
#Open private config
from pyJunosManager import JunosDevice
dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper")
dev.open()
dev.open_config("private")
dev.close_config()
dev.close()
|
def check_hash(self, checker, filename, tfp):
    """
    checker is a ContentChecker
    """
    checker.report(
        self.debug,
        "Validating %%s checksum for %s" % filename)
    if checker.is_valid():
        return
    # Checksum mismatch: discard the downloaded file before raising.
    tfp.close()
    os.unlink(filename)
    raise DistutilsError(
        "%s validation failed for %s; "
        "possible download problem?"
        % (checker.hash.name, os.path.basename(filename))
    )
|
checker is a ContentChecker
|
def param_extract(args, short_form, long_form, default=None):
    """
    Quick extraction of a parameter from the command line argument list.

    In some cases we need to parse a few arguments before the official
    arg-parser starts.

    Returns parameter value, or None if not present.
    """
    result = default
    for idx, token in enumerate(args):
        # Long form may be "--xyz=foo" or "--xyz foo"; splitting on '='
        # handles both shapes.
        pieces = token.split("=", 1)
        if pieces[0] not in (short_form, long_form):
            continue
        if len(pieces) == 2:
            result = pieces[1]
        elif idx + 1 < len(args) and not args[idx + 1].startswith("-"):
            result = args[idx + 1]
        else:
            result = ""  # flag present but no usable value followed it
        break
    return result
|
Quick extraction of a parameter from the command line argument list.
In some cases we need to parse a few arguments before the official
arg-parser starts.
Returns parameter value, or None if not present.
|
def extern_store_tuple(self, context_handle, vals_ptr, vals_len):
    """Given storage and an array of Handles, return a new Handle to represent the list."""
    context = self._ffi.from_handle(context_handle)
    raw_handles = self._ffi.unpack(vals_ptr, vals_len)
    values = tuple(context.from_value(handle[0]) for handle in raw_handles)
    return context.to_value(values)
|
Given storage and an array of Handles, return a new Handle to represent the list.
|
def followers(self):
    """
    :return: Iterator of :class:`stravalib.model.Athlete` followers objects for this athlete.
    """
    # Lazily fetch and cache the follower list on first access.
    if self._followers is not None:
        return self._followers
    self.assert_bind_client()
    if self.follower_count > 0:
        self._followers = self.bind_client.get_athlete_followers(self.id)
    else:
        # Known to have zero followers; skip the API round-trip.
        self._followers = []
    return self._followers
|
:return: Iterator of :class:`stravalib.model.Athlete` followers objects for this athlete.
|
def toDict(self):
    """
    Get information about this read in a dictionary.

    @return: A C{dict} with keys/values for the attributes of self.
    """
    # Zero-arg super() only exists on Python 3; fall back to the explicit
    # base-class call on Python 2.
    if six.PY3:
        result = super().toDict()
    else:
        result = AARead.toDict(self)
    result['start'] = self.start
    result['stop'] = self.stop
    result['openLeft'] = self.openLeft
    result['openRight'] = self.openRight
    return result
|
Get information about this read in a dictionary.
@return: A C{dict} with keys/values for the attributes of self.
|
def SetAndLoadTagFile(self, tagging_file_path):
    """Sets the tag file to be used by the plugin.

    Args:
      tagging_file_path (str): path of the tagging file.
    """
    tagging_definitions = tagging_file.TaggingFile(tagging_file_path)
    self._tagging_rules = tagging_definitions.GetEventTaggingRules()
|
Sets the tag file to be used by the plugin.
Args:
tagging_file_path (str): path of the tagging file.
|
def create_address(kwargs=None, call=None):
    '''
    Create a static address in a region.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f create_address gce name=my-ip region=us-central1 address=IP
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The create_address function must be called with -f or --function.'
        )

    if not kwargs or 'name' not in kwargs:
        log.error(
            'A name must be specified when creating an address.'
        )
        return False
    if 'region' not in kwargs:
        log.error(
            'A region must be specified for the address.'
        )
        return False

    addr_name = kwargs['name']
    target_region = kwargs['region']
    requested_ip = kwargs.get("address", None)
    # Expand the short region name in-place so the fired events carry it.
    kwargs['region'] = _expand_region(kwargs['region'])

    conn = get_conn()

    def _fire_address_event(description, tag):
        # Emit a salt cloud event for this address operation.
        __utils__['cloud.fire_event'](
            'event',
            description,
            tag,
            args=salt.utils.data.simple_types_filter(kwargs),
            sock_dir=__opts__['sock_dir'],
            transport=__opts__['transport']
        )

    _fire_address_event('create address', 'salt/cloud/address/creating')
    addy = conn.ex_create_address(addr_name, target_region, requested_ip)
    _fire_address_event('created address', 'salt/cloud/address/created')

    log.info('Created GCE Address %s', addr_name)

    return _expand_address(addy)
|
Create a static address in a region.
CLI Example:
.. code-block:: bash
salt-cloud -f create_address gce name=my-ip region=us-central1 address=IP
|
def draw_if_interactive():
    """
    Is called after every pylab drawing command
    """
    # Mark the active figure to be sent at the end of execution, and set the
    # _draw_called flag so a later flush_figures() call knows there is
    # something to send.
    fig = Gcf.get_active().canvas.figure

    # Some interactive matplotlib backends monkeypatch a .show() method onto
    # figure objects; mirror that here so user code expecting fig.show() to be
    # part of the figure API keeps working.
    # For further reference:
    # https://github.com/ipython/ipython/issues/1612
    # https://github.com/matplotlib/matplotlib/issues/835
    if not hasattr(fig, 'show'):
        fig.show = lambda *args: send_figure(fig)

    # Respect a manual ioff(): in non-interactive mode this must be a no-op,
    # otherwise we would produce duplicate plots for users who make separate
    # draw/show calls themselves.
    if not matplotlib.is_interactive():
        return

    # Keep each figure at most once in the draw list, moving the current one
    # to the end so it is drawn last by the next show() call.
    if fig in show._to_draw:
        show._to_draw.remove(fig)
    show._to_draw.append(fig)
    show._draw_called = True
|
Is called after every pylab drawing command
|
def get_order(self, order_id):
    '''Get an order'''
    path = '/orders/{}'.format(order_id)
    response = self.get(path)
    return Order(response)
|
Get an order
|
def raw(self, raw):
    """Sets the raw of this RuntimeRawExtension.

    Raw is the underlying serialization of this object.  # noqa: E501

    :param raw: The raw of this RuntimeRawExtension.  # noqa: E501
    :type: str
    :raises ValueError: if ``raw`` is ``None`` or does not match the
        base64 pattern.
    """
    if raw is None:
        raise ValueError("Invalid value for `raw`, must not be `None`")  # noqa: E501
    # `raw` is guaranteed non-None past the guard above, so the former
    # `raw is not None and` condition was dead code and has been removed.
    if not re.search(r'^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$', raw):  # noqa: E501
        raise ValueError(r"Invalid value for `raw`, must be a follow pattern or equal to `/^(?:[A-Za-z0-9+\/]{4})*(?:[A-Za-z0-9+\/]{2}==|[A-Za-z0-9+\/]{3}=)?$/`")  # noqa: E501

    self._raw = raw
|
Sets the raw of this RuntimeRawExtension.
Raw is the underlying serialization of this object. # noqa: E501
:param raw: The raw of this RuntimeRawExtension. # noqa: E501
:type: str
|
def get_system_data() -> typing.Union[None, dict]:
    """
    Returns information about the system in which Cauldron is running,
    or None if the information cannot be found.

    :return:
        Dictionary with a 'python' entry (version, executable, directory
        and site-packages paths) and a 'packages' entry listing data for
        each loaded module that resolves to a package.
    """
    site_packages = get_site_packages()
    prefixes = [('[SP]', path) for path in site_packages]
    prefixes.append(('[CORE]', sys.exec_prefix))

    # Snapshot sys.modules (it can mutate during iteration) and keep only
    # entries that resolve to package data.
    package_entries = []
    for module_name, module in list(sys.modules.items()):
        data = module_to_package_data(module_name, module, prefixes)
        if data is not None:
            package_entries.append(data)

    runtime_info = dict(
        version=list(sys.version_info),
        executable=simplify_path(sys.executable),
        directory=simplify_path(sys.exec_prefix),
        site_packages=[simplify_path(sp) for sp in site_packages]
    )

    return dict(python=runtime_info, packages=package_entries)
|
Returns information about the system in which Cauldron is running.
If the information cannot be found, None is returned instead.
:return:
Dictionary containing information about the Cauldron system, which
includes:
* name
* location
* version
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.