code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def group_by_count(iterable: List[Any], count: int, default_value: Any) -> List[List[Any]]:
"""
Takes a list and groups it into sublists of size ``count``, using ``default_value`` to pad the
list at the end if the list is not divisible by ``count``.
For example:
>>> group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 0)
[[1, 2, 3], [4, 5, 6], [7, 0, 0]]
This is a short method, but it's complicated and hard to remember as a one-liner, so we just
make a function out of it.
"""
return [list(l) for l in zip_longest(*[iter(iterable)] * count, fillvalue=default_value)] | Takes a list and groups it into sublists of size ``count``, using ``default_value`` to pad the
list at the end if the list is not divisable by ``count``.
For example:
>>> group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 0)
[[1, 2, 3], [4, 5, 6], [7, 0, 0]]
This is a short method, but it's complicated and hard to remember as a one-liner, so we just
make a function out of it. | Below is the instruction that describes the task:
### Input:
Takes a list and groups it into sublists of size ``count``, using ``default_value`` to pad the
list at the end if the list is not divisable by ``count``.
For example:
>>> group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 0)
[[1, 2, 3], [4, 5, 6], [7, 0, 0]]
This is a short method, but it's complicated and hard to remember as a one-liner, so we just
make a function out of it.
### Response:
def group_by_count(iterable: List[Any], count: int, default_value: Any) -> List[List[Any]]:
    """
    Split ``iterable`` into consecutive sublists of exactly ``count`` items,
    padding the final sublist with ``default_value`` when the input length is
    not an exact multiple of ``count``.
    For example:
    >>> group_by_count([1, 2, 3, 4, 5, 6, 7], 3, 0)
    [[1, 2, 3], [4, 5, 6], [7, 0, 0]]
    """
    # Pad up to the next multiple of ``count`` so every chunk is full-sized,
    # then slice the padded list into fixed-width windows.
    items = list(iterable)
    items.extend([default_value] * (-len(items) % count))
    return [items[start:start + count] for start in range(0, len(items), count)]
def _maybe_run_callbacks(self, latest_block):
""" Run the callbacks if there is at least one new block.
The callbacks are executed only if there is a new block, otherwise the
filters may try to poll for an inexisting block number and the Ethereum
client can return an JSON-RPC error.
"""
assert self.known_block_number is not None, 'known_block_number not set'
latest_block_number = latest_block['number']
missed_blocks = latest_block_number - self.known_block_number
if missed_blocks < 0:
log.critical(
'Block number decreased',
chain_id=self.chain_id,
known_block_number=self.known_block_number,
old_block_number=latest_block['number'],
old_gas_limit=latest_block['gasLimit'],
old_block_hash=to_hex(latest_block['hash']),
)
elif missed_blocks > 0:
log_details = dict(
known_block_number=self.known_block_number,
latest_block_number=latest_block_number,
latest_block_hash=to_hex(latest_block['hash']),
latest_block_gas_limit=latest_block['gasLimit'],
)
if missed_blocks > 1:
log_details['num_missed_blocks'] = missed_blocks - 1
log.debug(
'Received new block',
**log_details,
)
remove = list()
for callback in self.callbacks:
result = callback(latest_block)
if result is REMOVE_CALLBACK:
remove.append(callback)
for callback in remove:
self.callbacks.remove(callback)
self.known_block_number = latest_block_number | Run the callbacks if there is at least one new block.
The callbacks are executed only if there is a new block, otherwise the
filters may try to poll for an inexisting block number and the Ethereum
client can return a JSON-RPC error. | Below is the instruction that describes the task:
### Input:
Run the callbacks if there is at least one new block.
The callbacks are executed only if there is a new block, otherwise the
filters may try to poll for an inexisting block number and the Ethereum
client can return an JSON-RPC error.
### Response:
def _maybe_run_callbacks(self, latest_block):
    """Execute the registered callbacks when at least one new block arrived.

    Callbacks only run for genuinely new blocks: firing them otherwise could
    make the filters poll a nonexistent block number, which the Ethereum
    client answers with a JSON-RPC error.
    """
    assert self.known_block_number is not None, 'known_block_number not set'
    latest_block_number = latest_block['number']
    missed_blocks = latest_block_number - self.known_block_number

    if missed_blocks < 0:
        # Head went backwards (reorg / out-of-order notification): log loudly
        # but still record the new head below, as before.
        log.critical(
            'Block number decreased',
            chain_id=self.chain_id,
            known_block_number=self.known_block_number,
            old_block_number=latest_block['number'],
            old_gas_limit=latest_block['gasLimit'],
            old_block_hash=to_hex(latest_block['hash']),
        )
    elif missed_blocks > 0:
        log_details = {
            'known_block_number': self.known_block_number,
            'latest_block_number': latest_block_number,
            'latest_block_hash': to_hex(latest_block['hash']),
            'latest_block_gas_limit': latest_block['gasLimit'],
        }
        if missed_blocks > 1:
            # Only report a gap when we actually skipped intermediate blocks.
            log_details['num_missed_blocks'] = missed_blocks - 1
        log.debug('Received new block', **log_details)

        # Run every callback with the new head, collect those that asked to
        # be retired, and drop them from the shared list in place.
        finished = [
            callback
            for callback in self.callbacks
            if callback(latest_block) is REMOVE_CALLBACK
        ]
        for callback in finished:
            self.callbacks.remove(callback)

    self.known_block_number = latest_block_number
def write_classifier(self, clf):
""" Writes classifier object to pickle file """
with open(os.path.join(self.repopath, 'classifier.pkl'), 'w') as fp:
pickle.dump(clf, fp) | Writes classifier object to pickle file | Below is the the instruction that describes the task:
### Input:
Writes classifier object to pickle file
### Response:
def write_classifier(self, clf):
    """Serialize the classifier object ``clf`` to ``<repopath>/classifier.pkl``.

    The file must be opened in binary mode ('wb'): ``pickle`` writes bytes,
    so the original text-mode handle ('w') raised ``TypeError`` on dump.
    """
    with open(os.path.join(self.repopath, 'classifier.pkl'), 'wb') as fp:
        pickle.dump(clf, fp)
def _check_bam_contigs(in_bam, ref_file, config):
"""Ensure a pre-aligned BAM file matches the expected reference genome.
"""
# GATK allows chromosome M to be in multiple locations, skip checking it
allowed_outoforder = ["chrM", "MT"]
ref_contigs = [c.name for c in ref.file_contigs(ref_file, config)]
with pysam.Samfile(in_bam, "rb") as bamfile:
bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
extra_bcs = [x for x in bam_contigs if x not in ref_contigs]
extra_rcs = [x for x in ref_contigs if x not in bam_contigs]
problems = []
warnings = []
for bc, rc in zip_longest([x for x in bam_contigs if (x not in extra_bcs and
x not in allowed_outoforder)],
[x for x in ref_contigs if (x not in extra_rcs and
x not in allowed_outoforder)]):
if bc != rc:
if bc and rc:
problems.append("Reference mismatch. BAM: %s Reference: %s" % (bc, rc))
elif bc:
warnings.append("Extra BAM chromosomes: %s" % bc)
elif rc:
warnings.append("Extra reference chromosomes: %s" % rc)
for bc in extra_bcs:
warnings.append("Extra BAM chromosomes: %s" % bc)
for rc in extra_rcs:
warnings.append("Extra reference chromosomes: %s" % rc)
if problems:
raise ValueError("Unexpected order, name or contig mismatches between input BAM and reference file:\n%s\n"
"Setting `bam_clean: remove_extracontigs` in the configuration can often fix this issue."
% "\n".join(problems))
if warnings:
print("*** Potential problems in input BAM compared to reference:\n%s\n" %
"\n".join(warnings)) | Ensure a pre-aligned BAM file matches the expected reference genome. | Below is the the instruction that describes the task:
### Input:
Ensure a pre-aligned BAM file matches the expected reference genome.
### Response:
def _check_bam_contigs(in_bam, ref_file, config):
    """Ensure a pre-aligned BAM file matches the expected reference genome.

    Compares the contig (chromosome) names declared in the BAM header against
    the contigs of ``ref_file``.  Name-or-order mismatches between the shared
    contigs raise ``ValueError``; contigs present on only one side are merely
    reported as warnings on stdout.

    :param in_bam: path to the pre-aligned BAM file
    :param ref_file: path to the reference genome
    :param config: configuration forwarded to ``ref.file_contigs``
        -- NOTE(review): its expected shape is not visible here; confirm
    :raises ValueError: when shared contigs disagree in name or order
    """
    # GATK allows chromosome M to be in multiple locations, skip checking it
    allowed_outoforder = ["chrM", "MT"]
    ref_contigs = [c.name for c in ref.file_contigs(ref_file, config)]
    with pysam.Samfile(in_bam, "rb") as bamfile:
        bam_contigs = [c["SN"] for c in bamfile.header["SQ"]]
    # Contigs present on only one side are never fatal -- warned about below.
    extra_bcs = [x for x in bam_contigs if x not in ref_contigs]
    extra_rcs = [x for x in ref_contigs if x not in bam_contigs]
    problems = []
    warnings = []
    # Walk the two contig lists in lockstep, skipping one-sided extras and the
    # specially-allowed mitochondrial names; zip_longest pads the shorter list
    # with None so a length difference surfaces as a warning (elif branches).
    for bc, rc in zip_longest([x for x in bam_contigs if (x not in extra_bcs and
                                   x not in allowed_outoforder)],
                              [x for x in ref_contigs if (x not in extra_rcs and
                                   x not in allowed_outoforder)]):
        if bc != rc:
            if bc and rc:
                # Both sides have a contig at this position but names differ:
                # this is the fatal order/name mismatch case.
                problems.append("Reference mismatch. BAM: %s Reference: %s" % (bc, rc))
            elif bc:
                warnings.append("Extra BAM chromosomes: %s" % bc)
            elif rc:
                warnings.append("Extra reference chromosomes: %s" % rc)
    for bc in extra_bcs:
        warnings.append("Extra BAM chromosomes: %s" % bc)
    for rc in extra_rcs:
        warnings.append("Extra reference chromosomes: %s" % rc)
    if problems:
        raise ValueError("Unexpected order, name or contig mismatches between input BAM and reference file:\n%s\n"
                         "Setting `bam_clean: remove_extracontigs` in the configuration can often fix this issue."
                         % "\n".join(problems))
    if warnings:
        print("*** Potential problems in input BAM compared to reference:\n%s\n" %
              "\n".join(warnings)) |
def delete_cache_settings(self, service_id, version_number, name):
"""Delete a specific cache settings object."""
content = self._fetch("/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, name), method="DELETE")
return self._status(content) | Delete a specific cache settings object. | Below is the the instruction that describes the task:
### Input:
Delete a specific cache settings object.
### Response:
def delete_cache_settings(self, service_id, version_number, name):
    """Delete a specific cache settings object."""
    endpoint = "/service/%s/version/%d/cache_settings/%s" % (service_id, version_number, name)
    response = self._fetch(endpoint, method="DELETE")
    return self._status(response)
def is_dir(self, follow_symlinks=True):
"""
Return True if this entry is a directory or a symbolic link pointing to
a directory; return False if the entry is or points to any other kind
of file, or if it doesn’t exist anymore.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Not supported on cloud storage objects.
Returns:
bool: True if directory exists.
"""
try:
return (self._system.isdir(
path=self._path, client_kwargs=self._client_kwargs,
virtual_dir=False) or
# Some directories only exists virtually in object path and
# don't have headers.
bool(S_ISDIR(self.stat().st_mode)))
except ObjectPermissionError:
# The directory was listed, but unable to head it or access to its
# content
return True | Return True if this entry is a directory or a symbolic link pointing to
a directory; return False if the entry is or points to any other kind
of file, or if it doesn’t exist anymore.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Not supported on cloud storage objects.
Returns:
bool: True if directory exists. | Below is the the instruction that describes the task:
### Input:
Return True if this entry is a directory or a symbolic link pointing to
a directory; return False if the entry is or points to any other kind
of file, or if it doesn’t exist anymore.
The result is cached on the os.DirEntry object.
Args:
follow_symlinks (bool): Follow symlinks.
Not supported on cloud storage objects.
Returns:
bool: True if directory exists.
### Response:
def is_dir(self, follow_symlinks=True):
    """
    Return True if this entry is a directory or a symbolic link pointing to
    a directory; return False if the entry is or points to any other kind
    of file, or if it doesn't exist anymore.
    The result is cached on the os.DirEntry object.
    Args:
        follow_symlinks (bool): Follow symlinks.
            Not supported on cloud storage objects.
    Returns:
        bool: True if directory exists.
    """
    try:
        if self._system.isdir(
                path=self._path, client_kwargs=self._client_kwargs,
                virtual_dir=False):
            return True
        # Some directories only exist virtually in the object path and have
        # no headers of their own; fall back to the stat mode bits.
        return bool(S_ISDIR(self.stat().st_mode))
    except ObjectPermissionError:
        # The directory was listed, but we cannot head it or access its
        # content -- treat it as an existing directory.
        return True
def _set_other(self):
"""Sets other specific sections"""
# manage not setting if not mandatory for numpy
if self.dst.style['in'] == 'numpydoc':
if self.docs['in']['raw'] is not None:
self.docs['out']['post'] = self.dst.numpydoc.get_raw_not_managed(self.docs['in']['raw'])
elif 'post' not in self.docs['out'] or self.docs['out']['post'] is None:
self.docs['out']['post'] = '' | Sets other specific sections | Below is the the instruction that describes the task:
### Input:
Sets other specific sections
### Response:
def _set_other(self):
"""Sets other specific sections"""
# manage not setting if not mandatory for numpy
if self.dst.style['in'] == 'numpydoc':
if self.docs['in']['raw'] is not None:
self.docs['out']['post'] = self.dst.numpydoc.get_raw_not_managed(self.docs['in']['raw'])
elif 'post' not in self.docs['out'] or self.docs['out']['post'] is None:
self.docs['out']['post'] = '' |
def calculate_between_class_scatter_matrix(X, y):
"""Calculates the Between-Class Scatter matrix
Parameters:
-----------
X : array-like, shape (m, n) - the samples
y : array-like, shape (m, ) - the class labels
Returns:
--------
between_class_scatter_matrix : array-like, shape (n, n)
"""
mean_vectors = calculate_mean_vectors(X, y)
n_features = X.shape[1]
Sb = np.zeros((n_features, n_features))
m = np.mean(X, axis=0).reshape(n_features, 1)
for cl, m_i in zip(np.unique(y), mean_vectors):
v = m_i.reshape(n_features, 1) - m
Sb += X[y == cl, :].shape[0] * v @ v.T
return Sb | Calculates the Between-Class Scatter matrix
Parameters:
-----------
X : array-like, shape (m, n) - the samples
y : array-like, shape (m, ) - the class labels
Returns:
--------
between_class_scatter_matrix : array-like, shape (n, n) | Below is the the instruction that describes the task:
### Input:
Calculates the Between-Class Scatter matrix
Parameters:
-----------
X : array-like, shape (m, n) - the samples
y : array-like, shape (m, ) - the class labels
Returns:
--------
between_class_scatter_matrix : array-like, shape (n, n)
### Response:
def calculate_between_class_scatter_matrix(X, y):
    """Calculates the Between-Class Scatter matrix
    Parameters:
    -----------
    X : array-like, shape (m, n) - the samples
    y : array-like, shape (m, ) - the class labels
    Returns:
    --------
    between_class_scatter_matrix : array-like, shape (n, n)
    """
    class_means = calculate_mean_vectors(X, y)
    n_features = X.shape[1]
    overall_mean = np.mean(X, axis=0).reshape(n_features, 1)
    Sb = np.zeros((n_features, n_features))
    # Accumulate n_c * (mu_c - mu)(mu_c - mu)^T over every class c.
    for label, class_mean in zip(np.unique(y), class_means):
        n_samples = X[y == label, :].shape[0]
        diff = class_mean.reshape(n_features, 1) - overall_mean
        Sb += n_samples * (diff @ diff.T)
    return Sb
def get_blast2(pdb_id, chain_id='A', output_form='HTML'):
'''Alternative way to look up BLAST for a given PDB ID. This function is a wrapper
for get_raw_blast and parse_blast
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
chain_id : string
A single character designating the chain ID of interest
output_form : string
TXT, HTML, or XML formatting of the BLAST page
Returns
-------
out : 2-tuple
A tuple consisting of a list of PDB matches, and a list
of their alignment text files (unformatted)
Examples
--------
>>> blast_results = get_blast2('2F5N', chain_id='A', output_form='HTML')
>>> print('Total Results: ' + str(len(blast_results[0])) +'\n')
>>> print(blast_results[1][0])
Total Results: 84
<pre>
><a name="45354"></a>2F5P:3:A|pdbid|entity|chain(s)|sequence
Length = 274
Score = 545 bits (1404), Expect = e-155, Method: Composition-based stats.
Identities = 274/274 (100%), Positives = 274/274 (100%)
Query: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK
Sbjct: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
...
'''
raw_results = get_raw_blast(pdb_id, chain_id=chain_id, output_form=output_form)
out = parse_blast(raw_results)
return out | Alternative way to look up BLAST for a given PDB ID. This function is a wrapper
for get_raw_blast and parse_blast
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
chain_id : string
A single character designating the chain ID of interest
output_form : string
TXT, HTML, or XML formatting of the BLAST page
Returns
-------
out : 2-tuple
A tuple consisting of a list of PDB matches, and a list
of their alignment text files (unformatted)
Examples
--------
>>> blast_results = get_blast2('2F5N', chain_id='A', output_form='HTML')
>>> print('Total Results: ' + str(len(blast_results[0])) +'\n')
>>> print(blast_results[1][0])
Total Results: 84
<pre>
><a name="45354"></a>2F5P:3:A|pdbid|entity|chain(s)|sequence
Length = 274
Score = 545 bits (1404), Expect = e-155, Method: Composition-based stats.
Identities = 274/274 (100%), Positives = 274/274 (100%)
Query: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK
Sbjct: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
... | Below is the the instruction that describes the task:
### Input:
Alternative way to look up BLAST for a given PDB ID. This function is a wrapper
for get_raw_blast and parse_blast
Parameters
----------
pdb_id : string
A 4 character string giving a pdb entry of interest
chain_id : string
A single character designating the chain ID of interest
output_form : string
TXT, HTML, or XML formatting of the BLAST page
Returns
-------
out : 2-tuple
A tuple consisting of a list of PDB matches, and a list
of their alignment text files (unformatted)
Examples
--------
>>> blast_results = get_blast2('2F5N', chain_id='A', output_form='HTML')
>>> print('Total Results: ' + str(len(blast_results[0])) +'\n')
>>> print(blast_results[1][0])
Total Results: 84
<pre>
><a name="45354"></a>2F5P:3:A|pdbid|entity|chain(s)|sequence
Length = 274
Score = 545 bits (1404), Expect = e-155, Method: Composition-based stats.
Identities = 274/274 (100%), Positives = 274/274 (100%)
Query: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK
Sbjct: 1 MPELPEVETIRRTLLPLIVGKTIEDVRIFWPNIIRHPRDSEAFAARMIGQTVRGLERRGK 60
...
### Response:
def get_blast2(pdb_id, chain_id='A', output_form='HTML'):
    '''Fetch and parse BLAST results for a PDB entry in one call.

    Convenience wrapper equivalent to ``get_raw_blast`` followed by
    ``parse_blast``.

    Parameters
    ----------
    pdb_id : string
        A 4 character string giving a pdb entry of interest
    chain_id : string
        A single character designating the chain ID of interest
    output_form : string
        TXT, HTML, or XML formatting of the BLAST page

    Returns
    -------
    out : 2-tuple
        A tuple consisting of a list of PDB matches, and a list
        of their alignment text files (unformatted)

    Examples
    --------
    >>> blast_results = get_blast2('2F5N', chain_id='A', output_form='HTML')
    >>> print('Total Results: ' + str(len(blast_results[0])))
    Total Results: 84
    '''
    return parse_blast(get_raw_blast(pdb_id, chain_id=chain_id, output_form=output_form))
def setContextNode(self, node):
"""Set the current node of an xpathContext """
if node is None: node__o = None
else: node__o = node._o
libxml2mod.xmlXPathSetContextNode(self._o, node__o) | Set the current node of an xpathContext | Below is the the instruction that describes the task:
### Input:
Set the current node of an xpathContext
### Response:
def setContextNode(self, node):
    """Set the current node of an xpathContext """
    # Unwrap the Python binding object to its underlying C pointer (or None).
    node__o = None if node is None else node._o
    libxml2mod.xmlXPathSetContextNode(self._o, node__o)
def suggest_accumulation_rate(chron):
"""From core age-depth data, suggest mean accumulation rate (cm/y)
"""
# Follow's Bacon's method @ Bacon.R ln 30 - 44
# Suggested round vals.
sugg = np.tile([1, 2, 5], (4, 1)) * np.reshape(np.repeat([0.1, 1.0, 10, 100], 3), (4, 3))
# Get ballpark accumulation rates, uncalibrated dates.
ballpacc = stats.linregress(x=chron.depth, y=chron.age * 1.1).slope
ballpacc = np.abs(sugg - ballpacc)
sugg = sugg.flat[ballpacc.argmin()] # Suggest rounded acc.rate with lowest abs diff.
return sugg | From core age-depth data, suggest mean accumulation rate (cm/y) | Below is the the instruction that describes the task:
### Input:
From core age-depth data, suggest mean accumulation rate (cm/y)
### Response:
def suggest_accumulation_rate(chron):
    """From core age-depth data, suggest mean accumulation rate (cm/y)
    """
    # Follows Bacon's method @ Bacon.R ln 30 - 44.
    # Grid of "round" candidate rates: {1, 2, 5} x {0.1, 1, 10, 100}.
    candidates = np.outer([0.1, 1.0, 10, 100], [1, 2, 5])
    # Ballpark rate from a linear regression on the (uncalibrated) dates.
    ballpark = stats.linregress(x=chron.depth, y=chron.age * 1.1).slope
    # Return the rounded candidate with the smallest absolute difference.
    return candidates.flat[np.abs(candidates - ballpark).argmin()]
def _parse(partial_dt):
"""
parse a partial datetime object to a complete datetime object
"""
dt = None
try:
if isinstance(partial_dt, datetime):
dt = partial_dt
if isinstance(partial_dt, date):
dt = _combine_date_time(partial_dt, time(0, 0, 0))
if isinstance(partial_dt, time):
dt = _combine_date_time(date.today(), partial_dt)
if isinstance(partial_dt, (int, float)):
dt = datetime.fromtimestamp(partial_dt)
if isinstance(partial_dt, (str, bytes)):
dt = parser.parse(partial_dt, default=timezone.now())
if dt is not None and timezone.is_naive(dt):
dt = timezone.make_aware(dt)
return dt
except ValueError:
return None | parse a partial datetime object to a complete datetime object | Below is the the instruction that describes the task:
### Input:
parse a partial datetime object to a complete datetime object
### Response:
def _parse(partial_dt):
    """
    Parse a partial datetime-like value into a timezone-aware datetime.

    Accepts datetime, date, time, int/float timestamps, and str/bytes; returns
    None when the value cannot be parsed.

    Note: the branches must be mutually exclusive (elif). ``datetime`` is a
    subclass of ``date``, so the original independent ``if`` chain let a
    datetime fall through to the date branch, truncating it to midnight.
    """
    dt = None
    try:
        if isinstance(partial_dt, datetime):
            # Checked before date: datetime instances must keep their time.
            dt = partial_dt
        elif isinstance(partial_dt, date):
            dt = _combine_date_time(partial_dt, time(0, 0, 0))
        elif isinstance(partial_dt, time):
            dt = _combine_date_time(date.today(), partial_dt)
        elif isinstance(partial_dt, (int, float)):
            dt = datetime.fromtimestamp(partial_dt)
        elif isinstance(partial_dt, (str, bytes)):
            dt = parser.parse(partial_dt, default=timezone.now())
        if dt is not None and timezone.is_naive(dt):
            dt = timezone.make_aware(dt)
        return dt
    except ValueError:
        # Unparseable strings / out-of-range timestamps.
        return None
def advance(self, myDateTime):
"""
Advances to the next value and returns an appropriate value for the given
time.
:param myDateTime: (datetime) when to fetch the value for
:return: (float|int) value for given time
"""
if self.getTime() == myDateTime:
out = self.next()
# Sometimes, the stream has no value for this field and returns None, in
# this case we'll use the last value as well.
if out is None:
out = self.last()
else:
out = self.last()
# If there's no more data, we must fetch more
if len(self) is 0:
self._fetchNextData()
self._updateMinMax(out)
if isinstance(out, float):
self._dataType = "float"
# Convert to proper data type
if self._dataType is "float":
out = float(out)
else:
out = int(out)
return out | Advances to the next value and returns an appropriate value for the given
time.
:param myDateTime: (datetime) when to fetch the value for
:return: (float|int) value for given time | Below is the the instruction that describes the task:
### Input:
Advances to the next value and returns an appropriate value for the given
time.
:param myDateTime: (datetime) when to fetch the value for
:return: (float|int) value for given time
### Response:
def advance(self, myDateTime):
    """
    Advances to the next value and returns an appropriate value for the given
    time.
    :param myDateTime: (datetime) when to fetch the value for
    :return: (float|int) value for given time
    """
    if self.getTime() == myDateTime:
        out = self.next()
        # Sometimes the stream has no value for this field and returns None;
        # in that case fall back to the last value as well.
        if out is None:
            out = self.last()
    else:
        out = self.last()
    # If there's no more data, we must fetch more.
    # BUGFIX: the original used ``len(self) is 0`` and
    # ``self._dataType is "float"`` -- identity comparison against literals
    # only works by CPython interning accident; use equality instead.
    if len(self) == 0:
        self._fetchNextData()
    self._updateMinMax(out)
    if isinstance(out, float):
        self._dataType = "float"
    # Convert to the proper data type.
    if self._dataType == "float":
        out = float(out)
    else:
        out = int(out)
    return out
def set_pipeline(self, pipeline):
"""
Specify the pipeline. See get_pipeline_alternatives to see what are avaialble. Input should be a string.
"""
self.add_history(inspect.stack()[0][3], locals(), 1)
if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline):
print('Specified direvative directory not found.')
self.get_pipeline_alternatives()
else:
# Todo: perform check that pipeline is valid
self.pipeline = pipeline | Specify the pipeline. See get_pipeline_alternatives to see what are avaialble. Input should be a string. | Below is the the instruction that describes the task:
### Input:
Specify the pipeline. See get_pipeline_alternatives to see what are avaialble. Input should be a string.
### Response:
def set_pipeline(self, pipeline):
    """
    Specify the pipeline. See get_pipeline_alternatives to see what are available. Input should be a string.
    """
    # Record this call (caller name, arguments) in the object's history.
    self.add_history(inspect.stack()[0][3], locals(), 1)
    if not os.path.exists(self.BIDS_dir + '/derivatives/' + pipeline):
        # BUGFIX: corrected the misspelled user-facing message ("direvative").
        print('Specified derivative directory not found.')
        self.get_pipeline_alternatives()
    else:
        # Todo: perform check that pipeline is valid
        self.pipeline = pipeline
def wallet_export(self, wallet):
"""
Return a json representation of **wallet**
:param wallet: Wallet to export
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_export(wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F")
{
"0000000000000000000000000000000000000000000000000000000000000000": "0000000000000000000000000000000000000000000000000000000000000001"
}
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_export', payload)
return json.loads(resp['json']) | Return a json representation of **wallet**
:param wallet: Wallet to export
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_export(wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F")
{
"0000000000000000000000000000000000000000000000000000000000000000": "0000000000000000000000000000000000000000000000000000000000000001"
} | Below is the the instruction that describes the task:
### Input:
Return a json representation of **wallet**
:param wallet: Wallet to export
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_export(wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F")
{
"0000000000000000000000000000000000000000000000000000000000000000": "0000000000000000000000000000000000000000000000000000000000000001"
}
### Response:
def wallet_export(self, wallet):
    """
    Return a json representation of **wallet**
    :param wallet: Wallet to export
    :type wallet: str
    :raises: :py:exc:`nano.rpc.RPCException`
    >>> rpc.wallet_export(wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F")
    {
        "0000000000000000000000000000000000000000000000000000000000000000": "0000000000000000000000000000000000000000000000000000000000000001"
    }
    """
    payload = {"wallet": self._process_value(wallet, 'wallet')}
    response = self.call('wallet_export', payload)
    return json.loads(response['json'])
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True):
"""
Parses the given text and yields tokens which represent words within
the given text. Tokens are assumed to be divided by any form of
whitespace character.
"""
if ngrams is None:
ngrams = 1
text = re.sub(re.compile('\'s'), '', text) # Simple heuristic
text = re.sub(_re_punctuation, '', text)
matched_tokens = re.findall(_re_token, text.lower())
for tokens in get_ngrams(matched_tokens, ngrams):
for i in range(len(tokens)):
tokens[i] = tokens[i].strip(punctuation)
if len(tokens[i]) < min_length or tokens[i] in stopwords:
break
if ignore_numeric and isnumeric(tokens[i]):
break
else:
yield tuple(tokens) | Parses the given text and yields tokens which represent words within
the given text. Tokens are assumed to be divided by any form of
whitespace character. | Below is the the instruction that describes the task:
### Input:
Parses the given text and yields tokens which represent words within
the given text. Tokens are assumed to be divided by any form of
whitespace character.
### Response:
def word_tokenize(text, stopwords=_stopwords, ngrams=None, min_length=0, ignore_numeric=True):
    """
    Parses the given text and yields tokens which represent words within
    the given text. Tokens are assumed to be divided by any form of
    whitespace character.

    Args:
        text: the raw text to tokenize.
        stopwords: collection of words that disqualify an n-gram
            (defaults to the module-level ``_stopwords``).
        ngrams: size of the n-grams to yield; ``None`` means unigrams.
        min_length: n-grams containing a token shorter than this are skipped.
        ignore_numeric: when True, n-grams containing a numeric token are skipped.

    Yields:
        tuple: one tuple per accepted n-gram of cleaned, lower-cased tokens.
    """
    if ngrams is None:
        ngrams = 1
    text = re.sub(re.compile('\'s'), '', text) # Simple heuristic: drop possessive "'s"
    text = re.sub(_re_punctuation, '', text)
    matched_tokens = re.findall(_re_token, text.lower())
    for tokens in get_ngrams(matched_tokens, ngrams):
        # Clean every token in place; a ``break`` rejects the whole n-gram,
        # and the for/else only yields when no token triggered a break.
        for i in range(len(tokens)):
            tokens[i] = tokens[i].strip(punctuation)
            if len(tokens[i]) < min_length or tokens[i] in stopwords:
                break
            if ignore_numeric and isnumeric(tokens[i]):
                break
        else:
            yield tuple(tokens) |
def delete_compliance_task(self, id):
'''**Description**
Delete the compliance task with the given id
**Arguments**
- id: the id of the compliance task to delete
'''
res = requests.delete(self.url + '/api/complianceTasks/{}'.format(id), headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return False, self.lasterr
return True, None | **Description**
Delete the compliance task with the given id
**Arguments**
- id: the id of the compliance task to delete | Below is the the instruction that describes the task:
### Input:
**Description**
Delete the compliance task with the given id
**Arguments**
- id: the id of the compliance task to delete
### Response:
def delete_compliance_task(self, id):
    '''**Description**
        Delete the compliance task with the given id
    **Arguments**
        - id: the id of the compliance task to delete
    '''
    url = self.url + '/api/complianceTasks/{}'.format(id)
    res = requests.delete(url, headers=self.hdrs, verify=self.ssl_verify)
    if self._checkResponse(res):
        return True, None
    return False, self.lasterr
def cluster_assignments(self):
"""
Return an array of cluster assignments corresponding to the most recent set of instances clustered.
:return: the cluster assignments
:rtype: ndarray
"""
array = javabridge.call(self.jobject, "getClusterAssignments", "()[D")
if array is None:
return None
else:
return javabridge.get_env().get_double_array_elements(array) | Return an array of cluster assignments corresponding to the most recent set of instances clustered.
:return: the cluster assignments
:rtype: ndarray | Below is the the instruction that describes the task:
### Input:
Return an array of cluster assignments corresponding to the most recent set of instances clustered.
:return: the cluster assignments
:rtype: ndarray
### Response:
def cluster_assignments(self):
    """
    Return an array of cluster assignments corresponding to the most recent set of instances clustered.
    :return: the cluster assignments, or None when nothing has been clustered
    :rtype: ndarray
    """
    # Call into the JVM: "()[D" is the JNI signature for "returns double[]".
    raw = javabridge.call(self.jobject, "getClusterAssignments", "()[D")
    if raw is None:
        return None
    return javabridge.get_env().get_double_array_elements(raw)
def deleteProfile(self, profile):
""" Removes a profile from the persistent settings
"""
profGroupName = self.profileGroupName(profile)
logger.debug("Resetting profile settings: {}".format(profGroupName))
settings = QtCore.QSettings()
settings.remove(profGroupName) | Removes a profile from the persistent settings | Below is the the instruction that describes the task:
### Input:
Removes a profile from the persistent settings
### Response:
def deleteProfile(self, profile):
""" Removes a profile from the persistent settings
"""
profGroupName = self.profileGroupName(profile)
logger.debug("Resetting profile settings: {}".format(profGroupName))
settings = QtCore.QSettings()
settings.remove(profGroupName) |
def same_color(self, objects: Set[Object]) -> Set[Object]:
"""
Filters the set of objects, and returns those objects whose color is the most frequent
color in the initial set of objects, if the highest frequency is greater than 1, or an
empty set otherwise.
This is an unusual name for what the method does, but just as ``blue`` filters objects to
those that are blue, this filters objects to those that are of the same color.
"""
return self._get_objects_with_same_attribute(objects, lambda x: x.color) | Filters the set of objects, and returns those objects whose color is the most frequent
color in the initial set of objects, if the highest frequency is greater than 1, or an
empty set otherwise.
This is an unusual name for what the method does, but just as ``blue`` filters objects to
those that are blue, this filters objects to those that are of the same color. | Below is the the instruction that describes the task:
### Input:
Filters the set of objects, and returns those objects whose color is the most frequent
color in the initial set of objects, if the highest frequency is greater than 1, or an
empty set otherwise.
This is an unusual name for what the method does, but just as ``blue`` filters objects to
those that are blue, this filters objects to those that are of the same color.
### Response:
def same_color(self, objects: Set[Object]) -> Set[Object]:
"""
Filters the set of objects, and returns those objects whose color is the most frequent
color in the initial set of objects, if the highest frequency is greater than 1, or an
empty set otherwise.
This is an unusual name for what the method does, but just as ``blue`` filters objects to
those that are blue, this filters objects to those that are of the same color.
"""
return self._get_objects_with_same_attribute(objects, lambda x: x.color) |
def append(self, record):
"""
Adds the passed +record+ to satisfy the query. Only intended to be
used in conjunction with associations (i.e. do not use if self.record
is None).
Intended use case (DO THIS):
post.comments.append(comment)
NOT THIS:
Query(Post).where(content="foo").append(post)
"""
if self.record:
self._validate_record(record)
if self.join_args:
# As always, the related record is created when the primary
# record is saved
build_args = dict(self.where_query)
# The +final_join+ is what connects the record chain to the
# passed +record+
final_join = self.join_args[-2]
# don't need to worry about one-to-many through because
# there is not enough information to find or create the
# joining record
# i.e. in the Forum -> Thread -> Post example
# forum.posts.append(post) doesn't make sense since there
# is no information about what thread it will be attached to
# Thus, this only makes sense on many-to-many. BUT we still
# have to consider the case where there is a one-many-many
# To make that work, we need to treat this like when doing
# building
joining_relation = getattr(self.record, final_join['table'])
# Uses the lookup info in the join to figure out what ids to
# set, and where to get the id value from
joining_args = {final_join['on'][0]:
getattr(record, final_join['on'][1])}
build_args.update(joining_args)
joining_record = joining_relation.build(**build_args)
self.record._related_records.append(joining_record)
else:
# Add our id to their foreign key so that the relationship is
# created
setattr(record,
foreign_key(record, self.record),
self.record.id)
# Add to the list of related records so that it is saved when
# we are
self.record._related_records.append(record) | Adds the passed +record+ to satisfy the query. Only intended to be
used in conjunction with associations (i.e. do not use if self.record
is None).
Intended use case (DO THIS):
post.comments.append(comment)
NOT THIS:
Query(Post).where(content="foo").append(post) | Below is the the instruction that describes the task:
### Input:
Adds the passed +record+ to satisfy the query. Only intended to be
used in conjunction with associations (i.e. do not use if self.record
is None).
Intended use case (DO THIS):
post.comments.append(comment)
NOT THIS:
Query(Post).where(content="foo").append(post)
### Response:
def append(self, record):
"""
Adds the passed +record+ to satisfy the query. Only intended to be
used in conjunction with associations (i.e. do not use if self.record
is None).
Intended use case (DO THIS):
post.comments.append(comment)
NOT THIS:
Query(Post).where(content="foo").append(post)
"""
if self.record:
self._validate_record(record)
if self.join_args:
# As always, the related record is created when the primary
# record is saved
build_args = dict(self.where_query)
# The +final_join+ is what connects the record chain to the
# passed +record+
final_join = self.join_args[-2]
# don't need to worry about one-to-many through because
# there is not enough information to find or create the
# joining record
# i.e. in the Forum -> Thread -> Post example
# forum.posts.append(post) doesn't make sense since there
# is no information about what thread it will be attached to
# Thus, this only makes sense on many-to-many. BUT we still
# have to consider the case where there is a one-many-many
# To make that work, we need to treat this like when doing
# building
joining_relation = getattr(self.record, final_join['table'])
# Uses the lookup info in the join to figure out what ids to
# set, and where to get the id value from
joining_args = {final_join['on'][0]:
getattr(record, final_join['on'][1])}
build_args.update(joining_args)
joining_record = joining_relation.build(**build_args)
self.record._related_records.append(joining_record)
else:
# Add our id to their foreign key so that the relationship is
# created
setattr(record,
foreign_key(record, self.record),
self.record.id)
# Add to the list of related records so that it is saved when
# we are
self.record._related_records.append(record) |
def p_statement_draw3_attr(p):
""" statement : DRAW attr_list expr COMMA expr COMMA expr
"""
p[0] = make_sentence('DRAW3',
make_typecast(TYPE.integer, p[3], p.lineno(4)),
make_typecast(TYPE.integer, p[5], p.lineno(6)),
make_typecast(TYPE.float_, p[7], p.lineno(6)), p[2]) | statement : DRAW attr_list expr COMMA expr COMMA expr | Below is the the instruction that describes the task:
### Input:
statement : DRAW attr_list expr COMMA expr COMMA expr
### Response:
def p_statement_draw3_attr(p):
""" statement : DRAW attr_list expr COMMA expr COMMA expr
"""
p[0] = make_sentence('DRAW3',
make_typecast(TYPE.integer, p[3], p.lineno(4)),
make_typecast(TYPE.integer, p[5], p.lineno(6)),
make_typecast(TYPE.float_, p[7], p.lineno(6)), p[2]) |
def save_colormap(self, name=None):
"""
Saves the colormap with the specified name. None means use internal
name. (See get_name())
"""
if name == None: name = self.get_name()
if name == "" or not type(name)==str: return "Error: invalid name."
# get the colormaps directory
colormaps = _os.path.join(_settings.path_home, 'colormaps')
# make sure we have the colormaps directory
_settings.MakeDir(colormaps)
# assemble the path to the colormap
path = _os.path.join(_settings.path_home, 'colormaps', name+".cmap")
# open the file and overwrite
f = open(path, 'w')
f.write(str(self._colorpoint_list))
f.close()
return self | Saves the colormap with the specified name. None means use internal
name. (See get_name()) | Below is the the instruction that describes the task:
### Input:
Saves the colormap with the specified name. None means use internal
name. (See get_name())
### Response:
def save_colormap(self, name=None):
"""
Saves the colormap with the specified name. None means use internal
name. (See get_name())
"""
if name == None: name = self.get_name()
if name == "" or not type(name)==str: return "Error: invalid name."
# get the colormaps directory
colormaps = _os.path.join(_settings.path_home, 'colormaps')
# make sure we have the colormaps directory
_settings.MakeDir(colormaps)
# assemble the path to the colormap
path = _os.path.join(_settings.path_home, 'colormaps', name+".cmap")
# open the file and overwrite
f = open(path, 'w')
f.write(str(self._colorpoint_list))
f.close()
return self |
def sanitize(self):
'''
Check if the current settings conform to the LISP specifications and
fix them where possible.
'''
super(EncapsulatedControlMessage, self).sanitize()
# S: This is the Security bit. When set to 1 the following
# authentication information will be appended to the end of the Map-
# Reply. The detailed format of the Authentication Data Content is
# for further study.
if not isinstance(self.security, bool):
raise ValueError('Security flag must be a boolean')
if self.security:
raise NotImplementedError('Handling security data is not ' +
'implemented yet')
# "D" is the "DDT-originated" flag and is set by a DDT client to
# indicate that the receiver can and should return Map-Referral
# messages as appropriate.
if not isinstance(self.ddt_originated, bool):
raise ValueError('DDT originated flag must be a boolean')
# The 6th bit in the ECM LISP header is allocated as the "R"
# bit. The R bit indicates that the encapsulated Map-Register is
# to be processed by an RTR.
if not isinstance(self.for_rtr, bool):
raise ValueError('For-RTR flag must be a boolean')
# The 7th bit in the ECM header is allocated as the "N" bit. The
# N bit indicates that this Map-Register is being relayed by an
# RTR. When an RTR relays the ECM-ed Map-Register to a Map-Server,
# the N bit must be set to 1.
if not isinstance(self.relayed_by_rtr, bool):
raise ValueError('Relayed-by-RTR flag must be a boolean') | Check if the current settings conform to the LISP specifications and
fix them where possible. | Below is the the instruction that describes the task:
### Input:
Check if the current settings conform to the LISP specifications and
fix them where possible.
### Response:
def sanitize(self):
'''
Check if the current settings conform to the LISP specifications and
fix them where possible.
'''
super(EncapsulatedControlMessage, self).sanitize()
# S: This is the Security bit. When set to 1 the following
# authentication information will be appended to the end of the Map-
# Reply. The detailed format of the Authentication Data Content is
# for further study.
if not isinstance(self.security, bool):
raise ValueError('Security flag must be a boolean')
if self.security:
raise NotImplementedError('Handling security data is not ' +
'implemented yet')
# "D" is the "DDT-originated" flag and is set by a DDT client to
# indicate that the receiver can and should return Map-Referral
# messages as appropriate.
if not isinstance(self.ddt_originated, bool):
raise ValueError('DDT originated flag must be a boolean')
# The 6th bit in the ECM LISP header is allocated as the "R"
# bit. The R bit indicates that the encapsulated Map-Register is
# to be processed by an RTR.
if not isinstance(self.for_rtr, bool):
raise ValueError('For-RTR flag must be a boolean')
# The 7th bit in the ECM header is allocated as the "N" bit. The
# N bit indicates that this Map-Register is being relayed by an
# RTR. When an RTR relays the ECM-ed Map-Register to a Map-Server,
# the N bit must be set to 1.
if not isinstance(self.relayed_by_rtr, bool):
raise ValueError('Relayed-by-RTR flag must be a boolean') |
def registered(self, driver, executorInfo, frameworkInfo, agentInfo):
"""
Invoked once the executor driver has been able to successfully connect with Mesos.
"""
# Get the ID we have been assigned, if we have it
self.id = executorInfo.executor_id.get('value', None)
log.debug("Registered executor %s with framework", self.id)
self.address = socket.gethostbyname(agentInfo.hostname)
nodeInfoThread = threading.Thread(target=self._sendFrameworkMessage, args=[driver])
nodeInfoThread.daemon = True
nodeInfoThread.start() | Invoked once the executor driver has been able to successfully connect with Mesos. | Below is the the instruction that describes the task:
### Input:
Invoked once the executor driver has been able to successfully connect with Mesos.
### Response:
def registered(self, driver, executorInfo, frameworkInfo, agentInfo):
"""
Invoked once the executor driver has been able to successfully connect with Mesos.
"""
# Get the ID we have been assigned, if we have it
self.id = executorInfo.executor_id.get('value', None)
log.debug("Registered executor %s with framework", self.id)
self.address = socket.gethostbyname(agentInfo.hostname)
nodeInfoThread = threading.Thread(target=self._sendFrameworkMessage, args=[driver])
nodeInfoThread.daemon = True
nodeInfoThread.start() |
def get_umbrella_sampling_data(ntherm=11, us_fc=20.0, us_length=500, md_length=1000, nmd=20):
"""
Continuous MCMC process in an asymmetric double well potential using umbrella sampling.
Parameters
----------
ntherm: int, optional, default=11
Number of umbrella states.
us_fc: double, optional, default=20.0
Force constant in kT/length^2 for each umbrella.
us_length: int, optional, default=500
Length in steps of each umbrella trajectory.
md_length: int, optional, default=1000
Length in steps of each unbiased trajectory.
nmd: int, optional, default=20
Number of unbiased trajectories.
Returns
-------
dict - keys shown below in brackets
Trajectory data from umbrella sampling (us_trajs) and unbiased (md_trajs) MCMC runs and
their discretised counterparts (us_dtrajs + md_dtrajs + centers). The umbrella sampling
parameters (us_centers + us_force_constants) are in the same order as the umbrella sampling
trajectories. Energies are given in kT, lengths in arbitrary units.
"""
dws = _DWS()
us_data = dws.us_sample(
ntherm=ntherm, us_fc=us_fc, us_length=us_length, md_length=md_length, nmd=nmd)
us_data.update(centers=dws.centers)
return us_data | Continuous MCMC process in an asymmetric double well potential using umbrella sampling.
Parameters
----------
ntherm: int, optional, default=11
Number of umbrella states.
us_fc: double, optional, default=20.0
Force constant in kT/length^2 for each umbrella.
us_length: int, optional, default=500
Length in steps of each umbrella trajectory.
md_length: int, optional, default=1000
Length in steps of each unbiased trajectory.
nmd: int, optional, default=20
Number of unbiased trajectories.
Returns
-------
dict - keys shown below in brackets
Trajectory data from umbrella sampling (us_trajs) and unbiased (md_trajs) MCMC runs and
their discretised counterparts (us_dtrajs + md_dtrajs + centers). The umbrella sampling
parameters (us_centers + us_force_constants) are in the same order as the umbrella sampling
trajectories. Energies are given in kT, lengths in arbitrary units. | Below is the the instruction that describes the task:
### Input:
Continuous MCMC process in an asymmetric double well potential using umbrella sampling.
Parameters
----------
ntherm: int, optional, default=11
Number of umbrella states.
us_fc: double, optional, default=20.0
Force constant in kT/length^2 for each umbrella.
us_length: int, optional, default=500
Length in steps of each umbrella trajectory.
md_length: int, optional, default=1000
Length in steps of each unbiased trajectory.
nmd: int, optional, default=20
Number of unbiased trajectories.
Returns
-------
dict - keys shown below in brackets
Trajectory data from umbrella sampling (us_trajs) and unbiased (md_trajs) MCMC runs and
their discretised counterparts (us_dtrajs + md_dtrajs + centers). The umbrella sampling
parameters (us_centers + us_force_constants) are in the same order as the umbrella sampling
trajectories. Energies are given in kT, lengths in arbitrary units.
### Response:
def get_umbrella_sampling_data(ntherm=11, us_fc=20.0, us_length=500, md_length=1000, nmd=20):
"""
Continuous MCMC process in an asymmetric double well potential using umbrella sampling.
Parameters
----------
ntherm: int, optional, default=11
Number of umbrella states.
us_fc: double, optional, default=20.0
Force constant in kT/length^2 for each umbrella.
us_length: int, optional, default=500
Length in steps of each umbrella trajectory.
md_length: int, optional, default=1000
Length in steps of each unbiased trajectory.
nmd: int, optional, default=20
Number of unbiased trajectories.
Returns
-------
dict - keys shown below in brackets
Trajectory data from umbrella sampling (us_trajs) and unbiased (md_trajs) MCMC runs and
their discretised counterparts (us_dtrajs + md_dtrajs + centers). The umbrella sampling
parameters (us_centers + us_force_constants) are in the same order as the umbrella sampling
trajectories. Energies are given in kT, lengths in arbitrary units.
"""
dws = _DWS()
us_data = dws.us_sample(
ntherm=ntherm, us_fc=us_fc, us_length=us_length, md_length=md_length, nmd=nmd)
us_data.update(centers=dws.centers)
return us_data |
def get(self, obj_id):
"""
Get a document or a page using its ID
Won't instantiate them if they are not yet available
"""
if BasicPage.PAGE_ID_SEPARATOR in obj_id:
(docid, page_nb) = obj_id.split(BasicPage.PAGE_ID_SEPARATOR)
page_nb = int(page_nb)
return self._docs_by_id[docid].pages[page_nb]
return self._docs_by_id[obj_id] | Get a document or a page using its ID
Won't instantiate them if they are not yet available | Below is the the instruction that describes the task:
### Input:
Get a document or a page using its ID
Won't instantiate them if they are not yet available
### Response:
def get(self, obj_id):
"""
Get a document or a page using its ID
Won't instantiate them if they are not yet available
"""
if BasicPage.PAGE_ID_SEPARATOR in obj_id:
(docid, page_nb) = obj_id.split(BasicPage.PAGE_ID_SEPARATOR)
page_nb = int(page_nb)
return self._docs_by_id[docid].pages[page_nb]
return self._docs_by_id[obj_id] |
def default_loader(obj, defaults=None):
"""Loads default settings and check if there are overridings
exported as environment variables"""
defaults = defaults or {}
default_settings_values = {
key: value
for key, value in default_settings.__dict__.items() # noqa
if key.isupper()
}
all_keys = deduplicate(
list(defaults.keys()) + list(default_settings_values.keys())
)
for key in all_keys:
if not obj.exists(key):
value = defaults.get(key, default_settings_values.get(key))
obj.logger.debug("loading: %s:%s", key, value)
obj.set(key, value)
# start dotenv to get default env vars from there
# check overrides in env vars
default_settings.start_dotenv(obj)
# Deal with cases where a custom ENV_SWITCHER_IS_PROVIDED
# Example: Flask and Django Extensions
env_switcher = defaults.get(
"ENV_SWITCHER_FOR_DYNACONF", "ENV_FOR_DYNACONF"
)
for key in all_keys:
if key not in default_settings_values.keys():
continue
env_value = obj.get_environ(
env_switcher if key == "ENV_FOR_DYNACONF" else key,
default="_not_found",
)
if env_value != "_not_found":
obj.logger.debug("overriding from envvar: %s:%s", key, env_value)
obj.set(key, env_value, tomlfy=True) | Loads default settings and check if there are overridings
exported as environment variables | Below is the the instruction that describes the task:
### Input:
Loads default settings and check if there are overridings
exported as environment variables
### Response:
def default_loader(obj, defaults=None):
"""Loads default settings and check if there are overridings
exported as environment variables"""
defaults = defaults or {}
default_settings_values = {
key: value
for key, value in default_settings.__dict__.items() # noqa
if key.isupper()
}
all_keys = deduplicate(
list(defaults.keys()) + list(default_settings_values.keys())
)
for key in all_keys:
if not obj.exists(key):
value = defaults.get(key, default_settings_values.get(key))
obj.logger.debug("loading: %s:%s", key, value)
obj.set(key, value)
# start dotenv to get default env vars from there
# check overrides in env vars
default_settings.start_dotenv(obj)
# Deal with cases where a custom ENV_SWITCHER_IS_PROVIDED
# Example: Flask and Django Extensions
env_switcher = defaults.get(
"ENV_SWITCHER_FOR_DYNACONF", "ENV_FOR_DYNACONF"
)
for key in all_keys:
if key not in default_settings_values.keys():
continue
env_value = obj.get_environ(
env_switcher if key == "ENV_FOR_DYNACONF" else key,
default="_not_found",
)
if env_value != "_not_found":
obj.logger.debug("overriding from envvar: %s:%s", key, env_value)
obj.set(key, env_value, tomlfy=True) |
def __parse_enrollments(self, user):
"""Parse user enrollments"""
enrollments = []
for company in user['companies']:
name = company['company_name']
org = self._organizations.get(name, None)
if not org:
org = Organization(name=name)
self._organizations[name] = org
start_date = MIN_PERIOD_DATE
end_date = MAX_PERIOD_DATE
if company['end_date']:
end_date = str_to_datetime(company['end_date'])
rol = Enrollment(start=start_date, end=end_date,
organization=org)
enrollments.append(rol)
return enrollments | Parse user enrollments | Below is the the instruction that describes the task:
### Input:
Parse user enrollments
### Response:
def __parse_enrollments(self, user):
"""Parse user enrollments"""
enrollments = []
for company in user['companies']:
name = company['company_name']
org = self._organizations.get(name, None)
if not org:
org = Organization(name=name)
self._organizations[name] = org
start_date = MIN_PERIOD_DATE
end_date = MAX_PERIOD_DATE
if company['end_date']:
end_date = str_to_datetime(company['end_date'])
rol = Enrollment(start=start_date, end=end_date,
organization=org)
enrollments.append(rol)
return enrollments |
def classifyParameters(self):
"""Return (arguments, options, outputs) tuple. Together, the
three lists contain all parameters (recursively fetched from
all parameter groups), classified into optional parameters,
required ones (with an index), and simple output parameters
(that would get written to a file using
--returnparameterfile). `arguments` contains the required
arguments, already sorted by index."""
arguments = []
options = []
outputs = []
for parameter in self.parameters():
if parameter.channel == 'output' and not parameter.isExternalType():
outputs.append(parameter)
elif parameter.index is not None:
arguments.append(parameter)
if parameter.flag is not None or parameter.longflag is not None:
logger.warning("Parameter '%s' has both index=%d and flag set." % (
parameter.identifier(), parameter.index))
elif parameter.flag or parameter.longflag:
options.append(parameter)
else:
logger.warning("Parameter '%s' cannot be passed (missing flag, longflag, or index)!" % parameter.name)
arguments.sort(key = lambda parameter: parameter.index)
return (arguments, options, outputs) | Return (arguments, options, outputs) tuple. Together, the
three lists contain all parameters (recursively fetched from
all parameter groups), classified into optional parameters,
required ones (with an index), and simple output parameters
(that would get written to a file using
--returnparameterfile). `arguments` contains the required
arguments, already sorted by index. | Below is the the instruction that describes the task:
### Input:
Return (arguments, options, outputs) tuple. Together, the
three lists contain all parameters (recursively fetched from
all parameter groups), classified into optional parameters,
required ones (with an index), and simple output parameters
(that would get written to a file using
--returnparameterfile). `arguments` contains the required
arguments, already sorted by index.
### Response:
def classifyParameters(self):
"""Return (arguments, options, outputs) tuple. Together, the
three lists contain all parameters (recursively fetched from
all parameter groups), classified into optional parameters,
required ones (with an index), and simple output parameters
(that would get written to a file using
--returnparameterfile). `arguments` contains the required
arguments, already sorted by index."""
arguments = []
options = []
outputs = []
for parameter in self.parameters():
if parameter.channel == 'output' and not parameter.isExternalType():
outputs.append(parameter)
elif parameter.index is not None:
arguments.append(parameter)
if parameter.flag is not None or parameter.longflag is not None:
logger.warning("Parameter '%s' has both index=%d and flag set." % (
parameter.identifier(), parameter.index))
elif parameter.flag or parameter.longflag:
options.append(parameter)
else:
logger.warning("Parameter '%s' cannot be passed (missing flag, longflag, or index)!" % parameter.name)
arguments.sort(key = lambda parameter: parameter.index)
return (arguments, options, outputs) |
def columnNameAt( self, index ):
"""
Returns the name of the column at the inputed index.
:param index | <int>
:return <str>
"""
columns = self.columns()
if ( 0 <= index and index < len(columns) ):
return columns[index]
return '' | Returns the name of the column at the inputed index.
:param index | <int>
:return <str> | Below is the the instruction that describes the task:
### Input:
Returns the name of the column at the inputed index.
:param index | <int>
:return <str>
### Response:
def columnNameAt( self, index ):
"""
Returns the name of the column at the inputed index.
:param index | <int>
:return <str>
"""
columns = self.columns()
if ( 0 <= index and index < len(columns) ):
return columns[index]
return '' |
def overlapping_spheres(shape: List[int], radius: int, porosity: float,
iter_max: int = 10, tol: float = 0.01):
r"""
Generate a packing of overlapping mono-disperse spheres
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where Ni is the
number of voxels in the i-th direction.
radius : scalar
The radius of spheres in the packing.
porosity : scalar
The porosity of the final image, accurate to the given tolerance.
iter_max : int
Maximum number of iterations for the iterative algorithm that improves
the porosity of the final image to match the given value.
tol : float
Tolerance for porosity of the final image compared to the given value.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
Notes
-----
This method can also be used to generate a dispersion of hollows by
treating ``porosity`` as solid volume fraction and inverting the
returned image.
"""
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
ndim = (shape != 1).sum()
s_vol = ps_disk(radius).sum() if ndim == 2 else ps_ball(radius).sum()
bulk_vol = sp.prod(shape)
N = int(sp.ceil((1 - porosity)*bulk_vol/s_vol))
im = sp.random.random(size=shape)
# Helper functions for calculating porosity: phi = g(f(N))
f = lambda N: spim.distance_transform_edt(im > N/bulk_vol) < radius
g = lambda im: 1 - im.sum() / sp.prod(shape)
# # Newton's method for getting image porosity match the given
# w = 1.0 # Damping factor
# dN = 5 if ndim == 2 else 25 # Perturbation
# for i in range(iter_max):
# err = g(f(N)) - porosity
# d_err = (g(f(N+dN)) - g(f(N))) / dN
# if d_err == 0:
# break
# if abs(err) <= tol:
# break
# N2 = N - int(err/d_err) # xnew = xold - f/df
# N = w * N2 + (1-w) * N
# Bisection search: N is always undershoot (bc. of overlaps)
N_low, N_high = N, 4*N
for i in range(iter_max):
N = sp.mean([N_high, N_low], dtype=int)
err = g(f(N)) - porosity
if err > 0:
N_low = N
else:
N_high = N
if abs(err) <= tol:
break
return ~f(N) | r"""
Generate a packing of overlapping mono-disperse spheres
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where Ni is the
number of voxels in the i-th direction.
radius : scalar
The radius of spheres in the packing.
porosity : scalar
The porosity of the final image, accurate to the given tolerance.
iter_max : int
Maximum number of iterations for the iterative algorithm that improves
the porosity of the final image to match the given value.
tol : float
Tolerance for porosity of the final image compared to the given value.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
Notes
-----
This method can also be used to generate a dispersion of hollows by
treating ``porosity`` as solid volume fraction and inverting the
returned image. | Below is the the instruction that describes the task:
### Input:
r"""
Generate a packing of overlapping mono-disperse spheres
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where Ni is the
number of voxels in the i-th direction.
radius : scalar
The radius of spheres in the packing.
porosity : scalar
The porosity of the final image, accurate to the given tolerance.
iter_max : int
Maximum number of iterations for the iterative algorithm that improves
the porosity of the final image to match the given value.
tol : float
Tolerance for porosity of the final image compared to the given value.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
Notes
-----
This method can also be used to generate a dispersion of hollows by
treating ``porosity`` as solid volume fraction and inverting the
returned image.
### Response:
def overlapping_spheres(shape: List[int], radius: int, porosity: float,
iter_max: int = 10, tol: float = 0.01):
r"""
Generate a packing of overlapping mono-disperse spheres
Parameters
----------
shape : list
The size of the image to generate in [Nx, Ny, Nz] where Ni is the
number of voxels in the i-th direction.
radius : scalar
The radius of spheres in the packing.
porosity : scalar
The porosity of the final image, accurate to the given tolerance.
iter_max : int
Maximum number of iterations for the iterative algorithm that improves
the porosity of the final image to match the given value.
tol : float
Tolerance for porosity of the final image compared to the given value.
Returns
-------
image : ND-array
A boolean array with ``True`` values denoting the pore space
Notes
-----
This method can also be used to generate a dispersion of hollows by
treating ``porosity`` as solid volume fraction and inverting the
returned image.
"""
shape = sp.array(shape)
if sp.size(shape) == 1:
shape = sp.full((3, ), int(shape))
ndim = (shape != 1).sum()
s_vol = ps_disk(radius).sum() if ndim == 2 else ps_ball(radius).sum()
bulk_vol = sp.prod(shape)
N = int(sp.ceil((1 - porosity)*bulk_vol/s_vol))
im = sp.random.random(size=shape)
# Helper functions for calculating porosity: phi = g(f(N))
f = lambda N: spim.distance_transform_edt(im > N/bulk_vol) < radius
g = lambda im: 1 - im.sum() / sp.prod(shape)
# # Newton's method for getting image porosity match the given
# w = 1.0 # Damping factor
# dN = 5 if ndim == 2 else 25 # Perturbation
# for i in range(iter_max):
# err = g(f(N)) - porosity
# d_err = (g(f(N+dN)) - g(f(N))) / dN
# if d_err == 0:
# break
# if abs(err) <= tol:
# break
# N2 = N - int(err/d_err) # xnew = xold - f/df
# N = w * N2 + (1-w) * N
# Bisection search: N is always undershoot (bc. of overlaps)
N_low, N_high = N, 4*N
for i in range(iter_max):
N = sp.mean([N_high, N_low], dtype=int)
err = g(f(N)) - porosity
if err > 0:
N_low = N
else:
N_high = N
if abs(err) <= tol:
break
return ~f(N) |
def is_sqlatype_binary(coltype: Union[TypeEngine, VisitableType]) -> bool:
"""
Is the SQLAlchemy column type a binary type?
"""
# Several binary types inherit internally from _Binary, making that the
# easiest to check.
coltype = _coltype_to_typeengine(coltype)
# noinspection PyProtectedMember
return isinstance(coltype, sqltypes._Binary) | Is the SQLAlchemy column type a binary type? | Below is the the instruction that describes the task:
### Input:
Is the SQLAlchemy column type a binary type?
### Response:
def is_sqlatype_binary(coltype: Union[TypeEngine, VisitableType]) -> bool:
"""
Is the SQLAlchemy column type a binary type?
"""
# Several binary types inherit internally from _Binary, making that the
# easiest to check.
coltype = _coltype_to_typeengine(coltype)
# noinspection PyProtectedMember
return isinstance(coltype, sqltypes._Binary) |
def ped_parser(self, family_info):
    """
    Parse .ped formatted family info.

    Add all family info to the parser object: every non-comment,
    non-blank line becomes an individual that is registered in
    ``self.individuals`` and attached to its family in
    ``self.families`` (families are created on first sight).

    Arguments:
        family_info (iterator): An iterator with family info, one
            ped-formatted line per item (6 tab- or whitespace-separated
            columns; column names are taken from ``self.header``)

    Raises:
        WrongLineFormat: if a line does not have exactly 6 columns
    """
    for line in family_info:
        # Check if commented line or empty line:
        # (for a blank line the rstripped string is empty, which makes
        # all() vacuously True, so blank lines are skipped here too)
        if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
            splitted_line = line.rstrip().split('\t')
            if len(splitted_line) != 6:
                # Try to split the line on another symbol:
                # (fall back to any-whitespace splitting for
                # space-separated ped files)
                splitted_line = line.rstrip().split()
            try:
                self.check_line_length(splitted_line, 6)
            except WrongLineFormat as e:
                # Log the offending line before propagating the error.
                self.logger.error(e)
                self.logger.info("Ped line: {0}".format(e.ped_line))
                raise e
            # Map header column names onto the values of this line.
            sample_dict = dict(zip(self.header, splitted_line))
            family_id = sample_dict['family_id']
            if sample_dict['family_id'] not in self.families:
                # First individual of this family: create the Family.
                self.families[family_id] = Family(family_id, {})
            ind_object = self.get_individual(**sample_dict)
            self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object) | Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info | Below is the the instruction that describes the task:
### Input:
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
### Response:
def ped_parser(self, family_info):
"""
Parse .ped formatted family info.
Add all family info to the parser object
Arguments:
family_info (iterator): An iterator with family info
"""
for line in family_info:
# Check if commented line or empty line:
if not line.startswith('#') and not all(c in whitespace for c in line.rstrip()):
splitted_line = line.rstrip().split('\t')
if len(splitted_line) != 6:
# Try to split the line on another symbol:
splitted_line = line.rstrip().split()
try:
self.check_line_length(splitted_line, 6)
except WrongLineFormat as e:
self.logger.error(e)
self.logger.info("Ped line: {0}".format(e.ped_line))
raise e
sample_dict = dict(zip(self.header, splitted_line))
family_id = sample_dict['family_id']
if sample_dict['family_id'] not in self.families:
self.families[family_id] = Family(family_id, {})
ind_object = self.get_individual(**sample_dict)
self.individuals[ind_object.individual_id] = ind_object
self.families[ind_object.family].add_individual(ind_object) |
def parse_header(header):
    """
    Convert a list of the form `['fieldname:fieldtype:fieldsize',...]`
    into a numpy composite dtype. The parser understands headers generated
    by :func:`openquake.commonlib.writers.build_header`.

    Here is an example:

    >>> parse_header(['PGA:float32', 'PGV', 'avg:float32:2'])
    (['PGA', 'PGV', 'avg'], dtype([('PGA', '<f4'), ('PGV', '<f4'), ('avg', '<f4', (2,))]))

    :params header: a list of type descriptions
    :returns: column names and the corresponding composite dtype
    """
    triples = []  # (name, numpy type, shape) descriptors for numpy.dtype
    fields = []  # column names, in input order
    for col_str in header:
        col = col_str.strip().split(':')
        n = len(col)
        if n == 1: # default dtype and no shape
            col = [col[0], 'float32', '']
        elif n == 2:
            if castable_to_int(col[1]): # default dtype and shape
                col = [col[0], 'float32', col[1]]
            else: # dtype and no shape
                col = [col[0], col[1], '']
        elif n > 3:
            raise ValueError('Invalid column description: %s' % col_str)
        # n == 3 needs no normalization: col is already [name, type, size]
        field = col[0]
        numpytype = col[1]
        # an empty size string means a scalar field, otherwise a 1-D subarray
        shape = () if not col[2].strip() else (int(col[2]),)
        triples.append((field, numpytype, shape))
        fields.append(field)
return fields, numpy.dtype(triples) | Convert a list of the form `['fieldname:fieldtype:fieldsize',...]`
into a numpy composite dtype. The parser understands headers generated
by :func:`openquake.commonlib.writers.build_header`.
Here is an example:
>>> parse_header(['PGA:float32', 'PGV', 'avg:float32:2'])
(['PGA', 'PGV', 'avg'], dtype([('PGA', '<f4'), ('PGV', '<f4'), ('avg', '<f4', (2,))]))
:params header: a list of type descriptions
:returns: column names and the corresponding composite dtype | Below is the the instruction that describes the task:
### Input:
Convert a list of the form `['fieldname:fieldtype:fieldsize',...]`
into a numpy composite dtype. The parser understands headers generated
by :func:`openquake.commonlib.writers.build_header`.
Here is an example:
>>> parse_header(['PGA:float32', 'PGV', 'avg:float32:2'])
(['PGA', 'PGV', 'avg'], dtype([('PGA', '<f4'), ('PGV', '<f4'), ('avg', '<f4', (2,))]))
:params header: a list of type descriptions
:returns: column names and the corresponding composite dtype
### Response:
def parse_header(header):
"""
Convert a list of the form `['fieldname:fieldtype:fieldsize',...]`
into a numpy composite dtype. The parser understands headers generated
by :func:`openquake.commonlib.writers.build_header`.
Here is an example:
>>> parse_header(['PGA:float32', 'PGV', 'avg:float32:2'])
(['PGA', 'PGV', 'avg'], dtype([('PGA', '<f4'), ('PGV', '<f4'), ('avg', '<f4', (2,))]))
:params header: a list of type descriptions
:returns: column names and the corresponding composite dtype
"""
triples = []
fields = []
for col_str in header:
col = col_str.strip().split(':')
n = len(col)
if n == 1: # default dtype and no shape
col = [col[0], 'float32', '']
elif n == 2:
if castable_to_int(col[1]): # default dtype and shape
col = [col[0], 'float32', col[1]]
else: # dtype and no shape
col = [col[0], col[1], '']
elif n > 3:
raise ValueError('Invalid column description: %s' % col_str)
field = col[0]
numpytype = col[1]
shape = () if not col[2].strip() else (int(col[2]),)
triples.append((field, numpytype, shape))
fields.append(field)
return fields, numpy.dtype(triples) |
def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False,
                    draw=False, remove=False, priority=None):
    """
    Initialize the plot for a data array

    Parameters
    ----------
    data: InteractiveArray or ArrayList, optional
        Data object that shall be visualized.

        - If not None and `plot` is True, the given data is visualized.
        - If None and the :attr:`data` attribute is not None, the data in
          the :attr:`data` attribute is visualized
        - If both are None, nothing is done.
    %(Plotter.parameters.ax|make_plot|clear)s
    %(InteractiveBase.start_update.parameters.draw)s
    remove: bool
        If True, old effects by the formatoptions in this plotter are
        undone first
    priority: int
        If given, initialize only the formatoption with the given priority.
        This value must be out of :data:`START`, :data:`BEFOREPLOTTING` or
        :data:`END`
    """
    # Fall back to the data already stored on this plotter when no new
    # data is given; otherwise remember the new data.
    if data is None and self.data is not None:
        data = self.data
    else:
        self.data = data
    self.ax = ax
    if data is None: # nothing to do if no data is given
        return
    # Auto-updating stays enabled only if both the plotter and the data
    # object have it enabled (this is a boolean AND written via De Morgan).
    self.no_auto_update = not (
        not self.no_auto_update or not data.psy.no_auto_update)
    # Register this plotter on the data's psy accessor.
    data.psy.plotter = self
    if not make_plot: # stop here if we shall not plot
        return
    self.logger.debug("Initializing plot...")
    if remove:
        self.logger.debug(" Removing old formatoptions...")
        for fmto in self._fmtos:
            try:
                fmto.remove()
            except Exception:
                # Best effort: a formatoption that cannot be removed must
                # not abort the initialization, so only log it.
                self.logger.debug(
                    "Could not remove %s while initializing", fmto.key,
                    exc_info=True)
    if clear:
        self.logger.debug(" Clearing axes...")
        self.ax.clear()
        self.cleared = True
    # get the formatoptions. We sort them here by key to make sure that the
    # order always stays the same (easier for debugging)
    fmto_groups = self._grouped_fmtos(self._sorted_by_priority(
        sorted(self._fmtos, key=lambda fmto: fmto.key)))
    self.plot_data = self.data
    self._updating = True
    # Plot group by group in priority order; if a specific priority was
    # requested, only that group is initialized.
    for fmto_priority, grouper in fmto_groups:
        if priority is None or fmto_priority == priority:
            self._plot_by_priority(fmto_priority, grouper,
                                   initializing=True)
    self._release_all(True) # finish the update
    self.cleared = False
    self.replot = False
    self._initialized = True
    self._updating = False
    # NOTE: draw defaults to False, so this rcParams fallback only fires
    # when the caller explicitly passes draw=None.
    if draw is None:
        draw = rcParams['auto_draw']
    if draw:
        self.draw()
    if rcParams['auto_show']:
        self.show()
self.show() | Initialize the plot for a data array
Parameters
----------
data: InteractiveArray or ArrayList, optional
Data object that shall be visualized.
- If not None and `plot` is True, the given data is visualized.
- If None and the :attr:`data` attribute is not None, the data in
the :attr:`data` attribute is visualized
- If both are None, nothing is done.
%(Plotter.parameters.ax|make_plot|clear)s
%(InteractiveBase.start_update.parameters.draw)s
remove: bool
If True, old effects by the formatoptions in this plotter are
undone first
priority: int
If given, initialize only the formatoption with the given priority.
This value must be out of :data:`START`, :data:`BEFOREPLOTTING` or
:data:`END` | Below is the the instruction that describes the task:
### Input:
Initialize the plot for a data array
Parameters
----------
data: InteractiveArray or ArrayList, optional
Data object that shall be visualized.
- If not None and `plot` is True, the given data is visualized.
- If None and the :attr:`data` attribute is not None, the data in
the :attr:`data` attribute is visualized
- If both are None, nothing is done.
%(Plotter.parameters.ax|make_plot|clear)s
%(InteractiveBase.start_update.parameters.draw)s
remove: bool
If True, old effects by the formatoptions in this plotter are
undone first
priority: int
If given, initialize only the formatoption with the given priority.
This value must be out of :data:`START`, :data:`BEFOREPLOTTING` or
:data:`END`
### Response:
def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False,
draw=False, remove=False, priority=None):
"""
Initialize the plot for a data array
Parameters
----------
data: InteractiveArray or ArrayList, optional
Data object that shall be visualized.
- If not None and `plot` is True, the given data is visualized.
- If None and the :attr:`data` attribute is not None, the data in
the :attr:`data` attribute is visualized
- If both are None, nothing is done.
%(Plotter.parameters.ax|make_plot|clear)s
%(InteractiveBase.start_update.parameters.draw)s
remove: bool
If True, old effects by the formatoptions in this plotter are
undone first
priority: int
If given, initialize only the formatoption with the given priority.
This value must be out of :data:`START`, :data:`BEFOREPLOTTING` or
:data:`END`
"""
if data is None and self.data is not None:
data = self.data
else:
self.data = data
self.ax = ax
if data is None: # nothing to do if no data is given
return
self.no_auto_update = not (
not self.no_auto_update or not data.psy.no_auto_update)
data.psy.plotter = self
if not make_plot: # stop here if we shall not plot
return
self.logger.debug("Initializing plot...")
if remove:
self.logger.debug(" Removing old formatoptions...")
for fmto in self._fmtos:
try:
fmto.remove()
except Exception:
self.logger.debug(
"Could not remove %s while initializing", fmto.key,
exc_info=True)
if clear:
self.logger.debug(" Clearing axes...")
self.ax.clear()
self.cleared = True
# get the formatoptions. We sort them here by key to make sure that the
# order always stays the same (easier for debugging)
fmto_groups = self._grouped_fmtos(self._sorted_by_priority(
sorted(self._fmtos, key=lambda fmto: fmto.key)))
self.plot_data = self.data
self._updating = True
for fmto_priority, grouper in fmto_groups:
if priority is None or fmto_priority == priority:
self._plot_by_priority(fmto_priority, grouper,
initializing=True)
self._release_all(True) # finish the update
self.cleared = False
self.replot = False
self._initialized = True
self._updating = False
if draw is None:
draw = rcParams['auto_draw']
if draw:
self.draw()
if rcParams['auto_show']:
self.show() |
def calcparams_desoto(effective_irradiance, temp_cell,
                      alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s,
                      EgRef=1.121, dEgdT=-0.0002677,
                      irrad_ref=1000, temp_ref=25):
    '''
    Calculates five parameter values for the single diode equation at
    effective irradiance and cell temperature using the De Soto et al.
    model described in [1]. The five values returned by calcparams_desoto
    can be used by singlediode to calculate an IV curve.

    Parameters
    ----------
    effective_irradiance : numeric
        The irradiance (W/m2) that is converted to photocurrent.
    temp_cell : numeric
        The average cell temperature of cells within a module in C.
    alpha_sc : float
        The short-circuit current temperature coefficient of the
        module in units of A/C.
    a_ref : float
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at reference
        conditions, in units of V.
    I_L_ref : float
        The light-generated current (or photocurrent) at reference conditions,
        in amperes.
    I_o_ref : float
        The dark or diode reverse saturation current at reference conditions,
        in amperes.
    R_sh_ref : float
        The shunt resistance at reference conditions, in ohms.
    R_s : float
        The series resistance at reference conditions, in ohms.
    EgRef : float
        The energy bandgap at reference temperature in units of eV.
        1.121 eV for crystalline silicon. EgRef must be >0. For parameters
        from the SAM CEC module database, EgRef=1.121 is implicit for all
        cell types in the parameter estimation algorithm used by NREL.
    dEgdT : float
        The temperature dependence of the energy bandgap at reference
        conditions in units of 1/K. May be either a scalar value
        (e.g. -0.0002677 as in [1]) or a DataFrame (this may be useful if
        dEgdT is a modeled as a function of temperature). For parameters from
        the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
        types in the parameter estimation algorithm used by NREL.
    irrad_ref : float (optional, default=1000)
        Reference irradiance in W/m^2.
    temp_ref : float (optional, default=25)
        Reference cell temperature in C.

    Returns
    -------
    Tuple of the following results:

    photocurrent : numeric
        Light-generated current in amperes
    saturation_current : numeric
        Diode saturation curent in amperes
    resistance_series : float
        Series resistance in ohms
    resistance_shunt : numeric
        Shunt resistance in ohms
    nNsVth : numeric
        The product of the usual diode ideality factor (n, unitless),
        number of cells in series (Ns), and cell thermal voltage at
        specified effective irradiance and cell temperature.

    Raises
    ------
    KeyError
        If a pre-v0.6.0 style module-parameter dict/Series is passed as the
        fourth positional argument and does not contain all required keys.

    References
    ----------
    [1] W. De Soto et al., "Improvement and validation of a model for
    photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
    2006.
    [2] System Advisor Model web page. https://sam.nrel.gov.
    [3] A. Dobos, "An Improved Coefficient Calculator for the California
    Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
    Solar Energy Engineering, vol 134, 2012.
    [4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
    3-540-40488-0

    See Also
    --------
    singlediode
    retrieve_sam

    Notes
    -----
    If the reference parameters in the ModuleParameters struct are read
    from a database or library of parameters (e.g. System Advisor
    Model), it is important to use the same EgRef and dEgdT values that
    were used to generate the reference parameters, regardless of the
    actual bandgap characteristics of the semiconductor. For example, in
    the case of the System Advisor Model library, created as described
    in [3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
    respectively.

    This table of reference bandgap energies (EgRef), bandgap energy
    temperature dependence (dEgdT), and "typical" airmass response (M)
    is provided purely as reference to those who may generate their own
    reference module parameters (a_ref, IL_ref, I0_ref, etc.) based upon
    the various PV semiconductors. Again, we stress the importance of
    using identical EgRef and dEgdT when generation reference parameters
    and modifying the reference parameters (for irradiance, temperature,
    and airmass) per DeSoto's equations.

    Crystalline Silicon (Si):
        * EgRef = 1.121
        * dEgdT = -0.0002677

        >>> M = np.polyval([-1.26E-4, 2.816E-3, -0.024459, 0.086257, 0.9181],
        ...                AMa) # doctest: +SKIP

        Source: [1]

    Cadmium Telluride (CdTe):
        * EgRef = 1.475
        * dEgdT = -0.0003

        >>> M = np.polyval([-2.46E-5, 9.607E-4, -0.0134, 0.0716, 0.9196],
        ...                AMa) # doctest: +SKIP

        Source: [4]

    Copper Indium diSelenide (CIS):
        * EgRef = 1.010
        * dEgdT = -0.00011

        >>> M = np.polyval([-3.74E-5, 0.00125, -0.01462, 0.0718, 0.9210],
        ...                AMa) # doctest: +SKIP

        Source: [4]

    Copper Indium Gallium diSelenide (CIGS):
        * EgRef = 1.15
        * dEgdT = ????

        >>> M = np.polyval([-9.07E-5, 0.0022, -0.0202, 0.0652, 0.9417],
        ...                AMa) # doctest: +SKIP

        Source: Wikipedia

    Gallium Arsenide (GaAs):
        * EgRef = 1.424
        * dEgdT = -0.000433
        * M = unknown

        Source: [4]
    '''
    # test for use of function pre-v0.6.0 API change
    if isinstance(a_ref, dict) or \
            (isinstance(a_ref, pd.Series) and ('a_ref' in a_ref.keys())):
        import warnings
        warnings.warn('module_parameters detected as fourth positional'
                      + ' argument of calcparams_desoto. calcparams_desoto'
                      + ' will require one argument for each module model'
                      + ' parameter in v0.7.0 and later', DeprecationWarning)
        try:
            module_parameters = a_ref
            a_ref = module_parameters['a_ref']
            I_L_ref = module_parameters['I_L_ref']
            I_o_ref = module_parameters['I_o_ref']
            R_sh_ref = module_parameters['R_sh_ref']
            R_s = module_parameters['R_s']
        except KeyError as e:
            # BUG FIX: the original code did ``raise e('...')``, which
            # *calls* the caught exception instance and therefore raises
            # an unrelated ``TypeError: 'KeyError' object is not callable``
            # instead of the intended message. Raise a new, chained
            # KeyError carrying the intended message instead.
            raise KeyError('Module parameters could not be extracted from'
                           ' fourth positional argument of calcparams_desoto.'
                           ' Check that parameters are from the CEC database'
                           ' and/or update your code for the new API for'
                           ' calcparams_desoto') from e

    # Boltzmann constant in eV/K
    k = 8.617332478e-05

    # reference temperature (convert both temperatures from C to K)
    Tref_K = temp_ref + 273.15
    Tcell_K = temp_cell + 273.15

    # linear temperature dependence of the bandgap energy
    E_g = EgRef * (1 + dEgdT*(Tcell_K - Tref_K))

    # modified ideality factor scales linearly with absolute temperature
    nNsVth = a_ref * (Tcell_K / Tref_K)

    # In the equation for IL, the single factor effective_irradiance is
    # used, in place of the product S*M in [1]. effective_irradiance is
    # equivalent to the product of S (irradiance reaching a module's cells) *
    # M (spectral adjustment factor) as described in [1].
    IL = effective_irradiance / irrad_ref * \
        (I_L_ref + alpha_sc * (Tcell_K - Tref_K))
    I0 = (I_o_ref * ((Tcell_K / Tref_K) ** 3) *
          (np.exp(EgRef / (k*(Tref_K)) - (E_g / (k*(Tcell_K))))))
    # Note that the equation for Rsh differs from [1]. In [1] Rsh is given as
    # Rsh = Rsh_ref * (S_ref / S) where S is broadband irradiance reaching
    # the module's cells. If desired this model behavior can be duplicated
    # by applying reflection and soiling losses to broadband plane of array
    # irradiance and not applying a spectral loss modifier, i.e.,
    # spectral_modifier = 1.0.
    Rsh = R_sh_ref * (irrad_ref / effective_irradiance)
    Rs = R_s
return IL, I0, Rs, Rsh, nNsVth | Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the De Soto et al.
model described in [1]. The five values returned by calcparams_desoto
can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
a_ref : float
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at reference
conditions, in units of V.
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0. For parameters
from the SAM CEC module database, EgRef=1.121 is implicit for all
cell types in the parameter estimation algorithm used by NREL.
dEgdT : float
The temperature dependence of the energy bandgap at reference
conditions in units of 1/K. May be either a scalar value
(e.g. -0.0002677 as in [1]) or a DataFrame (this may be useful if
dEgdT is a modeled as a function of temperature). For parameters from
the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
types in the parameter estimation algorithm used by NREL.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
Diode saturation curent in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
[1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
[2] System Advisor Model web page. https://sam.nrel.gov.
[3] A. Dobos, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
[4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
3-540-40488-0
See Also
--------
singlediode
retrieve_sam
Notes
-----
If the reference parameters in the ModuleParameters struct are read
from a database or library of parameters (e.g. System Advisor
Model), it is important to use the same EgRef and dEgdT values that
were used to generate the reference parameters, regardless of the
actual bandgap characteristics of the semiconductor. For example, in
the case of the System Advisor Model library, created as described
in [3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
respectively.
This table of reference bandgap energies (EgRef), bandgap energy
temperature dependence (dEgdT), and "typical" airmass response (M)
is provided purely as reference to those who may generate their own
reference module parameters (a_ref, IL_ref, I0_ref, etc.) based upon
the various PV semiconductors. Again, we stress the importance of
using identical EgRef and dEgdT when generation reference parameters
and modifying the reference parameters (for irradiance, temperature,
and airmass) per DeSoto's equations.
Crystalline Silicon (Si):
* EgRef = 1.121
* dEgdT = -0.0002677
>>> M = np.polyval([-1.26E-4, 2.816E-3, -0.024459, 0.086257, 0.9181],
... AMa) # doctest: +SKIP
Source: [1]
Cadmium Telluride (CdTe):
* EgRef = 1.475
* dEgdT = -0.0003
>>> M = np.polyval([-2.46E-5, 9.607E-4, -0.0134, 0.0716, 0.9196],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium diSelenide (CIS):
* EgRef = 1.010
* dEgdT = -0.00011
>>> M = np.polyval([-3.74E-5, 0.00125, -0.01462, 0.0718, 0.9210],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium Gallium diSelenide (CIGS):
* EgRef = 1.15
* dEgdT = ????
>>> M = np.polyval([-9.07E-5, 0.0022, -0.0202, 0.0652, 0.9417],
... AMa) # doctest: +SKIP
Source: Wikipedia
Gallium Arsenide (GaAs):
* EgRef = 1.424
* dEgdT = -0.000433
* M = unknown
Source: [4] | Below is the the instruction that describes the task:
### Input:
Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the De Soto et al.
model described in [1]. The five values returned by calcparams_desoto
can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
a_ref : float
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at reference
conditions, in units of V.
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0. For parameters
from the SAM CEC module database, EgRef=1.121 is implicit for all
cell types in the parameter estimation algorithm used by NREL.
dEgdT : float
The temperature dependence of the energy bandgap at reference
conditions in units of 1/K. May be either a scalar value
(e.g. -0.0002677 as in [1]) or a DataFrame (this may be useful if
dEgdT is a modeled as a function of temperature). For parameters from
the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
types in the parameter estimation algorithm used by NREL.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
Diode saturation curent in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
[1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
[2] System Advisor Model web page. https://sam.nrel.gov.
[3] A. Dobos, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
[4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
3-540-40488-0
See Also
--------
singlediode
retrieve_sam
Notes
-----
If the reference parameters in the ModuleParameters struct are read
from a database or library of parameters (e.g. System Advisor
Model), it is important to use the same EgRef and dEgdT values that
were used to generate the reference parameters, regardless of the
actual bandgap characteristics of the semiconductor. For example, in
the case of the System Advisor Model library, created as described
in [3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
respectively.
This table of reference bandgap energies (EgRef), bandgap energy
temperature dependence (dEgdT), and "typical" airmass response (M)
is provided purely as reference to those who may generate their own
reference module parameters (a_ref, IL_ref, I0_ref, etc.) based upon
the various PV semiconductors. Again, we stress the importance of
using identical EgRef and dEgdT when generation reference parameters
and modifying the reference parameters (for irradiance, temperature,
and airmass) per DeSoto's equations.
Crystalline Silicon (Si):
* EgRef = 1.121
* dEgdT = -0.0002677
>>> M = np.polyval([-1.26E-4, 2.816E-3, -0.024459, 0.086257, 0.9181],
... AMa) # doctest: +SKIP
Source: [1]
Cadmium Telluride (CdTe):
* EgRef = 1.475
* dEgdT = -0.0003
>>> M = np.polyval([-2.46E-5, 9.607E-4, -0.0134, 0.0716, 0.9196],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium diSelenide (CIS):
* EgRef = 1.010
* dEgdT = -0.00011
>>> M = np.polyval([-3.74E-5, 0.00125, -0.01462, 0.0718, 0.9210],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium Gallium diSelenide (CIGS):
* EgRef = 1.15
* dEgdT = ????
>>> M = np.polyval([-9.07E-5, 0.0022, -0.0202, 0.0652, 0.9417],
... AMa) # doctest: +SKIP
Source: Wikipedia
Gallium Arsenide (GaAs):
* EgRef = 1.424
* dEgdT = -0.000433
* M = unknown
Source: [4]
### Response:
def calcparams_desoto(effective_irradiance, temp_cell,
alpha_sc, a_ref, I_L_ref, I_o_ref, R_sh_ref, R_s,
EgRef=1.121, dEgdT=-0.0002677,
irrad_ref=1000, temp_ref=25):
'''
Calculates five parameter values for the single diode equation at
effective irradiance and cell temperature using the De Soto et al.
model described in [1]. The five values returned by calcparams_desoto
can be used by singlediode to calculate an IV curve.
Parameters
----------
effective_irradiance : numeric
The irradiance (W/m2) that is converted to photocurrent.
temp_cell : numeric
The average cell temperature of cells within a module in C.
alpha_sc : float
The short-circuit current temperature coefficient of the
module in units of A/C.
a_ref : float
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at reference
conditions, in units of V.
I_L_ref : float
The light-generated current (or photocurrent) at reference conditions,
in amperes.
I_o_ref : float
The dark or diode reverse saturation current at reference conditions,
in amperes.
R_sh_ref : float
The shunt resistance at reference conditions, in ohms.
R_s : float
The series resistance at reference conditions, in ohms.
EgRef : float
The energy bandgap at reference temperature in units of eV.
1.121 eV for crystalline silicon. EgRef must be >0. For parameters
from the SAM CEC module database, EgRef=1.121 is implicit for all
cell types in the parameter estimation algorithm used by NREL.
dEgdT : float
The temperature dependence of the energy bandgap at reference
conditions in units of 1/K. May be either a scalar value
(e.g. -0.0002677 as in [1]) or a DataFrame (this may be useful if
dEgdT is a modeled as a function of temperature). For parameters from
the SAM CEC module database, dEgdT=-0.0002677 is implicit for all cell
types in the parameter estimation algorithm used by NREL.
irrad_ref : float (optional, default=1000)
Reference irradiance in W/m^2.
temp_ref : float (optional, default=25)
Reference cell temperature in C.
Returns
-------
Tuple of the following results:
photocurrent : numeric
Light-generated current in amperes
saturation_current : numeric
Diode saturation curent in amperes
resistance_series : float
Series resistance in ohms
resistance_shunt : numeric
Shunt resistance in ohms
nNsVth : numeric
The product of the usual diode ideality factor (n, unitless),
number of cells in series (Ns), and cell thermal voltage at
specified effective irradiance and cell temperature.
References
----------
[1] W. De Soto et al., "Improvement and validation of a model for
photovoltaic array performance", Solar Energy, vol 80, pp. 78-88,
2006.
[2] System Advisor Model web page. https://sam.nrel.gov.
[3] A. Dobos, "An Improved Coefficient Calculator for the California
Energy Commission 6 Parameter Photovoltaic Module Model", Journal of
Solar Energy Engineering, vol 134, 2012.
[4] O. Madelung, "Semiconductors: Data Handbook, 3rd ed." ISBN
3-540-40488-0
See Also
--------
singlediode
retrieve_sam
Notes
-----
If the reference parameters in the ModuleParameters struct are read
from a database or library of parameters (e.g. System Advisor
Model), it is important to use the same EgRef and dEgdT values that
were used to generate the reference parameters, regardless of the
actual bandgap characteristics of the semiconductor. For example, in
the case of the System Advisor Model library, created as described
in [3], EgRef and dEgdT for all modules were 1.121 and -0.0002677,
respectively.
This table of reference bandgap energies (EgRef), bandgap energy
temperature dependence (dEgdT), and "typical" airmass response (M)
is provided purely as reference to those who may generate their own
reference module parameters (a_ref, IL_ref, I0_ref, etc.) based upon
the various PV semiconductors. Again, we stress the importance of
using identical EgRef and dEgdT when generation reference parameters
and modifying the reference parameters (for irradiance, temperature,
and airmass) per DeSoto's equations.
Crystalline Silicon (Si):
* EgRef = 1.121
* dEgdT = -0.0002677
>>> M = np.polyval([-1.26E-4, 2.816E-3, -0.024459, 0.086257, 0.9181],
... AMa) # doctest: +SKIP
Source: [1]
Cadmium Telluride (CdTe):
* EgRef = 1.475
* dEgdT = -0.0003
>>> M = np.polyval([-2.46E-5, 9.607E-4, -0.0134, 0.0716, 0.9196],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium diSelenide (CIS):
* EgRef = 1.010
* dEgdT = -0.00011
>>> M = np.polyval([-3.74E-5, 0.00125, -0.01462, 0.0718, 0.9210],
... AMa) # doctest: +SKIP
Source: [4]
Copper Indium Gallium diSelenide (CIGS):
* EgRef = 1.15
* dEgdT = ????
>>> M = np.polyval([-9.07E-5, 0.0022, -0.0202, 0.0652, 0.9417],
... AMa) # doctest: +SKIP
Source: Wikipedia
Gallium Arsenide (GaAs):
* EgRef = 1.424
* dEgdT = -0.000433
* M = unknown
Source: [4]
'''
# test for use of function pre-v0.6.0 API change
if isinstance(a_ref, dict) or \
(isinstance(a_ref, pd.Series) and ('a_ref' in a_ref.keys())):
import warnings
warnings.warn('module_parameters detected as fourth positional'
+ ' argument of calcparams_desoto. calcparams_desoto'
+ ' will require one argument for each module model'
+ ' parameter in v0.7.0 and later', DeprecationWarning)
try:
module_parameters = a_ref
a_ref = module_parameters['a_ref']
I_L_ref = module_parameters['I_L_ref']
I_o_ref = module_parameters['I_o_ref']
R_sh_ref = module_parameters['R_sh_ref']
R_s = module_parameters['R_s']
except Exception as e:
raise e('Module parameters could not be extracted from fourth'
+ ' positional argument of calcparams_desoto. Check that'
+ ' parameters are from the CEC database and/or update'
+ ' your code for the new API for calcparams_desoto')
# Boltzmann constant in eV/K
k = 8.617332478e-05
# reference temperature
Tref_K = temp_ref + 273.15
Tcell_K = temp_cell + 273.15
E_g = EgRef * (1 + dEgdT*(Tcell_K - Tref_K))
nNsVth = a_ref * (Tcell_K / Tref_K)
# In the equation for IL, the single factor effective_irradiance is
# used, in place of the product S*M in [1]. effective_irradiance is
# equivalent to the product of S (irradiance reaching a module's cells) *
# M (spectral adjustment factor) as described in [1].
IL = effective_irradiance / irrad_ref * \
(I_L_ref + alpha_sc * (Tcell_K - Tref_K))
I0 = (I_o_ref * ((Tcell_K / Tref_K) ** 3) *
(np.exp(EgRef / (k*(Tref_K)) - (E_g / (k*(Tcell_K))))))
# Note that the equation for Rsh differs from [1]. In [1] Rsh is given as
# Rsh = Rsh_ref * (S_ref / S) where S is broadband irradiance reaching
# the module's cells. If desired this model behavior can be duplicated
# by applying reflection and soiling losses to broadband plane of array
# irradiance and not applying a spectral loss modifier, i.e.,
# spectral_modifier = 1.0.
Rsh = R_sh_ref * (irrad_ref / effective_irradiance)
Rs = R_s
return IL, I0, Rs, Rsh, nNsVth |
async def update_flags(self, messages: Sequence[MessageT],
                       flag_set: FrozenSet[Flag], mode: FlagOp) -> None:
    """Update the permanent flags of each message.

    Abstract stub (the body is ``...``); concrete subclasses are expected
    to apply *flag_set* to every message in *messages* according to *mode*.

    Args:
        messages: The message objects.
        flag_set: The set of flags for the update operation.
        mode: The mode to change the flags.
    """
    ... | Update the permanent flags of each messages.
Args:
messages: The message objects.
flag_set: The set of flags for the update operation.
flag_op: The mode to change the flags. | Below is the instruction that describes the task:
### Input:
Update the permanent flags of each messages.
Args:
messages: The message objects.
flag_set: The set of flags for the update operation.
flag_op: The mode to change the flags.
### Response:
async def update_flags(self, messages: Sequence[MessageT],
flag_set: FrozenSet[Flag], mode: FlagOp) -> None:
"""Update the permanent flags of each messages.
Args:
messages: The message objects.
flag_set: The set of flags for the update operation.
flag_op: The mode to change the flags.
"""
... |
def output_notebook(inline=True, logo=False):
    """
    Load the notebook extension

    Parameters
    ----------
    inline : boolean (optional)
        Whether to inline JS code or load it from a CDN
    logo : boolean (optional)
        Whether to show the logo(s)

    Raises
    ------
    ImportError
        If hvplot is not installed.
    """
    try:
        import hvplot  # noqa: F401 -- imported only to check availability
    except ImportError:
        # Fixed: the original message was missing a separator after the
        # first sentence ("...requires hvplot.hvplot may be installed...").
        raise ImportError("The intake plotting API requires hvplot. "
                          "hvplot may be installed with:\n\n"
                          "`conda install -c pyviz hvplot` or "
                          "`pip install hvplot`.")
    import holoviews as hv
    return hv.extension('bokeh', inline=inline, logo=logo)
Parameters
----------
inline : boolean (optional)
Whether to inline JS code or load it from a CDN
logo : boolean (optional)
Whether to show the logo(s) | Below is the the instruction that describes the task:
### Input:
Load the notebook extension
Parameters
----------
inline : boolean (optional)
Whether to inline JS code or load it from a CDN
logo : boolean (optional)
Whether to show the logo(s)
### Response:
def output_notebook(inline=True, logo=False):
"""
Load the notebook extension
Parameters
----------
inline : boolean (optional)
Whether to inline JS code or load it from a CDN
logo : boolean (optional)
Whether to show the logo(s)
"""
try:
import hvplot
except ImportError:
raise ImportError("The intake plotting API requires hvplot."
"hvplot may be installed with:\n\n"
"`conda install -c pyviz hvplot` or "
"`pip install hvplot`.")
import holoviews as hv
return hv.extension('bokeh', inline=inline, logo=logo) |
def dec2dms(dec):
    """Convert decimal degrees to a (degrees, arcminutes, arcseconds) tuple.

    ADW: This should really be replaced by astropy.

    Parameters
    ----------
    dec : float
        Angle (e.g. declination) in decimal degrees.

    Returns
    -------
    tuple
        (degrees, minutes, seconds) with the sign carried on the degrees
        term only.

    Notes
    -----
    NOTE(review): for inputs in (-1, 0) the integer degree term is 0, so
    the sign of the input is lost in the result -- confirm callers never
    rely on sub-degree negative values before tightening this.
    """
    # Removed unused DEGREE/HOUR/SECOND constants from the original.
    MINUTE = 60.
    dec = float(dec)
    sign = np.copysign(1.0, dec)
    fdeg = np.abs(dec)
    deg = int(fdeg)
    fminute = (fdeg - deg) * MINUTE
    minute = int(fminute)
    second = (fminute - minute) * MINUTE
    deg = int(deg * sign)
    return (deg, minute, second)
### Input:
ADW: This should really be replaced by astropy
### Response:
def dec2dms(dec):
"""
ADW: This should really be replaced by astropy
"""
DEGREE = 360.
HOUR = 24.
MINUTE = 60.
SECOND = 3600.
dec = float(dec)
sign = np.copysign(1.0,dec)
fdeg = np.abs(dec)
deg = int(fdeg)
fminute = (fdeg - deg)*MINUTE
minute = int(fminute)
second = (fminute - minute)*MINUTE
deg = int(deg * sign)
return (deg, minute, second) |
def positionlesscrop(self,x,y,sheet_coord_system):
    """
    Return the correct slice for a weights/mask matrix at this
    ConnectionField's location on the sheet (i.e. for getting the
    correct submatrix of the weights or mask in case the unit is
    near the edge of the sheet).
    """
    # Map the sheet coordinate (x, y) to matrix indices, then compute the
    # sub-slice of this CF's footprint that actually lies on the sheet.
    slice_inds = self.findinputslice(
        sheet_coord_system.sheet2matrixidx(x,y),
        self.shape_on_sheet(), sheet_coord_system.shape)
    # self.set mutates this object's stored slice; nothing is returned.
    self.set(slice_inds) | Return the correct slice for a weights/mask matrix at this
ConnectionField's location on the sheet (i.e. for getting the
correct submatrix of the weights or mask in case the unit is
near the edge of the sheet). | Below is the the instruction that describes the task:
### Input:
Return the correct slice for a weights/mask matrix at this
ConnectionField's location on the sheet (i.e. for getting the
correct submatrix of the weights or mask in case the unit is
near the edge of the sheet).
### Response:
def positionlesscrop(self,x,y,sheet_coord_system):
"""
Return the correct slice for a weights/mask matrix at this
ConnectionField's location on the sheet (i.e. for getting the
correct submatrix of the weights or mask in case the unit is
near the edge of the sheet).
"""
slice_inds = self.findinputslice(
sheet_coord_system.sheet2matrixidx(x,y),
self.shape_on_sheet(), sheet_coord_system.shape)
self.set(slice_inds) |
def timestamp_YmdHMS(value):
    """Convert timestamp string to time in seconds since epoch.

    Timestamp strings like '20130618120000' (format '%Y%m%d%H%M%S') are
    able to be converted by this function.

    Args:
        value: A timestamp string in the format '%Y%m%d%H%M%S'.
    Returns:
        The time in seconds since epoch as an integer.
    Raises:
        ValueError: If timestamp is invalid.
    Note: The timezone is assumed to be UTC/GMT.
    """
    # Peel two decimal digits at a time off the right-hand side.
    number = int(value)
    number, second = divmod(number, 100)
    number, minute = divmod(number, 100)
    number, hour = divmod(number, 100)
    number, day = divmod(number, 100)
    year, month = divmod(number, 100)
    return int(calendar.timegm(
        (year % 10000, month, day, hour, minute, second, 0, 0, 0)))
Timestamps strings like '20130618120000' are able to be converted by this
function.
Args:
value: A timestamp string in the format '%Y%m%d%H%M%S'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
Note: The timezone is assumed to be UTC/GMT. | Below is the instruction that describes the task:
### Input:
Convert timestamp string to time in seconds since epoch.
Timestamps strings like '20130618120000' are able to be converted by this
function.
Args:
value: A timestamp string in the format '%Y%m%d%H%M%S'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
Note: The timezone is assumed to be UTC/GMT.
### Response:
def timestamp_YmdHMS(value):
"""Convert timestamp string to time in seconds since epoch.
Timestamps strings like '20130618120000' are able to be converted by this
function.
Args:
value: A timestamp string in the format '%Y%m%d%H%M%S'.
Returns:
The time in seconds since epoch as an integer.
Raises:
ValueError: If timestamp is invalid.
Note: The timezone is assumed to be UTC/GMT.
"""
i = int(value)
S = i
M = S//100
H = M//100
d = H//100
m = d//100
Y = m//100
return int(calendar.timegm((
Y % 10000, m % 100, d % 100, H % 100, M % 100, S % 100, 0, 0, 0)
)) |
def create_secret_link(self, title, description=None, expires_at=None):
    """Create a secret link from request.

    Builds a SecretLink owned by this request's receiver and stores it on
    ``self.link``.

    :param title: Title for the link.
    :param description: Optional human-readable description.
    :param expires_at: Optional expiry passed through to SecretLink.create.
    :returns: The newly created SecretLink.
    """
    self.link = SecretLink.create(
        title,
        self.receiver,
        # Record which record this link grants access to.
        extra_data=dict(recid=self.recid),
        description=description,
        expires_at=expires_at,
    )
    return self.link | Create a secret link from request.
### Input:
Create a secret link from request.
### Response:
def create_secret_link(self, title, description=None, expires_at=None):
"""Create a secret link from request."""
self.link = SecretLink.create(
title,
self.receiver,
extra_data=dict(recid=self.recid),
description=description,
expires_at=expires_at,
)
return self.link |
def _handle_ticker(self, ts, chan_id, data):
"""
Adds received ticker data to self.tickers dict, filed under its channel
id.
:param ts: timestamp, declares when data was received by the client
:param chan_id: int, channel id
:param data: tuple or list of data received via wss
:return:
"""
pair = self.channel_labels[chan_id][1]['pair']
entry = (*data, ts)
self.data_q.put(('ticker', pair, entry)) | Adds received ticker data to self.tickers dict, filed under its channel
id.
:param ts: timestamp, declares when data was received by the client
:param chan_id: int, channel id
:param data: tuple or list of data received via wss
:return: | Below is the the instruction that describes the task:
### Input:
Adds received ticker data to self.tickers dict, filed under its channel
id.
:param ts: timestamp, declares when data was received by the client
:param chan_id: int, channel id
:param data: tuple or list of data received via wss
:return:
### Response:
def _handle_ticker(self, ts, chan_id, data):
"""
Adds received ticker data to self.tickers dict, filed under its channel
id.
:param ts: timestamp, declares when data was received by the client
:param chan_id: int, channel id
:param data: tuple or list of data received via wss
:return:
"""
pair = self.channel_labels[chan_id][1]['pair']
entry = (*data, ts)
self.data_q.put(('ticker', pair, entry)) |
def ensure_alt_ids_in_nest_spec_are_ints(nest_spec, list_elements):
    """
    Ensures that the alternative id's in `nest_spec` are integers. Raises a
    helpful ValueError if they are not.
    Parameters
    ----------
    nest_spec : OrderedDict, or None, optional.
        Keys are strings that define the name of the nests. Values are lists of
        alternative ids, denoting which alternatives belong to which nests.
        Each alternative id must only be associated with a single nest!
        Default == None.
    list_elements : list of ints.
        The flattened alternative ids taken from the values of `nest_spec`;
        each element should correspond to one of the alternatives identified
        as belonging to a nest.
    Returns
    -------
    None.
    """
    # NOTE: nest_spec itself is not inspected here; it is kept in the
    # signature for interface compatibility with the sibling validators.
    # Raise directly instead of assert-and-catch: asserts are stripped
    # under `python -O`, which would silently disable this validation.
    if not all(isinstance(x, int) for x in list_elements):
        msg = "All elements of the nest_spec values should be integers"
        raise ValueError(msg)
    return None
helpful ValueError if they are not.
Parameters
----------
nest_spec : OrderedDict, or None, optional.
Keys are strings that define the name of the nests. Values are lists of
alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Default == None.
list_elements : list of lists of ints.
Each element should correspond to one of the alternatives identified as
belonging to a nest.
Returns
-------
None. | Below is the the instruction that describes the task:
### Input:
Ensures that the alternative id's in `nest_spec` are integers. Raises a
helpful ValueError if they are not.
Parameters
----------
nest_spec : OrderedDict, or None, optional.
Keys are strings that define the name of the nests. Values are lists of
alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Default == None.
list_elements : list of lists of ints.
Each element should correspond to one of the alternatives identified as
belonging to a nest.
Returns
-------
None.
### Response:
def ensure_alt_ids_in_nest_spec_are_ints(nest_spec, list_elements):
"""
Ensures that the alternative id's in `nest_spec` are integers. Raises a
helpful ValueError if they are not.
Parameters
----------
nest_spec : OrderedDict, or None, optional.
Keys are strings that define the name of the nests. Values are lists of
alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Default == None.
list_elements : list of lists of ints.
Each element should correspond to one of the alternatives identified as
belonging to a nest.
Returns
-------
None.
"""
try:
assert all([isinstance(x, int) for x in list_elements])
except AssertionError:
msg = "All elements of the nest_spec values should be integers"
raise ValueError(msg)
return None |
def subontology(self, minimal=False):
    """Generates a sub-ontology based on associations

    Delegates to the underlying ontology, restricted to this association
    set's objects.
    """
    restricted_terms = self.objects
    return self.ontology.subontology(restricted_terms, minimal=minimal)
### Input:
Generates a sub-ontology based on associations
### Response:
def subontology(self, minimal=False):
"""
Generates a sub-ontology based on associations
"""
return self.ontology.subontology(self.objects, minimal=minimal) |
def load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding YAMLDict objects.

    :param stream: A string or file-like object containing one or more
        YAML documents.
    :yields: One YAMLDict object per document, lazily.
    """
    loader = YAMLDictLoader(stream)
    try:
        while loader.check_data():
            yield loader.get_data()
    finally:
        # Release loader resources even if the consumer stops iterating
        # early or a parse error is raised.
        loader.dispose() | Parse all YAML documents in a stream
loader.dispose() | Parse all YAML documents in a stream
and produce corresponding YAMLDict objects. | Below is the the instruction that describes the task:
### Input:
Parse all YAML documents in a stream
and produce corresponding YAMLDict objects.
### Response:
def load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding YAMLDict objects.
"""
loader = YAMLDictLoader(stream)
try:
while loader.check_data():
yield loader.get_data()
finally:
loader.dispose() |
def connect(self, interface=None):
    """Connect to the USB for the hottop.
    Attempt to discover the USB port used for the Hottop and then form a
    connection using the serial library.
    :param interface: Optional explicit serial port; when omitted the port
        is auto-discovered.
    :returns: bool
    :raises SerialConnectionError:
    """
    # Simulation mode never touches real hardware.
    if self._simulate:
        return True
    if not interface:
        # NOTE(review): the discovered port is only logged here;
        # presumably _autodiscover_usb() sets self.USB_PORT as a side
        # effect -- confirm.
        match = self._autodiscover_usb()
        self._log.debug("Auto-discovered USB port: %s" % match)
    else:
        self.USB_PORT = interface
    try:
        self._conn = serial.Serial(self.USB_PORT, baudrate=self.BAUDRATE,
                                   bytesize=self.BYTE_SIZE,
                                   parity=self.PARITY,
                                   stopbits=self.STOPBITS,
                                   timeout=self.TIMEOUT)
    except serial.serialutil.SerialException as e:
        # Re-wrap so callers only need to handle SerialConnectionError.
        raise SerialConnectionError(str(e))
    self._log.debug("Serial connection set")
    if not self._conn.isOpen():
        self._conn.open()
        self._log.debug("Serial connection opened")
    return True | Connect to the USB for the hottop.
Attempt to discover the USB port used for the Hottop and then form a
connection using the serial library.
:returns: bool
:raises SerialConnectionError: | Below is the the instruction that describes the task:
### Input:
Connect to the USB for the hottop.
Attempt to discover the USB port used for the Hottop and then form a
connection using the serial library.
:returns: bool
:raises SerialConnectionError:
### Response:
def connect(self, interface=None):
"""Connect to the USB for the hottop.
Attempt to discover the USB port used for the Hottop and then form a
connection using the serial library.
:returns: bool
:raises SerialConnectionError:
"""
if self._simulate:
return True
if not interface:
match = self._autodiscover_usb()
self._log.debug("Auto-discovered USB port: %s" % match)
else:
self.USB_PORT = interface
try:
self._conn = serial.Serial(self.USB_PORT, baudrate=self.BAUDRATE,
bytesize=self.BYTE_SIZE,
parity=self.PARITY,
stopbits=self.STOPBITS,
timeout=self.TIMEOUT)
except serial.serialutil.SerialException as e:
raise SerialConnectionError(str(e))
self._log.debug("Serial connection set")
if not self._conn.isOpen():
self._conn.open()
self._log.debug("Serial connection opened")
return True |
def regex_query(expression):
    """Apply regular expression to result of expression."""
    def _regex(index, expression=expression):
        """Return store key for documents that satisfy expression."""
        pattern = re.compile(expression)
        matching_keys = []
        for value, store_keys in index.get_index().items():
            # Only string values can be matched against the pattern;
            # re.match anchors at the start of the value.
            if isinstance(value, six.string_types) and re.match(pattern, value):
                matching_keys.extend(store_keys)
        return matching_keys
    return _regex
### Input:
Apply regular expression to result of expression.
### Response:
def regex_query(expression):
"""Apply regular expression to result of expression."""
def _regex(index, expression=expression):
"""Return store key for documents that satisfy expression."""
pattern = re.compile(expression)
return [
store_key
for value, store_keys
in index.get_index().items()
if (isinstance(value, six.string_types)
and re.match(pattern, value))
for store_key in store_keys
]
return _regex |
def create_schema(self, connection):
    """
    Will create the schema in the database
    :param connection: An open DB-API connection used to issue the DDL.
    """
    # An unqualified table name lives in the default schema, so there is
    # nothing to create.
    if '.' not in self.table:
        return
    # NOTE(review): the schema name is interpolated directly into the SQL
    # string. self.table is presumably trusted configuration, but this is
    # injection-prone if it can ever come from user input -- confirm.
    query = 'CREATE SCHEMA IF NOT EXISTS {schema_name};'.format(schema_name=self.table.split('.')[0])
    connection.cursor().execute(query) | Will create the schema in the database
### Input:
Will create the schema in the database
### Response:
def create_schema(self, connection):
"""
Will create the schema in the database
"""
if '.' not in self.table:
return
query = 'CREATE SCHEMA IF NOT EXISTS {schema_name};'.format(schema_name=self.table.split('.')[0])
connection.cursor().execute(query) |
def _numeric_summary(arg, exact_nunique=False, prefix=None):
    """
    Compute a set of summary metrics from the input numeric value expression
    Parameters
    ----------
    arg : numeric value expression
    exact_nunique : boolean, default False
        If True, use an exact distinct count; otherwise an approximate one.
    prefix : string, default None
        String prefix for metric names
    Returns
    -------
    summary : (count, # nulls, min, max, sum, mean, nunique)
    """
    # Build the metric expressions lazily; nothing is executed here.
    metrics = [
        arg.count(),
        arg.isnull().sum().name('nulls'),
        arg.min(),
        arg.max(),
        arg.sum(),
        arg.mean(),
    ]
    # The distinct-count metric is named after the variant used so the
    # caller can tell exact and approximate results apart.
    if exact_nunique:
        unique_metric = arg.nunique().name('nunique')
    else:
        unique_metric = arg.approx_nunique().name('approx_nunique')
    metrics.append(unique_metric)
    return _wrap_summary_metrics(metrics, prefix) | Compute a set of summary metrics from the input numeric value expression
Parameters
----------
arg : numeric value expression
exact_nunique : boolean, default False
prefix : string, default None
String prefix for metric names
Returns
-------
summary : (count, # nulls, min, max, sum, mean, nunique) | Below is the the instruction that describes the task:
### Input:
Compute a set of summary metrics from the input numeric value expression
Parameters
----------
arg : numeric value expression
exact_nunique : boolean, default False
prefix : string, default None
String prefix for metric names
Returns
-------
summary : (count, # nulls, min, max, sum, mean, nunique)
### Response:
def _numeric_summary(arg, exact_nunique=False, prefix=None):
"""
Compute a set of summary metrics from the input numeric value expression
Parameters
----------
arg : numeric value expression
exact_nunique : boolean, default False
prefix : string, default None
String prefix for metric names
Returns
-------
summary : (count, # nulls, min, max, sum, mean, nunique)
"""
metrics = [
arg.count(),
arg.isnull().sum().name('nulls'),
arg.min(),
arg.max(),
arg.sum(),
arg.mean(),
]
if exact_nunique:
unique_metric = arg.nunique().name('nunique')
else:
unique_metric = arg.approx_nunique().name('approx_nunique')
metrics.append(unique_metric)
return _wrap_summary_metrics(metrics, prefix) |
def update_positions(tree, positions):
    """Updates the tree with new positions

    Walks the <step> elements of *tree* in lockstep with *positions* and
    writes each position value into the step's attributes. Keys ending in
    "-rel" are resolved against the element whose id the value names.
    """
    for step, pos in zip(tree.findall('step'), positions):
        # sorted() makes the processing order deterministic, so an absolute
        # key (e.g. 'x') is handled before its relative form ('x-rel').
        for key in sorted(pos):
            value = pos.get(key)
            if key.endswith("-rel"):
                abs_key = key[:key.index("-rel")]
                if value is not None:
                    # Resolve the relative offset by adding the referenced
                    # element's absolute value; pos is mutated in place.
                    els = tree.findall(".//*[@id='" + value + "']")
                    for el in els :
                        pos[abs_key] = num(el.get(abs_key)) + pos.get(abs_key)
                    step.attrib[abs_key] = str(pos.get(abs_key))
            else:
                step.attrib[key] = str(pos[key])
        # NOTE(review): 'hovercraft-path' is stripped from the output tree;
        # presumably internal bookkeeping not meant to be serialized.
        if 'hovercraft-path' in step.attrib:
            del step.attrib['hovercraft-path'] | Updates the tree with new positions
### Input:
Updates the tree with new positions
### Response:
def update_positions(tree, positions):
"""Updates the tree with new positions"""
for step, pos in zip(tree.findall('step'), positions):
for key in sorted(pos):
value = pos.get(key)
if key.endswith("-rel"):
abs_key = key[:key.index("-rel")]
if value is not None:
els = tree.findall(".//*[@id='" + value + "']")
for el in els :
pos[abs_key] = num(el.get(abs_key)) + pos.get(abs_key)
step.attrib[abs_key] = str(pos.get(abs_key))
else:
step.attrib[key] = str(pos[key])
if 'hovercraft-path' in step.attrib:
del step.attrib['hovercraft-path'] |
def has_operator_manifest(self):
    """
    Check if Dockerfile sets the operator manifest label
    :return: bool
    """
    dockerfile = df_parser(self.workflow.builder.df_path, workflow=self.workflow)
    labels = Labels(dockerfile.labels)
    try:
        _, operator_label = labels.get_name_and_value(Labels.LABEL_TYPE_OPERATOR_MANIFESTS)
    except KeyError:
        # Label absent entirely -- treat the same as an explicit "false".
        operator_label = 'false'
    # Case-insensitive comparison; any value other than "true" is False.
    return operator_label.lower() == 'true' | Check if Dockerfile sets the operator manifest label
:return: bool | Below is the the instruction that describes the task:
### Input:
Check if Dockerfile sets the operator manifest label
:return: bool
### Response:
def has_operator_manifest(self):
"""
Check if Dockerfile sets the operator manifest label
:return: bool
"""
dockerfile = df_parser(self.workflow.builder.df_path, workflow=self.workflow)
labels = Labels(dockerfile.labels)
try:
_, operator_label = labels.get_name_and_value(Labels.LABEL_TYPE_OPERATOR_MANIFESTS)
except KeyError:
operator_label = 'false'
return operator_label.lower() == 'true' |
def set_iomem(self, iomem):
    """
    Set I/O memory size for this router.
    :param iomem: I/O memory size

    Note: generator-based coroutine (uses ``yield from``); must be driven
    by the event loop.
    """
    # Send the new value to the hypervisor first; only update the cached
    # value once the remote call has completed.
    yield from self._hypervisor.send('c3600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
    log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
                                                                                                   id=self._id,
                                                                                                   old_iomem=self._iomem,
                                                                                                   new_iomem=iomem))
    self._iomem = iomem | Set I/O memory size for this router.
:param iomem: I/O memory size | Below is the the instruction that describes the task:
### Input:
Set I/O memory size for this router.
:param iomem: I/O memory size
### Response:
def set_iomem(self, iomem):
"""
Set I/O memory size for this router.
:param iomem: I/O memory size
"""
yield from self._hypervisor.send('c3600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,
old_iomem=self._iomem,
new_iomem=iomem))
self._iomem = iomem |
def save(self):
    """
    Save the project configuration
    This method dumps the configuration for each project and the project
    paths (see the :attr:`all_projects` attribute) to the hard drive
    """
    project_paths = OrderedDict()
    for project, d in OrderedDict(self).items():
        if isinstance(d, dict):
            project_path = d['root']
            fname = osp.join(project_path, '.project', '.project.yml')
            if not osp.exists(osp.dirname(fname)):
                os.makedirs(osp.dirname(fname))
            # Keep the previous config file as a '~' backup before
            # overwriting it.
            if osp.exists(fname):
                os.rename(fname, fname + '~')
            # Convert to relative paths on a deep copy so the in-memory
            # configuration keeps its absolute paths.
            d = self.rel_paths(copy.deepcopy(d))
            safe_dump(d, fname, default_flow_style=False)
            project_paths[project] = project_path
        else:
            # NOTE(review): this rebinds the whole mapping to a single
            # stored path, clobbering entries collected so far; probably
            # meant project_paths[project] = self.project_paths[project]
            # -- confirm.
            project_paths = self.project_paths[project]
    self.project_paths = project_paths
    safe_dump(project_paths, self.all_projects, default_flow_style=False)
This method dumps the configuration for each project and the project
paths (see the :attr:`all_projects` attribute) to the hard drive | Below is the the instruction that describes the task:
### Input:
Save the project configuration
This method dumps the configuration for each project and the project
paths (see the :attr:`all_projects` attribute) to the hard drive
### Response:
def save(self):
"""
Save the project configuration
This method dumps the configuration for each project and the project
paths (see the :attr:`all_projects` attribute) to the hard drive
"""
project_paths = OrderedDict()
for project, d in OrderedDict(self).items():
if isinstance(d, dict):
project_path = d['root']
fname = osp.join(project_path, '.project', '.project.yml')
if not osp.exists(osp.dirname(fname)):
os.makedirs(osp.dirname(fname))
if osp.exists(fname):
os.rename(fname, fname + '~')
d = self.rel_paths(copy.deepcopy(d))
safe_dump(d, fname, default_flow_style=False)
project_paths[project] = project_path
else:
project_paths = self.project_paths[project]
self.project_paths = project_paths
safe_dump(project_paths, self.all_projects, default_flow_style=False) |
def _attach(self, instruction, qargs, cargs):
"""DEPRECATED after 0.8"""
self.append(instruction, qargs, cargs) | DEPRECATED after 0.8 | Below is the the instruction that describes the task:
### Input:
DEPRECATED after 0.8
### Response:
def _attach(self, instruction, qargs, cargs):
"""DEPRECATED after 0.8"""
self.append(instruction, qargs, cargs) |
def get_list_subtasks(client, list_id, completed=False):
    ''' Gets subtasks for the list with given ID '''
    # list_id may arrive as a string; the API expects an integer.
    query = {
        'list_id': int(list_id),
        'completed': completed,
    }
    response = client.authenticated_request(client.api.Endpoints.SUBTASKS,
                                            params=query)
    return response.json()
### Input:
Gets subtasks for the list with given ID
### Response:
def get_list_subtasks(client, list_id, completed=False):
''' Gets subtasks for the list with given ID '''
params = {
'list_id' : int(list_id),
'completed' : completed,
}
response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=params)
return response.json() |
def disconnect_async(self, conn_id, callback):
    """Asynchronously disconnect from a device that has previously been connected
    Args:
        conn_id (int): a unique identifier for this connection on the DeviceManager
            that owns this adapter.
        callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)
            when the disconnection finishes. Disconnection can only either succeed or timeout.
    """
    try:
        context = self.conns.get_context(conn_id)
    except ArgumentError:
        # Unknown connection: report failure via the callback rather than
        # raising to the caller.
        callback(conn_id, self.id, False, "Could not find connection information")
        return
    # Register the pending disconnection so the callback fires on a reply
    # or after the configured timeout.
    self.conns.begin_disconnection(conn_id, callback, self.get_config('default_timeout'))
    topics = context['topics']
    disconn_message = {'key': context['key'], 'client': self.name, 'type': 'command', 'operation': 'disconnect'}
    self.client.publish(topics.action, disconn_message) | Asynchronously disconnect from a device that has previously been connected
Args:
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)
when the disconnection finishes. Disconnection can only either succeed or timeout. | Below is the the instruction that describes the task:
### Input:
Asynchronously disconnect from a device that has previously been connected
Args:
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)
when the disconnection finishes. Disconnection can only either succeed or timeout.
### Response:
def disconnect_async(self, conn_id, callback):
"""Asynchronously disconnect from a device that has previously been connected
Args:
conn_id (int): a unique identifier for this connection on the DeviceManager
that owns this adapter.
callback (callable): A function called as callback(conn_id, adapter_id, success, failure_reason)
when the disconnection finishes. Disconnection can only either succeed or timeout.
"""
try:
context = self.conns.get_context(conn_id)
except ArgumentError:
callback(conn_id, self.id, False, "Could not find connection information")
return
self.conns.begin_disconnection(conn_id, callback, self.get_config('default_timeout'))
topics = context['topics']
disconn_message = {'key': context['key'], 'client': self.name, 'type': 'command', 'operation': 'disconnect'}
self.client.publish(topics.action, disconn_message) |
def predict_normal_binding(job, binding_result, transgened_files, allele, peplen, univ_options,
                           mhc_options):
    """
    Predict the binding score for the normal counterparts of the peptides in mhc_dict and then
    return the results in a properly formatted structure.
    :param binding_result: The results from running predict_mhci_binding or
           predict_mhcii_binding on a single allele. For MHCII (peplen == '15') this is a
           (fsID, predictor_name) tuple; for MHCI it is a bare fsID.
    :param dict transgened_files: A dictionary containing the jobstore IDs for "T_<peplen>_mer.faa"
           and "N_<peplen>_mer.faa"
    :param str allele: The allele to get binding for
    :param str peplen: The peptide length
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict mhc_options: Options specific to mhci or mhcii binding predictions
    :return: A fully filled out mhc_dict with normal information
             output_dict:
                 |- 'tumor': fsID
                 +- 'normal': fsID or (fsID, str) -- Depending on MHCI or MHCII
    :rtype: dict
    """
    work_dir = os.getcwd()
    input_files = get_files_from_filestore(job, transgened_files, work_dir)
    iars = read_fastas(input_files)
    if peplen == '15':  # MHCII
        mhc_file = job.fileStore.readGlobalFile(binding_result[0],
                                                os.path.join(work_dir, 'mhci_results'))
        predictor = binding_result[1]
        if predictor is None:
            return {'tumor': None,
                    'normal': None,
                    'predictor': None}
        # Map each MHCII predictor to its result parser; all predictors then
        # share the same peptide-extraction and child-job bookkeeping below
        # (previously three near-identical copies of the same branch).
        parsers = {'Consensus': _process_consensus_mhcii,
                   'Sturniolo': _process_sturniolo_mhcii,
                   'netMHCIIpan': _process_net_mhcii}
        if predictor not in parsers:
            raise RuntimeError('Shouldn\'t ever see this!!!')
        results = parsers[predictor](mhc_file)
        results, peptides = _get_normal_peptides(job, results, iars, peplen)
        peptfile = _write_peptide_fasta(job, peptides)
        tumor_fsid = _write_results_json(job, results)
        # netMHCIIpan uses its own prediction entry point and options.
        if predictor == 'netMHCIIpan':
            normal_job = job.addChildJobFn(predict_netmhcii_binding, peptfile, allele,
                                           univ_options, mhc_options['netmhciipan'],
                                           disk='100M', memory='100M', cores=1)
        else:
            normal_job = job.addChildJobFn(predict_mhcii_binding, peptfile, allele,
                                           univ_options, mhc_options, disk='100M',
                                           memory='100M', cores=1)
        return {'tumor': tumor_fsid,
                'normal': normal_job.rv(),
                'predictor': predictor}
    else:  # MHCI
        mhc_file = job.fileStore.readGlobalFile(binding_result,
                                                os.path.join(work_dir, 'mhci_results'))
        results = _process_mhci(mhc_file)
        results, peptides = _get_normal_peptides(job, results, iars, peplen)
        peptfile = _write_peptide_fasta(job, peptides)
        tumor_fsid = _write_results_json(job, results)
        job.fileStore.logToMaster('Ran predict_normal_binding on %s for allele %s and length %s '
                                  'successfully' % (univ_options['patient'], allele, peplen))
        return {'tumor': tumor_fsid,
                'normal': job.addChildJobFn(predict_mhci_binding, peptfile, allele, peplen,
                                            univ_options, mhc_options, disk='100M', memory='100M',
                                            cores=1).rv()}


def _write_peptide_fasta(job, peptides):
    """Write *peptides* to 'peptides.faa' (each peptide doubles as its own
    fasta header) and return the jobstore fsID of the file."""
    with open('peptides.faa', 'w') as pfile:
        for pept in peptides:
            print('>', pept, '\n', pept, sep='', file=pfile)
    return job.fileStore.writeGlobalFile(pfile.name)


def _write_results_json(job, results):
    """Dump the *results* dataframe to 'results.json' and return its fsID."""
    with open('results.json', 'w') as rj:
        json.dump(results.to_json(), rj)
    return job.fileStore.writeGlobalFile(rj.name)
return the results in a properly formatted structure.
:param str binding_result: The results from running predict_mhci_binding or
predict_mhcii_binding on a single allele
:param dict transgened_files: A dictionary containing the jobstore IDs for "T_<peplen>_mer.faa"
and "N_<peplen>_mer.faa"
:param str allele: The allele to get binding for
:param str peplen: The peptide length
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mhc_options: Options specific to mhci or mhcii binding predictions
:return: A fully filled out mhc_dict with normal information
output_dict:
|- 'tumor': fsID
+- 'normal': fsID or (fsID, str) -- Depending on MHCI or MHCII
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Predict the binding score for the normal counterparts of the peptides in mhc_dict and then
return the results in a properly formatted structure.
:param str binding_result: The results from running predict_mhci_binding or
predict_mhcii_binding on a single allele
:param dict transgened_files: A dictionary containing the jobstore IDs for "T_<peplen>_mer.faa"
and "N_<peplen>_mer.faa"
:param str allele: The allele to get binding for
:param str peplen: The peptide length
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mhc_options: Options specific to mhci or mhcii binding predictions
:return: A fully filled out mhc_dict with normal information
output_dict:
|- 'tumor': fsID
+- 'normal': fsID or (fsID, str) -- Depending on MHCI or MHCII
:rtype: dict
### Response:
def predict_normal_binding(job, binding_result, transgened_files, allele, peplen, univ_options,
mhc_options):
"""
Predict the binding score for the normal counterparts of the peptides in mhc_dict and then
return the results in a properly formatted structure.
:param str binding_result: The results from running predict_mhci_binding or
predict_mhcii_binding on a single allele
:param dict transgened_files: A dictionary containing the jobstore IDs for "T_<peplen>_mer.faa"
and "N_<peplen>_mer.faa"
:param str allele: The allele to get binding for
:param str peplen: The peptide length
:param dict univ_options: Dict of universal options used by almost all tools
:param dict mhc_options: Options specific to mhci or mhcii binding predictions
:return: A fully filled out mhc_dict with normal information
output_dict:
|- 'tumor': fsID
+- 'normal': fsID or (fsID, str) -- Depending on MHCI or MHCII
:rtype: dict
"""
work_dir = os.getcwd()
results = pandas.DataFrame(columns=['allele', 'pept', 'tumor_pred', 'core'])
input_files = get_files_from_filestore(job, transgened_files, work_dir)
iars = read_fastas(input_files)
if peplen == '15': # MHCII
mhc_file = job.fileStore.readGlobalFile(binding_result[0],
os.path.join(work_dir, 'mhci_results'))
predictor = binding_result[1]
core_col = None # Variable to hold the column number with the core
if predictor is None:
return {'tumor': None,
'normal': None,
'predictor': None}
elif predictor == 'Consensus':
results = _process_consensus_mhcii(mhc_file)
results, peptides = _get_normal_peptides(job, results, iars, peplen)
with open('peptides.faa', 'w') as pfile:
for pept in peptides:
print('>', pept, '\n', pept, sep='', file=pfile)
peptfile = job.fileStore.writeGlobalFile(pfile.name)
with open('results.json', 'w') as rj:
json.dump(results.to_json(), rj)
return {'tumor': job.fileStore.writeGlobalFile(rj.name),
'normal': job.addChildJobFn(predict_mhcii_binding, peptfile, allele,
univ_options, mhc_options, disk='100M',
memory='100M', cores=1).rv(),
'predictor': 'Consensus'}
elif predictor == 'Sturniolo':
results = _process_sturniolo_mhcii(mhc_file)
results, peptides = _get_normal_peptides(job, results, iars, peplen)
with open('peptides.faa', 'w') as pfile:
for pept in peptides:
print('>', pept, '\n', pept, sep='', file=pfile)
peptfile = job.fileStore.writeGlobalFile(pfile.name)
with open('results.json', 'w') as rj:
json.dump(results.to_json(), rj)
return {'tumor': job.fileStore.writeGlobalFile(rj.name),
'normal': job.addChildJobFn(predict_mhcii_binding, peptfile, allele,
univ_options, mhc_options, disk='100M',
memory='100M', cores=1).rv(),
'predictor': 'Sturniolo'}
elif predictor == 'netMHCIIpan':
results = _process_net_mhcii(mhc_file)
results, peptides = _get_normal_peptides(job, results, iars, peplen)
with open('peptides.faa', 'w') as pfile:
for pept in peptides:
print('>', pept, '\n', pept, sep='', file=pfile)
peptfile = job.fileStore.writeGlobalFile(pfile.name)
with open('results.json', 'w') as rj:
json.dump(results.to_json(), rj)
return {'tumor': job.fileStore.writeGlobalFile(rj.name),
'normal': job.addChildJobFn(predict_netmhcii_binding, peptfile, allele,
univ_options, mhc_options['netmhciipan'],
disk='100M', memory='100M',
cores=1).rv(),
'predictor': 'netMHCIIpan'}
else:
raise RuntimeError('Shouldn\'t ever see this!!!')
else: # MHCI
mhc_file = job.fileStore.readGlobalFile(binding_result,
os.path.join(work_dir, 'mhci_results'))
results = _process_mhci(mhc_file)
results, peptides = _get_normal_peptides(job, results, iars, peplen)
with open('peptides.faa', 'w') as pfile:
for pept in peptides:
print('>', pept, '\n', pept, sep='', file=pfile)
peptfile = job.fileStore.writeGlobalFile(pfile.name)
with open('results.json', 'w') as rj:
json.dump(results.to_json(), rj)
job.fileStore.logToMaster('Ran predict_normal_binding on %s for allele %s and length %s '
'successfully' % (univ_options['patient'], allele, peplen))
return {'tumor': job.fileStore.writeGlobalFile(rj.name),
'normal': job.addChildJobFn(predict_mhci_binding, peptfile, allele, peplen,
univ_options, mhc_options, disk='100M', memory='100M',
cores=1).rv()} |
def compute_tab_title(self, vte):
"""Abbreviate and cut vte terminal title when necessary
"""
vte_title = vte.get_window_title() or _("Terminal")
try:
current_directory = vte.get_current_directory()
if self.abbreviate and vte_title.endswith(current_directory):
parts = current_directory.split('/')
parts = [s[:1] for s in parts[:-1]] + [parts[-1]]
vte_title = vte_title[:len(vte_title) - len(current_directory)] + '/'.join(parts)
except OSError:
pass
return TabNameUtils.shorten(vte_title, self.settings) | Abbreviate and cut vte terminal title when necessary | Below is the the instruction that describes the task:
### Input:
Abbreviate and cut vte terminal title when necessary
### Response:
def compute_tab_title(self, vte):
"""Abbreviate and cut vte terminal title when necessary
"""
vte_title = vte.get_window_title() or _("Terminal")
try:
current_directory = vte.get_current_directory()
if self.abbreviate and vte_title.endswith(current_directory):
parts = current_directory.split('/')
parts = [s[:1] for s in parts[:-1]] + [parts[-1]]
vte_title = vte_title[:len(vte_title) - len(current_directory)] + '/'.join(parts)
except OSError:
pass
return TabNameUtils.shorten(vte_title, self.settings) |
def get(self, timeout=10):
"""get() -> {'id': 32-byte-md5, 'body': msg-body}"""
req = self.req({'op': 'GET', 'timeout': timeout})
if req.status_code != 200:
return None
result = req.json()
if result.get('status') != 'ok':
return False
return result | get() -> {'id': 32-byte-md5, 'body': msg-body} | Below is the the instruction that describes the task:
### Input:
get() -> {'id': 32-byte-md5, 'body': msg-body}
### Response:
def get(self, timeout=10):
"""get() -> {'id': 32-byte-md5, 'body': msg-body}"""
req = self.req({'op': 'GET', 'timeout': timeout})
if req.status_code != 200:
return None
result = req.json()
if result.get('status') != 'ok':
return False
return result |
def make_avro_schema(i, # type: List[Any]
loader # type: Loader
): # type: (...) -> Names
"""
All in one convenience function.
Call make_avro() and make_avro_schema_from_avro() separately if you need
the intermediate result for diagnostic output.
"""
names = Names()
avro = make_avro(i, loader)
make_avsc_object(convert_to_dict(avro), names)
return names | All in one convenience function.
Call make_avro() and make_avro_schema_from_avro() separately if you need
the intermediate result for diagnostic output. | Below is the the instruction that describes the task:
### Input:
All in one convenience function.
Call make_avro() and make_avro_schema_from_avro() separately if you need
the intermediate result for diagnostic output.
### Response:
def make_avro_schema(i, # type: List[Any]
loader # type: Loader
): # type: (...) -> Names
"""
All in one convenience function.
Call make_avro() and make_avro_schema_from_avro() separately if you need
the intermediate result for diagnostic output.
"""
names = Names()
avro = make_avro(i, loader)
make_avsc_object(convert_to_dict(avro), names)
return names |
def durationSeconds(self):
"""
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
"""
f=0.0
if self.isVideo() or self.isAudio():
if self.__dict__['duration']:
try:
f=float(self.__dict__['duration'])
except Exception as e:
print "None numeric duration"
return f | Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream. | Below is the the instruction that describes the task:
### Input:
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
### Response:
def durationSeconds(self):
"""
Returns the runtime duration of the video stream as a floating point number of seconds.
Returns 0.0 if not a video stream.
"""
f=0.0
if self.isVideo() or self.isAudio():
if self.__dict__['duration']:
try:
f=float(self.__dict__['duration'])
except Exception as e:
print "None numeric duration"
return f |
def increment(self, size: int):
'''Increment the number of files downloaded.
Args:
size: The size of the file
'''
assert size >= 0, size
self.files += 1
self.size += size
self.bandwidth_meter.feed(size) | Increment the number of files downloaded.
Args:
size: The size of the file | Below is the the instruction that describes the task:
### Input:
Increment the number of files downloaded.
Args:
size: The size of the file
### Response:
def increment(self, size: int):
'''Increment the number of files downloaded.
Args:
size: The size of the file
'''
assert size >= 0, size
self.files += 1
self.size += size
self.bandwidth_meter.feed(size) |
def _build_settings(config_data):
"""
Build the django CMS settings dictionary
:param config_data: configuration data
"""
spacer = ' '
text = []
vars = get_settings()
vars.MIDDLEWARE_CLASSES.insert(0, vars.APPHOOK_RELOAD_MIDDLEWARE_CLASS)
processors = vars.TEMPLATE_CONTEXT_PROCESSORS + vars.TEMPLATE_CONTEXT_PROCESSORS_3
text.append(data.TEMPLATES_1_8.format(
loaders=(',\n' + spacer * 4).join([
"'{0}'".format(var) for var in vars.TEMPLATE_LOADERS
if (
LooseVersion(config_data.django_version) < LooseVersion('2.0') or
'eggs' not in var
)
]),
processors=(',\n' + spacer * 4).join(["'{0}'".format(var) for var in processors]),
dirs="os.path.join(BASE_DIR, '{0}', 'templates'),".format(config_data.project_name)
))
if LooseVersion(config_data.django_version) >= LooseVersion('1.10'):
text.append('MIDDLEWARE = [\n{0}{1}\n]'.format(
spacer, (',\n' + spacer).join(['\'{0}\''.format(var)
for var in vars.MIDDLEWARE_CLASSES])
))
else:
text.append('MIDDLEWARE_CLASSES = [\n{0}{1}\n]'.format(
spacer, (',\n' + spacer).join(["'{0}'".format(var)
for var in vars.MIDDLEWARE_CLASSES])
))
apps = list(vars.INSTALLED_APPS)
apps = list(vars.CMS_3_HEAD) + apps
apps.extend(vars.TREEBEARD_APPS)
apps.extend(vars.CMS_3_APPLICATIONS)
if not config_data.no_plugins:
apps.extend(vars.FILER_PLUGINS_3)
if config_data.aldryn: # pragma: no cover
apps.extend(vars.ALDRYN_APPLICATIONS)
if config_data.reversion and LooseVersion(config_data.cms_version) < LooseVersion('3.4'):
apps.extend(vars.REVERSION_APPLICATIONS)
text.append('INSTALLED_APPS = [\n{0}{1}\n]'.format(
spacer, (',\n' + spacer).join(['\'{0}\''.format(var) for var in apps] +
['\'{0}\''.format(config_data.project_name)])
))
text.append('LANGUAGES = (\n{0}{1}\n{0}{2}\n)'.format(
spacer, '## Customize this',
('\n' + spacer).join(['(\'{0}\', gettext(\'{0}\')),'.format(item) for item in config_data.languages]) # NOQA
))
cms_langs = deepcopy(vars.CMS_LANGUAGES)
for lang in config_data.languages:
lang_dict = {'code': lang, 'name': lang}
lang_dict.update(copy(cms_langs['default']))
cms_langs[1].append(lang_dict)
cms_text = ['CMS_LANGUAGES = {']
cms_text.append('{0}{1}'.format(spacer, '## Customize this'))
for key, value in iteritems(cms_langs):
if key == 'default':
cms_text.append('{0}\'{1}\': {{'.format(spacer, key))
for config_name, config_value in iteritems(value):
cms_text.append('{0}\'{1}\': {2},'.format(spacer * 2, config_name, config_value))
cms_text.append('{0}}},'.format(spacer))
else:
cms_text.append('{0}{1}: ['.format(spacer, key))
for lang in value:
cms_text.append('{0}{{'.format(spacer * 2))
for config_name, config_value in iteritems(lang):
if config_name == 'code':
cms_text.append('{0}\'{1}\': \'{2}\','.format(spacer * 3, config_name, config_value)) # NOQA
elif config_name == 'name':
cms_text.append('{0}\'{1}\': gettext(\'{2}\'),'.format(spacer * 3, config_name, config_value)) # NOQA
else:
cms_text.append('{0}\'{1}\': {2},'.format(
spacer * 3, config_name, config_value
))
cms_text.append('{0}}},'.format(spacer * 2))
cms_text.append('{0}],'.format(spacer))
cms_text.append('}')
text.append('\n'.join(cms_text))
if config_data.bootstrap:
cms_templates = 'CMS_TEMPLATES_BOOTSTRAP'
else:
cms_templates = 'CMS_TEMPLATES'
text.append('CMS_TEMPLATES = (\n{0}{1}\n{0}{2}\n)'.format(
spacer, '## Customize this',
(',\n' + spacer).join(
['(\'{0}\', \'{1}\')'.format(*item) for item in getattr(vars, cms_templates)]
)
))
text.append('CMS_PERMISSION = {0}'.format(vars.CMS_PERMISSION))
text.append('CMS_PLACEHOLDER_CONF = {0}'.format(vars.CMS_PLACEHOLDER_CONF))
database = ['\'{0}\': {1}'.format(key, format_val(val)) for key, val in sorted(config_data.db_parsed.items(), key=lambda x: x[0])] # NOQA
text.append(textwrap.dedent("""
DATABASES = {{
'default': {{
{0}
}}
}}""").strip().format((',\n' + spacer * 2).join(database))) # NOQA
DJANGO_MIGRATION_MODULES = _detect_migration_layout(vars, apps)
text.append('MIGRATION_MODULES = {{\n{0}{1}\n}}'.format(
spacer, (',\n' + spacer).join(
['\'{0}\': \'{1}\''.format(*item) for item in DJANGO_MIGRATION_MODULES.items()]
)
))
if config_data.filer:
text.append('THUMBNAIL_PROCESSORS = (\n{0}{1}\n)'.format(
spacer, (',\n' + spacer).join(
['\'{0}\''.format(var) for var in vars.THUMBNAIL_PROCESSORS]
)
))
return '\n\n'.join(text) | Build the django CMS settings dictionary
:param config_data: configuration data | Below is the the instruction that describes the task:
### Input:
Build the django CMS settings dictionary
:param config_data: configuration data
### Response:
def _build_settings(config_data):
"""
Build the django CMS settings dictionary
:param config_data: configuration data
"""
spacer = ' '
text = []
vars = get_settings()
vars.MIDDLEWARE_CLASSES.insert(0, vars.APPHOOK_RELOAD_MIDDLEWARE_CLASS)
processors = vars.TEMPLATE_CONTEXT_PROCESSORS + vars.TEMPLATE_CONTEXT_PROCESSORS_3
text.append(data.TEMPLATES_1_8.format(
loaders=(',\n' + spacer * 4).join([
"'{0}'".format(var) for var in vars.TEMPLATE_LOADERS
if (
LooseVersion(config_data.django_version) < LooseVersion('2.0') or
'eggs' not in var
)
]),
processors=(',\n' + spacer * 4).join(["'{0}'".format(var) for var in processors]),
dirs="os.path.join(BASE_DIR, '{0}', 'templates'),".format(config_data.project_name)
))
if LooseVersion(config_data.django_version) >= LooseVersion('1.10'):
text.append('MIDDLEWARE = [\n{0}{1}\n]'.format(
spacer, (',\n' + spacer).join(['\'{0}\''.format(var)
for var in vars.MIDDLEWARE_CLASSES])
))
else:
text.append('MIDDLEWARE_CLASSES = [\n{0}{1}\n]'.format(
spacer, (',\n' + spacer).join(["'{0}'".format(var)
for var in vars.MIDDLEWARE_CLASSES])
))
apps = list(vars.INSTALLED_APPS)
apps = list(vars.CMS_3_HEAD) + apps
apps.extend(vars.TREEBEARD_APPS)
apps.extend(vars.CMS_3_APPLICATIONS)
if not config_data.no_plugins:
apps.extend(vars.FILER_PLUGINS_3)
if config_data.aldryn: # pragma: no cover
apps.extend(vars.ALDRYN_APPLICATIONS)
if config_data.reversion and LooseVersion(config_data.cms_version) < LooseVersion('3.4'):
apps.extend(vars.REVERSION_APPLICATIONS)
text.append('INSTALLED_APPS = [\n{0}{1}\n]'.format(
spacer, (',\n' + spacer).join(['\'{0}\''.format(var) for var in apps] +
['\'{0}\''.format(config_data.project_name)])
))
text.append('LANGUAGES = (\n{0}{1}\n{0}{2}\n)'.format(
spacer, '## Customize this',
('\n' + spacer).join(['(\'{0}\', gettext(\'{0}\')),'.format(item) for item in config_data.languages]) # NOQA
))
cms_langs = deepcopy(vars.CMS_LANGUAGES)
for lang in config_data.languages:
lang_dict = {'code': lang, 'name': lang}
lang_dict.update(copy(cms_langs['default']))
cms_langs[1].append(lang_dict)
cms_text = ['CMS_LANGUAGES = {']
cms_text.append('{0}{1}'.format(spacer, '## Customize this'))
for key, value in iteritems(cms_langs):
if key == 'default':
cms_text.append('{0}\'{1}\': {{'.format(spacer, key))
for config_name, config_value in iteritems(value):
cms_text.append('{0}\'{1}\': {2},'.format(spacer * 2, config_name, config_value))
cms_text.append('{0}}},'.format(spacer))
else:
cms_text.append('{0}{1}: ['.format(spacer, key))
for lang in value:
cms_text.append('{0}{{'.format(spacer * 2))
for config_name, config_value in iteritems(lang):
if config_name == 'code':
cms_text.append('{0}\'{1}\': \'{2}\','.format(spacer * 3, config_name, config_value)) # NOQA
elif config_name == 'name':
cms_text.append('{0}\'{1}\': gettext(\'{2}\'),'.format(spacer * 3, config_name, config_value)) # NOQA
else:
cms_text.append('{0}\'{1}\': {2},'.format(
spacer * 3, config_name, config_value
))
cms_text.append('{0}}},'.format(spacer * 2))
cms_text.append('{0}],'.format(spacer))
cms_text.append('}')
text.append('\n'.join(cms_text))
if config_data.bootstrap:
cms_templates = 'CMS_TEMPLATES_BOOTSTRAP'
else:
cms_templates = 'CMS_TEMPLATES'
text.append('CMS_TEMPLATES = (\n{0}{1}\n{0}{2}\n)'.format(
spacer, '## Customize this',
(',\n' + spacer).join(
['(\'{0}\', \'{1}\')'.format(*item) for item in getattr(vars, cms_templates)]
)
))
text.append('CMS_PERMISSION = {0}'.format(vars.CMS_PERMISSION))
text.append('CMS_PLACEHOLDER_CONF = {0}'.format(vars.CMS_PLACEHOLDER_CONF))
database = ['\'{0}\': {1}'.format(key, format_val(val)) for key, val in sorted(config_data.db_parsed.items(), key=lambda x: x[0])] # NOQA
text.append(textwrap.dedent("""
DATABASES = {{
'default': {{
{0}
}}
}}""").strip().format((',\n' + spacer * 2).join(database))) # NOQA
DJANGO_MIGRATION_MODULES = _detect_migration_layout(vars, apps)
text.append('MIGRATION_MODULES = {{\n{0}{1}\n}}'.format(
spacer, (',\n' + spacer).join(
['\'{0}\': \'{1}\''.format(*item) for item in DJANGO_MIGRATION_MODULES.items()]
)
))
if config_data.filer:
text.append('THUMBNAIL_PROCESSORS = (\n{0}{1}\n)'.format(
spacer, (',\n' + spacer).join(
['\'{0}\''.format(var) for var in vars.THUMBNAIL_PROCESSORS]
)
))
return '\n\n'.join(text) |
def saturate_colors(colors, amount):
"""Saturate all colors."""
if amount and float(amount) <= 1.0:
for i, _ in enumerate(colors):
if i not in [0, 7, 8, 15]:
colors[i] = util.saturate_color(colors[i], float(amount))
return colors | Saturate all colors. | Below is the the instruction that describes the task:
### Input:
Saturate all colors.
### Response:
def saturate_colors(colors, amount):
"""Saturate all colors."""
if amount and float(amount) <= 1.0:
for i, _ in enumerate(colors):
if i not in [0, 7, 8, 15]:
colors[i] = util.saturate_color(colors[i], float(amount))
return colors |
def owner(*paths, **kwargs): # pylint: disable=unused-argument
'''
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.opkg.version`, if a single
path is passed, a string will be returned, and if multiple paths are passed,
a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Example:
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
'''
if not paths:
return ''
ret = {}
cmd_search = ['opkg', 'search']
for path in paths:
cmd = cmd_search[:]
cmd.append(path)
output = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
if output:
ret[path] = output.split(' - ')[0].strip()
else:
ret[path] = ''
if len(ret) == 1:
return next(six.itervalues(ret))
return ret | Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.opkg.version`, if a single
path is passed, a string will be returned, and if multiple paths are passed,
a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Example:
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename | Below is the the instruction that describes the task:
### Input:
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.opkg.version`, if a single
path is passed, a string will be returned, and if multiple paths are passed,
a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Example:
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
### Response:
def owner(*paths, **kwargs): # pylint: disable=unused-argument
'''
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.opkg.version`, if a single
path is passed, a string will be returned, and if multiple paths are passed,
a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Example:
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
'''
if not paths:
return ''
ret = {}
cmd_search = ['opkg', 'search']
for path in paths:
cmd = cmd_search[:]
cmd.append(path)
output = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
if output:
ret[path] = output.split(' - ')[0].strip()
else:
ret[path] = ''
if len(ret) == 1:
return next(six.itervalues(ret))
return ret |
def parse_css(self, css):
"""
Parse a css style sheet into the CSS object.
For the moment this will only work for very simple css
documents. It works by using regular expression matching css
syntax. This is not bullet proof.
"""
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
rule = Rule(self.parse_selector(selector))
rule.properties = self.parse_declarations(declarations)
self.rules.append(rule) | Parse a css style sheet into the CSS object.
For the moment this will only work for very simple css
documents. It works by using regular expression matching css
syntax. This is not bullet proof. | Below is the the instruction that describes the task:
### Input:
Parse a css style sheet into the CSS object.
For the moment this will only work for very simple css
documents. It works by using regular expression matching css
syntax. This is not bullet proof.
### Response:
def parse_css(self, css):
"""
Parse a css style sheet into the CSS object.
For the moment this will only work for very simple css
documents. It works by using regular expression matching css
syntax. This is not bullet proof.
"""
rulesets = self.ruleset_re.findall(css)
for (selector, declarations) in rulesets:
rule = Rule(self.parse_selector(selector))
rule.properties = self.parse_declarations(declarations)
self.rules.append(rule) |
def _set_mpls_traffic_bypasses(self, v, load=False):
"""
Setter method for mpls_traffic_bypasses, mapped from YANG variable /telemetry/profile/mpls_traffic_bypass/mpls_traffic_bypasses (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_traffic_bypasses is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_traffic_bypasses() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("mpls_traffic_bypass_name",mpls_traffic_bypasses.mpls_traffic_bypasses, yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mpls-traffic-bypass-name', extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}), is_container='list', yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mpls_traffic_bypasses must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("mpls_traffic_bypass_name",mpls_traffic_bypasses.mpls_traffic_bypasses, yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mpls-traffic-bypass-name', extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}), is_container='list', yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
})
self.__mpls_traffic_bypasses = t
if hasattr(self, '_set'):
self._set() | Setter method for mpls_traffic_bypasses, mapped from YANG variable /telemetry/profile/mpls_traffic_bypass/mpls_traffic_bypasses (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_traffic_bypasses is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_traffic_bypasses() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for mpls_traffic_bypasses, mapped from YANG variable /telemetry/profile/mpls_traffic_bypass/mpls_traffic_bypasses (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_traffic_bypasses is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_traffic_bypasses() directly.
### Response:
def _set_mpls_traffic_bypasses(self, v, load=False):
"""
Setter method for mpls_traffic_bypasses, mapped from YANG variable /telemetry/profile/mpls_traffic_bypass/mpls_traffic_bypasses (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_traffic_bypasses is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_traffic_bypasses() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("mpls_traffic_bypass_name",mpls_traffic_bypasses.mpls_traffic_bypasses, yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mpls-traffic-bypass-name', extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}), is_container='list', yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mpls_traffic_bypasses must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("mpls_traffic_bypass_name",mpls_traffic_bypasses.mpls_traffic_bypasses, yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='mpls-traffic-bypass-name', extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}), is_container='list', yang_name="mpls-traffic-bypasses", rest_name="bypass-lsp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'Mplstrafficbypass', u'cli-suppress-mode': None, u'alt-name': u'bypass-lsp', u'info': u'MPLS Stats profile by Bypass LSP name', u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
})
self.__mpls_traffic_bypasses = t
if hasattr(self, '_set'):
self._set() |
def check_configuration(self, file_path, test_program, custom_args):
"""Checks if configuration is ok."""
# checking filepath
if not os.path.isdir(file_path):
raise InvalidFilePath("INVALID CONFIGURATION: file path %s is not a directory" %
os.path.abspath(file_path)
)
if not test_program in IMPLEMENTED_TEST_PROGRAMS:
raise InvalidTestProgram('The `%s` is unknown, or not yet implemented. Please chose another one.' % test_program)
if custom_args:
if not self.quiet and not ask("WARNING!!!\nYou are about to run the following command\n\n $ %s\n\nAre you sure you still want to proceed [y/N]? " % self.get_cmd()):
raise CancelDueToUserRequest('Test cancelled...') | Checks if configuration is ok. | Below is the the instruction that describes the task:
### Input:
Checks if configuration is ok.
### Response:
def check_configuration(self, file_path, test_program, custom_args):
"""Checks if configuration is ok."""
# checking filepath
if not os.path.isdir(file_path):
raise InvalidFilePath("INVALID CONFIGURATION: file path %s is not a directory" %
os.path.abspath(file_path)
)
if not test_program in IMPLEMENTED_TEST_PROGRAMS:
raise InvalidTestProgram('The `%s` is unknown, or not yet implemented. Please chose another one.' % test_program)
if custom_args:
if not self.quiet and not ask("WARNING!!!\nYou are about to run the following command\n\n $ %s\n\nAre you sure you still want to proceed [y/N]? " % self.get_cmd()):
raise CancelDueToUserRequest('Test cancelled...') |
def _chunk_report_(bytes_so_far, total_size, initial_size, t_0):
"""Show downloading percentage.
:param int bytes_so_far: number of downloaded bytes
:param int total_size: total size of the file (may be 0/None, depending
on download method).
:param int t_0: the time in seconds (as returned by time.time()) at which
the download was resumed / started.
:param int initial_size: if resuming, indicate the initial size of the
file. If not resuming, set to zero.
"""
if not total_size:
sys.stderr.write("\rDownloaded {0:d} of ? bytes.".format(bytes_so_far))
else:
# Estimate remaining download time
total_percent = float(bytes_so_far) / total_size
current_download_size = bytes_so_far - initial_size
bytes_remaining = total_size - bytes_so_far
delta_t = time.time() - t_0
download_rate = current_download_size / max(1e-8, float(delta_t))
# Minimum rate of 0.01 bytes/s, to avoid dividing by zero.
time_remaining = bytes_remaining / max(0.01, download_rate)
# Trailing whitespace is to erase extra char when message length
# varies
sys.stderr.write(
"\rDownloaded {0:d} of {1:d} bytes ({2:.1f}%, {3!s} remaining)".format(
bytes_so_far, total_size, total_percent * 100, _format_time(time_remaining))) | Show downloading percentage.
:param int bytes_so_far: number of downloaded bytes
:param int total_size: total size of the file (may be 0/None, depending
on download method).
:param int t_0: the time in seconds (as returned by time.time()) at which
the download was resumed / started.
:param int initial_size: if resuming, indicate the initial size of the
file. If not resuming, set to zero. | Below is the the instruction that describes the task:
### Input:
Show downloading percentage.
:param int bytes_so_far: number of downloaded bytes
:param int total_size: total size of the file (may be 0/None, depending
on download method).
:param int t_0: the time in seconds (as returned by time.time()) at which
the download was resumed / started.
:param int initial_size: if resuming, indicate the initial size of the
file. If not resuming, set to zero.
### Response:
def _chunk_report_(bytes_so_far, total_size, initial_size, t_0):
"""Show downloading percentage.
:param int bytes_so_far: number of downloaded bytes
:param int total_size: total size of the file (may be 0/None, depending
on download method).
:param int t_0: the time in seconds (as returned by time.time()) at which
the download was resumed / started.
:param int initial_size: if resuming, indicate the initial size of the
file. If not resuming, set to zero.
"""
if not total_size:
sys.stderr.write("\rDownloaded {0:d} of ? bytes.".format(bytes_so_far))
else:
# Estimate remaining download time
total_percent = float(bytes_so_far) / total_size
current_download_size = bytes_so_far - initial_size
bytes_remaining = total_size - bytes_so_far
delta_t = time.time() - t_0
download_rate = current_download_size / max(1e-8, float(delta_t))
# Minimum rate of 0.01 bytes/s, to avoid dividing by zero.
time_remaining = bytes_remaining / max(0.01, download_rate)
# Trailing whitespace is to erase extra char when message length
# varies
sys.stderr.write(
"\rDownloaded {0:d} of {1:d} bytes ({2:.1f}%, {3!s} remaining)".format(
bytes_so_far, total_size, total_percent * 100, _format_time(time_remaining))) |
def checkOnline(self, userId):
    """Check whether a user is currently online.

    :param userId: user ID, at most 64 bytes; the unique identifier of the
        user within the app. Duplicate IDs are treated as the same user.
        (required)
    :return code: result code, 200 means success.
    :return status: online status, 1 = online, 0 = offline.
    :return errorMessage: error message, if any.
    """
    # Field descriptors used to deserialize the API response.
    result_fields = [
        {"name": "code", "type": "Integer", "desc": "返回码,200 为正常。"},
        {"name": "status", "type": "String", "desc": "在线状态,1为在线,0为不在线。"},
        {"name": "errorMessage", "type": "String", "desc": "错误信息。"},
    ]
    desc = {
        "name": "CheckOnlineReslut",
        "desc": "checkOnlineUser返回结果",
        "fields": result_fields,
    }
    raw_response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/user/checkOnline.json',
        params={"userId": userId})
    return Response(raw_response, desc)
@param userId:用户 Id,最大长度 64 字节。是用户在 App 中的唯一标识码,必须保证在同一个 App 内不重复,重复的用户 Id 将被当作是同一用户。(必传)
@return code:返回码,200 为正常。
@return status:在线状态,1为在线,0为不在线。
@return errorMessage:错误信息。 | Below is the instruction that describes the task:
### Input:
检查用户在线状态 方法 方法
@param userId:用户 Id,最大长度 64 字节。是用户在 App 中的唯一标识码,必须保证在同一个 App 内不重复,重复的用户 Id 将被当作是同一用户。(必传)
@return code:返回码,200 为正常。
@return status:在线状态,1为在线,0为不在线。
@return errorMessage:错误信息。
### Response:
def checkOnline(self, userId):
    """Check whether a user is currently online.

    :param userId: user ID, at most 64 bytes; the unique identifier of the
        user within the app. Duplicate IDs are treated as the same user.
        (required)
    :return code: result code, 200 means success.
    :return status: online status, 1 = online, 0 = offline.
    :return errorMessage: error message, if any.
    """
    # Field descriptors used to deserialize the API response.
    result_fields = [
        {"name": "code", "type": "Integer", "desc": "返回码,200 为正常。"},
        {"name": "status", "type": "String", "desc": "在线状态,1为在线,0为不在线。"},
        {"name": "errorMessage", "type": "String", "desc": "错误信息。"},
    ]
    desc = {
        "name": "CheckOnlineReslut",
        "desc": "checkOnlineUser返回结果",
        "fields": result_fields,
    }
    raw_response = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/user/checkOnline.json',
        params={"userId": userId})
    return Response(raw_response, desc)
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    mem_free = memory()['free']
    # (minimum free memory, reserved headroom) tiers, largest first; the
    # headroom is subtracted before sizing the chunk. The *1000 scaling
    # suggests memory() reports kB -- confirm against its definition.
    tiers = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )
    for threshold, headroom in tiers:
        if mem_free > threshold:
            return int(((mem_free - headroom) * 1000) / (4 * n * N))
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
determine the number of rows or columns that can fit into memory.
Parameters
----------
N : int
The size of one of the dimensions of a two-dimensional array.
n : int
The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
Returns
-------
chunk_size : int
The size of the dimension orthogonal to the one of size 'N'. | Below is the instruction that describes the task:
### Input:
Given a two-dimensional array with a dimension of size 'N',
determine the number of rows or columns that can fit into memory.
Parameters
----------
N : int
The size of one of the dimensions of a two-dimensional array.
n : int
The number of arrays of size 'N' times 'chunk_size' that can fit in memory.
Returns
-------
chunk_size : int
The size of the dimension orthogonal to the one of size 'N'.
### Response:
def get_chunk_size(N, n):
    """Given a two-dimensional array with a dimension of size 'N',
    determine the number of rows or columns that can fit into memory.

    Parameters
    ----------
    N : int
        The size of one of the dimensions of a two-dimensional array.
    n : int
        The number of arrays of size 'N' times 'chunk_size' that can fit in memory.

    Returns
    -------
    chunk_size : int
        The size of the dimension orthogonal to the one of size 'N'.
    """
    mem_free = memory()['free']
    # (minimum free memory, reserved headroom) tiers, largest first; the
    # headroom is subtracted before sizing the chunk. The *1000 scaling
    # suggests memory() reports kB -- confirm against its definition.
    tiers = (
        (60000000, 10000000),
        (40000000, 7000000),
        (14000000, 2000000),
        (8000000, 1400000),
        (2000000, 900000),
        (1000000, 400000),
    )
    for threshold, headroom in tiers:
        if mem_free > threshold:
            return int(((mem_free - headroom) * 1000) / (4 * n * N))
    print("\nERROR: Cluster_Ensembles: get_chunk_size: "
          "this machine does not have enough free memory resources "
          "to perform ensemble clustering.\n")
    sys.exit(1)
def init_running_properties(self):
    """Initialize this instance's running properties.

    Every property declared in the class-level ``running_properties``
    mapping is set on the instance, so each instance owns its values.

    :return: None
    """
    for name, entry in list(self.__class__.running_properties.items()):
        default = entry.default
        if isinstance(default, (set, list, dict)):
            # Mutable defaults are copied so instances never share state.
            default = copy(default)
        setattr(self, name, default)
Each instance has its own property.
:return: None | Below is the instruction that describes the task:
### Input:
Initialize the running_properties.
Each instance have own property.
:return: None
### Response:
def init_running_properties(self):
    """Initialize this instance's running properties.

    Every property declared in the class-level ``running_properties``
    mapping is set on the instance, so each instance owns its values.

    :return: None
    """
    for name, entry in list(self.__class__.running_properties.items()):
        default = entry.default
        if isinstance(default, (set, list, dict)):
            # Mutable defaults are copied so instances never share state.
            default = copy(default)
        setattr(self, name, default)
def shrink(self):
    """
    Calculate the Constant-Correlation covariance matrix.

    Shrinks the sample covariance of ``self.X`` towards a constant-correlation
    prior in which every off-diagonal entry uses the average sample
    correlation (a Ledoit & Wolf style estimator -- NOTE(review): confirm the
    intended reference). If ``self.delta`` is ``None``, the optimal shrinkage
    constant is estimated from the data and cached on the instance; otherwise
    the user-supplied constant is used as-is.

    :return: shrunk sample covariance matrix
    :rtype: np.ndarray
    """
    # NaNs in the input matrix are replaced with zeros before estimation.
    x = np.nan_to_num(self.X.values)
    # de-mean returns
    t, n = np.shape(x)
    meanx = x.mean(axis=0)
    x = x - np.tile(meanx, (t, 1))
    # compute sample covariance matrix
    sample = (1.0 / t) * np.dot(x.T, x)
    # compute prior: constant-correlation matrix built from the average
    # off-diagonal correlation r_bar and the sample variances
    var = np.diag(sample).reshape(-1, 1)
    sqrtvar = np.sqrt(var)
    _var = np.tile(var, (n,))
    _sqrtvar = np.tile(sqrtvar, (n,))
    r_bar = (np.sum(sample / (_sqrtvar * _sqrtvar.T)) - n) / (n * (n - 1))
    prior = r_bar * (_sqrtvar * _sqrtvar.T)
    prior[np.eye(n) == 1] = var.reshape(-1)
    # compute shrinkage parameters and constant
    if self.delta is None:
        # what we call pi-hat
        y = x ** 2.0
        phi_mat = np.dot(y.T, y) / t - 2 * np.dot(x.T, x) * sample / t + sample ** 2
        phi = np.sum(phi_mat)
        # what we call rho-hat
        term1 = np.dot((x ** 3).T, x) / t
        help_ = np.dot(x.T, x) / t
        help_diag = np.diag(help_)
        term2 = np.tile(help_diag, (n, 1)).T * sample
        term3 = help_ * _var
        term4 = _var * sample
        theta_mat = term1 - term2 - term3 + term4
        theta_mat[np.eye(n) == 1] = np.zeros(n)
        rho = sum(np.diag(phi_mat)) + r_bar * np.sum(
            np.dot((1.0 / sqrtvar), sqrtvar.T) * theta_mat
        )
        # what we call gamma-hat: squared Frobenius distance sample vs prior
        gamma = np.linalg.norm(sample - prior, "fro") ** 2
        # compute shrinkage constant, clipped into [0, 1]
        kappa = (phi - rho) / gamma
        shrinkage = max(0.0, min(1.0, kappa / t))
        self.delta = shrinkage
    else:
        # use specified constant
        shrinkage = self.delta
    # compute the estimator: convex combination of prior and sample
    sigma = shrinkage * prior + (1 - shrinkage) * sample
    return self.format_and_annualise(sigma) | Calculate the Constant-Correlation covariance matrix.
:return: shrunk sample covariance matrix
:rtype: np.ndarray | Below is the instruction that describes the task:
### Input:
Calculate the Constant-Correlation covariance matrix.
:return: shrunk sample covariance matrix
:rtype: np.ndarray
### Response:
def shrink(self):
    """
    Calculate the Constant-Correlation covariance matrix.

    Shrinks the sample covariance of ``self.X`` towards a constant-correlation
    prior in which every off-diagonal entry uses the average sample
    correlation (a Ledoit & Wolf style estimator -- NOTE(review): confirm the
    intended reference). If ``self.delta`` is ``None``, the optimal shrinkage
    constant is estimated from the data and cached on the instance; otherwise
    the user-supplied constant is used as-is.

    :return: shrunk sample covariance matrix
    :rtype: np.ndarray
    """
    # NaNs in the input matrix are replaced with zeros before estimation.
    x = np.nan_to_num(self.X.values)
    # de-mean returns
    t, n = np.shape(x)
    meanx = x.mean(axis=0)
    x = x - np.tile(meanx, (t, 1))
    # compute sample covariance matrix
    sample = (1.0 / t) * np.dot(x.T, x)
    # compute prior: constant-correlation matrix built from the average
    # off-diagonal correlation r_bar and the sample variances
    var = np.diag(sample).reshape(-1, 1)
    sqrtvar = np.sqrt(var)
    _var = np.tile(var, (n,))
    _sqrtvar = np.tile(sqrtvar, (n,))
    r_bar = (np.sum(sample / (_sqrtvar * _sqrtvar.T)) - n) / (n * (n - 1))
    prior = r_bar * (_sqrtvar * _sqrtvar.T)
    prior[np.eye(n) == 1] = var.reshape(-1)
    # compute shrinkage parameters and constant
    if self.delta is None:
        # what we call pi-hat
        y = x ** 2.0
        phi_mat = np.dot(y.T, y) / t - 2 * np.dot(x.T, x) * sample / t + sample ** 2
        phi = np.sum(phi_mat)
        # what we call rho-hat
        term1 = np.dot((x ** 3).T, x) / t
        help_ = np.dot(x.T, x) / t
        help_diag = np.diag(help_)
        term2 = np.tile(help_diag, (n, 1)).T * sample
        term3 = help_ * _var
        term4 = _var * sample
        theta_mat = term1 - term2 - term3 + term4
        theta_mat[np.eye(n) == 1] = np.zeros(n)
        rho = sum(np.diag(phi_mat)) + r_bar * np.sum(
            np.dot((1.0 / sqrtvar), sqrtvar.T) * theta_mat
        )
        # what we call gamma-hat: squared Frobenius distance sample vs prior
        gamma = np.linalg.norm(sample - prior, "fro") ** 2
        # compute shrinkage constant, clipped into [0, 1]
        kappa = (phi - rho) / gamma
        shrinkage = max(0.0, min(1.0, kappa / t))
        self.delta = shrinkage
    else:
        # use specified constant
        shrinkage = self.delta
    # compute the estimator: convex combination of prior and sample
    sigma = shrinkage * prior + (1 - shrinkage) * sample
    return self.format_and_annualise(sigma)
def save(self, data, xparent=None):
    """
    Serialize ``data`` into an XML ``<bool>`` element (Python to XML).

    :param data | <variant>
           xparent | <xml.etree.ElementTree.Element> || None

    :return <xml.etree.ElementTree.Element>
    """
    # Attach under the parent when one is supplied, otherwise build a
    # standalone element.
    if xparent is None:
        elem = ElementTree.Element('bool')
    else:
        elem = ElementTree.SubElement(xparent, 'bool')
    elem.text = nstr(data)
    return elem
:param data | <variant>
xparent | <xml.etree.ElementTree.Element> || None
:return <xml.etree.ElementTree.Element> | Below is the instruction that describes the task:
### Input:
Parses the element from XML to Python.
:param data | <variant>
xparent | <xml.etree.ElementTree.Element> || None
:return <xml.etree.ElementTree.Element>
### Response:
def save(self, data, xparent=None):
    """
    Serialize ``data`` into an XML ``<bool>`` element (Python to XML).

    :param data | <variant>
           xparent | <xml.etree.ElementTree.Element> || None

    :return <xml.etree.ElementTree.Element>
    """
    # Attach under the parent when one is supplied, otherwise build a
    # standalone element.
    if xparent is None:
        elem = ElementTree.Element('bool')
    else:
        elem = ElementTree.SubElement(xparent, 'bool')
    elem.text = nstr(data)
    return elem
def stop(self):
    """
    Stop the node process.

    Closes the wrapper telnet server (if one is running) and waits for it
    to finish shutting down before marking the node as stopped. This is an
    old-style (``yield from``) coroutine and must be driven by an event loop.
    """
    server = self._wrapper_telnet_server
    if server:
        server.close()
        # Cooperatively wait until the server has fully shut down.
        yield from server.wait_closed()
    self.status = "stopped"
### Input:
Stop the node process.
### Response:
def stop(self):
    """
    Stop the node process.

    Closes the wrapper telnet server (if one is running) and waits for it
    to finish shutting down before marking the node as stopped. This is an
    old-style (``yield from``) coroutine and must be driven by an event loop.
    """
    server = self._wrapper_telnet_server
    if server:
        server.close()
        # Cooperatively wait until the server has fully shut down.
        yield from server.wait_closed()
    self.status = "stopped"
def create_issue(self, request, group, form_data, **kwargs):
    """
    Creates the issue on the remote service and returns an issue ID.

    Builds a work item from the submitted form data and links it back to
    the group; API failures are funnelled through ``self.raise_error``.
    """
    vsts_instance = self.get_option('instance', group.project)
    # Prefer the project chosen in the form, else the configured default.
    target_project = (
        form_data.get('project') or
        self.get_option('default_project', group.project)
    )
    client = self.get_client(request.user)
    issue_title = form_data['title']
    issue_body = form_data['description']
    backlink = absolute_uri(group.get_absolute_url(params={'referrer': 'vsts_plugin'}))
    try:
        work_item = client.create_work_item(
            instance=vsts_instance,
            project=target_project,
            title=issue_title,
            comment=markdown(issue_body),
            link=backlink,
        )
    except Exception as exc:
        self.raise_error(exc, identity=client.auth)
    return {
        'id': work_item['id'],
        'url': work_item['_links']['html']['href'],
        'title': issue_title,
    }
### Input:
Creates the issue on the remote service and returns an issue ID.
### Response:
def create_issue(self, request, group, form_data, **kwargs):
    """
    Creates the issue on the remote service and returns an issue ID.

    Builds a work item from the submitted form data and links it back to
    the group; API failures are funnelled through ``self.raise_error``.
    """
    vsts_instance = self.get_option('instance', group.project)
    # Prefer the project chosen in the form, else the configured default.
    target_project = (
        form_data.get('project') or
        self.get_option('default_project', group.project)
    )
    client = self.get_client(request.user)
    issue_title = form_data['title']
    issue_body = form_data['description']
    backlink = absolute_uri(group.get_absolute_url(params={'referrer': 'vsts_plugin'}))
    try:
        work_item = client.create_work_item(
            instance=vsts_instance,
            project=target_project,
            title=issue_title,
            comment=markdown(issue_body),
            link=backlink,
        )
    except Exception as exc:
        self.raise_error(exc, identity=client.auth)
    return {
        'id': work_item['id'],
        'url': work_item['_links']['html']['href'],
        'title': issue_title,
    }
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.

    ``dispatch_args`` are the *names* of parameters of the decorated
    function; the runtime types of the corresponding arguments select
    which registered implementation is invoked.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)
    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))
    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""
        # first check the dispatch arguments
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)
        # registry mapping a tuple of argument types -> implementation
        typemap = {}
        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    if issubclass(t, type_) and type_ not in t.mro():
                        # `append` is a module-level helper defined elsewhere
                        # in this file (presumably it adds type_ to ra while
                        # keeping only the most specific ancestors -- confirm).
                        append(type_, ra)
            return [set(ra) for ra in ras]
        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        'Ambiguous dispatch for %s: %s' % (t, vas))
                elif n_vas == 1:
                    va, = vas
                    # synthesize a throwaway class so the normal MRO
                    # machinery linearizes t together with its virtual
                    # ancestor va
                    mro = type('t', (t, va), {}).mro()[1:]
                else:
                    mro = t.mro()
                lists.append(mro[:-1]) # discard t and object
            return lists
        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)
            def dec(f):
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec
        def dispatch_info(*types):
            """
            An utility to introspect the dispatch algorithm
            """
            check(types)
            lst = []
            for anc in itertools.product(*ancestors(*types)):
                lst.append(tuple(a.__name__ for a in anc))
            return lst
        def _dispatch(dispatch_args, *args, **kw):
            types = tuple(type(arg) for arg in dispatch_args)
            try: # fast path: exact type match in the registry
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            # slow path: walk the (virtual) ancestors in MRO order
            combinations = itertools.product(*ancestors(*types))
            next(combinations) # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)
            # else call the default implementation
            return func(*args, **kw)
        # FunctionMaker (from the decorator module, imported elsewhere in
        # this file) builds a wrapper preserving func's signature.
        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)
    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec | Factory of decorators turning a function into a generic function
dispatching on the given arguments. | Below is the instruction that describes the task:
### Input:
Factory of decorators turning a function into a generic function
dispatching on the given arguments.
### Response:
def dispatch_on(*dispatch_args):
    """
    Factory of decorators turning a function into a generic function
    dispatching on the given arguments.

    ``dispatch_args`` are the *names* of parameters of the decorated
    function; the runtime types of the corresponding arguments select
    which registered implementation is invoked.
    """
    assert dispatch_args, 'No dispatch args passed'
    dispatch_str = '(%s,)' % ', '.join(dispatch_args)
    def check(arguments, wrong=operator.ne, msg=''):
        """Make sure one passes the expected number of arguments"""
        if wrong(len(arguments), len(dispatch_args)):
            raise TypeError('Expected %d arguments, got %d%s' %
                            (len(dispatch_args), len(arguments), msg))
    def gen_func_dec(func):
        """Decorator turning a function into a generic function"""
        # first check the dispatch arguments
        argset = set(getfullargspec(func).args)
        if not set(dispatch_args) <= argset:
            raise NameError('Unknown dispatch arguments %s' % dispatch_str)
        # registry mapping a tuple of argument types -> implementation
        typemap = {}
        def vancestors(*types):
            """
            Get a list of sets of virtual ancestors for the given types
            """
            check(types)
            ras = [[] for _ in range(len(dispatch_args))]
            for types_ in typemap:
                for t, type_, ra in zip(types, types_, ras):
                    if issubclass(t, type_) and type_ not in t.mro():
                        # `append` is a module-level helper defined elsewhere
                        # in this file (presumably it adds type_ to ra while
                        # keeping only the most specific ancestors -- confirm).
                        append(type_, ra)
            return [set(ra) for ra in ras]
        def ancestors(*types):
            """
            Get a list of virtual MROs, one for each type
            """
            check(types)
            lists = []
            for t, vas in zip(types, vancestors(*types)):
                n_vas = len(vas)
                if n_vas > 1:
                    raise RuntimeError(
                        'Ambiguous dispatch for %s: %s' % (t, vas))
                elif n_vas == 1:
                    va, = vas
                    # synthesize a throwaway class so the normal MRO
                    # machinery linearizes t together with its virtual
                    # ancestor va
                    mro = type('t', (t, va), {}).mro()[1:]
                else:
                    mro = t.mro()
                lists.append(mro[:-1]) # discard t and object
            return lists
        def register(*types):
            """
            Decorator to register an implementation for the given types
            """
            check(types)
            def dec(f):
                check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__)
                typemap[types] = f
                return f
            return dec
        def dispatch_info(*types):
            """
            An utility to introspect the dispatch algorithm
            """
            check(types)
            lst = []
            for anc in itertools.product(*ancestors(*types)):
                lst.append(tuple(a.__name__ for a in anc))
            return lst
        def _dispatch(dispatch_args, *args, **kw):
            types = tuple(type(arg) for arg in dispatch_args)
            try: # fast path: exact type match in the registry
                f = typemap[types]
            except KeyError:
                pass
            else:
                return f(*args, **kw)
            # slow path: walk the (virtual) ancestors in MRO order
            combinations = itertools.product(*ancestors(*types))
            next(combinations) # the first one has been already tried
            for types_ in combinations:
                f = typemap.get(types_)
                if f is not None:
                    return f(*args, **kw)
            # else call the default implementation
            return func(*args, **kw)
        # FunctionMaker (from the decorator module, imported elsewhere in
        # this file) builds a wrapper preserving func's signature.
        return FunctionMaker.create(
            func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str,
            dict(_f_=_dispatch), register=register, default=func,
            typemap=typemap, vancestors=vancestors, ancestors=ancestors,
            dispatch_info=dispatch_info, __wrapped__=func)
    gen_func_dec.__name__ = 'dispatch_on' + dispatch_str
    return gen_func_dec
def wave_interp_option(obj):
    r"""
    Validate if an object is a :ref:`WaveInterpOption` pseudo-type object.

    :param obj: Object
    :type obj: any

    :raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
     token \*[argument_name]\* is replaced by the name of the argument the
     contract is attached to

    :rtype: None
    """
    exdesc = pexdoc.pcontracts.get_exdesc()
    # Accept only the two case-insensitive interpolation keywords; any
    # non-string or unrecognized value is rejected.
    if isinstance(obj, str) and obj.upper() in ("CONTINUOUS", "STAIRCASE"):
        return None
    raise ValueError(exdesc)
Validate if an object is a :ref:`WaveInterpOption` pseudo-type object.
:param obj: Object
:type obj: any
:raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
token \*[argument_name]\* is replaced by the name of the argument the
contract is attached to
:rtype: None | Below is the instruction that describes the task:
### Input:
r"""
Validate if an object is a :ref:`WaveInterpOption` pseudo-type object.
:param obj: Object
:type obj: any
:raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
token \*[argument_name]\* is replaced by the name of the argument the
contract is attached to
:rtype: None
### Response:
def wave_interp_option(obj):
    r"""
    Validate if an object is a :ref:`WaveInterpOption` pseudo-type object.

    :param obj: Object
    :type obj: any

    :raises: RuntimeError (Argument \`*[argument_name]*\` is not valid). The
     token \*[argument_name]\* is replaced by the name of the argument the
     contract is attached to

    :rtype: None
    """
    exdesc = pexdoc.pcontracts.get_exdesc()
    # Accept only the two case-insensitive interpolation keywords; any
    # non-string or unrecognized value is rejected.
    if isinstance(obj, str) and obj.upper() in ("CONTINUOUS", "STAIRCASE"):
        return None
    raise ValueError(exdesc)
def plot_energy(
    data,
    kind="kde",
    bfmi=True,
    figsize=None,
    legend=True,
    fill_alpha=(1, 0.75),
    fill_color=("C0", "C5"),
    bw=4.5,
    textsize=None,
    fill_kwargs=None,
    plot_kwargs=None,
    ax=None,
):
    """Plot energy transition distribution and marginal energy distribution in HMC algorithms.

    This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.

    Parameters
    ----------
    data : xarray dataset, or object that can be converted (must represent
        `sample_stats` and have an `energy` variable)
    kind : str
        Type of plot to display (kde or histogram)
    bfmi : bool
        If True add to the plot the value of the estimated Bayesian fraction of missing
        information
    figsize : tuple
        Figure size. If None it will be defined automatically.
    legend : bool
        Flag for plotting legend (defaults to True)
    fill_alpha : tuple of floats
        Alpha blending value for the shaded area under the curve, between 0
        (no shade) and 1 (opaque). Defaults to (1, .75)
    fill_color : tuple of valid matplotlib color
        Color for Marginal energy distribution and Energy transition distribution.
        Defaults to ('C0', 'C5')
    bw : float
        Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
        smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's
        rule of thumb (the default rule used by SciPy). Only works if `kind='kde'`
    textsize: float
        Text size scaling factor for labels, titles and lines. If None it will be autoscaled
        based on figsize.
    fill_kwargs : dicts, optional
        Additional keywords passed to `arviz.plot_kde` (to control the shade)
    plot_kwargs : dicts, optional
        Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist')
    ax : axes
        Matplotlib axes.

    Returns
    -------
    ax : matplotlib axes
    """
    # Raw energy samples from the sample_stats group of the converted dataset.
    energy = convert_to_dataset(data, group="sample_stats").energy.values
    if ax is None:
        # No axes supplied: create a fresh figure/axes pair.
        _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
    if fill_kwargs is None:
        fill_kwargs = {}
    if plot_kwargs is None:
        plot_kwargs = {}
    # NOTE(review): figsize is recomputed here, but any figure created above
    # was already sized with the caller-supplied figsize.
    figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)
    # One (alpha, color, label, values) tuple per curve: the centered
    # marginal energy, and the energy transitions (first differences).
    series = zip(
        fill_alpha,
        fill_color,
        ("Marginal Energy", "Energy transition"),
        (energy - energy.mean(), np.diff(energy)),
    )
    if kind == "kde":
        for alpha, color, label, value in series:
            # setdefault keeps any explicit caller overrides intact; note
            # this mutates the caller-supplied kwargs dicts in place.
            fill_kwargs["alpha"] = alpha
            fill_kwargs["color"] = color
            plot_kwargs.setdefault("color", color)
            plot_kwargs.setdefault("alpha", 0)
            plot_kwargs.setdefault("linewidth", linewidth)
            plot_kde(
                value,
                bw=bw,
                label=label,
                textsize=xt_labelsize,
                plot_kwargs=plot_kwargs,
                fill_kwargs=fill_kwargs,
                ax=ax,
            )
    elif kind == "hist":
        for alpha, color, label, value in series:
            ax.hist(
                value.flatten(),
                bins="auto",
                density=True,
                alpha=alpha,
                label=label,
                color=color,
                **plot_kwargs
            )
    else:
        raise ValueError("Plot type {} not recognized.".format(kind))
    if bfmi:
        # Invisible artists: one legend entry per chain with its BFMI value.
        for idx, val in enumerate(e_bfmi(energy)):
            ax.plot([], label="chain {:>2} BFMI = {:.2f}".format(idx, val), alpha=0)
    ax.set_xticks([])
    ax.set_yticks([])
    if legend:
        ax.legend()
    return ax | Plot energy transition distribution and marginal energy distribution in HMC algorithms.
This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.
Parameters
----------
data : xarray dataset, or object that can be converted (must represent
`sample_stats` and have an `energy` variable)
kind : str
Type of plot to display (kde or histogram)
bfmi : bool
If True add to the plot the value of the estimated Bayesian fraction of missing information
figsize : tuple
Figure size. If None it will be defined automatically.
legend : bool
Flag for plotting legend (defaults to True)
fill_alpha : tuple of floats
Alpha blending value for the shaded area under the curve, between 0
(no shade) and 1 (opaque). Defaults to (1, .75)
fill_color : tuple of valid matplotlib color
Color for Marginal energy distribution and Energy transition distribution.
Defaults to ('C0', 'C5')
bw : float
Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
of thumb (the default rule used by SciPy). Only works if `kind='kde'`
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
fill_kwargs : dicts, optional
Additional keywords passed to `arviz.plot_kde` (to control the shade)
plot_kwargs : dicts, optional
Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist')
ax : axes
Matplotlib axes.
Returns
-------
ax : matplotlib axes
Examples
--------
Plot a default energy plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_energy(data)
Represent energy plot via histograms
.. plot::
:context: close-figs
>>> az.plot_energy(data, kind='hist') | Below is the instruction that describes the task:
### Input:
Plot energy transition distribution and marginal energy distribution in HMC algorithms.
This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.
Parameters
----------
data : xarray dataset, or object that can be converted (must represent
`sample_stats` and have an `energy` variable)
kind : str
Type of plot to display (kde or histogram)
bfmi : bool
If True add to the plot the value of the estimated Bayesian fraction of missing information
figsize : tuple
Figure size. If None it will be defined automatically.
legend : bool
Flag for plotting legend (defaults to True)
fill_alpha : tuple of floats
Alpha blending value for the shaded area under the curve, between 0
(no shade) and 1 (opaque). Defaults to (1, .75)
fill_color : tuple of valid matplotlib color
Color for Marginal energy distribution and Energy transition distribution.
Defaults to ('C0', 'C5')
bw : float
Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
of thumb (the default rule used by SciPy). Only works if `kind='kde'`
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
fill_kwargs : dicts, optional
Additional keywords passed to `arviz.plot_kde` (to control the shade)
plot_kwargs : dicts, optional
Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist')
ax : axes
Matplotlib axes.
Returns
-------
ax : matplotlib axes
Examples
--------
Plot a default energy plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_energy(data)
Represent energy plot via histograms
.. plot::
:context: close-figs
>>> az.plot_energy(data, kind='hist')
### Response:
def plot_energy(
    data,
    kind="kde",
    bfmi=True,
    figsize=None,
    legend=True,
    fill_alpha=(1, 0.75),
    fill_color=("C0", "C5"),
    bw=4.5,
    textsize=None,
    fill_kwargs=None,
    plot_kwargs=None,
    ax=None,
):
    """Plot energy transition distribution and marginal energy distribution in HMC algorithms.

    This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.

    Parameters
    ----------
    data : xarray dataset, or object that can be converted (must represent
        `sample_stats` and have an `energy` variable)
    kind : str
        Type of plot to display (kde or histogram)
    bfmi : bool
        If True add to the plot the value of the estimated Bayesian fraction of missing
        information
    figsize : tuple
        Figure size. If None it will be defined automatically.
    legend : bool
        Flag for plotting legend (defaults to True)
    fill_alpha : tuple of floats
        Alpha blending value for the shaded area under the curve, between 0
        (no shade) and 1 (opaque). Defaults to (1, .75)
    fill_color : tuple of valid matplotlib color
        Color for Marginal energy distribution and Energy transition distribution.
        Defaults to ('C0', 'C5')
    bw : float
        Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
        smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's
        rule of thumb (the default rule used by SciPy). Only works if `kind='kde'`
    textsize: float
        Text size scaling factor for labels, titles and lines. If None it will be autoscaled
        based on figsize.
    fill_kwargs : dicts, optional
        Additional keywords passed to `arviz.plot_kde` (to control the shade)
    plot_kwargs : dicts, optional
        Additional keywords passed to `arviz.plot_kde` or `plt.hist` (if type='hist')
    ax : axes
        Matplotlib axes.

    Returns
    -------
    ax : matplotlib axes
    """
    # Raw energy samples from the sample_stats group of the converted dataset.
    energy = convert_to_dataset(data, group="sample_stats").energy.values
    if ax is None:
        # No axes supplied: create a fresh figure/axes pair.
        _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
    if fill_kwargs is None:
        fill_kwargs = {}
    if plot_kwargs is None:
        plot_kwargs = {}
    # NOTE(review): figsize is recomputed here, but any figure created above
    # was already sized with the caller-supplied figsize.
    figsize, _, _, xt_labelsize, linewidth, _ = _scale_fig_size(figsize, textsize, 1, 1)
    # One (alpha, color, label, values) tuple per curve: the centered
    # marginal energy, and the energy transitions (first differences).
    series = zip(
        fill_alpha,
        fill_color,
        ("Marginal Energy", "Energy transition"),
        (energy - energy.mean(), np.diff(energy)),
    )
    if kind == "kde":
        for alpha, color, label, value in series:
            # setdefault keeps any explicit caller overrides intact; note
            # this mutates the caller-supplied kwargs dicts in place.
            fill_kwargs["alpha"] = alpha
            fill_kwargs["color"] = color
            plot_kwargs.setdefault("color", color)
            plot_kwargs.setdefault("alpha", 0)
            plot_kwargs.setdefault("linewidth", linewidth)
            plot_kde(
                value,
                bw=bw,
                label=label,
                textsize=xt_labelsize,
                plot_kwargs=plot_kwargs,
                fill_kwargs=fill_kwargs,
                ax=ax,
            )
    elif kind == "hist":
        for alpha, color, label, value in series:
            ax.hist(
                value.flatten(),
                bins="auto",
                density=True,
                alpha=alpha,
                label=label,
                color=color,
                **plot_kwargs
            )
    else:
        raise ValueError("Plot type {} not recognized.".format(kind))
    if bfmi:
        # Invisible artists: one legend entry per chain with its BFMI value.
        for idx, val in enumerate(e_bfmi(energy)):
            ax.plot([], label="chain {:>2} BFMI = {:.2f}".format(idx, val), alpha=0)
    ax.set_xticks([])
    ax.set_yticks([])
    if legend:
        ax.legend()
    return ax
def importalma(asdm, ms):
    """Convert an ALMA low-level ASDM dataset to Measurement Set format.

    asdm (str)
      The path to the input ASDM dataset.
    ms (str)
      The path to the output MS dataset.

    This implementation automatically infers the value of the "tbuff"
    parameter.

    Example::

      from pwkit.environments.casa import tasks
      tasks.importalma('myalma.asdm', 'myalma.ms')
    """
    from .scripting import CasapyScript
    # The conversion happens inside the bundled casapy helper script; the
    # context manager launches and tears down the casapy session. The
    # context value was never used, so it is no longer bound.
    script = os.path.join(os.path.dirname(__file__), 'cscript_importalma.py')
    with CasapyScript(script, asdm=asdm, ms=ms):
        pass
asdm (str)
The path to the input ASDM dataset.
ms (str)
The path to the output MS dataset.
This implementation automatically infers the value of the "tbuff"
parameter.
Example::
from pwkit.environments.casa import tasks
    tasks.importalma('myalma.asdm', 'myalma.ms') | Below is the instruction that describes the task:
### Input:
Convert an ALMA low-level ASDM dataset to Measurement Set format.
asdm (str)
The path to the input ASDM dataset.
ms (str)
The path to the output MS dataset.
This implementation automatically infers the value of the "tbuff"
parameter.
Example::
from pwkit.environments.casa import tasks
tasks.importalma('myalma.asdm', 'myalma.ms')
### Response:
def importalma(asdm, ms):
    """Convert an ALMA low-level ASDM dataset to Measurement Set format.
    asdm (str)
      The path to the input ASDM dataset.
    ms (str)
      The path to the output MS dataset.
    This implementation automatically infers the value of the "tbuff"
    parameter.
    Example::
      from pwkit.environments.casa import tasks
      tasks.importalma('myalma.asdm', 'myalma.ms')
    """
    # Imported lazily so that merely importing this module does not require
    # the CASA scripting machinery to be importable.
    from .scripting import CasapyScript
    # The conversion itself runs in a helper script executed inside a casapy
    # session; the script ships next to this module.
    script = os.path.join(os.path.dirname(__file__), 'cscript_importalma.py')
    # Entering/exiting the context manager drives the casapy session; the
    # body is intentionally empty, and the previously bound-but-unused `cs`
    # variable has been dropped.
    with CasapyScript(script, asdm=asdm, ms=ms):
        pass
def fpsInformation(self,args):
    '''fps command: report or change the horizon framerate.

    args is the tokenized command tail: ["get"] prints the current rate,
    ["set", "<fps>"] changes it.  A rate of zero means unrestricted.
    '''
    invalidStr = 'Invalid number of arguments. Usage horizon-fps set <fps> or horizon-fps get. Set fps to zero to get unrestricted framerate.'
    if len(args)>0:
        if args[0] == "get":
            '''Get the current framerate.'''
            # fps == 0.0 is the sentinel for "no rate limit".
            if (self.fps == 0.0):
                print('Horizon Framerate: Unrestricted')
            else:
                print("Horizon Framerate: " + str(self.fps))
        elif args[0] == "set":
            if len(args)==2:
                # NOTE(review): float() raises ValueError on non-numeric
                # input here -- confirm whether that is intended.
                self.fps = float(args[1])
                # Derive the inter-send delay; zero fps means no delay.
                if (self.fps != 0):
                    self.sendDelay = 1.0/self.fps
                else:
                    self.sendDelay = 0.0
                # Queue an FPS message so the new rate is propagated.
                self.msgList.append(FPS(self.fps))
                if (self.fps == 0.0):
                    print('Horizon Framerate: Unrestricted')
                else:
                    print("Horizon Framerate: " + str(self.fps))
            else:
                print(invalidStr)
        else:
            print(invalidStr)
    else:
        print(invalidStr) | fps command | Below is the instruction that describes the task:
### Input:
fps command
### Response:
def fpsInformation(self, args):
    '''fps command: report or change the horizon framerate.

    args is the tokenized command tail: ["get"] prints the current rate,
    ["set", "<fps>"] changes it.  A rate of zero means unrestricted.
    '''
    invalidStr = 'Invalid number of arguments. Usage horizon-fps set <fps> or horizon-fps get. Set fps to zero to get unrestricted framerate.'
    # Guard clause: no sub-command at all is invalid usage.
    if len(args) == 0:
        print(invalidStr)
        return
    if args[0] == "get":
        # Report the current framerate; 0.0 is the "unrestricted" sentinel.
        if self.fps == 0.0:
            print('Horizon Framerate: Unrestricted')
        else:
            print("Horizon Framerate: " + str(self.fps))
    elif args[0] == "set":
        if len(args) == 2:
            # Robustness fix: a non-numeric argument previously raised an
            # uncaught ValueError; treat it as invalid usage instead.
            try:
                new_fps = float(args[1])
            except ValueError:
                print(invalidStr)
                return
            self.fps = new_fps
            # Derive the inter-send delay; zero fps means no delay.
            self.sendDelay = 1.0 / self.fps if self.fps != 0 else 0.0
            # Queue an FPS message so the new rate is propagated.
            self.msgList.append(FPS(self.fps))
            if self.fps == 0.0:
                print('Horizon Framerate: Unrestricted')
            else:
                print("Horizon Framerate: " + str(self.fps))
        else:
            print(invalidStr)
    else:
        print(invalidStr)
def reload_using_spawn_exit(self):
    """
    Spawn a replacement subprocess running the same command line, then
    terminate the current process.

    The replacement is launched with the same interpreter, arguments, and
    a copy of the current environment.  If ``self._force_exit`` is truthy
    the process is killed immediately with ``os._exit``; otherwise the main
    thread is interrupted and the thread running this method exits.

    :return:
        None.
    """
    # Create command parts
    cmd_parts = [sys.executable] + sys.argv
    # Get env dict copy
    env_copy = os.environ.copy()
    # Spawn subprocess (launched before this process tears itself down).
    subprocess.Popen(cmd_parts, env=env_copy, close_fds=True)
    # If need force exit
    if self._force_exit:
        # Hard exit: bypasses cleanup handlers.
        os._exit(0)  # pylint: disable=protected-access
    # If not need force exit
    else:
        # Raise KeyboardInterrupt in the main thread.
        interrupt_main()
        # Flag the watcher loop to stop.
        self._watcher_to_stop = True
        # Exit the watcher thread
        sys.exit(0)
sys.exit(0) | Spawn a subprocess and exit the current process.
:return:
    None. | Below is the instruction that describes the task:
### Input:
Spawn a subprocess and exit the current process.
:return:
None.
### Response:
def reload_using_spawn_exit(self):
    """
    Spawn a replacement subprocess running the same command line, then
    terminate the current process.

    The replacement is launched with the same interpreter, arguments, and
    a copy of the current environment.  If ``self._force_exit`` is truthy
    the process is killed immediately with ``os._exit``; otherwise the main
    thread is interrupted and the thread running this method exits.

    :return:
        None.
    """
    # Re-create this process's command line and environment.
    command = [sys.executable] + sys.argv
    environment = os.environ.copy()

    # Launch the replacement before tearing this process down.
    subprocess.Popen(command, env=environment, close_fds=True)

    if self._force_exit:
        # Hard exit: bypasses cleanup handlers entirely (never returns).
        os._exit(0)  # pylint: disable=protected-access

    # Graceful path: raise KeyboardInterrupt in the main thread, flag the
    # watcher loop to stop, then terminate this thread.
    interrupt_main()
    self._watcher_to_stop = True
    sys.exit(0)
def c(self, *args, **kwargs):
    """
    Takes a single argument or keyword argument, and returns the specified
    column. If the argument (or keyword argument) is an integer, return the
    n'th column, otherwise return the column based on key.
    If no arguments are supplied, simply print the column information.
    """
    # If no arguments, print an index/key/shape summary and return.
    if len(args) + len(kwargs) == 0:
        print("Columns")
        if len(self.ckeys)==0: print (' No columns of data yet.')
        # Loop over the ckeys and display their information
        for n in range(len(self.ckeys)):
            print(' '+str(n)+': '+str(self.ckeys[n])+' '+str(_n.shape(self[n])))
        return
    # Otherwise, find n: first positional argument wins, else a keyword.
    # NOTE(review): with several keyword arguments only the last one
    # iterated is used -- confirm this is intended.
    elif len(args): n = args[0]
    elif len(kwargs):
        for k in kwargs: n = kwargs[k]
    # Nothing to do if there is no column data at all.
    if len(self.columns) == 0: return None
    # if it's a string, use it as a key for the dictionary
    if type(n) is str: return self.columns[n]
    # if it's a list, tuple, or range, return the specified columns
    if type(n) in [list, tuple, range]:
        output = []
        for i in n: output.append(self[i])
        return output
    # If it's a slice, expand it into an explicit range of columns.
    if type(n) is slice:
        start = n.start
        stop = n.stop
        step = n.step
        # Fix up the unspecified slice fields with sensible defaults.
        if start == None: start = 0
        if stop == None or stop>len(self): stop = len(self)
        if step == None: step = 1
        # Return what was asked for
        return self[range(start, stop, step)]
    # Otherwise assume it's an integer index into ckeys.
return self.columns[self.ckeys[n]] | Takes a single argument or keyword argument, and returns the specified
column. If the argument (or keyword argument) is an integer, return the
n'th column, otherwise return the column based on key.
    If no arguments are supplied, simply print the column information. | Below is the instruction that describes the task:
### Input:
Takes a single argument or keyword argument, and returns the specified
column. If the argument (or keyword argument) is an integer, return the
n'th column, otherwise return the column based on key.
If no arguments are supplied, simply print the column information.
### Response:
def c(self, *args, **kwargs):
    """
    Takes a single argument or keyword argument, and returns the specified
    column. If the argument (or keyword argument) is an integer, return the
    n'th column, otherwise return the column based on key.
    If no arguments are supplied, simply print the column information.
    """
    # If no arguments, print an index/key/shape summary and return.
    if len(args) + len(kwargs) == 0:
        print("Columns")
        if len(self.ckeys) == 0: print(' No columns of data yet.')
        # Loop over the ckeys and display their information
        for n in range(len(self.ckeys)):
            print(' '+str(n)+': '+str(self.ckeys[n])+' '+str(_n.shape(self[n])))
        return
    # Otherwise, find n: first positional argument wins, else a keyword.
    # NOTE(review): with several keyword arguments only the last one
    # iterated is used -- confirm this is intended.
    elif len(args): n = args[0]
    elif len(kwargs):
        for k in kwargs: n = kwargs[k]
    # Nothing to do if there is no column data at all.
    if len(self.columns) == 0: return None
    # A string selects a column by key (idiom fix: isinstance, not type()).
    if isinstance(n, str): return self.columns[n]
    # A list/tuple/range selects several columns at once.
    if isinstance(n, (list, tuple, range)):
        return [self[i] for i in n]
    # A slice selects a regular subset of columns.
    if isinstance(n, slice):
        # Fill in unspecified slice fields (idiom fix: compare to None
        # with "is", not "==").
        start = 0 if n.start is None else n.start
        stop = n.stop
        if stop is None or stop > len(self): stop = len(self)
        step = 1 if n.step is None else n.step
        # Return what was asked for
        return self[range(start, stop, step)]
    # Otherwise assume it's an integer index into ckeys.
    return self.columns[self.ckeys[n]]
def uploads(self):
    """returns an object to work with the site uploads, or None if the
    server does not advertise an "uploads" resource."""
    # The resource list is populated lazily on first access.
    if self._resources is None:
        self.__init()
    # Only build the helper when the server lists an "uploads" resource.
    if "uploads" in self._resources:
        url = self._url + "/uploads"
        return _uploads.Uploads(url=url,
                                securityHandler=self._securityHandler,
                                proxy_url=self._proxy_url,
                                proxy_port=self._proxy_port,
                                initialize=True)
    else:
        return None
        return None | returns an object to work with the site uploads | Below is the instruction that describes the task:
### Input:
returns an object to work with the site uploads
### Response:
def uploads(self):
    """returns an object to work with the site uploads"""
    # The resource list is populated lazily on first access.
    if self._resources is None:
        self.__init()
    # Guard clause: nothing to expose when the server does not advertise
    # an "uploads" resource.
    if "uploads" not in self._resources:
        return None
    return _uploads.Uploads(url=self._url + "/uploads",
                            securityHandler=self._securityHandler,
                            proxy_url=self._proxy_url,
                            proxy_port=self._proxy_port,
                            initialize=True)
def _generate_typevars(self):
    # type: () -> None
    """
    Creates type variables that are used by the type signatures for
    _process_custom_annotations.
    """
    # T is bounded to annotation types; U is the unconstrained result type.
    self.emit("T = TypeVar('T', bound=bb.AnnotationType)")
    self.emit("U = TypeVar('U')")
    # Ensure TypeVar is imported in the generated module.
    self.import_tracker._register_typing_import('TypeVar')
    # Trailing blank line after the declarations.
    self.emit()
self.emit() | Creates type variables that are used by the type signatures for
    _process_custom_annotations. | Below is the instruction that describes the task:
### Input:
Creates type variables that are used by the type signatures for
_process_custom_annotations.
### Response:
def _generate_typevars(self):
# type: () -> None
"""
Creates type variables that are used by the type signatures for
_process_custom_annotations.
"""
self.emit("T = TypeVar('T', bound=bb.AnnotationType)")
self.emit("U = TypeVar('U')")
self.import_tracker._register_typing_import('TypeVar')
self.emit() |
def join(L, keycols=None, nullvals=None, renamer=None,
         returnrenaming=False, Names=None):
    """
    Combine two or more numpy ndarrays with structured dtype on common key
    column(s).  Forgiving wrapper around
    :func:`tabular.spreadsheet.strictjoin`.

    **Parameters**

        **L** : list or dictionary
            Numpy recarrays to merge; if a dictionary, keys name the arrays.

        **keycols** : list of strings, comma-separated string, or None
            Key columns to merge on.  If None, they are inferred as the
            largest set of common column names with identical dtypes and
            unique values in every array; a ValueError is raised if no
            such set exists.

        **nullvals** : function, optional
            Maps a numpy format string to a null fill value (see
            :func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`).

        **renamer** : function, optional
            Renames overlapping non-key columns (see
            :func:`tabular.spreadsheet.DEFAULT_RENAMER`, the default).

        **returnrenaming** : Boolean, optional
            If True, also return the renaming produced by `renamer`.

        **Names** : list of strings, optional
            Names for the elements of `L` when `L` is a list.

    **Returns**

        The merged array, or ``[result, renaming]`` when
        `returnrenaming` is True.
    """
    if isinstance(L, dict):
        Names = L.keys()
        LL = L.values()
    else:
        # Idiom fix: compare to None with "is", not "==".
        if Names is None:
            Names = range(len(L))
        else:
            assert len(Names) == len(L)
        LL = L
    if not keycols:
        # Infer keycols: names common to all arrays ...
        keycols = utils.listintersection([a.dtype.names for a in LL])
        if len(keycols) == 0:
            raise ValueError('No common column names found.')
        # ... whose dtypes agree across all arrays ...
        keycols = [l for l in keycols if all([a.dtype[l] == LL[0].dtype[l]
                                              for a in LL])]
        if len(keycols) == 0:
            raise ValueError('No suitable common keycolumns, '
                             'with identical datatypes found.')
        # ... and whose values are unique within every array.
        # NOTE(review): isunique is tested on a[keycols] (the whole
        # candidate set) rather than per-column -- confirm intent.
        keycols = [l for l in keycols if all([isunique(a[keycols])
                                              for a in LL])]
        if len(keycols) == 0:
            raise ValueError('No suitable common keycolumns, '
                             'with unique value sets in all arrays to be '
                             'merged, were found.')
        else:
            print('Inferring keycols to be:', keycols)
    elif isinstance(keycols, str):
        # Allow a comma-separated string of column names.
        keycols = [l.strip() for l in keycols.split(',')]
    # Non-key columns shared by several arrays must be renamed apart.
    commons = set(Commons([l.dtype.names for l in LL])).difference(keycols)
    renaming = {}
    if len(commons) > 0:
        # BUG FIX: this was a Python 2 print *statement* -- a SyntaxError
        # under Python 3, where the rest of this function already uses the
        # print() function.
        print('common attributes, forcing a renaming ...')
        if renamer is None:
            print('Using default renamer ...')
            renamer = DEFAULT_RENAMER
        renaming = renamer(L, Names=Names)
        if not RenamingIsInCorrectFormat(renaming, L, Names=Names):
            print('Renaming from specified renamer is not in correct format,'
                  'using default renamer instead ...')
            renaming = DEFAULT_RENAMER(L, Names=Names)
        NewNames = [[l if l not in renaming[k].keys() else renaming[k][l]
                     for l in ll.dtype.names] for (k, ll) in zip(Names, LL)]
        if set(Commons(NewNames)).difference(keycols):
            raise ValueError('Renaming convention failed to produce '
                             'separated names.')
    Result = strictjoin(L, keycols, nullvals, renaming, Names=Names)
    if returnrenaming:
        return [Result, renaming]
    else:
        if renaming:
            print('There was a nontrivial renaming, to get it set '
                  '"returnrenaming = True" in keyword to join function.')
return Result | Combine two or more numpy ndarray with structured dtype on common key
column(s).
Merge a list (or dictionary) of numpy ndarray with structured dtype, given
by `L`, on key columns listed in `keycols`.
This function is actually a wrapper for
:func:`tabular.spreadsheet.strictjoin`.
The ``strictjoin`` function has a few restrictions, and this ``join``
function will try to ensure that they are satisfied:
* each element of `keycol` must be a valid column name in `X`
and each array in `L`, and all of the same data-type.
* for each column `col` in `keycols`, and each array `A` in `L`, the
values in `A[col]` must be unique, -- and same for `X[col]`.
(Actually this uniqueness doesn't have to hold for the first tabarray
in L, that is, L[0], but must for all the subsequent ones.)
* the *non*-key-column column names in each of the arrays must be
disjoint from each other -- or disjoint after a renaming (see below).
An error will be thrown if these conditions are not met.
If you don't provide a value of `keycols`, the algorithm will attempt to
infer which columns should be used by trying to find the largest set of
common column names that contain unique values in each array and have the
same data type. An error will be thrown if no such inference can be made.
*Renaming of overlapping columns*
If the non-keycol column names of the arrays overlap, ``join`` will
by default attempt to rename the columns by using a simple
convention:
* If `L` is a list, it will append the number in the list to the
key associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the
overlapping columns from that array.
You can override the default renaming scheme using the `renamer`
parameter.
*Nullvalues for keycolumn differences*
If there are regions of the keycolumns that are not overlapping
between merged arrays, `join` will fill in the relevant entries
with null values chosen by default:
* '0' for integer columns
* '0.0' for float columns
* the empty character ('') for string columns.
**Parameters**
**L** : list or dictionary
Numpy recarrays to merge. If `L` is a dictionary, the keys
name each numpy recarray, and the corresponding values are
the actual numpy recarrays.
**keycols** : list of strings
List of the names of the key columns along which to do the
merging.
**nullvals** : function, optional
A function that returns a null value for a numpy format
descriptor string, e.g. ``'<i4'`` or ``'|S5'``.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`
**renamer** : function, optional
A function for renaming overlapping non-key column names
among the numpy recarrays to merge.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**returnrenaming** : Boolean, optional
Whether to return the result of the `renamer` function.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**Names**: list of strings:
If `L` is a list, than names for elements of `L` can be
specified with `Names` (without losing the ordering as you
would if you did it with a dictionary).
`len(L)` must equal `len(Names)`
**Returns**
**result** : numpy ndarray with structured dtype
Result of the join, e.g. the result of merging the input
numpy arrays defined in `L` on the key columns listed in
`keycols`.
**renaming** : dictionary of dictionaries, optional
The result returned by the `renamer` function. Returned
only if `returnrenaming == True`.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**See Also:**
    :func:`tabular.spreadsheet.strictjoin` | Below is the instruction that describes the task:
### Input:
Combine two or more numpy ndarray with structured dtype on common key
column(s).
Merge a list (or dictionary) of numpy ndarray with structured dtype, given
by `L`, on key columns listed in `keycols`.
This function is actually a wrapper for
:func:`tabular.spreadsheet.strictjoin`.
The ``strictjoin`` function has a few restrictions, and this ``join``
function will try to ensure that they are satisfied:
* each element of `keycol` must be a valid column name in `X`
and each array in `L`, and all of the same data-type.
* for each column `col` in `keycols`, and each array `A` in `L`, the
values in `A[col]` must be unique, -- and same for `X[col]`.
(Actually this uniqueness doesn't have to hold for the first tabarray
in L, that is, L[0], but must for all the subsequent ones.)
* the *non*-key-column column names in each of the arrays must be
disjoint from each other -- or disjoint after a renaming (see below).
An error will be thrown if these conditions are not met.
If you don't provide a value of `keycols`, the algorithm will attempt to
infer which columns should be used by trying to find the largest set of
common column names that contain unique values in each array and have the
same data type. An error will be thrown if no such inference can be made.
*Renaming of overlapping columns*
If the non-keycol column names of the arrays overlap, ``join`` will
by default attempt to rename the columns by using a simple
convention:
* If `L` is a list, it will append the number in the list to the
key associated with the array.
* If `L` is a dictionary, the algorithm will append the string
representation of the key associated with an array to the
overlapping columns from that array.
You can override the default renaming scheme using the `renamer`
parameter.
*Nullvalues for keycolumn differences*
If there are regions of the keycolumns that are not overlapping
between merged arrays, `join` will fill in the relevant entries
with null values chosen by default:
* '0' for integer columns
* '0.0' for float columns
* the empty character ('') for string columns.
**Parameters**
**L** : list or dictionary
Numpy recarrays to merge. If `L` is a dictionary, the keys
name each numpy recarray, and the corresponding values are
the actual numpy recarrays.
**keycols** : list of strings
List of the names of the key columns along which to do the
merging.
**nullvals** : function, optional
A function that returns a null value for a numpy format
descriptor string, e.g. ``'<i4'`` or ``'|S5'``.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`
**renamer** : function, optional
A function for renaming overlapping non-key column names
among the numpy recarrays to merge.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**returnrenaming** : Boolean, optional
Whether to return the result of the `renamer` function.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**Names**: list of strings:
If `L` is a list, than names for elements of `L` can be
specified with `Names` (without losing the ordering as you
would if you did it with a dictionary).
`len(L)` must equal `len(Names)`
**Returns**
**result** : numpy ndarray with structured dtype
Result of the join, e.g. the result of merging the input
numpy arrays defined in `L` on the key columns listed in
`keycols`.
**renaming** : dictionary of dictionaries, optional
The result returned by the `renamer` function. Returned
only if `returnrenaming == True`.
See the default function for further documentation:
:func:`tabular.spreadsheet.DEFAULT_RENAMER`
**See Also:**
:func:`tabular.spreadsheet.strictjoin`
### Response:
def join(L, keycols=None, nullvals=None, renamer=None,
         returnrenaming=False, Names=None):
    """
    Combine two or more numpy ndarrays with structured dtype on common key
    column(s).  Forgiving wrapper around
    :func:`tabular.spreadsheet.strictjoin`.

    **Parameters**

        **L** : list or dictionary
            Numpy recarrays to merge; if a dictionary, keys name the arrays.

        **keycols** : list of strings, comma-separated string, or None
            Key columns to merge on.  If None, they are inferred as the
            largest set of common column names with identical dtypes and
            unique values in every array; a ValueError is raised if no
            such set exists.

        **nullvals** : function, optional
            Maps a numpy format string to a null fill value (see
            :func:`tabular.spreadsheet.DEFAULT_NULLVALUEFORMAT`).

        **renamer** : function, optional
            Renames overlapping non-key columns (see
            :func:`tabular.spreadsheet.DEFAULT_RENAMER`, the default).

        **returnrenaming** : Boolean, optional
            If True, also return the renaming produced by `renamer`.

        **Names** : list of strings, optional
            Names for the elements of `L` when `L` is a list.

    **Returns**

        The merged array, or ``[result, renaming]`` when
        `returnrenaming` is True.
    """
    if isinstance(L, dict):
        Names = L.keys()
        LL = L.values()
    else:
        # Idiom fix: compare to None with "is", not "==".
        if Names is None:
            Names = range(len(L))
        else:
            assert len(Names) == len(L)
        LL = L
    if not keycols:
        # Infer keycols: names common to all arrays ...
        keycols = utils.listintersection([a.dtype.names for a in LL])
        if len(keycols) == 0:
            raise ValueError('No common column names found.')
        # ... whose dtypes agree across all arrays ...
        keycols = [l for l in keycols if all([a.dtype[l] == LL[0].dtype[l]
                                              for a in LL])]
        if len(keycols) == 0:
            raise ValueError('No suitable common keycolumns, '
                             'with identical datatypes found.')
        # ... and whose values are unique within every array.
        # NOTE(review): isunique is tested on a[keycols] (the whole
        # candidate set) rather than per-column -- confirm intent.
        keycols = [l for l in keycols if all([isunique(a[keycols])
                                              for a in LL])]
        if len(keycols) == 0:
            raise ValueError('No suitable common keycolumns, '
                             'with unique value sets in all arrays to be '
                             'merged, were found.')
        else:
            print('Inferring keycols to be:', keycols)
    elif isinstance(keycols, str):
        # Allow a comma-separated string of column names.
        keycols = [l.strip() for l in keycols.split(',')]
    # Non-key columns shared by several arrays must be renamed apart.
    commons = set(Commons([l.dtype.names for l in LL])).difference(keycols)
    renaming = {}
    if len(commons) > 0:
        # BUG FIX: this was a Python 2 print *statement* -- a SyntaxError
        # under Python 3, where the rest of this function already uses the
        # print() function.
        print('common attributes, forcing a renaming ...')
        if renamer is None:
            print('Using default renamer ...')
            renamer = DEFAULT_RENAMER
        renaming = renamer(L, Names=Names)
        if not RenamingIsInCorrectFormat(renaming, L, Names=Names):
            print('Renaming from specified renamer is not in correct format,'
                  'using default renamer instead ...')
            renaming = DEFAULT_RENAMER(L, Names=Names)
        NewNames = [[l if l not in renaming[k].keys() else renaming[k][l]
                     for l in ll.dtype.names] for (k, ll) in zip(Names, LL)]
        if set(Commons(NewNames)).difference(keycols):
            raise ValueError('Renaming convention failed to produce '
                             'separated names.')
    Result = strictjoin(L, keycols, nullvals, renaming, Names=Names)
    if returnrenaming:
        return [Result, renaming]
    else:
        if renaming:
            print('There was a nontrivial renaming, to get it set '
                  '"returnrenaming = True" in keyword to join function.')
        return Result
def _update_u(u, W):
    """
    Update the threat points if they are not feasible in the new W,
    by clamping them up to the minimum of the new feasible payoffs.
    Parameters
    ----------
    u : ndarray(float, ndim=1)
        The threat points.
    W : ndarray(float, ndim=1)
        The points that construct the feasible payoff convex hull.
    Returns
    -------
    u : ndarray(float, ndim=1)
        The updated threat points (modified in place).
    """
    # For each of the two players, raise the threat point to the lowest
    # feasible payoff if it currently lies below it.
    for i in range(2):
        W_min = W[:, i].min()
        if u[i] < W_min:
            u[i] = W_min
return u | Update the threat points if it not feasible in the new W,
by the minimum of new feasible payoffs.
Parameters
----------
u : ndarray(float, ndim=1)
The threat points.
W : ndarray(float, ndim=1)
The points that construct the feasible payoff convex hull.
Returns
-------
u : ndarray(float, ndim=1)
        The updated threat points. | Below is the instruction that describes the task:
### Input:
Update the threat points if it not feasible in the new W,
by the minimum of new feasible payoffs.
Parameters
----------
u : ndarray(float, ndim=1)
The threat points.
W : ndarray(float, ndim=1)
The points that construct the feasible payoff convex hull.
Returns
-------
u : ndarray(float, ndim=1)
The updated threat points.
### Response:
def _update_u(u, W):
"""
Update the threat points if it not feasible in the new W,
by the minimum of new feasible payoffs.
Parameters
----------
u : ndarray(float, ndim=1)
The threat points.
W : ndarray(float, ndim=1)
The points that construct the feasible payoff convex hull.
Returns
-------
u : ndarray(float, ndim=1)
The updated threat points.
"""
for i in range(2):
W_min = W[:, i].min()
if u[i] < W_min:
u[i] = W_min
return u |
def like(self, photo_id):
    """
    Like a photo on behalf of the logged-in user.
    This requires the 'write_likes' scope.
    Note: This action is idempotent; sending the POST request
    to a single photo multiple times has no additional effect.
    :param photo_id [string]: The photo’s ID. Required.
    :return: [Photo]: The Unsplash Photo.
    """
    url = "/photos/%s/like" % photo_id
    # The API responds with the photo resource, which is wrapped in the
    # Photo model by the caller of this result.
    result = self._post(url)
return PhotoModel.parse(result) | Like a photo on behalf of the logged-in user.
This requires the 'write_likes' scope.
Note: This action is idempotent; sending the POST request
to a single photo multiple times has no additional effect.
:param photo_id [string]: The photo’s ID. Required.
    :return: [Photo]: The Unsplash Photo. | Below is the instruction that describes the task:
### Input:
Like a photo on behalf of the logged-in user.
This requires the 'write_likes' scope.
Note: This action is idempotent; sending the POST request
to a single photo multiple times has no additional effect.
:param photo_id [string]: The photo’s ID. Required.
:return: [Photo]: The Unsplash Photo.
### Response:
def like(self, photo_id):
    """
    Like a photo on behalf of the logged-in user.
    This requires the 'write_likes' scope.
    Note: This action is idempotent; sending the POST request
    to a single photo multiple times has no additional effect.
    :param photo_id [string]: The photo’s ID. Required.
    :return: [Photo]: The Unsplash Photo.
    """
    # POST to the like endpoint and wrap the returned resource.
    response = self._post("/photos/%s/like" % photo_id)
    return PhotoModel.parse(response)
def nfc(ctx, enable, disable, enable_all, disable_all, list, lock_code, force):
"""
Enable or disable applications over NFC.
"""
if not (list or enable_all or enable or disable_all or disable):
ctx.fail('No configuration options chosen.')
if enable_all:
enable = APPLICATION.__members__.keys()
if disable_all:
disable = APPLICATION.__members__.keys()
_ensure_not_invalid_options(ctx, enable, disable)
dev = ctx.obj['dev']
nfc_supported = dev.config.nfc_supported
nfc_enabled = dev.config.nfc_enabled
if not nfc_supported:
ctx.fail('NFC interface not available.')
if list:
_list_apps(ctx, nfc_enabled)
for app in enable:
if APPLICATION[app] & nfc_supported:
nfc_enabled |= APPLICATION[app]
else:
ctx.fail('{} not supported over NFC.'.format(app))
for app in disable:
if APPLICATION[app] & nfc_supported:
nfc_enabled &= ~APPLICATION[app]
else:
ctx.fail('{} not supported over NFC.'.format(app))
f_confirm = '{}{}Configure NFC interface?'.format(
'Enable {}.\n'.format(
', '.join(
[str(APPLICATION[app]) for app in enable])) if enable else '',
'Disable {}.\n'.format(
', '.join(
[str(APPLICATION[app]) for app in disable])) if disable else '')
is_locked = dev.config.configuration_locked
if force and is_locked and not lock_code:
ctx.fail('Configuration is locked - please supply the --lock-code '
'option.')
if lock_code and not is_locked:
ctx.fail('Configuration is not locked - please remove the '
'--lock-code option.')
force or click.confirm(f_confirm, abort=True, err=True)
if is_locked and not lock_code:
lock_code = prompt_lock_code()
if lock_code:
lock_code = _parse_lock_code(ctx, lock_code)
try:
dev.write_config(
device_config(
nfc_enabled=nfc_enabled),
reboot=True, lock_key=lock_code)
except Exception as e:
logger.error('Failed to write config', exc_info=e)
        ctx.fail('Failed to configure NFC applications.') | Enable or disable applications over NFC. | Below is the instruction that describes the task:
### Input:
Enable or disable applications over NFC.
### Response:
def nfc(ctx, enable, disable, enable_all, disable_all, list, lock_code, force):
"""
Enable or disable applications over NFC.
"""
if not (list or enable_all or enable or disable_all or disable):
ctx.fail('No configuration options chosen.')
if enable_all:
enable = APPLICATION.__members__.keys()
if disable_all:
disable = APPLICATION.__members__.keys()
_ensure_not_invalid_options(ctx, enable, disable)
dev = ctx.obj['dev']
nfc_supported = dev.config.nfc_supported
nfc_enabled = dev.config.nfc_enabled
if not nfc_supported:
ctx.fail('NFC interface not available.')
if list:
_list_apps(ctx, nfc_enabled)
for app in enable:
if APPLICATION[app] & nfc_supported:
nfc_enabled |= APPLICATION[app]
else:
ctx.fail('{} not supported over NFC.'.format(app))
for app in disable:
if APPLICATION[app] & nfc_supported:
nfc_enabled &= ~APPLICATION[app]
else:
ctx.fail('{} not supported over NFC.'.format(app))
f_confirm = '{}{}Configure NFC interface?'.format(
'Enable {}.\n'.format(
', '.join(
[str(APPLICATION[app]) for app in enable])) if enable else '',
'Disable {}.\n'.format(
', '.join(
[str(APPLICATION[app]) for app in disable])) if disable else '')
is_locked = dev.config.configuration_locked
if force and is_locked and not lock_code:
ctx.fail('Configuration is locked - please supply the --lock-code '
'option.')
if lock_code and not is_locked:
ctx.fail('Configuration is not locked - please remove the '
'--lock-code option.')
force or click.confirm(f_confirm, abort=True, err=True)
if is_locked and not lock_code:
lock_code = prompt_lock_code()
if lock_code:
lock_code = _parse_lock_code(ctx, lock_code)
try:
dev.write_config(
device_config(
nfc_enabled=nfc_enabled),
reboot=True, lock_key=lock_code)
except Exception as e:
logger.error('Failed to write config', exc_info=e)
ctx.fail('Failed to configure NFC applications.') |
def write(self, buf):
"""Write bytes to the sink (if currently playing).
"""
buf = align_buf(buf, self._sample_width)
buf = normalize_audio_buffer(buf, self.volume_percentage)
    return self._sink.write(buf) | Write bytes to the sink (if currently playing). | Below is the instruction that describes the task:
### Input:
Write bytes to the sink (if currently playing).
### Response:
def write(self, buf):
"""Write bytes to the sink (if currently playing).
"""
buf = align_buf(buf, self._sample_width)
buf = normalize_audio_buffer(buf, self.volume_percentage)
return self._sink.write(buf) |
def create_topic(self, topic):
"""Create a topic."""
nsq.assert_valid_topic_name(topic)
    return self._request('POST', '/topic/create', fields={'topic': topic}) | Create a topic. | Below is the instruction that describes the task:
### Input:
Create a topic.
### Response:
def create_topic(self, topic):
"""Create a topic."""
nsq.assert_valid_topic_name(topic)
return self._request('POST', '/topic/create', fields={'topic': topic}) |
def instantiate_config(config):
'''setup the config and load external modules
This updates 'config' as follows:
* All paths are replaced with absolute paths
* A hash and JSON dump of the config are stored in the config
* If 'pythonpath' is in the config, it is added to sys.path
* If 'setup_modules' is in the config, all modules named in it are loaded
'''
make_absolute_paths(config)
pipeline_config = config['streamcorpus_pipeline']
pipeline_config['config_hash'] = make_hash(config)
pipeline_config['config_json'] = json.dumps(config)
logger.debug('running config: {0} = {1!r}'
.format(pipeline_config['config_hash'], config))
## Load modules
# This is a method of using settings in yaml configs to load plugins.
die = False
for pathstr in pipeline_config.get('pythonpath', {}).itervalues():
if pathstr not in sys.path:
sys.path.append(pathstr)
for modname in pipeline_config.get('setup_modules', {}).itervalues():
try:
m = importlib.import_module(modname)
if not m:
logger.critical('could not load module %r', modname)
die = True
continue
if hasattr(m, 'setup'):
m.setup()
logger.debug('loaded and setup %r', modname)
else:
logger.debug('loaded %r', modname)
except Exception:
logger.critical('error loading and initting module %r', modname, exc_info=True)
die = True
if die:
sys.exit(1) | setup the config and load external modules
This updates 'config' as follows:
* All paths are replaced with absolute paths
* A hash and JSON dump of the config are stored in the config
* If 'pythonpath' is in the config, it is added to sys.path
* If 'setup_modules' is in the config, all modules named in it are loaded | Below is the instruction that describes the task:
### Input:
setup the config and load external modules
This updates 'config' as follows:
* All paths are replaced with absolute paths
* A hash and JSON dump of the config are stored in the config
* If 'pythonpath' is in the config, it is added to sys.path
* If 'setup_modules' is in the config, all modules named in it are loaded
### Response:
def instantiate_config(config):
'''setup the config and load external modules
This updates 'config' as follows:
* All paths are replaced with absolute paths
* A hash and JSON dump of the config are stored in the config
* If 'pythonpath' is in the config, it is added to sys.path
* If 'setup_modules' is in the config, all modules named in it are loaded
'''
make_absolute_paths(config)
pipeline_config = config['streamcorpus_pipeline']
pipeline_config['config_hash'] = make_hash(config)
pipeline_config['config_json'] = json.dumps(config)
logger.debug('running config: {0} = {1!r}'
.format(pipeline_config['config_hash'], config))
## Load modules
# This is a method of using settings in yaml configs to load plugins.
die = False
for pathstr in pipeline_config.get('pythonpath', {}).itervalues():
if pathstr not in sys.path:
sys.path.append(pathstr)
for modname in pipeline_config.get('setup_modules', {}).itervalues():
try:
m = importlib.import_module(modname)
if not m:
logger.critical('could not load module %r', modname)
die = True
continue
if hasattr(m, 'setup'):
m.setup()
logger.debug('loaded and setup %r', modname)
else:
logger.debug('loaded %r', modname)
except Exception:
logger.critical('error loading and initting module %r', modname, exc_info=True)
die = True
if die:
sys.exit(1) |
def export_ply(mesh,
encoding='binary',
vertex_normal=None):
"""
Export a mesh in the PLY format.
Parameters
----------
mesh : Trimesh object
encoding : ['ascii'|'binary_little_endian']
vertex_normal : include vertex normals
Returns
----------
export : bytes of result
"""
# evaluate input args
# allow a shortcut for binary
if encoding == 'binary':
encoding = 'binary_little_endian'
elif encoding not in ['binary_little_endian', 'ascii']:
raise ValueError('encoding must be binary or ascii')
# if vertex normals aren't specifically asked for
# only export them if they are stored in cache
if vertex_normal is None:
vertex_normal = 'vertex_normal' in mesh._cache
# custom numpy dtypes for exporting
dtype_face = [('count', '<u1'),
('index', '<i4', (3))]
dtype_vertex = [('vertex', '<f4', (3))]
# will be appended to main dtype if needed
dtype_vertex_normal = ('normals', '<f4', (3))
dtype_color = ('rgba', '<u1', (4))
# get template strings in dict
templates = json.loads(get_resource('ply.template'))
# start collecting elements into a string for the header
header = templates['intro']
header += templates['vertex']
# if we're exporting vertex normals add them
# to the header and dtype
if vertex_normal:
header += templates['vertex_normal']
dtype_vertex.append(dtype_vertex_normal)
# if mesh has a vertex coloradd it to the header
if mesh.visual.kind == 'vertex' and encoding != 'ascii':
header += templates['color']
dtype_vertex.append(dtype_color)
# create and populate the custom dtype for vertices
vertex = np.zeros(len(mesh.vertices),
dtype=dtype_vertex)
vertex['vertex'] = mesh.vertices
if vertex_normal:
vertex['normals'] = mesh.vertex_normals
if mesh.visual.kind == 'vertex':
vertex['rgba'] = mesh.visual.vertex_colors
header += templates['face']
if mesh.visual.kind == 'face' and encoding != 'ascii':
header += templates['color']
dtype_face.append(dtype_color)
# put mesh face data into custom dtype to export
faces = np.zeros(len(mesh.faces), dtype=dtype_face)
faces['count'] = 3
faces['index'] = mesh.faces
if mesh.visual.kind == 'face' and encoding != 'ascii':
faces['rgba'] = mesh.visual.face_colors
header += templates['outro']
header_params = {'vertex_count': len(mesh.vertices),
'face_count': len(mesh.faces),
'encoding': encoding}
export = Template(header).substitute(header_params).encode('utf-8')
if encoding == 'binary_little_endian':
export += vertex.tostring()
export += faces.tostring()
elif encoding == 'ascii':
# ply format is: (face count, v0, v1, v2)
fstack = np.column_stack((np.ones(len(mesh.faces),
dtype=np.int64) * 3,
mesh.faces))
# if we're exporting vertex normals they get stacked
if vertex_normal:
vstack = np.column_stack((mesh.vertices,
mesh.vertex_normals))
else:
vstack = mesh.vertices
# add the string formatted vertices and faces
export += (util.array_to_string(vstack,
col_delim=' ',
row_delim='\n') +
'\n' +
util.array_to_string(fstack,
col_delim=' ',
row_delim='\n')).encode('utf-8')
else:
raise ValueError('encoding must be ascii or binary!')
return export | Export a mesh in the PLY format.
Parameters
----------
mesh : Trimesh object
encoding : ['ascii'|'binary_little_endian']
vertex_normal : include vertex normals
Returns
----------
export : bytes of result | Below is the instruction that describes the task:
### Input:
Export a mesh in the PLY format.
Parameters
----------
mesh : Trimesh object
encoding : ['ascii'|'binary_little_endian']
vertex_normal : include vertex normals
Returns
----------
export : bytes of result
### Response:
def export_ply(mesh,
encoding='binary',
vertex_normal=None):
"""
Export a mesh in the PLY format.
Parameters
----------
mesh : Trimesh object
encoding : ['ascii'|'binary_little_endian']
vertex_normal : include vertex normals
Returns
----------
export : bytes of result
"""
# evaluate input args
# allow a shortcut for binary
if encoding == 'binary':
encoding = 'binary_little_endian'
elif encoding not in ['binary_little_endian', 'ascii']:
raise ValueError('encoding must be binary or ascii')
# if vertex normals aren't specifically asked for
# only export them if they are stored in cache
if vertex_normal is None:
vertex_normal = 'vertex_normal' in mesh._cache
# custom numpy dtypes for exporting
dtype_face = [('count', '<u1'),
('index', '<i4', (3))]
dtype_vertex = [('vertex', '<f4', (3))]
# will be appended to main dtype if needed
dtype_vertex_normal = ('normals', '<f4', (3))
dtype_color = ('rgba', '<u1', (4))
# get template strings in dict
templates = json.loads(get_resource('ply.template'))
# start collecting elements into a string for the header
header = templates['intro']
header += templates['vertex']
# if we're exporting vertex normals add them
# to the header and dtype
if vertex_normal:
header += templates['vertex_normal']
dtype_vertex.append(dtype_vertex_normal)
# if mesh has a vertex coloradd it to the header
if mesh.visual.kind == 'vertex' and encoding != 'ascii':
header += templates['color']
dtype_vertex.append(dtype_color)
# create and populate the custom dtype for vertices
vertex = np.zeros(len(mesh.vertices),
dtype=dtype_vertex)
vertex['vertex'] = mesh.vertices
if vertex_normal:
vertex['normals'] = mesh.vertex_normals
if mesh.visual.kind == 'vertex':
vertex['rgba'] = mesh.visual.vertex_colors
header += templates['face']
if mesh.visual.kind == 'face' and encoding != 'ascii':
header += templates['color']
dtype_face.append(dtype_color)
# put mesh face data into custom dtype to export
faces = np.zeros(len(mesh.faces), dtype=dtype_face)
faces['count'] = 3
faces['index'] = mesh.faces
if mesh.visual.kind == 'face' and encoding != 'ascii':
faces['rgba'] = mesh.visual.face_colors
header += templates['outro']
header_params = {'vertex_count': len(mesh.vertices),
'face_count': len(mesh.faces),
'encoding': encoding}
export = Template(header).substitute(header_params).encode('utf-8')
if encoding == 'binary_little_endian':
export += vertex.tostring()
export += faces.tostring()
elif encoding == 'ascii':
# ply format is: (face count, v0, v1, v2)
fstack = np.column_stack((np.ones(len(mesh.faces),
dtype=np.int64) * 3,
mesh.faces))
# if we're exporting vertex normals they get stacked
if vertex_normal:
vstack = np.column_stack((mesh.vertices,
mesh.vertex_normals))
else:
vstack = mesh.vertices
# add the string formatted vertices and faces
export += (util.array_to_string(vstack,
col_delim=' ',
row_delim='\n') +
'\n' +
util.array_to_string(fstack,
col_delim=' ',
row_delim='\n')).encode('utf-8')
else:
raise ValueError('encoding must be ascii or binary!')
return export |
def post(self):
"""token生成流程:
>1. 首先使用用户名和密码去签定身份返回True/False
>2. 如果True接着获取用户可公开数据
>3. 之后生成JWT
具体业务流程自由定义,比如可以保存token到redis中,设置ttl过期时间。
"""
#1.
username = request.form.get("username")
password = request.form.get("password")
exipres = 7200
#expire time(seconds)
if username and password:
_authRes = self._getAuthentication(username, password)
else:
return {"msg": "invalid username or password"}
#2.
if _authRes:
_payload = self._getUserData(username)
#3.
try:
token = self.jwt.createJWT(_payload, expiredSeconds=exipres)
except JWTException:
return {"msg": "Failed to request token"}
else:
return {"token": token}
else:
return {"msg": "Authentication failed"} | token生成流程:
>1. 首先使用用户名和密码去签定身份返回True/False
>2. 如果True接着获取用户可公开数据
>3. 之后生成JWT
具体业务流程自由定义,比如可以保存token到redis中,设置ttl过期时间。 | Below is the instruction that describes the task:
### Input:
token生成流程:
>1. 首先使用用户名和密码去签定身份返回True/False
>2. 如果True接着获取用户可公开数据
>3. 之后生成JWT
具体业务流程自由定义,比如可以保存token到redis中,设置ttl过期时间。
### Response:
def post(self):
"""token生成流程:
>1. 首先使用用户名和密码去签定身份返回True/False
>2. 如果True接着获取用户可公开数据
>3. 之后生成JWT
具体业务流程自由定义,比如可以保存token到redis中,设置ttl过期时间。
"""
#1.
username = request.form.get("username")
password = request.form.get("password")
exipres = 7200
#expire time(seconds)
if username and password:
_authRes = self._getAuthentication(username, password)
else:
return {"msg": "invalid username or password"}
#2.
if _authRes:
_payload = self._getUserData(username)
#3.
try:
token = self.jwt.createJWT(_payload, expiredSeconds=exipres)
except JWTException:
return {"msg": "Failed to request token"}
else:
return {"token": token}
else:
return {"msg": "Authentication failed"} |
def wait_for_master_to_start(single_master):
'''
Wait for a nomad master to start
'''
i = 0
while True:
try:
r = requests.get("http://%s:4646/v1/status/leader" % single_master)
if r.status_code == 200:
break
except:
Log.debug(sys.exc_info()[0])
Log.info("Waiting for cluster to come up... %s" % i)
time.sleep(1)
if i > 10:
Log.error("Failed to start Nomad Cluster!")
sys.exit(-1)
        i = i + 1 | Wait for a nomad master to start | Below is the instruction that describes the task:
### Input:
Wait for a nomad master to start
### Response:
def wait_for_master_to_start(single_master):
'''
Wait for a nomad master to start
'''
i = 0
while True:
try:
r = requests.get("http://%s:4646/v1/status/leader" % single_master)
if r.status_code == 200:
break
except:
Log.debug(sys.exc_info()[0])
Log.info("Waiting for cluster to come up... %s" % i)
time.sleep(1)
if i > 10:
Log.error("Failed to start Nomad Cluster!")
sys.exit(-1)
i = i + 1 |
def save(self, *args, **kwargs):
"""
Updates name
"""
self.name = str(self.company.name) + " --- " + str(self.person)
    super(Executive, self).save(*args, **kwargs) | Updates name | Below is the instruction that describes the task:
### Input:
Updates name
### Response:
def save(self, *args, **kwargs):
"""
Updates name
"""
self.name = str(self.company.name) + " --- " + str(self.person)
super(Executive, self).save(*args, **kwargs) |
def filter_record(self, record):
"""
Filter a single record
"""
quality_scores = record.letter_annotations['phred_quality']
mean_score = mean(quality_scores)
if mean_score >= self.min_mean_score:
return record
else:
        raise FailedFilter(mean_score) | Filter a single record | Below is the instruction that describes the task:
### Input:
Filter a single record
### Response:
def filter_record(self, record):
"""
Filter a single record
"""
quality_scores = record.letter_annotations['phred_quality']
mean_score = mean(quality_scores)
if mean_score >= self.min_mean_score:
return record
else:
raise FailedFilter(mean_score) |
def _get_flatchoices(self):
"""
Redefine standard method.
Return constants themselves instead of their names for right rendering
in admin's 'change_list' view, if field is present in 'list_display'
attribute of model's admin.
"""
return [
(self.to_python(choice), value) for choice, value in self._choices
] | Redefine standard method.
Return constants themselves instead of their names for right rendering
in admin's 'change_list' view, if field is present in 'list_display'
attribute of model's admin. | Below is the instruction that describes the task:
### Input:
Redefine standard method.
Return constants themselves instead of their names for right rendering
in admin's 'change_list' view, if field is present in 'list_display'
attribute of model's admin.
### Response:
def _get_flatchoices(self):
"""
Redefine standard method.
Return constants themselves instead of their names for right rendering
in admin's 'change_list' view, if field is present in 'list_display'
attribute of model's admin.
"""
return [
(self.to_python(choice), value) for choice, value in self._choices
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.