code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def make_parser(stream_or_string):
"""Create a xml.dom.pulldom parser."""
if isinstance(stream_or_string, six.string_types):
# XXX: the pulldom.parseString() function doesn't seem to
# like operating on unicode strings!
return pulldom.parseString(str(stream_or_string))
else:
return pulldom.parse(stream_or_string) | Create a xml.dom.pulldom parser. | Below is the the instruction that describes the task:
### Input:
Create a xml.dom.pulldom parser.
### Response:
def make_parser(stream_or_string):
"""Create a xml.dom.pulldom parser."""
if isinstance(stream_or_string, six.string_types):
# XXX: the pulldom.parseString() function doesn't seem to
# like operating on unicode strings!
return pulldom.parseString(str(stream_or_string))
else:
return pulldom.parse(stream_or_string) |
def compute_freq(self, csd=False):
"""Compute frequency domain analysis.
Returns
-------
list of dict
each item is a dict where 'data' is an instance of ChanFreq for a
single segment of signal, 'name' is the event type, if applicable,
'times' is a tuple of the start and end times in sec, 'duration' is
the actual duration of the segment, in seconds (can be dissociated
from 'times' if the signal was concatenated)
and with 'chan' (str), 'stage' (str) and 'cycle' (int)
"""
progress = QProgressDialog('Computing frequency', 'Abort',
0, len(self.data) - 1, self)
progress.setWindowModality(Qt.ApplicationModal)
freq = self.frequency
prep = freq['prep'].get_value()
scaling = freq['scaling'].get_value()
log_trans = freq['log_trans'].get_value()
#sides = freq['sides'].get_value()
taper = freq['taper'].get_value()
halfbandwidth = freq['hbw'].get_value()
NW = freq['nhbw_val'].get_value()
duration = freq['duration'].get_value()
overlap = freq['overlap_val'].value()
step = freq['step_val'].get_value()
centend = freq['centend'].get_value()
detrend = freq['detrend'].get_value()
norm = freq['norm'].get_value()
norm_concat = freq['norm_concat'].get_value()
if csd:
output = 'csd'
elif freq['spectrald'].isChecked():
output = 'spectraldensity'
else:
output = 'complex'
sides = 'one'
#if sides == 1:
# sides = 'one'
#elif sides == 2:
# sides = 'two'
if freq['overlap'].isChecked():
step = None
else:
overlap = None
if NW == 0 or not freq['nhbw'].get_value():
NW = None
if duration == 0 or not freq['welch_on'].get_value():
duration = None
if step == 0:
step = None
if detrend == 'none':
detrend = None
if freq['nfft_fixed'].isChecked():
n_fft = int(freq['nfft_fixed_val'].get_value())
elif freq['nfft_zeropad'].isChecked():
n_fft = max([x['data'].number_of('time')[0] for x in self.data])
lg.info('n_fft is zero-padded to: ' + str(n_fft))
elif freq['nfft_seg'].isChecked():
n_fft = None
# Normalization data preparation
if norm not in ['none', 'by integral of each segment']:
norm_evt_type = None
norm_stage = None
norm_chan = None
ncat = (0, 0, 0, 0)
if norm == 'by mean of event type(s)':
norm_chan = [x + ' (' + self.idx_group.currentText() + ''
')'for x in self.one_grp['chan_to_plot']]
norm_evt_type = [x.text() for x in \
freq['norm_evt_type'].selectedItems()]
if norm == 'by mean of stage(s)':
norm_stage = [x.text() for x in \
freq['norm_stage'].selectedItems()]
if norm_concat:
ncat = (1, 1, 1, 1)
lg.info(' '.join(['Getting segments for norm. cat: ', str(ncat),
'evt_type', str(norm_evt_type), 'stage',
str(norm_stage), 'chan', str(norm_chan)]))
norm_seg = fetch(self.parent.info.dataset,
self.parent.notes.annot, ncat,
evt_type=norm_evt_type, stage=norm_stage,
chan_full=norm_chan)
if not norm_seg.segments:
msg = 'No valid normalization signal found.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error fetching data')
error_dialog.showMessage(msg)
progress.cancel()
return
norm_seg.read_data(self.chan, ref_chan=self.one_grp['ref_chan'],
grp_name=self.one_grp['name'], parent=None)
if prep:
norm_seg = self.transform_data(norm_seg)
all_Sxx = []
for seg in norm_seg:
dat = seg['data']
if prep:
dat = seg['trans_data']
try:
Sxx = frequency(dat, output=output, scaling=scaling,
sides=sides, taper=taper,
halfbandwidth=halfbandwidth, NW=NW,
duration=duration, overlap=overlap, step=step,
detrend=detrend, n_fft=n_fft,
log_trans=log_trans, centend=centend)
except ValueError:
msg = ('Value error encountered in frequency '
'transformation for normalization reference data.'
'\nIf using time-averaging, make sure the '
'normalization data segments are at least as long '
'as the time window.')
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error transforming data')
error_dialog.showMessage(msg)
progress.cancel()
return
all_Sxx.append(Sxx)
nSxx = ChanFreq()
nSxx.s_freq = Sxx.s_freq
nSxx.axis['freq'] = Sxx.axis['freq']
nSxx.axis['chan'] = Sxx.axis['chan']
nSxx.data = empty(1, dtype='O')
nSxx.data[0] = empty((Sxx.number_of('chan')[0],
Sxx.number_of('freq')[0]), dtype='f')
nSxx.data[0] = mean(
stack([x()[0] for x in all_Sxx], axis=2), axis=2)
# end of normalization data prep
lg.info(' '.join(['Freq settings:', output, scaling, 'sides:',
str(sides), taper, 'hbw:', str(halfbandwidth), 'NW:',
str(NW), 'dur:', str(duration), 'overlap:',
str(overlap), 'step:', str(step), 'detrend:',
str(detrend), 'n_fft:', str(n_fft), 'norm',
str(norm), 'log:', str(log_trans), 'central tendency',
str(centend)]))
# Main frequency analysis
xfreq = []
for i, seg in enumerate(self.data):
new_seg = dict(seg)
data = seg['data']
if prep:
data = seg['trans_data']
timeline = seg['data'].axis['time'][0]
new_seg['start'] = timeline[0]
new_seg['end'] = timeline[-1]
new_seg['duration'] = len(timeline) / data.s_freq
try:
Sxx = frequency(data, output=output, scaling=scaling,
sides=sides, taper=taper,
halfbandwidth=halfbandwidth, NW=NW,
duration=duration, overlap=overlap, step=step,
detrend=detrend, n_fft=n_fft,
log_trans=log_trans, centend=centend)
except SyntaxError:
msg = 'Value error encountered in frequency transformation.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error transforming data')
error_dialog.showMessage(msg)
progress.cancel()
return
if norm != 'none':
for j, chan in enumerate(Sxx.axis['chan'][0]):
dat = Sxx.data[0][j,:]
sf = Sxx.axis['freq'][0]
f_res = sf[1] - sf[0] # frequency resolution
if norm == 'by integral of each segment':
norm_dat = sum(dat) * f_res # integral by midpoint rule
else:
norm_dat = nSxx(chan=chan)[0]
Sxx.data[0][j,:] = dat / norm_dat
new_seg['data'] = Sxx
xfreq.append(new_seg)
progress.setValue(i)
if progress.wasCanceled():
msg = 'Analysis canceled by user.'
self.parent.statusBar().showMessage(msg)
return
progress.close()
return xfreq | Compute frequency domain analysis.
Returns
-------
list of dict
each item is a dict where 'data' is an instance of ChanFreq for a
single segment of signal, 'name' is the event type, if applicable,
'times' is a tuple of the start and end times in sec, 'duration' is
the actual duration of the segment, in seconds (can be dissociated
from 'times' if the signal was concatenated)
and with 'chan' (str), 'stage' (str) and 'cycle' (int) | Below is the the instruction that describes the task:
### Input:
Compute frequency domain analysis.
Returns
-------
list of dict
each item is a dict where 'data' is an instance of ChanFreq for a
single segment of signal, 'name' is the event type, if applicable,
'times' is a tuple of the start and end times in sec, 'duration' is
the actual duration of the segment, in seconds (can be dissociated
from 'times' if the signal was concatenated)
and with 'chan' (str), 'stage' (str) and 'cycle' (int)
### Response:
def compute_freq(self, csd=False):
"""Compute frequency domain analysis.
Returns
-------
list of dict
each item is a dict where 'data' is an instance of ChanFreq for a
single segment of signal, 'name' is the event type, if applicable,
'times' is a tuple of the start and end times in sec, 'duration' is
the actual duration of the segment, in seconds (can be dissociated
from 'times' if the signal was concatenated)
and with 'chan' (str), 'stage' (str) and 'cycle' (int)
"""
progress = QProgressDialog('Computing frequency', 'Abort',
0, len(self.data) - 1, self)
progress.setWindowModality(Qt.ApplicationModal)
freq = self.frequency
prep = freq['prep'].get_value()
scaling = freq['scaling'].get_value()
log_trans = freq['log_trans'].get_value()
#sides = freq['sides'].get_value()
taper = freq['taper'].get_value()
halfbandwidth = freq['hbw'].get_value()
NW = freq['nhbw_val'].get_value()
duration = freq['duration'].get_value()
overlap = freq['overlap_val'].value()
step = freq['step_val'].get_value()
centend = freq['centend'].get_value()
detrend = freq['detrend'].get_value()
norm = freq['norm'].get_value()
norm_concat = freq['norm_concat'].get_value()
if csd:
output = 'csd'
elif freq['spectrald'].isChecked():
output = 'spectraldensity'
else:
output = 'complex'
sides = 'one'
#if sides == 1:
# sides = 'one'
#elif sides == 2:
# sides = 'two'
if freq['overlap'].isChecked():
step = None
else:
overlap = None
if NW == 0 or not freq['nhbw'].get_value():
NW = None
if duration == 0 or not freq['welch_on'].get_value():
duration = None
if step == 0:
step = None
if detrend == 'none':
detrend = None
if freq['nfft_fixed'].isChecked():
n_fft = int(freq['nfft_fixed_val'].get_value())
elif freq['nfft_zeropad'].isChecked():
n_fft = max([x['data'].number_of('time')[0] for x in self.data])
lg.info('n_fft is zero-padded to: ' + str(n_fft))
elif freq['nfft_seg'].isChecked():
n_fft = None
# Normalization data preparation
if norm not in ['none', 'by integral of each segment']:
norm_evt_type = None
norm_stage = None
norm_chan = None
ncat = (0, 0, 0, 0)
if norm == 'by mean of event type(s)':
norm_chan = [x + ' (' + self.idx_group.currentText() + ''
')'for x in self.one_grp['chan_to_plot']]
norm_evt_type = [x.text() for x in \
freq['norm_evt_type'].selectedItems()]
if norm == 'by mean of stage(s)':
norm_stage = [x.text() for x in \
freq['norm_stage'].selectedItems()]
if norm_concat:
ncat = (1, 1, 1, 1)
lg.info(' '.join(['Getting segments for norm. cat: ', str(ncat),
'evt_type', str(norm_evt_type), 'stage',
str(norm_stage), 'chan', str(norm_chan)]))
norm_seg = fetch(self.parent.info.dataset,
self.parent.notes.annot, ncat,
evt_type=norm_evt_type, stage=norm_stage,
chan_full=norm_chan)
if not norm_seg.segments:
msg = 'No valid normalization signal found.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error fetching data')
error_dialog.showMessage(msg)
progress.cancel()
return
norm_seg.read_data(self.chan, ref_chan=self.one_grp['ref_chan'],
grp_name=self.one_grp['name'], parent=None)
if prep:
norm_seg = self.transform_data(norm_seg)
all_Sxx = []
for seg in norm_seg:
dat = seg['data']
if prep:
dat = seg['trans_data']
try:
Sxx = frequency(dat, output=output, scaling=scaling,
sides=sides, taper=taper,
halfbandwidth=halfbandwidth, NW=NW,
duration=duration, overlap=overlap, step=step,
detrend=detrend, n_fft=n_fft,
log_trans=log_trans, centend=centend)
except ValueError:
msg = ('Value error encountered in frequency '
'transformation for normalization reference data.'
'\nIf using time-averaging, make sure the '
'normalization data segments are at least as long '
'as the time window.')
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error transforming data')
error_dialog.showMessage(msg)
progress.cancel()
return
all_Sxx.append(Sxx)
nSxx = ChanFreq()
nSxx.s_freq = Sxx.s_freq
nSxx.axis['freq'] = Sxx.axis['freq']
nSxx.axis['chan'] = Sxx.axis['chan']
nSxx.data = empty(1, dtype='O')
nSxx.data[0] = empty((Sxx.number_of('chan')[0],
Sxx.number_of('freq')[0]), dtype='f')
nSxx.data[0] = mean(
stack([x()[0] for x in all_Sxx], axis=2), axis=2)
# end of normalization data prep
lg.info(' '.join(['Freq settings:', output, scaling, 'sides:',
str(sides), taper, 'hbw:', str(halfbandwidth), 'NW:',
str(NW), 'dur:', str(duration), 'overlap:',
str(overlap), 'step:', str(step), 'detrend:',
str(detrend), 'n_fft:', str(n_fft), 'norm',
str(norm), 'log:', str(log_trans), 'central tendency',
str(centend)]))
# Main frequency analysis
xfreq = []
for i, seg in enumerate(self.data):
new_seg = dict(seg)
data = seg['data']
if prep:
data = seg['trans_data']
timeline = seg['data'].axis['time'][0]
new_seg['start'] = timeline[0]
new_seg['end'] = timeline[-1]
new_seg['duration'] = len(timeline) / data.s_freq
try:
Sxx = frequency(data, output=output, scaling=scaling,
sides=sides, taper=taper,
halfbandwidth=halfbandwidth, NW=NW,
duration=duration, overlap=overlap, step=step,
detrend=detrend, n_fft=n_fft,
log_trans=log_trans, centend=centend)
except SyntaxError:
msg = 'Value error encountered in frequency transformation.'
error_dialog = QErrorMessage(self)
error_dialog.setWindowTitle('Error transforming data')
error_dialog.showMessage(msg)
progress.cancel()
return
if norm != 'none':
for j, chan in enumerate(Sxx.axis['chan'][0]):
dat = Sxx.data[0][j,:]
sf = Sxx.axis['freq'][0]
f_res = sf[1] - sf[0] # frequency resolution
if norm == 'by integral of each segment':
norm_dat = sum(dat) * f_res # integral by midpoint rule
else:
norm_dat = nSxx(chan=chan)[0]
Sxx.data[0][j,:] = dat / norm_dat
new_seg['data'] = Sxx
xfreq.append(new_seg)
progress.setValue(i)
if progress.wasCanceled():
msg = 'Analysis canceled by user.'
self.parent.statusBar().showMessage(msg)
return
progress.close()
return xfreq |
def query(self, s):
"""Query the search index for sets similar to the query set.
Args:
s (Iterable): the query set.
Returns (list): a list of tuples `(index, similarity)` where the index
is the index of the matching sets in the original list of sets.
"""
s1 = np.sort([self.order[token] for token in s if token in self.order])
logging.debug("{} original tokens and {} tokens after applying "
"frequency order.".format(len(s), len(s1)))
prefix = self._get_prefix(s1)
candidates = set([i for p1, token in enumerate(prefix)
for i, p2 in self.index[token]
if self.position_filter_func(s1, self.sets[i], p1, p2,
self.similarity_threshold)])
logging.debug("{} candidates found.".format(len(candidates)))
results = deque([])
for i in candidates:
s2 = self.sets[i]
sim = self.similarity_func(s1, s2)
if sim < self.similarity_threshold:
continue
results.append((i, sim))
logging.debug("{} verified sets found.".format(len(results)))
return list(results) | Query the search index for sets similar to the query set.
Args:
s (Iterable): the query set.
Returns (list): a list of tuples `(index, similarity)` where the index
is the index of the matching sets in the original list of sets. | Below is the the instruction that describes the task:
### Input:
Query the search index for sets similar to the query set.
Args:
s (Iterable): the query set.
Returns (list): a list of tuples `(index, similarity)` where the index
is the index of the matching sets in the original list of sets.
### Response:
def query(self, s):
"""Query the search index for sets similar to the query set.
Args:
s (Iterable): the query set.
Returns (list): a list of tuples `(index, similarity)` where the index
is the index of the matching sets in the original list of sets.
"""
s1 = np.sort([self.order[token] for token in s if token in self.order])
logging.debug("{} original tokens and {} tokens after applying "
"frequency order.".format(len(s), len(s1)))
prefix = self._get_prefix(s1)
candidates = set([i for p1, token in enumerate(prefix)
for i, p2 in self.index[token]
if self.position_filter_func(s1, self.sets[i], p1, p2,
self.similarity_threshold)])
logging.debug("{} candidates found.".format(len(candidates)))
results = deque([])
for i in candidates:
s2 = self.sets[i]
sim = self.similarity_func(s1, s2)
if sim < self.similarity_threshold:
continue
results.append((i, sim))
logging.debug("{} verified sets found.".format(len(results)))
return list(results) |
def parse_timezone(matches, default_timezone=UTC):
"""Parses ISO 8601 time zone specs into tzinfo offsets
"""
if matches["timezone"] == "Z":
return UTC
# This isn't strictly correct, but it's common to encounter dates without
# timezones so I'll assume the default (which defaults to UTC).
# Addresses issue 4.
if matches["timezone"] is None:
return default_timezone
sign = matches["tz_sign"]
hours = to_int(matches, "tz_hour")
minutes = to_int(matches, "tz_minute", default_to_zero=True)
description = "%s%02d:%02d" % (sign, hours, minutes)
if sign == "-":
hours = -hours
minutes = -minutes
return FixedOffset(hours, minutes, description) | Parses ISO 8601 time zone specs into tzinfo offsets | Below is the the instruction that describes the task:
### Input:
Parses ISO 8601 time zone specs into tzinfo offsets
### Response:
def parse_timezone(matches, default_timezone=UTC):
"""Parses ISO 8601 time zone specs into tzinfo offsets
"""
if matches["timezone"] == "Z":
return UTC
# This isn't strictly correct, but it's common to encounter dates without
# timezones so I'll assume the default (which defaults to UTC).
# Addresses issue 4.
if matches["timezone"] is None:
return default_timezone
sign = matches["tz_sign"]
hours = to_int(matches, "tz_hour")
minutes = to_int(matches, "tz_minute", default_to_zero=True)
description = "%s%02d:%02d" % (sign, hours, minutes)
if sign == "-":
hours = -hours
minutes = -minutes
return FixedOffset(hours, minutes, description) |
def PyParseRangeCheck(lower_bound, upper_bound):
"""Verify that a number is within a defined range.
This is a callback method for pyparsing setParseAction
that verifies that a read number is within a certain range.
To use this method it needs to be defined as a callback method
in setParseAction with the upper and lower bound set as parameters.
Args:
lower_bound (int): lower bound of the range.
upper_bound (int): upper bound of the range.
Returns:
Function: callback method that can be used by pyparsing setParseAction.
"""
# pylint: disable=unused-argument
def CheckRange(string, location, tokens):
"""Parse the arguments.
Args:
string (str): original string.
location (int): location in the string where the match was made
tokens (list[str]): tokens.
"""
try:
check_number = tokens[0]
except IndexError:
check_number = -1
if check_number < lower_bound:
raise pyparsing.ParseException(
'Value: {0:d} precedes lower bound: {1:d}'.format(
check_number, lower_bound))
if check_number > upper_bound:
raise pyparsing.ParseException(
'Value: {0:d} exceeds upper bound: {1:d}'.format(
check_number, upper_bound))
# Since callback methods for pyparsing need to accept certain parameters
# and there is no way to define conditions, like upper and lower bounds
# we need to return here a method that accepts those pyparsing parameters.
return CheckRange | Verify that a number is within a defined range.
This is a callback method for pyparsing setParseAction
that verifies that a read number is within a certain range.
To use this method it needs to be defined as a callback method
in setParseAction with the upper and lower bound set as parameters.
Args:
lower_bound (int): lower bound of the range.
upper_bound (int): upper bound of the range.
Returns:
Function: callback method that can be used by pyparsing setParseAction. | Below is the the instruction that describes the task:
### Input:
Verify that a number is within a defined range.
This is a callback method for pyparsing setParseAction
that verifies that a read number is within a certain range.
To use this method it needs to be defined as a callback method
in setParseAction with the upper and lower bound set as parameters.
Args:
lower_bound (int): lower bound of the range.
upper_bound (int): upper bound of the range.
Returns:
Function: callback method that can be used by pyparsing setParseAction.
### Response:
def PyParseRangeCheck(lower_bound, upper_bound):
"""Verify that a number is within a defined range.
This is a callback method for pyparsing setParseAction
that verifies that a read number is within a certain range.
To use this method it needs to be defined as a callback method
in setParseAction with the upper and lower bound set as parameters.
Args:
lower_bound (int): lower bound of the range.
upper_bound (int): upper bound of the range.
Returns:
Function: callback method that can be used by pyparsing setParseAction.
"""
# pylint: disable=unused-argument
def CheckRange(string, location, tokens):
"""Parse the arguments.
Args:
string (str): original string.
location (int): location in the string where the match was made
tokens (list[str]): tokens.
"""
try:
check_number = tokens[0]
except IndexError:
check_number = -1
if check_number < lower_bound:
raise pyparsing.ParseException(
'Value: {0:d} precedes lower bound: {1:d}'.format(
check_number, lower_bound))
if check_number > upper_bound:
raise pyparsing.ParseException(
'Value: {0:d} exceeds upper bound: {1:d}'.format(
check_number, upper_bound))
# Since callback methods for pyparsing need to accept certain parameters
# and there is no way to define conditions, like upper and lower bounds
# we need to return here a method that accepts those pyparsing parameters.
return CheckRange |
def expand(args):
"""
%prog expand bes.fasta reads.fastq
Expand sequences using short reads. Useful, for example for getting BAC-end
sequences. The template to use, in `bes.fasta` may just contain the junction
sequences, then align the reads to get the 'flanks' for such sequences.
"""
import math
from jcvi.formats.fasta import Fasta, SeqIO
from jcvi.formats.fastq import readlen, first, fasta
from jcvi.formats.blast import Blast
from jcvi.formats.base import FileShredder
from jcvi.apps.bowtie import align, get_samfile
from jcvi.apps.align import blast
p = OptionParser(expand.__doc__)
p.set_depth(depth=200)
p.set_firstN()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bes, reads = args
size = Fasta(bes).totalsize
rl = readlen([reads])
expected_size = size + 2 * rl
nreads = expected_size * opts.depth / rl
nreads = int(math.ceil(nreads / 1000.)) * 1000
# Attract reads
samfile, logfile = align([bes, reads, "--reorder", "--mapped",
"--firstN={0}".format(opts.firstN)])
samfile, mapped, _ = get_samfile(reads, bes, bowtie=True, mapped=True)
logging.debug("Extract first {0} reads from `{1}`.".format(nreads, mapped))
pf = mapped.split(".")[0]
pf = pf.split("-")[0]
bespf = bes.split(".")[0]
reads = pf + ".expand.fastq"
first([str(nreads), mapped, "-o", reads])
# Perform mini-assembly
fastafile = reads.rsplit(".", 1)[0] + ".fasta"
qualfile = ""
if need_update(reads, fastafile):
fastafile, qualfile = fasta([reads])
contigs = op.join(pf, "454LargeContigs.fna")
if need_update(fastafile, contigs):
cmd = "runAssembly -o {0} -cpu 8 {1}".format(pf, fastafile)
sh(cmd)
assert op.exists(contigs)
# Annotate contigs
blastfile = blast([bes, contigs])
mapping = {}
for query, b in Blast(blastfile).iter_best_hit():
mapping[query] = b
f = Fasta(contigs, lazy=True)
annotatedfasta = ".".join((pf, bespf, "fasta"))
fw = open(annotatedfasta, "w")
keys = list(Fasta(bes).iterkeys_ordered()) # keep an ordered list
recs = []
for key, v in f.iteritems_ordered():
vid = v.id
if vid not in mapping:
continue
b = mapping[vid]
subject = b.subject
rec = v.reverse_complement() if b.orientation == '-' else v
rec.id = rid = "_".join((pf, vid, subject))
rec.description = ""
recs.append((keys.index(subject), rid, rec))
recs = [x[-1] for x in sorted(recs)]
SeqIO.write(recs, fw, "fasta")
fw.close()
FileShredder([samfile, logfile, mapped, reads, fastafile, qualfile, blastfile, pf])
logging.debug("Annotated seqs (n={0}) written to `{1}`.".\
format(len(recs), annotatedfasta))
return annotatedfasta | %prog expand bes.fasta reads.fastq
Expand sequences using short reads. Useful, for example for getting BAC-end
sequences. The template to use, in `bes.fasta` may just contain the junction
sequences, then align the reads to get the 'flanks' for such sequences. | Below is the the instruction that describes the task:
### Input:
%prog expand bes.fasta reads.fastq
Expand sequences using short reads. Useful, for example for getting BAC-end
sequences. The template to use, in `bes.fasta` may just contain the junction
sequences, then align the reads to get the 'flanks' for such sequences.
### Response:
def expand(args):
"""
%prog expand bes.fasta reads.fastq
Expand sequences using short reads. Useful, for example for getting BAC-end
sequences. The template to use, in `bes.fasta` may just contain the junction
sequences, then align the reads to get the 'flanks' for such sequences.
"""
import math
from jcvi.formats.fasta import Fasta, SeqIO
from jcvi.formats.fastq import readlen, first, fasta
from jcvi.formats.blast import Blast
from jcvi.formats.base import FileShredder
from jcvi.apps.bowtie import align, get_samfile
from jcvi.apps.align import blast
p = OptionParser(expand.__doc__)
p.set_depth(depth=200)
p.set_firstN()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bes, reads = args
size = Fasta(bes).totalsize
rl = readlen([reads])
expected_size = size + 2 * rl
nreads = expected_size * opts.depth / rl
nreads = int(math.ceil(nreads / 1000.)) * 1000
# Attract reads
samfile, logfile = align([bes, reads, "--reorder", "--mapped",
"--firstN={0}".format(opts.firstN)])
samfile, mapped, _ = get_samfile(reads, bes, bowtie=True, mapped=True)
logging.debug("Extract first {0} reads from `{1}`.".format(nreads, mapped))
pf = mapped.split(".")[0]
pf = pf.split("-")[0]
bespf = bes.split(".")[0]
reads = pf + ".expand.fastq"
first([str(nreads), mapped, "-o", reads])
# Perform mini-assembly
fastafile = reads.rsplit(".", 1)[0] + ".fasta"
qualfile = ""
if need_update(reads, fastafile):
fastafile, qualfile = fasta([reads])
contigs = op.join(pf, "454LargeContigs.fna")
if need_update(fastafile, contigs):
cmd = "runAssembly -o {0} -cpu 8 {1}".format(pf, fastafile)
sh(cmd)
assert op.exists(contigs)
# Annotate contigs
blastfile = blast([bes, contigs])
mapping = {}
for query, b in Blast(blastfile).iter_best_hit():
mapping[query] = b
f = Fasta(contigs, lazy=True)
annotatedfasta = ".".join((pf, bespf, "fasta"))
fw = open(annotatedfasta, "w")
keys = list(Fasta(bes).iterkeys_ordered()) # keep an ordered list
recs = []
for key, v in f.iteritems_ordered():
vid = v.id
if vid not in mapping:
continue
b = mapping[vid]
subject = b.subject
rec = v.reverse_complement() if b.orientation == '-' else v
rec.id = rid = "_".join((pf, vid, subject))
rec.description = ""
recs.append((keys.index(subject), rid, rec))
recs = [x[-1] for x in sorted(recs)]
SeqIO.write(recs, fw, "fasta")
fw.close()
FileShredder([samfile, logfile, mapped, reads, fastafile, qualfile, blastfile, pf])
logging.debug("Annotated seqs (n={0}) written to `{1}`.".\
format(len(recs), annotatedfasta))
return annotatedfasta |
def sha1(self):
"""SHA1 hash of the config file itself."""
with open(self.path, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest() | SHA1 hash of the config file itself. | Below is the the instruction that describes the task:
### Input:
SHA1 hash of the config file itself.
### Response:
def sha1(self):
"""SHA1 hash of the config file itself."""
with open(self.path, 'rb') as f:
return hashlib.sha1(f.read()).hexdigest() |
def set_management_icmp(enabled=True, deploy=False):
'''
Enables or disables the ICMP management service on the device.
CLI Example:
Args:
enabled (bool): If true the service will be enabled. If false the service will be disabled.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_management_icmp
salt '*' panos.set_management_icmp enabled=False deploy=True
'''
if enabled is True:
value = "no"
elif enabled is False:
value = "yes"
else:
raise CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
query = {'type': 'config',
'action': 'set',
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
'element': '<disable-icmp>{0}</disable-icmp>'.format(value)}
ret.update(__proxy__['panos.call'](query))
if deploy is True:
ret.update(commit())
return ret | Enables or disables the ICMP management service on the device.
CLI Example:
Args:
enabled (bool): If true the service will be enabled. If false the service will be disabled.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_management_icmp
salt '*' panos.set_management_icmp enabled=False deploy=True | Below is the the instruction that describes the task:
### Input:
Enables or disables the ICMP management service on the device.
CLI Example:
Args:
enabled (bool): If true the service will be enabled. If false the service will be disabled.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_management_icmp
salt '*' panos.set_management_icmp enabled=False deploy=True
### Response:
def set_management_icmp(enabled=True, deploy=False):
'''
Enables or disables the ICMP management service on the device.
CLI Example:
Args:
enabled (bool): If true the service will be enabled. If false the service will be disabled.
deploy (bool): If true then commit the full candidate configuration, if false only set pending change.
.. code-block:: bash
salt '*' panos.set_management_icmp
salt '*' panos.set_management_icmp enabled=False deploy=True
'''
if enabled is True:
value = "no"
elif enabled is False:
value = "yes"
else:
raise CommandExecutionError("Invalid option provided for service enabled option.")
ret = {}
query = {'type': 'config',
'action': 'set',
'xpath': '/config/devices/entry[@name=\'localhost.localdomain\']/deviceconfig/system/service',
'element': '<disable-icmp>{0}</disable-icmp>'.format(value)}
ret.update(__proxy__['panos.call'](query))
if deploy is True:
ret.update(commit())
return ret |
def tacacs_server_host_key(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
key = ET.SubElement(host, "key")
key.text = kwargs.pop('key')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def tacacs_server_host_key(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
tacacs_server = ET.SubElement(config, "tacacs-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(tacacs_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
key = ET.SubElement(host, "key")
key.text = kwargs.pop('key')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def supprime(cls,table, **kwargs):
""" Remove entries matchin given condition
kwargs is a dict of column name : value , with length ONE.
"""
assert len(kwargs) == 1
field, value = kwargs.popitem()
req = f"""DELETE FROM {table} WHERE {field} = """ + cls.mark_style
args = (value,)
return MonoExecutant((req, args)) | Remove entries matchin given condition
kwargs is a dict of column name : value , with length ONE. | Below is the the instruction that describes the task:
### Input:
Remove entries matchin given condition
kwargs is a dict of column name : value , with length ONE.
### Response:
def supprime(cls,table, **kwargs):
""" Remove entries matchin given condition
kwargs is a dict of column name : value , with length ONE.
"""
assert len(kwargs) == 1
field, value = kwargs.popitem()
req = f"""DELETE FROM {table} WHERE {field} = """ + cls.mark_style
args = (value,)
return MonoExecutant((req, args)) |
def _set_limit(self, v, load=False):
"""
Setter method for limit, mapped from YANG variable /hardware/profile/tcam/limit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_limit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_limit() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=limit.limit, is_container='container', presence=False, yang_name="limit", rest_name="limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set upper limit to applications on TCAM entry usage', u'display-when': u"../predefined/tcam_profiletype = 'npb-optimised-1'", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """limit must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=limit.limit, is_container='container', presence=False, yang_name="limit", rest_name="limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set upper limit to applications on TCAM entry usage', u'display-when': u"../predefined/tcam_profiletype = 'npb-optimised-1'", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""",
})
self.__limit = t
if hasattr(self, '_set'):
self._set() | Setter method for limit, mapped from YANG variable /hardware/profile/tcam/limit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_limit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_limit() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for limit, mapped from YANG variable /hardware/profile/tcam/limit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_limit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_limit() directly.
### Response:
def _set_limit(self, v, load=False):
    """
    Setter method for limit, mapped from YANG variable /hardware/profile/tcam/limit (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_limit is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_limit() directly.
    """
    # Unwrap values that carry their underlying YANG type wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the supplied value into the generated container class;
        # YANGDynClass validates it against the schema metadata below.
        t = YANGDynClass(v,base=limit.limit, is_container='container', presence=False, yang_name="limit", rest_name="limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set upper limit to applications on TCAM entry usage', u'display-when': u"../predefined/tcam_profiletype = 'npb-optimised-1'", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured payload describing the expected type.
        raise ValueError({
            'error-string': """limit must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=limit.limit, is_container='container', presence=False, yang_name="limit", rest_name="limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set upper limit to applications on TCAM entry usage', u'display-when': u"../predefined/tcam_profiletype = 'npb-optimised-1'", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""",
        })
    self.__limit = t
    # Notify the parent object (if any) that a child value changed.
    if hasattr(self, '_set'):
        self._set()
def skip(self, other):
'''(<<) Ends with a specified parser, and at the end parser consumed the
end flag.'''
@Parser
def ends_with_parser(text, index):
res = self(text, index)
if not res.status:
return res
end = other(text, res.index)
if end.status:
return Value.success(end.index, res.value)
else:
return Value.failure(end.index, 'ends with {}'.format(end.expected))
return ends_with_parser | (<<) Ends with a specified parser, and at the end parser consumed the
end flag. | Below is the the instruction that describes the task:
### Input:
(<<) Ends with a specified parser, and at the end parser consumed the
end flag.
### Response:
def skip(self, other):
    '''(<<) Require that *other* matches right after this parser; the end
    flag is consumed but only this parser's value is kept.'''
    @Parser
    def ends_with_parser(text, index):
        body = self(text, index)
        if not body.status:
            return body
        terminator = other(text, body.index)
        if not terminator.status:
            return Value.failure(terminator.index,
                                 'ends with {}'.format(terminator.expected))
        return Value.success(terminator.index, body.value)
    return ends_with_parser
def chunk_by(n, iterable, fillvalue=None):
"""
Iterate over a given ``iterable`` by ``n`` elements at a time.
>>> for x, y in chunk_by(2, [1, 2, 3, 4, 5]):
... # iteration no 1: x=1, y=2
... # iteration no 2: x=3, y=4
... # iteration no 3: x=5, y=None
:param n: (int) a chunk size number
:param iterable: (iterator) an input iterator
:param fillvalue: (any) a value to be used to fit chunk size if there
not enough values in input iterator
:returns: (iterator) an output iterator that iterates over the input
one by chunks of size ``n``
"""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue) | Iterate over a given ``iterable`` by ``n`` elements at a time.
>>> for x, y in chunk_by(2, [1, 2, 3, 4, 5]):
... # iteration no 1: x=1, y=2
... # iteration no 2: x=3, y=4
... # iteration no 3: x=5, y=None
:param n: (int) a chunk size number
:param iterable: (iterator) an input iterator
:param fillvalue: (any) a value to be used to fit chunk size if there
not enough values in input iterator
:returns: (iterator) an output iterator that iterates over the input
one by chunks of size ``n`` | Below is the the instruction that describes the task:
### Input:
Iterate over a given ``iterable`` by ``n`` elements at a time.
>>> for x, y in chunk_by(2, [1, 2, 3, 4, 5]):
... # iteration no 1: x=1, y=2
... # iteration no 2: x=3, y=4
... # iteration no 3: x=5, y=None
:param n: (int) a chunk size number
:param iterable: (iterator) an input iterator
:param fillvalue: (any) a value to be used to fit chunk size if there
not enough values in input iterator
:returns: (iterator) an output iterator that iterates over the input
one by chunks of size ``n``
### Response:
def chunk_by(n, iterable, fillvalue=None):
    """
    Iterate over ``iterable`` in chunks of ``n`` elements.

    >>> for x, y in chunk_by(2, [1, 2, 3, 4, 5]):
    ...     # yields (1, 2), then (3, 4), then (5, None)

    :param n: (int) chunk size
    :param iterable: (iterator) input iterator
    :param fillvalue: (any) value used to pad the last chunk when the input
        does not divide evenly by ``n``
    :returns: (iterator) iterator over ``n``-tuples drawn from the input
    """
    # n references to the *same* iterator: zip_longest pulls n items per step.
    source = iter(iterable)
    return zip_longest(*((source,) * n), fillvalue=fillvalue)
def gettrans(t):
"""
Return a trans object
Parameters
----------
t : str | callable | type | trans
name of transformation function
Returns
-------
out : trans
"""
obj = t
# Make sure trans object is instantiated
if isinstance(obj, str):
name = '{}_trans'.format(obj)
obj = globals()[name]()
if callable(obj):
obj = obj()
if isinstance(obj, type):
obj = obj()
if not isinstance(obj, trans):
raise ValueError("Could not get transform object.")
return obj | Return a trans object
Parameters
----------
t : str | callable | type | trans
name of transformation function
Returns
-------
out : trans | Below is the the instruction that describes the task:
### Input:
Return a trans object
Parameters
----------
t : str | callable | type | trans
name of transformation function
Returns
-------
out : trans
### Response:
def gettrans(t):
    """
    Return a trans object

    Parameters
    ----------
    t : str | callable | type | trans
        name of transformation function

    Returns
    -------
    out : trans
    """
    result = t
    # A string names a '<name>_trans' class in this module.
    if isinstance(result, str):
        result = globals()['{}_trans'.format(result)]()
    # Factories and classes are instantiated until we hold an instance.
    if callable(result):
        result = result()
    if isinstance(result, type):
        result = result()
    if not isinstance(result, trans):
        raise ValueError("Could not get transform object.")
    return result
def auto(name):
'''
.. versionadded:: 0.17.0
Instruct alternatives to use the highest priority
path for <name>
name
is the master name for this link group
(e.g. pager)
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}}
display = __salt__['alternatives.display'](name)
line = display.splitlines()[0]
if line.endswith(' auto mode'):
ret['comment'] = '{0} already in auto mode'.format(name)
return ret
if __opts__['test']:
ret['comment'] = '{0} will be put in auto mode'.format(name)
ret['result'] = None
return ret
ret['changes']['result'] = __salt__['alternatives.auto'](name)
return ret | .. versionadded:: 0.17.0
Instruct alternatives to use the highest priority
path for <name>
name
is the master name for this link group
(e.g. pager) | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 0.17.0
Instruct alternatives to use the highest priority
path for <name>
name
is the master name for this link group
(e.g. pager)
### Response:
def auto(name):
    '''
    .. versionadded:: 0.17.0
    Instruct alternatives to use the highest priority
    path for <name>
    name
        is the master name for this link group
        (e.g. pager)
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # First line of `alternatives --display` states the current mode.
    first_line = __salt__['alternatives.display'](name).splitlines()[0]
    if first_line.endswith(' auto mode'):
        ret['comment'] = '{0} already in auto mode'.format(name)
    elif __opts__['test']:
        # Dry run: report the pending change without applying it.
        ret['comment'] = '{0} will be put in auto mode'.format(name)
        ret['result'] = None
    else:
        ret['changes']['result'] = __salt__['alternatives.auto'](name)
    return ret
def download_file(self, fname, output_dir):
"""Downloads competition file to output_dir."""
if fname not in self.competition_files: # pylint: disable=unsupported-membership-test
raise ValueError("%s is not one of the competition's "
"files: %s" % (fname, self.competition_files))
command = [
"kaggle",
"competitions",
"download",
"--file",
fname,
"--path",
output_dir,
"-c",
self._competition_name,
]
_run_kaggle_command(command, self._competition_name)
return os.path.join(output_dir, fname) | Downloads competition file to output_dir. | Below is the the instruction that describes the task:
### Input:
Downloads competition file to output_dir.
### Response:
def download_file(self, fname, output_dir):
    """Downloads competition file to output_dir.

    Raises ValueError when *fname* is not one of the competition's files;
    returns the local path of the downloaded file.
    """
    if fname not in self.competition_files:  # pylint: disable=unsupported-membership-test
        raise ValueError("%s is not one of the competition's "
                         "files: %s" % (fname, self.competition_files))
    # Shell out to the official kaggle CLI to fetch the single file.
    command = [
        "kaggle", "competitions", "download",
        "--file", fname,
        "--path", output_dir,
        "-c", self._competition_name,
    ]
    _run_kaggle_command(command, self._competition_name)
    return os.path.join(output_dir, fname)
async def get_entity_by_id(self, get_entity_by_id_request):
"""Return one or more user entities.
Searching by phone number only finds entities when their phone number
is in your contacts (and not always even then), and can't be used to
find Google Voice contacts.
"""
response = hangouts_pb2.GetEntityByIdResponse()
await self._pb_request('contacts/getentitybyid',
get_entity_by_id_request, response)
return response | Return one or more user entities.
Searching by phone number only finds entities when their phone number
is in your contacts (and not always even then), and can't be used to
find Google Voice contacts. | Below is the the instruction that describes the task:
### Input:
Return one or more user entities.
Searching by phone number only finds entities when their phone number
is in your contacts (and not always even then), and can't be used to
find Google Voice contacts.
### Response:
async def get_entity_by_id(self, get_entity_by_id_request):
    """Return one or more user entities.

    Searching by phone number only finds entities when their phone number
    is in your contacts (and not always even then), and can't be used to
    find Google Voice contacts.
    """
    result = hangouts_pb2.GetEntityByIdResponse()
    # The API helper fills `result` in place from the server response.
    await self._pb_request(
        'contacts/getentitybyid', get_entity_by_id_request, result)
    return result
def run_play(play_source, inventory_path=None, roles=None,
extra_vars=None, on_error_continue=False):
"""Run a play.
Args:
pattern_hosts (str): pattern to describe ansible hosts to target.
see https://docs.ansible.com/ansible/latest/intro_patterns.html
play_source (dict): ansible task
inventory_path (str): inventory to use
extra_vars (dict): extra_vars to use
on_error_continue(bool): Don't throw any exception in case a host is
unreachable or the playbooks run with errors
Raises:
:py:class:`enoslib.errors.EnosFailedHostsError`: if a task returns an
error on a host and ``on_error_continue==False``
:py:class:`enoslib.errors.EnosUnreachableHostsError`: if a host is
unreachable (through ssh) and ``on_error_continue==False``
Returns:
List of all the results
"""
# NOTE(msimonin): inventory could be infered from a host list (maybe)
results = []
inventory, variable_manager, loader, options = _load_defaults(
inventory_path=inventory_path,
roles=roles,
extra_vars=extra_vars)
callback = _MyCallback(results)
passwords = {}
tqm = task_queue_manager.TaskQueueManager(
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
stdout_callback=callback)
# create play
play_inst = play.Play().load(play_source,
variable_manager=variable_manager,
loader=loader)
# actually run it
try:
tqm.run(play_inst)
finally:
tqm.cleanup()
# Handling errors
failed_hosts = []
unreachable_hosts = []
for r in results:
if r.status == STATUS_UNREACHABLE:
unreachable_hosts.append(r)
if r.status == STATUS_FAILED:
failed_hosts.append(r)
if len(failed_hosts) > 0:
logger.error("Failed hosts: %s" % failed_hosts)
if not on_error_continue:
raise EnosFailedHostsError(failed_hosts)
if len(unreachable_hosts) > 0:
logger.error("Unreachable hosts: %s" % unreachable_hosts)
if not on_error_continue:
raise EnosUnreachableHostsError(unreachable_hosts)
return results | Run a play.
Args:
pattern_hosts (str): pattern to describe ansible hosts to target.
see https://docs.ansible.com/ansible/latest/intro_patterns.html
play_source (dict): ansible task
inventory_path (str): inventory to use
extra_vars (dict): extra_vars to use
on_error_continue(bool): Don't throw any exception in case a host is
unreachable or the playbooks run with errors
Raises:
:py:class:`enoslib.errors.EnosFailedHostsError`: if a task returns an
error on a host and ``on_error_continue==False``
:py:class:`enoslib.errors.EnosUnreachableHostsError`: if a host is
unreachable (through ssh) and ``on_error_continue==False``
Returns:
List of all the results | Below is the the instruction that describes the task:
### Input:
Run a play.
Args:
pattern_hosts (str): pattern to describe ansible hosts to target.
see https://docs.ansible.com/ansible/latest/intro_patterns.html
play_source (dict): ansible task
inventory_path (str): inventory to use
extra_vars (dict): extra_vars to use
on_error_continue(bool): Don't throw any exception in case a host is
unreachable or the playbooks run with errors
Raises:
:py:class:`enoslib.errors.EnosFailedHostsError`: if a task returns an
error on a host and ``on_error_continue==False``
:py:class:`enoslib.errors.EnosUnreachableHostsError`: if a host is
unreachable (through ssh) and ``on_error_continue==False``
Returns:
List of all the results
### Response:
def run_play(play_source, inventory_path=None, roles=None,
             extra_vars=None, on_error_continue=False):
    """Run a play.

    Args:
        play_source (dict): ansible play to run (a play "source" dict, as
            loaded by :py:class:`ansible.playbook.play.Play`)
        inventory_path (str): inventory to use
        roles (dict): roles to build an in-memory inventory from when no
            inventory file is given
        extra_vars (dict): extra_vars to use
        on_error_continue(bool): Don't throw any exception in case a host is
            unreachable or the playbooks run with errors

    Raises:
        :py:class:`enoslib.errors.EnosFailedHostsError`: if a task returns an
            error on a host and ``on_error_continue==False``
        :py:class:`enoslib.errors.EnosUnreachableHostsError`: if a host is
            unreachable (through ssh) and ``on_error_continue==False``

    Returns:
        List of all the results
    """
    # NOTE(msimonin): inventory could be infered from a host list (maybe)
    results = []
    # Build ansible's inventory/variable-manager/loader/options from either
    # the inventory file or the in-memory roles.
    inventory, variable_manager, loader, options = _load_defaults(
        inventory_path=inventory_path,
        roles=roles,
        extra_vars=extra_vars)
    # Custom callback plugin that appends one record per task result.
    callback = _MyCallback(results)
    passwords = {}
    tqm = task_queue_manager.TaskQueueManager(
        inventory=inventory,
        variable_manager=variable_manager,
        loader=loader,
        options=options,
        passwords=passwords,
        stdout_callback=callback)
    # create play
    play_inst = play.Play().load(play_source,
                                 variable_manager=variable_manager,
                                 loader=loader)
    # actually run it
    try:
        tqm.run(play_inst)
    finally:
        # Always release the task queue's workers, even if the run raised.
        tqm.cleanup()
    # Handling errors: partition results by status after the full run.
    failed_hosts = []
    unreachable_hosts = []
    for r in results:
        if r.status == STATUS_UNREACHABLE:
            unreachable_hosts.append(r)
        if r.status == STATUS_FAILED:
            failed_hosts.append(r)
    if len(failed_hosts) > 0:
        logger.error("Failed hosts: %s" % failed_hosts)
        if not on_error_continue:
            raise EnosFailedHostsError(failed_hosts)
    if len(unreachable_hosts) > 0:
        logger.error("Unreachable hosts: %s" % unreachable_hosts)
        if not on_error_continue:
            raise EnosUnreachableHostsError(unreachable_hosts)
    return results
def d_cal(calibcurve, rcmean, w2, cutoff=0.0001, normal_distr=False, t_a=3, t_b=4):
"""Get calendar date probabilities
Parameters
----------
calibcurve : CalibCurve
Calibration curve.
rcmean : scalar
Reservoir-adjusted age.
w2 : scalar
r'$w^2_j(\theta)$' from pg 461 & 463 of Blaauw and Christen 2011.
cutoff : scalar, optional
Unknown.
normal_distr : Bool, optional
Use normal distribution for date errors. If False, then use Student's t-distribution.
t_a : scalar, optional
Student's t-distribution parameter, a. t_b - 1 must equal t_b.
t_b : scalar, optional
Student's t-distribution parameter, b. t_b - 1 must equal t_b.
#Line 943 of Bacon.R
#cc : calib_curve (3-col format)
#rcmean : det['age'][i] - d_R
#w2 : dat['error'][i]^2 + d_STD**2
"""
assert t_b - 1 == t_a
if normal_distr:
# TODO(brews): Test this. Line 946 of Bacon.R.
std = np.sqrt(calibcurve.error ** 2 + w2)
dens = stats.norm(loc=rcmean, scale=std).pdf(calibcurve.c14age)
else:
# TODO(brews): Test this. Line 947 of Bacon.R.
dens = (t_b + ((rcmean - calibcurve.c14age) ** 2) / (2 * (calibcurve.error ** 2 + w2))) ** (-1 * (t_a + 0.5))
cal = np.array([calibcurve.calbp.copy(), dens]).T
cal[:, 1] = cal[:, 1] / cal[:, 1].sum()
# "ensure that also very precise dates get a range of probabilities"
cutoff_mask = cal[:, 1] > cutoff
if cutoff_mask.sum() > 5:
out = cal[cutoff_mask, :]
else:
calx = np.linspace(cal[:, 0].min(), cal[:, 0].max(), num=50)
caly = np.interp(calx, cal[:, 0], cal[:, 1])
out = np.array([calx, caly / caly.sum()]).T
return out | Get calendar date probabilities
Parameters
----------
calibcurve : CalibCurve
Calibration curve.
rcmean : scalar
Reservoir-adjusted age.
w2 : scalar
r'$w^2_j(\theta)$' from pg 461 & 463 of Blaauw and Christen 2011.
cutoff : scalar, optional
Unknown.
normal_distr : Bool, optional
Use normal distribution for date errors. If False, then use Student's t-distribution.
t_a : scalar, optional
Student's t-distribution parameter, a. t_b - 1 must equal t_b.
t_b : scalar, optional
Student's t-distribution parameter, b. t_b - 1 must equal t_b.
#Line 943 of Bacon.R
#cc : calib_curve (3-col format)
#rcmean : det['age'][i] - d_R
#w2 : dat['error'][i]^2 + d_STD**2 | Below is the the instruction that describes the task:
### Input:
Get calendar date probabilities
Parameters
----------
calibcurve : CalibCurve
Calibration curve.
rcmean : scalar
Reservoir-adjusted age.
w2 : scalar
r'$w^2_j(\theta)$' from pg 461 & 463 of Blaauw and Christen 2011.
cutoff : scalar, optional
Unknown.
normal_distr : Bool, optional
Use normal distribution for date errors. If False, then use Student's t-distribution.
t_a : scalar, optional
Student's t-distribution parameter, a. t_b - 1 must equal t_b.
t_b : scalar, optional
Student's t-distribution parameter, b. t_b - 1 must equal t_b.
#Line 943 of Bacon.R
#cc : calib_curve (3-col format)
#rcmean : det['age'][i] - d_R
#w2 : dat['error'][i]^2 + d_STD**2
### Response:
def d_cal(calibcurve, rcmean, w2, cutoff=0.0001, normal_distr=False, t_a=3, t_b=4):
    r"""Get calendar date probabilities

    Parameters
    ----------
    calibcurve : CalibCurve
        Calibration curve.
    rcmean : scalar
        Reservoir-adjusted age.
    w2 : scalar
        $w^2_j(\theta)$ from pg 461 & 463 of Blaauw and Christen 2011.
    cutoff : scalar, optional
        Minimum normalized probability a calendar year must exceed to be
        kept in the output.
    normal_distr : Bool, optional
        Use normal distribution for date errors. If False, then use Student's t-distribution.
    t_a : scalar, optional
        Student's t-distribution parameter, a. Must equal t_b - 1.
    t_b : scalar, optional
        Student's t-distribution parameter, b. Must equal t_a + 1.

    #Line 943 of Bacon.R
    #cc : calib_curve (3-col format)
    #rcmean : det['age'][i] - d_R
    #w2 : dat['error'][i]^2 + d_STD**2
    """
    assert t_b - 1 == t_a
    if normal_distr:
        # TODO(brews): Test this. Line 946 of Bacon.R.
        std = np.sqrt(calibcurve.error ** 2 + w2)
        dens = stats.norm(loc=rcmean, scale=std).pdf(calibcurve.c14age)
    else:
        # TODO(brews): Test this. Line 947 of Bacon.R.
        dens = (t_b + ((rcmean - calibcurve.c14age) ** 2) / (2 * (calibcurve.error ** 2 + w2))) ** (-1 * (t_a + 0.5))
    # Two columns: calendar age (BP) and its (unnormalized) density.
    cal = np.array([calibcurve.calbp.copy(), dens]).T
    # Normalize densities to a probability mass over the curve.
    cal[:, 1] = cal[:, 1] / cal[:, 1].sum()
    # "ensure that also very precise dates get a range of probabilities"
    cutoff_mask = cal[:, 1] > cutoff
    if cutoff_mask.sum() > 5:
        out = cal[cutoff_mask, :]
    else:
        # Too few points above the cutoff: interpolate onto a 50-point grid
        # spanning the calendar range, then renormalize.
        calx = np.linspace(cal[:, 0].min(), cal[:, 0].max(), num=50)
        caly = np.interp(calx, cal[:, 0], cal[:, 1])
        out = np.array([calx, caly / caly.sum()]).T
    return out
def check_basic_battery_status(the_session, the_helper, the_snmp_value):
"""
OID .1.3.6.1.4.1.318.1.1.1.2.1.1.0
MIB Excerpt
The status of the UPS batteries. A batteryLow(3)
value indicates the UPS will be unable to sustain the
current load, and its services will be lost if power is
not restored. The amount of run time in reserve at the
time of low battery can be configured by the
upsAdvConfigLowBatteryRunTime.
Value List
unknown (1)
batteryNormal (2)
batteryLow (3)
batteryInFaultCondition (4)
"""
apc_battery_states = {
'1' : 'unknown',
'2' : 'batteryNormal',
'3' : 'batteryLow',
'4' : 'batteryInFaultCondition'
}
a_state = apc_battery_states.get(the_snmp_value, 'unknown')
if the_snmp_value == '2':
the_helper.add_status(pynag.Plugins.ok)
elif the_snmp_value == '3':
the_helper.add_status(pynag.Plugins.warning)
else:
the_helper.add_status(pynag.Plugins.critical)
the_helper.set_summary("UPS batteries state is {}".format(a_state)) | OID .1.3.6.1.4.1.318.1.1.1.2.1.1.0
MIB Excerpt
The status of the UPS batteries. A batteryLow(3)
value indicates the UPS will be unable to sustain the
current load, and its services will be lost if power is
not restored. The amount of run time in reserve at the
time of low battery can be configured by the
upsAdvConfigLowBatteryRunTime.
Value List
unknown (1)
batteryNormal (2)
batteryLow (3)
batteryInFaultCondition (4) | Below is the the instruction that describes the task:
### Input:
OID .1.3.6.1.4.1.318.1.1.1.2.1.1.0
MIB Excerpt
The status of the UPS batteries. A batteryLow(3)
value indicates the UPS will be unable to sustain the
current load, and its services will be lost if power is
not restored. The amount of run time in reserve at the
time of low battery can be configured by the
upsAdvConfigLowBatteryRunTime.
Value List
unknown (1)
batteryNormal (2)
batteryLow (3)
batteryInFaultCondition (4)
### Response:
def check_basic_battery_status(the_session, the_helper, the_snmp_value):
    """
    OID .1.3.6.1.4.1.318.1.1.1.2.1.1.0

    Map the APC UPS battery status value onto a Nagios state:
    batteryNormal (2) -> ok, batteryLow (3) -> warning, anything else
    (unknown (1), batteryInFaultCondition (4), unrecognized) -> critical.
    """
    state_names = {
        '1': 'unknown',
        '2': 'batteryNormal',
        '3': 'batteryLow',
        '4': 'batteryInFaultCondition',
    }
    state_label = state_names.get(the_snmp_value, 'unknown')
    if the_snmp_value == '2':
        the_helper.add_status(pynag.Plugins.ok)
    elif the_snmp_value == '3':
        the_helper.add_status(pynag.Plugins.warning)
    else:
        the_helper.add_status(pynag.Plugins.critical)
    the_helper.set_summary("UPS batteries state is {}".format(state_label))
the_helper.set_summary("UPS batteries state is {}".format(a_state)) |
def parse(cls, content, is_pyproject=False):
"""
A convenience method for parsing a TOML-serialized configuration.
:param content: a TOML string containing a TidyPy configuration
:type content: str
:param is_pyproject:
whether or not the content is (or resembles) a ``pyproject.toml``
file, where the TidyPy configuration is located within a key named
``tool``.
:type is_pyproject: bool
:rtype: dict
"""
parsed = pytoml.loads(content)
if is_pyproject:
parsed = parsed.get('tool', {})
parsed = parsed.get('tidypy', {})
return parsed | A convenience method for parsing a TOML-serialized configuration.
:param content: a TOML string containing a TidyPy configuration
:type content: str
:param is_pyproject:
whether or not the content is (or resembles) a ``pyproject.toml``
file, where the TidyPy configuration is located within a key named
``tool``.
:type is_pyproject: bool
:rtype: dict | Below is the the instruction that describes the task:
### Input:
A convenience method for parsing a TOML-serialized configuration.
:param content: a TOML string containing a TidyPy configuration
:type content: str
:param is_pyproject:
whether or not the content is (or resembles) a ``pyproject.toml``
file, where the TidyPy configuration is located within a key named
``tool``.
:type is_pyproject: bool
:rtype: dict
### Response:
def parse(cls, content, is_pyproject=False):
    """
    A convenience method for parsing a TOML-serialized configuration.

    :param content: a TOML string containing a TidyPy configuration
    :type content: str
    :param is_pyproject:
        whether or not the content is (or resembles) a ``pyproject.toml``
        file, where the TidyPy configuration is located within a key named
        ``tool``.
    :type is_pyproject: bool
    :rtype: dict
    """
    # pytoml returns the whole TOML document as a plain dict.
    parsed = pytoml.loads(content)
    if is_pyproject:
        # pyproject.toml nests tool configuration under [tool.*].
        parsed = parsed.get('tool', {})
        parsed = parsed.get('tidypy', {})
    return parsed
def get_all_matches(self, partial_selector):
"""Returns all values matching `partial_selector` as a list."""
matching_selectors = self.matching_selectors(partial_selector)
return [self._selector_map[selector] for selector in matching_selectors] | Returns all values matching `partial_selector` as a list. | Below is the the instruction that describes the task:
### Input:
Returns all values matching `partial_selector` as a list.
### Response:
def get_all_matches(self, partial_selector):
    """Returns all values matching `partial_selector` as a list."""
    return [self._selector_map[selector]
            for selector in self.matching_selectors(partial_selector)]
def train(dataset, nef, ndf, ngf, nc, batch_size, Z, lr, beta1, epsilon, ctx, check_point, g_dl_weight, output_path, checkpoint_path, data_path, activation,num_epoch, save_after_every, visualize_after_every, show_after_every):
'''adversarial training of the VAE
'''
#encoder
z_mu, z_lv, z = encoder(nef, Z, batch_size)
symE = mx.sym.Group([z_mu, z_lv, z])
#generator
symG = generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim = Z, activation=activation )
#discriminator
h = discriminator1(ndf)
dloss = discriminator2(ndf)
symD1 = h
symD2 = dloss
# ==============data==============
X_train, _ = get_data(data_path, activation)
train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size, shuffle=True)
rand_iter = RandIter(batch_size, Z)
label = mx.nd.zeros((batch_size,), ctx=ctx)
# =============module E=============
modE = mx.mod.Module(symbol=symE, data_names=('data',), label_names=None, context=ctx)
modE.bind(data_shapes=train_iter.provide_data)
modE.init_params(initializer=mx.init.Normal(0.02))
modE.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-6,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods = [modE]
# =============module G=============
modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
modG.bind(data_shapes=rand_iter.provide_data, inputs_need_grad=True)
modG.init_params(initializer=mx.init.Normal(0.02))
modG.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-6,
'beta1': beta1,
'epsilon': epsilon,
})
mods.append(modG)
# =============module D=============
modD1 = mx.mod.Module(symD1, label_names=[], context=ctx)
modD2 = mx.mod.Module(symD2, label_names=('label',), context=ctx)
modD = mx.mod.SequentialModule()
modD.add(modD1).add(modD2, take_labels=True, auto_wiring=True)
modD.bind(data_shapes=train_iter.provide_data,
label_shapes=[('label', (batch_size,))],
inputs_need_grad=True)
modD.init_params(initializer=mx.init.Normal(0.02))
modD.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 1e-3,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods.append(modD)
# =============module DL=============
symDL = DiscriminatorLayerLoss()
modDL = mx.mod.Module(symbol=symDL, data_names=('data',), label_names=('label',), context=ctx)
modDL.bind(data_shapes=[('data', (batch_size,nef * 4,4,4))], ################################################################################################################################ fix 512 here
label_shapes=[('label', (batch_size,nef * 4,4,4))],
inputs_need_grad=True)
modDL.init_params(initializer=mx.init.Normal(0.02))
modDL.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
# =============module KL=============
symKL = KLDivergenceLoss()
modKL = mx.mod.Module(symbol=symKL, data_names=('data',), label_names=None, context=ctx)
modKL.bind(data_shapes=[('data', (batch_size*2,Z))],
inputs_need_grad=True)
modKL.init_params(initializer=mx.init.Normal(0.02))
modKL.init_optimizer(
optimizer='adam',
optimizer_params={
'learning_rate': lr,
'wd': 0.,
'beta1': beta1,
'epsilon': epsilon,
'rescale_grad': (1.0/batch_size)
})
mods.append(modKL)
def norm_stat(d):
return mx.nd.norm(d)/np.sqrt(d.size)
mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
mon = None
if mon is not None:
for mod in mods:
pass
def facc(label, pred):
'''calculating prediction accuracy
'''
pred = pred.ravel()
label = label.ravel()
return ((pred > 0.5) == label).mean()
def fentropy(label, pred):
'''calculating binary cross-entropy loss
'''
pred = pred.ravel()
label = label.ravel()
return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()
def kldivergence(label, pred):
'''calculating KL divergence loss
'''
mean, log_var = np.split(pred, 2, axis=0)
var = np.exp(log_var)
KLLoss = -0.5 * np.sum(1 + log_var - np.power(mean, 2) - var)
KLLoss = KLLoss / nElements
return KLLoss
mG = mx.metric.CustomMetric(fentropy)
mD = mx.metric.CustomMetric(fentropy)
mE = mx.metric.CustomMetric(kldivergence)
mACC = mx.metric.CustomMetric(facc)
print('Training...')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
# =============train===============
for epoch in range(num_epoch):
train_iter.reset()
for t, batch in enumerate(train_iter):
rbatch = rand_iter.next()
if mon is not None:
mon.tic()
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
# update discriminator on fake
label[:] = 0
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
gradD11 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
gradD12 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
#update discriminator on decoded
modE.forward(batch, is_train=True)
mu, lv, z = modE.get_outputs()
z = z.reshape((batch_size, Z, 1, 1))
sample = mx.io.DataBatch([z], label=None, provide_data = [('rand', (batch_size, Z, 1, 1))])
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 0
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
#modD.update()
gradD21 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
gradD22 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
# update discriminator on real
label[:] = 1
batch.label = [label]
modD.forward(batch, is_train=True)
lx = [out.copyto(out.context) for out in modD1.get_outputs()]
modD.backward()
for gradsr, gradsf, gradsd in zip(modD1._exec_group.grad_arrays, gradD11, gradD21):
for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
gradr += 0.5 * (gradf + gradd)
for gradsr, gradsf, gradsd in zip(modD2._exec_group.grad_arrays, gradD12, gradD22):
for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
gradr += 0.5 * (gradf + gradd)
modD.update()
modD.update_metric(mD, [label])
modD.update_metric(mACC, [label])
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
grad = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
modG.update()
mG.update([label], modD.get_outputs())
modG.forward(rbatch, is_train=True)
outG = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
label[:] = 1
modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
modD.backward()
diffD = modD1.get_input_grads()
modG.backward(diffD)
gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
grad = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
modG.update()
mG.update([label], modD.get_outputs())
modG.forward(sample, is_train=True)
xz = modG.get_outputs()
#update generator
modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
outD1 = modD1.get_outputs()
modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
DLloss = modDL.get_outputs()
modDL.backward()
dlGrad = modDL.get_input_grads()
modD1.backward(dlGrad)
diffD = modD1.get_input_grads()
modG.backward(diffD)
#update encoder
nElements = batch_size
modKL.forward(mx.io.DataBatch([mx.ndarray.concat(mu,lv, dim=0)]), is_train=True)
KLloss = modKL.get_outputs()
modKL.backward()
gradKLLoss = modKL.get_input_grads()
diffG = modG.get_input_grads()
diffG = diffG[0].reshape((batch_size, Z))
modE.backward(mx.ndarray.split(gradKLLoss[0], num_outputs=2, axis=0) + [diffG])
modE.update()
pred = mx.ndarray.concat(mu,lv, dim=0)
mE.update([pred], [pred])
if mon is not None:
mon.toc_print()
t += 1
if t % show_after_every == 0:
print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get(), mE.get(), KLloss[0].asnumpy(), DLloss[0].asnumpy())
mACC.reset()
mG.reset()
mD.reset()
mE.reset()
if epoch % visualize_after_every == 0:
visual(output_path +'gout'+str(epoch), outG[0].asnumpy(), activation)
visual(output_path + 'data'+str(epoch), batch.data[0].asnumpy(), activation)
if check_point and epoch % save_after_every == 0:
print('Saving...')
modG.save_params(checkpoint_path + '/%s_G-%04d.params'%(dataset, epoch))
modD.save_params(checkpoint_path + '/%s_D-%04d.params'%(dataset, epoch))
modE.save_params(checkpoint_path + '/%s_E-%04d.params'%(dataset, epoch)) | adversarial training of the VAE | Below is the the instruction that describes the task:
### Input:
adversarial training of the VAE
### Response:
def train(dataset, nef, ndf, ngf, nc, batch_size, Z, lr, beta1, epsilon, ctx, check_point, g_dl_weight, output_path, checkpoint_path, data_path, activation,num_epoch, save_after_every, visualize_after_every, show_after_every):
    '''Adversarial training of the VAE/GAN.

    Builds the encoder (E), generator/decoder (G), a two-stage
    discriminator (D = D1 -> D2), a discriminator feature-matching loss
    module (DL) and a KL-divergence loss module (KL), then alternates
    D / G / E updates for ``num_epoch`` epochs.  Metrics are printed every
    ``show_after_every`` iterations, sample images rendered every
    ``visualize_after_every`` epochs, and parameters checkpointed every
    ``save_after_every`` epochs when ``check_point`` is set.

    All arguments are network/optimizer hyper-parameters plus I/O paths
    and reporting cadences; the interface is unchanged from the original.
    '''
    #encoder
    z_mu, z_lv, z = encoder(nef, Z, batch_size)
    symE = mx.sym.Group([z_mu, z_lv, z])
    #generator
    symG = generator(ngf, nc, no_bias=True, fix_gamma=True, eps=1e-5 + 1e-12, z_dim = Z, activation=activation )
    #discriminator
    h = discriminator1(ndf)
    dloss = discriminator2(ndf)
    symD1 = h
    symD2 = dloss
    # ==============data==============
    X_train, _ = get_data(data_path, activation)
    train_iter = mx.io.NDArrayIter(X_train, batch_size=batch_size, shuffle=True)
    rand_iter = RandIter(batch_size, Z)
    label = mx.nd.zeros((batch_size,), ctx=ctx)
    # =============module E=============
    modE = mx.mod.Module(symbol=symE, data_names=('data',), label_names=None, context=ctx)
    modE.bind(data_shapes=train_iter.provide_data)
    modE.init_params(initializer=mx.init.Normal(0.02))
    modE.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-6,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods = [modE]
    # =============module G=============
    modG = mx.mod.Module(symbol=symG, data_names=('rand',), label_names=None, context=ctx)
    modG.bind(data_shapes=rand_iter.provide_data, inputs_need_grad=True)
    modG.init_params(initializer=mx.init.Normal(0.02))
    modG.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-6,
            'beta1': beta1,
            'epsilon': epsilon,
        })
    mods.append(modG)
    # =============module D=============
    modD1 = mx.mod.Module(symD1, label_names=[], context=ctx)
    modD2 = mx.mod.Module(symD2, label_names=('label',), context=ctx)
    modD = mx.mod.SequentialModule()
    modD.add(modD1).add(modD2, take_labels=True, auto_wiring=True)
    modD.bind(data_shapes=train_iter.provide_data,
              label_shapes=[('label', (batch_size,))],
              inputs_need_grad=True)
    modD.init_params(initializer=mx.init.Normal(0.02))
    modD.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 1e-3,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods.append(modD)
    # =============module DL=============
    symDL = DiscriminatorLayerLoss()
    modDL = mx.mod.Module(symbol=symDL, data_names=('data',), label_names=('label',), context=ctx)
    # NOTE(review): shape assumed to match discriminator1's feature map,
    # (batch, nef*4, 4, 4) -- the original comment flagged this hard-coded
    # size ("fix 512 here"); confirm against discriminator1's architecture.
    modDL.bind(data_shapes=[('data', (batch_size,nef * 4,4,4))],
               label_shapes=[('label', (batch_size,nef * 4,4,4))],
               inputs_need_grad=True)
    modDL.init_params(initializer=mx.init.Normal(0.02))
    modDL.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 0.,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    # =============module KL=============
    symKL = KLDivergenceLoss()
    modKL = mx.mod.Module(symbol=symKL, data_names=('data',), label_names=None, context=ctx)
    modKL.bind(data_shapes=[('data', (batch_size*2,Z))],
               inputs_need_grad=True)
    modKL.init_params(initializer=mx.init.Normal(0.02))
    modKL.init_optimizer(
        optimizer='adam',
        optimizer_params={
            'learning_rate': lr,
            'wd': 0.,
            'beta1': beta1,
            'epsilon': epsilon,
            'rescale_grad': (1.0/batch_size)
        })
    mods.append(modKL)
    def norm_stat(d):
        # per-element gradient norm, used only by the (disabled) monitor
        return mx.nd.norm(d)/np.sqrt(d.size)
    # Debug scaffolding: the monitor is constructed and then immediately
    # disabled; set `mon` to the Monitor instance to re-enable it.
    mon = mx.mon.Monitor(10, norm_stat, pattern=".*output|d1_backward_data", sort=True)
    mon = None
    if mon is not None:
        for mod in mods:
            pass
    def facc(label, pred):
        '''calculating prediction accuracy
        '''
        pred = pred.ravel()
        label = label.ravel()
        return ((pred > 0.5) == label).mean()
    def fentropy(label, pred):
        '''calculating binary cross-entropy loss
        '''
        pred = pred.ravel()
        label = label.ravel()
        return -(label*np.log(pred+1e-12) + (1.-label)*np.log(1.-pred+1e-12)).mean()
    def kldivergence(label, pred):
        '''calculating KL divergence loss

        `pred` carries [mu; log_var] stacked on axis 0.  Relies on the
        closed-over `nElements`, which the training loop sets before
        mE.update() is called.
        '''
        mean, log_var = np.split(pred, 2, axis=0)
        var = np.exp(log_var)
        KLLoss = -0.5 * np.sum(1 + log_var - np.power(mean, 2) - var)
        KLLoss = KLLoss / nElements
        return KLLoss
    mG = mx.metric.CustomMetric(fentropy)
    mD = mx.metric.CustomMetric(fentropy)
    mE = mx.metric.CustomMetric(kldivergence)
    mACC = mx.metric.CustomMetric(facc)
    print('Training...')
    # =============train===============
    for epoch in range(num_epoch):
        train_iter.reset()
        for t, batch in enumerate(train_iter):
            rbatch = rand_iter.next()
            if mon is not None:
                mon.tic()
            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            # update discriminator on fake
            label[:] = 0
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            # stash D's gradients for the fake pass; they are averaged into
            # the real-pass gradients below before modD.update()
            gradD11 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
            gradD12 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])
            #update discriminator on decoded
            modE.forward(batch, is_train=True)
            mu, lv, z = modE.get_outputs()
            z = z.reshape((batch_size, Z, 1, 1))
            sample = mx.io.DataBatch([z], label=None, provide_data = [('rand', (batch_size, Z, 1, 1))])
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 0
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            gradD21 = [[grad.copyto(grad.context) for grad in grads] for grads in modD1._exec_group.grad_arrays]
            gradD22 = [[grad.copyto(grad.context) for grad in grads] for grads in modD2._exec_group.grad_arrays]
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])
            # update discriminator on real
            label[:] = 1
            batch.label = [label]
            modD.forward(batch, is_train=True)
            # keep D1's features on real data as the feature-matching target
            lx = [out.copyto(out.context) for out in modD1.get_outputs()]
            modD.backward()
            # average in the stored fake/decoded gradients (in place via +=)
            for gradsr, gradsf, gradsd in zip(modD1._exec_group.grad_arrays, gradD11, gradD21):
                for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
                    gradr += 0.5 * (gradf + gradd)
            for gradsr, gradsf, gradsd in zip(modD2._exec_group.grad_arrays, gradD12, gradD22):
                for gradr, gradf, gradd in zip(gradsr, gradsf, gradsd):
                    gradr += 0.5 * (gradf + gradd)
            modD.update()
            modD.update_metric(mD, [label])
            modD.update_metric(mACC, [label])
            # =====first generator update (G is updated twice per batch)=====
            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            # Combine the adversarial gradients with the feature-matching
            # gradient now held in modG's executors.  This must write in
            # place: the original `grad = ...` only rebound the loop
            # variable, leaving the executor gradients untouched and
            # silently discarding gradG1/gradG2.
            for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
                for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    grad[:] = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
            modG.update()
            mG.update([label], modD.get_outputs())
            # =====second generator update (same schedule as above)=====
            modG.forward(rbatch, is_train=True)
            outG = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(outG, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG1 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            label[:] = 1
            modD.forward(mx.io.DataBatch(xz, [label]), is_train=True)
            modD.backward()
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            gradG2 = [[grad.copyto(grad.context) for grad in grads] for grads in modG._exec_group.grad_arrays]
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            # same in-place combination fix as in the first G update
            for grads, gradsG1, gradsG2 in zip(modG._exec_group.grad_arrays, gradG1, gradG2):
                for grad, gradg1, gradg2 in zip(grads, gradsG1, gradsG2):
                    grad[:] = g_dl_weight * grad + 0.5 * (gradg1 + gradg2)
            modG.update()
            mG.update([label], modD.get_outputs())
            modG.forward(sample, is_train=True)
            xz = modG.get_outputs()
            #update generator
            modD1.forward(mx.io.DataBatch(xz, []), is_train=True)
            outD1 = modD1.get_outputs()
            modDL.forward(mx.io.DataBatch(outD1, lx), is_train=True)
            DLloss = modDL.get_outputs()
            modDL.backward()
            dlGrad = modDL.get_input_grads()
            modD1.backward(dlGrad)
            diffD = modD1.get_input_grads()
            modG.backward(diffD)
            #update encoder
            nElements = batch_size
            modKL.forward(mx.io.DataBatch([mx.ndarray.concat(mu,lv, dim=0)]), is_train=True)
            KLloss = modKL.get_outputs()
            modKL.backward()
            gradKLLoss = modKL.get_input_grads()
            diffG = modG.get_input_grads()
            diffG = diffG[0].reshape((batch_size, Z))
            # E receives the KL gradient w.r.t. (mu, lv) plus the
            # reconstruction gradient flowing back through G's input z
            modE.backward(mx.ndarray.split(gradKLLoss[0], num_outputs=2, axis=0) + [diffG])
            modE.update()
            pred = mx.ndarray.concat(mu,lv, dim=0)
            # the kldivergence metric ignores its label argument
            mE.update([pred], [pred])
            if mon is not None:
                mon.toc_print()
            t += 1  # report 1-based iteration numbers
            if t % show_after_every == 0:
                print('epoch:', epoch, 'iter:', t, 'metric:', mACC.get(), mG.get(), mD.get(), mE.get(), KLloss[0].asnumpy(), DLloss[0].asnumpy())
                mACC.reset()
                mG.reset()
                mD.reset()
                mE.reset()
        if epoch % visualize_after_every == 0:
            visual(output_path +'gout'+str(epoch), outG[0].asnumpy(), activation)
            visual(output_path + 'data'+str(epoch), batch.data[0].asnumpy(), activation)
        if check_point and epoch % save_after_every == 0:
            print('Saving...')
            modG.save_params(checkpoint_path + '/%s_G-%04d.params'%(dataset, epoch))
            modD.save_params(checkpoint_path + '/%s_D-%04d.params'%(dataset, epoch))
            modE.save_params(checkpoint_path + '/%s_E-%04d.params'%(dataset, epoch))
def _create_consumer(self):
        """Tries to establing the Kafka consumer connection"""
        # Only attempt a connection while this component is still open;
        # when closed, fall through and implicitly return None.
        if not self.closed:
            try:
                self.logger.debug("Creating new kafka consumer using brokers: " +
                                  str(self.settings['KAFKA_HOSTS']) + ' and topic ' +
                                  self.settings['KAFKA_TOPIC_PREFIX'] +
                                  ".outbound_firehose")
                # The consumer is pinned to the "<prefix>.outbound_firehose"
                # topic; every tuning knob comes from self.settings.
                return KafkaConsumer(
                    self.settings['KAFKA_TOPIC_PREFIX'] + ".outbound_firehose",
                    group_id=None,
                    bootstrap_servers=self.settings['KAFKA_HOSTS'],
                    consumer_timeout_ms=self.settings['KAFKA_CONSUMER_TIMEOUT'],
                    auto_offset_reset=self.settings['KAFKA_CONSUMER_AUTO_OFFSET_RESET'],
                    auto_commit_interval_ms=self.settings['KAFKA_CONSUMER_COMMIT_INTERVAL_MS'],
                    enable_auto_commit=self.settings['KAFKA_CONSUMER_AUTO_COMMIT_ENABLE'],
                    max_partition_fetch_bytes=self.settings['KAFKA_CONSUMER_FETCH_MESSAGE_MAX_BYTES'])
            except KeyError as e:
                # A missing configuration key is logged and swallowed.
                self.logger.error('Missing setting named ' + str(e),
                                  {'ex': traceback.format_exc()})
            except:
                # Any other construction failure is logged, then re-raised.
                self.logger.error("Couldn't initialize kafka consumer for topic",
                                  {'ex': traceback.format_exc()})
                raise | Tries to establing the Kafka consumer connection | Below is the the instruction that describes the task:
### Input:
Tries to establish the Kafka consumer connection
### Response:
def _create_consumer(self):
        """Try to establish the Kafka consumer connection.

        Returns the new consumer on success.  A missing configuration key
        is logged and swallowed; any other construction failure is logged
        and re-raised.  Nothing is attempted once the component is closed.
        """
        if self.closed:
            return None
        settings = self.settings
        try:
            self.logger.debug("Creating new kafka consumer using brokers: " +
                              str(settings['KAFKA_HOSTS']) + ' and topic ' +
                              settings['KAFKA_TOPIC_PREFIX'] +
                              ".outbound_firehose")
            return KafkaConsumer(
                settings['KAFKA_TOPIC_PREFIX'] + ".outbound_firehose",
                group_id=None,
                bootstrap_servers=settings['KAFKA_HOSTS'],
                consumer_timeout_ms=settings['KAFKA_CONSUMER_TIMEOUT'],
                auto_offset_reset=settings['KAFKA_CONSUMER_AUTO_OFFSET_RESET'],
                auto_commit_interval_ms=settings['KAFKA_CONSUMER_COMMIT_INTERVAL_MS'],
                enable_auto_commit=settings['KAFKA_CONSUMER_AUTO_COMMIT_ENABLE'],
                max_partition_fetch_bytes=settings['KAFKA_CONSUMER_FETCH_MESSAGE_MAX_BYTES'])
        except KeyError as missing_key:
            self.logger.error('Missing setting named ' + str(missing_key),
                              {'ex': traceback.format_exc()})
        except:
            self.logger.error("Couldn't initialize kafka consumer for topic",
                              {'ex': traceback.format_exc()})
            raise
def create_symmetric_key(self, algorithm, length):
        """
        Create a symmetric key.
        Args:
            algorithm(CryptographicAlgorithm): An enumeration specifying the
                algorithm for which the created key will be compliant.
            length(int): The length of the key to be created, in bits. This
                value must be compliant with the constraints of the provided
                algorithm.
        Returns:
            dict: A dictionary containing the key data, with the following
                key/value fields:
                * value - the bytes of the key
                * format - a KeyFormatType enumeration for the bytes format
        Raises:
            InvalidField: Raised when the algorithm is unsupported or the
                length is incompatible with the algorithm.
            CryptographicFailure: Raised when the key generation process
                fails.
        Example:
            >>> engine = CryptographyEngine()
            >>> key = engine.create_symmetric_key(
            ...     CryptographicAlgorithm.AES, 256)
        """
        # Reject algorithms with no registered backend implementation.
        if algorithm not in self._symmetric_key_algorithms.keys():
            raise exceptions.InvalidField(
                "The cryptographic algorithm {0} is not a supported symmetric "
                "key algorithm.".format(algorithm)
            )
        cryptography_algorithm = self._symmetric_key_algorithms.get(algorithm)
        # Each backend algorithm advertises the key sizes (bits) it accepts.
        if length not in cryptography_algorithm.key_sizes:
            raise exceptions.InvalidField(
                "The cryptographic length ({0}) is not valid for "
                "the cryptographic algorithm ({1}).".format(
                    length, algorithm.name
                )
            )
        self.logger.info(
            "Generating a {0} symmetric key with length: {1}".format(
                algorithm.name, length
            )
        )
        # length is in bits; os.urandom takes a byte count.
        key_bytes = os.urandom(length // 8)
        try:
            # Instantiating the algorithm validates the generated key bytes.
            cryptography_algorithm(key_bytes)
        except Exception as e:
            self.logger.exception(e)
            raise exceptions.CryptographicFailure(
                "Invalid bytes for the provided cryptographic algorithm.")
        return {'value': key_bytes, 'format': enums.KeyFormatType.RAW} | Create a symmetric key.
Args:
algorithm(CryptographicAlgorithm): An enumeration specifying the
algorithm for which the created key will be compliant.
length(int): The length of the key to be created. This value must
be compliant with the constraints of the provided algorithm.
Returns:
dict: A dictionary containing the key data, with the following
key/value fields:
* value - the bytes of the key
* format - a KeyFormatType enumeration for the bytes format
Raises:
InvalidField: Raised when the algorithm is unsupported or the
length is incompatible with the algorithm.
CryptographicFailure: Raised when the key generation process
fails.
Example:
>>> engine = CryptographyEngine()
>>> key = engine.create_symmetric_key(
... CryptographicAlgorithm.AES, 256) | Below is the instruction that describes the task:
### Input:
Create a symmetric key.
Args:
algorithm(CryptographicAlgorithm): An enumeration specifying the
algorithm for which the created key will be compliant.
length(int): The length of the key to be created. This value must
be compliant with the constraints of the provided algorithm.
Returns:
dict: A dictionary containing the key data, with the following
key/value fields:
* value - the bytes of the key
* format - a KeyFormatType enumeration for the bytes format
Raises:
InvalidField: Raised when the algorithm is unsupported or the
length is incompatible with the algorithm.
CryptographicFailure: Raised when the key generation process
fails.
Example:
>>> engine = CryptographyEngine()
>>> key = engine.create_symmetric_key(
... CryptographicAlgorithm.AES, 256)
### Response:
def create_symmetric_key(self, algorithm, length):
        """
        Create a symmetric key.
        Args:
            algorithm(CryptographicAlgorithm): An enumeration specifying the
                algorithm for which the created key will be compliant.
            length(int): The length of the key to be created, in bits. This
                value must be compliant with the constraints of the provided
                algorithm.
        Returns:
            dict: A dictionary containing the key data, with the following
                key/value fields:
                * value - the bytes of the key
                * format - a KeyFormatType enumeration for the bytes format
        Raises:
            InvalidField: Raised when the algorithm is unsupported or the
                length is incompatible with the algorithm.
            CryptographicFailure: Raised when the key generation process
                fails.
        Example:
            >>> engine = CryptographyEngine()
            >>> key = engine.create_symmetric_key(
            ...     CryptographicAlgorithm.AES, 256)
        """
        # Membership test directly on the dict; the original `.keys()`
        # call was redundant.
        if algorithm not in self._symmetric_key_algorithms:
            raise exceptions.InvalidField(
                "The cryptographic algorithm {0} is not a supported symmetric "
                "key algorithm.".format(algorithm)
            )
        cryptography_algorithm = self._symmetric_key_algorithms.get(algorithm)
        # Each backend algorithm advertises the key sizes (bits) it accepts.
        if length not in cryptography_algorithm.key_sizes:
            raise exceptions.InvalidField(
                "The cryptographic length ({0}) is not valid for "
                "the cryptographic algorithm ({1}).".format(
                    length, algorithm.name
                )
            )
        self.logger.info(
            "Generating a {0} symmetric key with length: {1}".format(
                algorithm.name, length
            )
        )
        # length is in bits; os.urandom takes a byte count.
        key_bytes = os.urandom(length // 8)
        try:
            # Instantiating the algorithm validates the generated key bytes.
            cryptography_algorithm(key_bytes)
        except Exception as e:
            self.logger.exception(e)
            raise exceptions.CryptographicFailure(
                "Invalid bytes for the provided cryptographic algorithm.")
        return {'value': key_bytes, 'format': enums.KeyFormatType.RAW}
def _create_deserializer(self) -> JsonObjectDeserializer:
        """
        Creates a deserializer that is to be used by this decoder.
        :return: the deserializer (built lazily and memoised)
        """
        if self._deserializer_cache is None:
            # Dynamically derive a JsonObjectDeserializer subclass that
            # carries this decoder's encoder args/kwargs as class-level
            # configuration, then cache a single instance of it.
            deserializer_cls = type(
                "%sInternalDeserializer" % type(self),
                (JsonObjectDeserializer,),
                {
                    "_JSON_ENCODER_ARGS": self._args,
                    "_JSON_ENCODER_KWARGS": self._kwargs
                }
            )
            self._deserializer_cache = deserializer_cls(self._get_property_mappings(), self._get_deserializable_cls())
        return self._deserializer_cache | Creates a deserializer that is to be used by this decoder.
:return: the deserializer | Below is the instruction that describes the task:
### Input:
Creates a deserializer that is to be used by this decoder.
:return: the deserializer
### Response:
def _create_deserializer(self) -> JsonObjectDeserializer:
        """
        Creates a deserializer that is to be used by this decoder.
        :return: the deserializer (built on first use, then cached)
        """
        if self._deserializer_cache is not None:
            return self._deserializer_cache
        # Derive a one-off JsonObjectDeserializer subclass carrying this
        # decoder's encoder configuration as class attributes, then memoise
        # a single instance of it.
        subclass_name = "%sInternalDeserializer" % type(self)
        subclass_body = {
            "_JSON_ENCODER_ARGS": self._args,
            "_JSON_ENCODER_KWARGS": self._kwargs
        }
        specialised_cls = type(subclass_name, (JsonObjectDeserializer,), subclass_body)
        self._deserializer_cache = specialised_cls(
            self._get_property_mappings(), self._get_deserializable_cls())
        return self._deserializer_cache
def as_coeff_unit(self):
        """Factor the coefficient multiplying a unit
        For units that are multiplied by a constant dimensionless
        coefficient, returns a tuple containing the coefficient and
        a new unit object for the unmultiplied unit.
        Example
        -------
        >>> import unyt as u
        >>> unit = (u.m**2/u.cm).simplify()
        >>> unit
        100*m
        >>> unit.as_coeff_unit()
        (100.0, m)
        """
        # Split the sympy expression into (numeric prefactor, remainder).
        coeff, mul = self.expr.as_coeff_Mul()
        coeff = float(coeff)
        # Rebuild the unit without the prefactor, dividing it out of the
        # base value so (coeff, ret) is equivalent to the original unit.
        ret = Unit(
            mul,
            self.base_value / coeff,
            self.base_offset,
            self.dimensions,
            self.registry,
        )
        return coeff, ret | Factor the coefficient multiplying a unit
For units that are multiplied by a constant dimensionless
coefficient, returns a tuple containing the coefficient and
a new unit object for the unmultiplied unit.
Example
-------
>>> import unyt as u
>>> unit = (u.m**2/u.cm).simplify()
>>> unit
100*m
>>> unit.as_coeff_unit()
(100.0, m) | Below is the instruction that describes the task:
### Input:
Factor the coefficient multiplying a unit
For units that are multiplied by a constant dimensionless
coefficient, returns a tuple containing the coefficient and
a new unit object for the unmultiplied unit.
Example
-------
>>> import unyt as u
>>> unit = (u.m**2/u.cm).simplify()
>>> unit
100*m
>>> unit.as_coeff_unit()
(100.0, m)
### Response:
def as_coeff_unit(self):
        """Factor the coefficient multiplying a unit
        For units that are multiplied by a constant dimensionless
        coefficient, returns a tuple containing the coefficient and
        a new unit object for the unmultiplied unit.
        Example
        -------
        >>> import unyt as u
        >>> unit = (u.m**2/u.cm).simplify()
        >>> unit
        100*m
        >>> unit.as_coeff_unit()
        (100.0, m)
        """
        # Separate the expression into its numeric prefactor and the rest.
        prefactor, bare_expr = self.expr.as_coeff_Mul()
        prefactor = float(prefactor)
        # The stripped unit keeps the same offset/dimensions/registry but
        # absorbs the prefactor out of its base value, so the returned
        # (coefficient, unit) pair is equivalent to the original unit.
        stripped = Unit(
            bare_expr,
            self.base_value / prefactor,
            self.base_offset,
            self.dimensions,
            self.registry,
        )
        return prefactor, stripped
def calculate_pertubations(self):
        """ experimental method to calculate finite difference parameter
        pertubations.  The pertubation values are added to the
        Pst.parameter_data attribute
        Note
        ----
        user beware!
        """
        self.build_increments()
        # Candidate perturbation: parval1 + increment for every parameter.
        self.parameter_data.loc[:,"pertubation"] = \
            self.parameter_data.parval1 + \
            self.parameter_data.increment
        # Flag parameters pushed above their upper bound...
        self.parameter_data.loc[:,"out_forward"] = \
            self.parameter_data.loc[:,"pertubation"] > \
            self.parameter_data.loc[:,"parubnd"]
        out_forward = self.parameter_data.groupby("out_forward").groups
        if True in out_forward:
            # ...and perturb those backwards instead (parval1 - increment).
            self.parameter_data.loc[out_forward[True],"pertubation"] = \
                self.parameter_data.loc[out_forward[True],"parval1"] - \
                self.parameter_data.loc[out_forward[True],"increment"]
        # Any parameter now below its lower bound can't be perturbed either
        # way within its bounds -- that is fatal.
        self.parameter_data.loc[:,"out_back"] = \
            self.parameter_data.loc[:,"pertubation"] < \
            self.parameter_data.loc[:,"parlbnd"]
        out_back = self.parameter_data.groupby("out_back").groups
        if True in out_back:
            still_out = out_back[True]
            print(self.parameter_data.loc[still_out,:],flush=True)
            raise Exception("Pst.calculate_pertubations(): " +\
                            "can't calc pertubations for the following "+\
                            "Parameters {0}".format(','.join(still_out))) | experimental method to calculate finite difference parameter
pertubations. The pertubation values are added to the
Pst.parameter_data attribute
Note
----
user beware! | Below is the instruction that describes the task:
### Input:
experimental method to calculate finite difference parameter
pertubations. The pertubation values are added to the
Pst.parameter_data attribute
Note
----
user beware!
### Response:
def calculate_pertubations(self):
        """ experimental method to calculate finite difference parameter
        pertubations.  The pertubation values are added to the
        Pst.parameter_data attribute
        Note
        ----
        user beware!
        """
        self.build_increments()
        # Candidate perturbation: parval1 + increment for every parameter.
        self.parameter_data.loc[:,"pertubation"] = \
            self.parameter_data.parval1 + \
            self.parameter_data.increment
        # Flag parameters pushed above their upper bound...
        self.parameter_data.loc[:,"out_forward"] = \
            self.parameter_data.loc[:,"pertubation"] > \
            self.parameter_data.loc[:,"parubnd"]
        out_forward = self.parameter_data.groupby("out_forward").groups
        if True in out_forward:
            # ...and perturb those backwards instead (parval1 - increment).
            self.parameter_data.loc[out_forward[True],"pertubation"] = \
                self.parameter_data.loc[out_forward[True],"parval1"] - \
                self.parameter_data.loc[out_forward[True],"increment"]
        # Any parameter now below its lower bound can't be perturbed either
        # way within its bounds -- that is fatal.
        self.parameter_data.loc[:,"out_back"] = \
            self.parameter_data.loc[:,"pertubation"] < \
            self.parameter_data.loc[:,"parlbnd"]
        out_back = self.parameter_data.groupby("out_back").groups
        if True in out_back:
            still_out = out_back[True]
            print(self.parameter_data.loc[still_out,:],flush=True)
            raise Exception("Pst.calculate_pertubations(): " +\
                            "can't calc pertubations for the following "+\
                            "Parameters {0}".format(','.join(still_out)))
def isample(self, *args, **kwds):
        """
        Samples in interactive mode. Main thread of control stays in this function.
        """
        # Clear any exception captured from a previous sampling run.
        self._exc_info = None
        out = kwds.pop('out', sys.stdout)
        # The progress bar is disabled; it would fight the interactive prompt.
        kwds['progress_bar'] = False
        def samp_targ(*args, **kwds):
            # Worker-thread target: run sample() and stash any exception
            # for the main thread to inspect later.
            try:
                self.sample(*args, **kwds)
            except:
                self._exc_info = sys.exc_info()
        self._sampling_thread = Thread(
            target=samp_targ,
            args=args,
            kwargs=kwds)
        self.status = 'running'
        self._sampling_thread.start()
        # Main thread blocks in the interactive prompt while sampling runs.
        self.iprompt(out=out) | Samples in interactive mode. Main thread of control stays in this function. | Below is the the instruction that describes the task:
### Input:
Samples in interactive mode. Main thread of control stays in this function.
### Response:
def isample(self, *args, **kwds):
        """
        Samples in interactive mode. Main thread of control stays in this function.
        """
        # Clear any exception captured from a previous sampling run.
        self._exc_info = None
        out = kwds.pop('out', sys.stdout)
        # The progress bar is disabled; it would fight the interactive prompt.
        kwds['progress_bar'] = False
        def samp_targ(*args, **kwds):
            # Worker-thread target: run sample() and stash any exception
            # for the main thread to inspect later.
            try:
                self.sample(*args, **kwds)
            except:
                self._exc_info = sys.exc_info()
        self._sampling_thread = Thread(
            target=samp_targ,
            args=args,
            kwargs=kwds)
        self.status = 'running'
        self._sampling_thread.start()
        # Main thread blocks in the interactive prompt while sampling runs.
        self.iprompt(out=out)
def derivativeX(self,x,y):
        '''
        Evaluate the first derivative with respect to x of the function at given
        state space points.
        Parameters
        ----------
        x : np.array
            First input values.
        y : np.array
            Second input values; should be of same shape as x.
        Returns
        -------
        dfdx_out : np.array
            First derivative of function with respect to the first input,
            evaluated at (x,y), of same shape as inputs.
        '''
        # Translate x by the y-dependent lower bound, then delegate to the
        # wrapped function's own x-derivative at the shifted point.
        xShift = self.lowerBound(y)
        dfdx_out = self.func.derivativeX(x-xShift,y)
        return dfdx_out | Evaluate the first derivative with respect to x of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
Returns
-------
dfdx_out : np.array
First derivative of function with respect to the first input,
evaluated at (x,y), of same shape as inputs. | Below is the instruction that describes the task:
### Input:
Evaluate the first derivative with respect to x of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
Returns
-------
dfdx_out : np.array
First derivative of function with respect to the first input,
evaluated at (x,y), of same shape as inputs.
### Response:
def derivativeX(self,x,y):
'''
Evaluate the first derivative with respect to x of the function at given
state space points.
Parameters
----------
x : np.array
First input values.
y : np.array
Second input values; should be of same shape as x.
Returns
-------
dfdx_out : np.array
First derivative of function with respect to the first input,
evaluated at (x,y), of same shape as inputs.
'''
xShift = self.lowerBound(y)
dfdx_out = self.func.derivativeX(x-xShift,y)
return dfdx_out |
def load(self, playerName=None):
"""retrieve the PlayerRecord settings from saved disk file"""
if playerName: # switch the PlayerRecord this object describes
self.name = playerName # preset value to load self.filename
try:
with open(self.filename, "rb") as f:
data = f.read()
except Exception:
raise ValueError("invalid profile, '%s'. file does not exist: %s"%(self.name, self.filename))
self.update(json.loads(data))
self._matches = [] # mandate match history be recalculated for this newly loaded player | retrieve the PlayerRecord settings from saved disk file | Below is the the instruction that describes the task:
### Input:
retrieve the PlayerRecord settings from saved disk file
### Response:
def load(self, playerName=None):
"""retrieve the PlayerRecord settings from saved disk file"""
if playerName: # switch the PlayerRecord this object describes
self.name = playerName # preset value to load self.filename
try:
with open(self.filename, "rb") as f:
data = f.read()
except Exception:
raise ValueError("invalid profile, '%s'. file does not exist: %s"%(self.name, self.filename))
self.update(json.loads(data))
self._matches = [] # mandate match history be recalculated for this newly loaded player |
def main(args=None):
"""
Program entry point.
"""
if args is None:
args = process_args()
print('Parsing and resolving model: '+args.lems_file)
model = Model()
if args.I is not None:
for dir in args.I:
model.add_include_directory(dir)
model.import_from_file(args.lems_file)
resolved_model = model.resolve()
print('Building simulation')
sim = SimulationBuilder(resolved_model).build()
#sim.dump("Afterbuild:")
if args.dlems:
print('Exporting as: '+dlems_info)
from lems.dlems.exportdlems import export_component
target = model.targets[0]
sim_comp = model.components[target]
target_net = sim_comp.parameters['target']
target_comp = model.components[target_net]
dlems_file_name = args.lems_file.replace('.xml', '.json')
if dlems_file_name == args.lems_file:
dlems_file_name = args.lems_file + '.json'
if target_comp.type == 'network':
for child in target_comp.children:
if child.type == 'population':
comp = model.components[child.parameters['component']]
export_component(model, comp, sim_comp, child.id, file_name=dlems_file_name)
else:
export_component(model, sim_comp, target_comp)
else:
print('Running simulation')
sim.run()
process_simulation_output(sim, model, args) | Program entry point. | Below is the the instruction that describes the task:
### Input:
Program entry point.
### Response:
def main(args=None):
"""
Program entry point.
"""
if args is None:
args = process_args()
print('Parsing and resolving model: '+args.lems_file)
model = Model()
if args.I is not None:
for dir in args.I:
model.add_include_directory(dir)
model.import_from_file(args.lems_file)
resolved_model = model.resolve()
print('Building simulation')
sim = SimulationBuilder(resolved_model).build()
#sim.dump("Afterbuild:")
if args.dlems:
print('Exporting as: '+dlems_info)
from lems.dlems.exportdlems import export_component
target = model.targets[0]
sim_comp = model.components[target]
target_net = sim_comp.parameters['target']
target_comp = model.components[target_net]
dlems_file_name = args.lems_file.replace('.xml', '.json')
if dlems_file_name == args.lems_file:
dlems_file_name = args.lems_file + '.json'
if target_comp.type == 'network':
for child in target_comp.children:
if child.type == 'population':
comp = model.components[child.parameters['component']]
export_component(model, comp, sim_comp, child.id, file_name=dlems_file_name)
else:
export_component(model, sim_comp, target_comp)
else:
print('Running simulation')
sim.run()
process_simulation_output(sim, model, args) |
def __set_labels(self, labels):
"""
Add a label on the document.
"""
with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'w') \
as file_desc:
for label in labels:
file_desc.write("%s,%s\n" % (label.name,
label.get_color_str())) | Add a label on the document. | Below is the the instruction that describes the task:
### Input:
Add a label on the document.
### Response:
def __set_labels(self, labels):
"""
Add a label on the document.
"""
with self.fs.open(self.fs.join(self.path, self.LABEL_FILE), 'w') \
as file_desc:
for label in labels:
file_desc.write("%s,%s\n" % (label.name,
label.get_color_str())) |
def reload(self):
"""Reload the metadata for this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_instance]
:end-before: [END bigtable_reload_instance]
"""
instance_pb = self._client.instance_admin_client.get_instance(self.name)
# NOTE: _update_from_pb does not check that the project and
# instance ID on the response match the request.
self._update_from_pb(instance_pb) | Reload the metadata for this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_instance]
:end-before: [END bigtable_reload_instance] | Below is the the instruction that describes the task:
### Input:
Reload the metadata for this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_instance]
:end-before: [END bigtable_reload_instance]
### Response:
def reload(self):
"""Reload the metadata for this instance.
For example:
.. literalinclude:: snippets.py
:start-after: [START bigtable_reload_instance]
:end-before: [END bigtable_reload_instance]
"""
instance_pb = self._client.instance_admin_client.get_instance(self.name)
# NOTE: _update_from_pb does not check that the project and
# instance ID on the response match the request.
self._update_from_pb(instance_pb) |
def execute_sql(self, *args, **kwargs):
"""Sync execute SQL query, `allow_sync` must be set to True.
"""
assert self._allow_sync, (
"Error, sync query is not allowed! Call the `.set_allow_sync()` "
"or use the `.allow_sync()` context manager.")
if self._allow_sync in (logging.ERROR, logging.WARNING):
logging.log(self._allow_sync,
"Error, sync query is not allowed: %s %s" %
(str(args), str(kwargs)))
return super().execute_sql(*args, **kwargs) | Sync execute SQL query, `allow_sync` must be set to True. | Below is the the instruction that describes the task:
### Input:
Sync execute SQL query, `allow_sync` must be set to True.
### Response:
def execute_sql(self, *args, **kwargs):
"""Sync execute SQL query, `allow_sync` must be set to True.
"""
assert self._allow_sync, (
"Error, sync query is not allowed! Call the `.set_allow_sync()` "
"or use the `.allow_sync()` context manager.")
if self._allow_sync in (logging.ERROR, logging.WARNING):
logging.log(self._allow_sync,
"Error, sync query is not allowed: %s %s" %
(str(args), str(kwargs)))
return super().execute_sql(*args, **kwargs) |
def get_driver_whitelist(driver): # noqa: E501
"""Retrieve the whitelist in the driver
Retrieve the whitelist in the driver # noqa: E501
:param driver: The driver to use for the request. ie. github
:type driver: str
:rtype: Response
"""
response = errorIfUnauthorized(role='admin')
if response:
return response
else:
response = ApitaxResponse()
driver: Driver = LoadedDrivers.getDriver(driver)
response.body.add({'whitelist': driver.getDriverWhitelist()})
return Response(status=200, body=response.getResponseBody()) | Retrieve the whitelist in the driver
Retrieve the whitelist in the driver # noqa: E501
:param driver: The driver to use for the request. ie. github
:type driver: str
:rtype: Response | Below is the the instruction that describes the task:
### Input:
Retrieve the whitelist in the driver
Retrieve the whitelist in the driver # noqa: E501
:param driver: The driver to use for the request. ie. github
:type driver: str
:rtype: Response
### Response:
def get_driver_whitelist(driver): # noqa: E501
"""Retrieve the whitelist in the driver
Retrieve the whitelist in the driver # noqa: E501
:param driver: The driver to use for the request. ie. github
:type driver: str
:rtype: Response
"""
response = errorIfUnauthorized(role='admin')
if response:
return response
else:
response = ApitaxResponse()
driver: Driver = LoadedDrivers.getDriver(driver)
response.body.add({'whitelist': driver.getDriverWhitelist()})
return Response(status=200, body=response.getResponseBody()) |
def _sql(self, sql: str, params=()):
"""
:deprecated: use self.sql instead
"""
statement = SingleSqlStatement(sql)
return self.statement(statement).execute_for_params(params).cursor | :deprecated: use self.sql instead | Below is the the instruction that describes the task:
### Input:
:deprecated: use self.sql instead
### Response:
def _sql(self, sql: str, params=()):
"""
:deprecated: use self.sql instead
"""
statement = SingleSqlStatement(sql)
return self.statement(statement).execute_for_params(params).cursor |
def _prompt_username(prompt="Username: ", prefill=None):
"""Prompt the user for username."""
if prefill:
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return input(prompt).strip()
except EOFError:
print()
finally:
readline.set_startup_hook() | Prompt the user for username. | Below is the the instruction that describes the task:
### Input:
Prompt the user for username.
### Response:
def _prompt_username(prompt="Username: ", prefill=None):
"""Prompt the user for username."""
if prefill:
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return input(prompt).strip()
except EOFError:
print()
finally:
readline.set_startup_hook() |
def to_database(self, manager=None):
"""Send the model to the PyBEL database
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails.
"""
network = pybel.to_database(self.model, manager=manager)
return network | Send the model to the PyBEL database
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails. | Below is the the instruction that describes the task:
### Input:
Send the model to the PyBEL database
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails.
### Response:
def to_database(self, manager=None):
"""Send the model to the PyBEL database
This function wraps :py:func:`pybel.to_database`.
Parameters
----------
manager : Optional[pybel.manager.Manager]
A PyBEL database manager. If none, first checks the PyBEL
configuration for ``PYBEL_CONNECTION`` then checks the
environment variable ``PYBEL_REMOTE_HOST``. Finally,
defaults to using SQLite database in PyBEL data directory
(automatically configured by PyBEL)
Returns
-------
network : Optional[pybel.manager.models.Network]
The SQLAlchemy model representing the network that was uploaded.
Returns None if upload fails.
"""
network = pybel.to_database(self.model, manager=manager)
return network |
def on_page_layout_choice(self, event):
"""Page layout choice event handler"""
width, height = self.paper_sizes_points[event.GetString()]
self.page_width_text_ctrl.SetValue(str(width / 72.0))
self.page_height_text_ctrl.SetValue(str(height / 72.0))
event.Skip() | Page layout choice event handler | Below is the the instruction that describes the task:
### Input:
Page layout choice event handler
### Response:
def on_page_layout_choice(self, event):
"""Page layout choice event handler"""
width, height = self.paper_sizes_points[event.GetString()]
self.page_width_text_ctrl.SetValue(str(width / 72.0))
self.page_height_text_ctrl.SetValue(str(height / 72.0))
event.Skip() |
def access_array(self, id_, lineno, scope=None, default_type=None):
"""
Called whenever an accessed variable is expected to be an array.
ZX BASIC requires arrays to be declared before usage, so they're
checked.
Also checks for class array.
"""
if not self.check_is_declared(id_, lineno, 'array', scope):
return None
if not self.check_class(id_, CLASS.array, lineno, scope):
return None
return self.access_id(id_, lineno, scope=scope, default_type=default_type) | Called whenever an accessed variable is expected to be an array.
ZX BASIC requires arrays to be declared before usage, so they're
checked.
Also checks for class array. | Below is the the instruction that describes the task:
### Input:
Called whenever an accessed variable is expected to be an array.
ZX BASIC requires arrays to be declared before usage, so they're
checked.
Also checks for class array.
### Response:
def access_array(self, id_, lineno, scope=None, default_type=None):
"""
Called whenever an accessed variable is expected to be an array.
ZX BASIC requires arrays to be declared before usage, so they're
checked.
Also checks for class array.
"""
if not self.check_is_declared(id_, lineno, 'array', scope):
return None
if not self.check_class(id_, CLASS.array, lineno, scope):
return None
return self.access_id(id_, lineno, scope=scope, default_type=default_type) |
def fields_to_dict(fields, type_=OrderedDict):
"""Convert a flat list of key/values into an OrderedDict"""
fields_iterator = iter(fields)
return type_(zip(fields_iterator, fields_iterator)) | Convert a flat list of key/values into an OrderedDict | Below is the the instruction that describes the task:
### Input:
Convert a flat list of key/values into an OrderedDict
### Response:
def fields_to_dict(fields, type_=OrderedDict):
"""Convert a flat list of key/values into an OrderedDict"""
fields_iterator = iter(fields)
return type_(zip(fields_iterator, fields_iterator)) |
def _pool_event_refresh_cb(conn, pool, opaque):
'''
Storage pool refresh events handler
'''
_salt_send_event(opaque, conn, {
'pool': {
'name': pool.name(),
'uuid': pool.UUIDString()
},
'event': opaque['event']
}) | Storage pool refresh events handler | Below is the the instruction that describes the task:
### Input:
Storage pool refresh events handler
### Response:
def _pool_event_refresh_cb(conn, pool, opaque):
'''
Storage pool refresh events handler
'''
_salt_send_event(opaque, conn, {
'pool': {
'name': pool.name(),
'uuid': pool.UUIDString()
},
'event': opaque['event']
}) |
def check_argument_types(cllable = None, call_args = None, clss = None, caller_level = 0):
"""Can be called from within a function or method to apply typechecking to
the arguments that were passed in by the caller. Checking is applied w.r.t.
type hints of the function or method hosting the call to check_argument_types.
"""
return _check_caller_type(False, cllable, call_args, clss, caller_level+1) | Can be called from within a function or method to apply typechecking to
the arguments that were passed in by the caller. Checking is applied w.r.t.
type hints of the function or method hosting the call to check_argument_types. | Below is the the instruction that describes the task:
### Input:
Can be called from within a function or method to apply typechecking to
the arguments that were passed in by the caller. Checking is applied w.r.t.
type hints of the function or method hosting the call to check_argument_types.
### Response:
def check_argument_types(cllable = None, call_args = None, clss = None, caller_level = 0):
"""Can be called from within a function or method to apply typechecking to
the arguments that were passed in by the caller. Checking is applied w.r.t.
type hints of the function or method hosting the call to check_argument_types.
"""
return _check_caller_type(False, cllable, call_args, clss, caller_level+1) |
def user_loc_value_to_class(axis_tag, user_loc):
"""Return the OS/2 weight or width class that is closest to the provided
user location. For weight the user location is between 0 and 1000 and for
width it is a percentage.
>>> user_loc_value_to_class('wght', 310)
310
>>> user_loc_value_to_class('wdth', 62)
2
"""
if axis_tag == "wght":
return int(user_loc)
elif axis_tag == "wdth":
return min(
sorted(WIDTH_CLASS_TO_VALUE.items()),
key=lambda item: abs(item[1] - user_loc),
)[0]
raise NotImplementedError | Return the OS/2 weight or width class that is closest to the provided
user location. For weight the user location is between 0 and 1000 and for
width it is a percentage.
>>> user_loc_value_to_class('wght', 310)
310
>>> user_loc_value_to_class('wdth', 62)
2 | Below is the the instruction that describes the task:
### Input:
Return the OS/2 weight or width class that is closest to the provided
user location. For weight the user location is between 0 and 1000 and for
width it is a percentage.
>>> user_loc_value_to_class('wght', 310)
310
>>> user_loc_value_to_class('wdth', 62)
2
### Response:
def user_loc_value_to_class(axis_tag, user_loc):
"""Return the OS/2 weight or width class that is closest to the provided
user location. For weight the user location is between 0 and 1000 and for
width it is a percentage.
>>> user_loc_value_to_class('wght', 310)
310
>>> user_loc_value_to_class('wdth', 62)
2
"""
if axis_tag == "wght":
return int(user_loc)
elif axis_tag == "wdth":
return min(
sorted(WIDTH_CLASS_TO_VALUE.items()),
key=lambda item: abs(item[1] - user_loc),
)[0]
raise NotImplementedError |
def objects(self):
"""
The objects in this listing.
:type: List[:class:`.ObjectInfo`]
"""
return [ObjectInfo(o, self._instance, self._bucket, self._client)
for o in self._proto.object] | The objects in this listing.
:type: List[:class:`.ObjectInfo`] | Below is the the instruction that describes the task:
### Input:
The objects in this listing.
:type: List[:class:`.ObjectInfo`]
### Response:
def objects(self):
"""
The objects in this listing.
:type: List[:class:`.ObjectInfo`]
"""
return [ObjectInfo(o, self._instance, self._bucket, self._client)
for o in self._proto.object] |
def team_2_json(self):
"""
transform ariane_clip3 team object to Ariane server JSON obj
:return: Ariane JSON obj
"""
LOGGER.debug("Team.team_2_json")
json_obj = {
'teamID': self.id,
'teamName': self.name,
'teamDescription': self.description,
'teamColorCode': self.color_code,
'teamOSInstancesID': self.osi_ids,
'teamApplicationsID': self.app_ids
}
return json.dumps(json_obj) | transform ariane_clip3 team object to Ariane server JSON obj
:return: Ariane JSON obj | Below is the the instruction that describes the task:
### Input:
transform ariane_clip3 team object to Ariane server JSON obj
:return: Ariane JSON obj
### Response:
def team_2_json(self):
"""
transform ariane_clip3 team object to Ariane server JSON obj
:return: Ariane JSON obj
"""
LOGGER.debug("Team.team_2_json")
json_obj = {
'teamID': self.id,
'teamName': self.name,
'teamDescription': self.description,
'teamColorCode': self.color_code,
'teamOSInstancesID': self.osi_ids,
'teamApplicationsID': self.app_ids
}
return json.dumps(json_obj) |
def show_mode_indicator(viewer, tf, corner='ur'):
"""Show a keyboard mode indicator in one of the corners.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the mark; else remove it if present.
corner : str
One of 'll', 'lr', 'ul' or 'ur' selecting a corner.
The default is 'ur'.
"""
tag = '_$mode_indicator'
canvas = viewer.get_private_canvas()
try:
indic = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag)
else:
indic.corner = corner
except KeyError:
if tf:
# force a redraw if the mode changes
bm = viewer.get_bindmap()
bm.add_callback('mode-set',
lambda *args: viewer.redraw(whence=3))
Indicator = canvas.get_draw_class('modeindicator')
canvas.add(Indicator(corner=corner),
tag=tag, redraw=False)
canvas.update_canvas(whence=3) | Show a keyboard mode indicator in one of the corners.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the mark; else remove it if present.
corner : str
One of 'll', 'lr', 'ul' or 'ur' selecting a corner.
The default is 'ur'. | Below is the the instruction that describes the task:
### Input:
Show a keyboard mode indicator in one of the corners.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the mark; else remove it if present.
corner : str
One of 'll', 'lr', 'ul' or 'ur' selecting a corner.
The default is 'ur'.
### Response:
def show_mode_indicator(viewer, tf, corner='ur'):
"""Show a keyboard mode indicator in one of the corners.
Parameters
----------
viewer : an ImageView subclass instance
If True, show the color bar; else remove it if present.
tf : bool
If True, show the mark; else remove it if present.
corner : str
One of 'll', 'lr', 'ul' or 'ur' selecting a corner.
The default is 'ur'.
"""
tag = '_$mode_indicator'
canvas = viewer.get_private_canvas()
try:
indic = canvas.get_object_by_tag(tag)
if not tf:
canvas.delete_object_by_tag(tag)
else:
indic.corner = corner
except KeyError:
if tf:
# force a redraw if the mode changes
bm = viewer.get_bindmap()
bm.add_callback('mode-set',
lambda *args: viewer.redraw(whence=3))
Indicator = canvas.get_draw_class('modeindicator')
canvas.add(Indicator(corner=corner),
tag=tag, redraw=False)
canvas.update_canvas(whence=3) |
def _configure_logging(kwargs, extract=True):
"""Requests the logging manager to configure logging.
:param extract:
If naming data should be extracted from the trajectory
"""
try:
logging_manager = kwargs['logging_manager']
if extract:
logging_manager.extract_replacements(kwargs['traj'])
logging_manager.make_logging_handlers_and_tools(multiproc=True)
except Exception as exc:
sys.stderr.write('Could not configure logging system because of: %s' % repr(exc))
traceback.print_exc() | Requests the logging manager to configure logging.
:param extract:
If naming data should be extracted from the trajectory | Below is the the instruction that describes the task:
### Input:
Requests the logging manager to configure logging.
:param extract:
If naming data should be extracted from the trajectory
### Response:
def _configure_logging(kwargs, extract=True):
"""Requests the logging manager to configure logging.
:param extract:
If naming data should be extracted from the trajectory
"""
try:
logging_manager = kwargs['logging_manager']
if extract:
logging_manager.extract_replacements(kwargs['traj'])
logging_manager.make_logging_handlers_and_tools(multiproc=True)
except Exception as exc:
sys.stderr.write('Could not configure logging system because of: %s' % repr(exc))
traceback.print_exc() |
def quickstart():
"""Quickstart wizard for setting up twtxt."""
width = click.get_terminal_size()[0]
width = width if width <= 79 else 79
click.secho("twtxt - quickstart", fg="cyan")
click.secho("==================", fg="cyan")
click.echo()
help_text = "This wizard will generate a basic configuration file for twtxt with all mandatory options set. " \
"You can change all of these later with either twtxt itself or by editing the config file manually. " \
"Have a look at the docs to get information about the other available options and their meaning."
click.echo(textwrap.fill(help_text, width))
click.echo()
nick = click.prompt("➤ Please enter your desired nick", default=os.environ.get("USER", ""))
def overwrite_check(path):
if os.path.isfile(path):
click.confirm("➤ '{0}' already exists. Overwrite?".format(path), abort=True)
cfgfile = click.prompt("➤ Please enter the desired location for your config file",
os.path.join(Config.config_dir, Config.config_name),
type=click.Path(readable=True, writable=True, file_okay=True))
cfgfile = os.path.expanduser(cfgfile)
overwrite_check(cfgfile)
twtfile = click.prompt("➤ Please enter the desired location for your twtxt file",
os.path.expanduser("~/twtxt.txt"),
type=click.Path(readable=True, writable=True, file_okay=True))
twtfile = os.path.expanduser(twtfile)
overwrite_check(twtfile)
twturl = click.prompt("➤ Please enter the URL your twtxt file will be accessible from",
default="https://example.org/twtxt.txt")
disclose_identity = click.confirm("➤ Do you want to disclose your identity? Your nick and URL will be shared when "
"making HTTP requests", default=False)
click.echo()
add_news = click.confirm("➤ Do you want to follow the twtxt news feed?", default=True)
conf = Config.create_config(cfgfile, nick, twtfile, twturl, disclose_identity, add_news)
twtfile_dir = os.path.dirname(twtfile)
if not os.path.exists(twtfile_dir):
os.makedirs(twtfile_dir)
open(twtfile, "a").close()
click.echo()
click.echo("✓ Created config file at '{0}'.".format(click.format_filename(conf.config_file)))
click.echo("✓ Created twtxt file at '{0}'.".format(click.format_filename(twtfile))) | Quickstart wizard for setting up twtxt. | Below is the the instruction that describes the task:
### Input:
Quickstart wizard for setting up twtxt.
### Response:
def quickstart():
"""Quickstart wizard for setting up twtxt."""
width = click.get_terminal_size()[0]
width = width if width <= 79 else 79
click.secho("twtxt - quickstart", fg="cyan")
click.secho("==================", fg="cyan")
click.echo()
help_text = "This wizard will generate a basic configuration file for twtxt with all mandatory options set. " \
"You can change all of these later with either twtxt itself or by editing the config file manually. " \
"Have a look at the docs to get information about the other available options and their meaning."
click.echo(textwrap.fill(help_text, width))
click.echo()
nick = click.prompt("➤ Please enter your desired nick", default=os.environ.get("USER", ""))
def overwrite_check(path):
if os.path.isfile(path):
click.confirm("➤ '{0}' already exists. Overwrite?".format(path), abort=True)
cfgfile = click.prompt("➤ Please enter the desired location for your config file",
os.path.join(Config.config_dir, Config.config_name),
type=click.Path(readable=True, writable=True, file_okay=True))
cfgfile = os.path.expanduser(cfgfile)
overwrite_check(cfgfile)
twtfile = click.prompt("➤ Please enter the desired location for your twtxt file",
os.path.expanduser("~/twtxt.txt"),
type=click.Path(readable=True, writable=True, file_okay=True))
twtfile = os.path.expanduser(twtfile)
overwrite_check(twtfile)
twturl = click.prompt("➤ Please enter the URL your twtxt file will be accessible from",
default="https://example.org/twtxt.txt")
disclose_identity = click.confirm("➤ Do you want to disclose your identity? Your nick and URL will be shared when "
"making HTTP requests", default=False)
click.echo()
add_news = click.confirm("➤ Do you want to follow the twtxt news feed?", default=True)
conf = Config.create_config(cfgfile, nick, twtfile, twturl, disclose_identity, add_news)
twtfile_dir = os.path.dirname(twtfile)
if not os.path.exists(twtfile_dir):
os.makedirs(twtfile_dir)
open(twtfile, "a").close()
click.echo()
click.echo("✓ Created config file at '{0}'.".format(click.format_filename(conf.config_file)))
click.echo("✓ Created twtxt file at '{0}'.".format(click.format_filename(twtfile))) |
def df64bitto32bit(tbl):
"""
Convert a Pandas dataframe from 64 bit types to 32 bit types to save
memory or disk space.
Parameters
----------
tbl : The dataframe to convert
Returns
-------
The converted dataframe
"""
newtbl = pd.DataFrame(index=tbl.index)
for colname in tbl.columns:
newtbl[colname] = series64bitto32bit(tbl[colname])
return newtbl | Convert a Pandas dataframe from 64 bit types to 32 bit types to save
memory or disk space.
Parameters
----------
tbl : The dataframe to convert
Returns
-------
The converted dataframe | Below is the the instruction that describes the task:
### Input:
Convert a Pandas dataframe from 64 bit types to 32 bit types to save
memory or disk space.
Parameters
----------
tbl : The dataframe to convert
Returns
-------
The converted dataframe
### Response:
def df64bitto32bit(tbl):
"""
Convert a Pandas dataframe from 64 bit types to 32 bit types to save
memory or disk space.
Parameters
----------
tbl : The dataframe to convert
Returns
-------
The converted dataframe
"""
newtbl = pd.DataFrame(index=tbl.index)
for colname in tbl.columns:
newtbl[colname] = series64bitto32bit(tbl[colname])
return newtbl |
def hsla_to_rgba(h, s, l, a):
""" 0 <= H < 360, 0 <= s,l,a < 1
"""
h = h % 360
s = max(0, min(1, s))
l = max(0, min(1, l))
a = max(0, min(1, a))
c = (1 - abs(2*l - 1)) * s
x = c * (1 - abs(h/60%2 - 1))
m = l - c/2
if h<60:
r, g, b = c, x, 0
elif h<120:
r, g, b = x, c, 0
elif h<180:
r, g, b = 0, c, x
elif h<240:
r, g, b = 0, x, c
elif h<300:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
return (int((r+m)*255), int((g+m)*255), int((b+m)*255), int(a*255)) | 0 <= H < 360, 0 <= s,l,a < 1 | Below is the the instruction that describes the task:
### Input:
0 <= H < 360, 0 <= s,l,a < 1
### Response:
def hsla_to_rgba(h, s, l, a):
""" 0 <= H < 360, 0 <= s,l,a < 1
"""
h = h % 360
s = max(0, min(1, s))
l = max(0, min(1, l))
a = max(0, min(1, a))
c = (1 - abs(2*l - 1)) * s
x = c * (1 - abs(h/60%2 - 1))
m = l - c/2
if h<60:
r, g, b = c, x, 0
elif h<120:
r, g, b = x, c, 0
elif h<180:
r, g, b = 0, c, x
elif h<240:
r, g, b = 0, x, c
elif h<300:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
return (int((r+m)*255), int((g+m)*255), int((b+m)*255), int(a*255)) |
def text_channels(self):
"""List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, TextChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r | List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom. | Below is the the instruction that describes the task:
### Input:
List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
### Response:
def text_channels(self):
"""List[:class:`TextChannel`]: A list of text channels that belongs to this guild.
This is sorted by the position and are in UI order from top to bottom.
"""
r = [ch for ch in self._channels.values() if isinstance(ch, TextChannel)]
r.sort(key=lambda c: (c.position, c.id))
return r |
def rnd_date_array(size, start=date(1970, 1, 1), end=None, **kwargs):
"""
Array or Matrix of random date generator.
:returns: 1d or 2d array of datetime.date
"""
if end is None:
end = date.today()
start = parser.parse_date(start)
end = parser.parse_date(end)
_assert_correct_start_end(start, end)
return _randn(size, _rnd_date, start, end) | Array or Matrix of random date generator.
:returns: 1d or 2d array of datetime.date | Below is the the instruction that describes the task:
### Input:
Array or Matrix of random date generator.
:returns: 1d or 2d array of datetime.date
### Response:
def rnd_date_array(size, start=date(1970, 1, 1), end=None, **kwargs):
    """
    Array or Matrix of random date generator.
    :returns: 1d or 2d array of datetime.date
    """
    # An open-ended range defaults to today.
    end = date.today() if end is None else end
    lower = parser.parse_date(start)
    upper = parser.parse_date(end)
    _assert_correct_start_end(lower, upper)
    return _randn(size, _rnd_date, lower, upper)
def extract_path_ext(path, default_ext=None):
"""
Extract file extension (without dot) from `path` or return `default_ext` if
path does not contain a valid extension.
"""
ext = None
_, dotext = os.path.splitext(path)
if dotext:
ext = dotext[1:]
if not ext and default_ext:
ext = default_ext
if not ext:
raise ValueError('No extension in path {} and default_ext is None'.format(path))
return ext | Extract file extension (without dot) from `path` or return `default_ext` if
path does not contain a valid extension. | Below is the instruction that describes the task:
### Input:
Extract file extension (without dot) from `path` or return `default_ext` if
path does not contain a valid extension.
### Response:
def extract_path_ext(path, default_ext=None):
    """
    Extract file extension (without dot) from `path` or return `default_ext` if
    path does not contain a valid extension.
    """
    _, dotext = os.path.splitext(path)
    # Strip the leading dot; an empty suffix counts as "no extension".
    ext = dotext[1:] if dotext else None
    if not ext:
        ext = default_ext
    if not ext:
        raise ValueError('No extension in path {} and default_ext is None'.format(path))
    return ext
def lookup(sock, domain, cache = None):
"""lookup an I2P domain name, returning a Destination instance"""
domain = normalize_domain(domain)
# cache miss, perform lookup
reply = sam_cmd(sock, "NAMING LOOKUP NAME=%s" % domain)
b64_dest = reply.get('VALUE')
if b64_dest:
dest = Dest(b64_dest, encoding='base64')
if cache:
cache[dest.base32 + '.b32.i2p'] = dest
return dest
else:
raise NSError('Domain name %r not resolved because %r' % (domain, reply)) | lookup an I2P domain name, returning a Destination instance | Below is the instruction that describes the task:
### Input:
lookup an I2P domain name, returning a Destination instance
### Response:
def lookup(sock, domain, cache = None):
    """lookup an I2P domain name, returning a Destination instance

    :param sock: SAM control socket handed through to ``sam_cmd``
    :param domain: host name to resolve (normalized before the query)
    :param cache: optional mapping; resolved Dest objects are stored under
        their ``<base32>.b32.i2p`` name
    :raises NSError: when the SAM reply carries no VALUE for the name
    """
    domain = normalize_domain(domain)
    # NOTE(review): `cache` is only ever *written* below, never consulted
    # before querying -- despite the "cache miss" comment a cache lookup
    # appears to be missing here; confirm intent against the callers.
    # cache miss, perform lookup
    reply = sam_cmd(sock, "NAMING LOOKUP NAME=%s" % domain)
    b64_dest = reply.get('VALUE')
    if b64_dest:
        dest = Dest(b64_dest, encoding='base64')
        if cache:
            # Cached under the canonical .b32.i2p form, not the queried name.
            cache[dest.base32 + '.b32.i2p'] = dest
        return dest
    else:
        raise NSError('Domain name %r not resolved because %r' % (domain, reply))
def confd_state_webui_listen_tcp_port(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
webui = ET.SubElement(confd_state, "webui")
listen = ET.SubElement(webui, "listen")
tcp = ET.SubElement(listen, "tcp")
port = ET.SubElement(tcp, "port")
port.text = kwargs.pop('port')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def confd_state_webui_listen_tcp_port(self, **kwargs):
    """Auto Generated Code

    Builds the <confd-state>/<webui>/<listen>/<tcp>/<port> XML configuration
    subtree and passes it to the callback for transmission.

    :param port: (required kwarg) value used as the text of the port element;
        KeyError if missing.
    :param callback: optional kwarg overriding ``self._callback``.
    :return: whatever the callback returns for the built config element.
    """
    config = ET.Element("config")
    # Root element in the tail-f ConfD monitoring YANG namespace.
    confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
    webui = ET.SubElement(confd_state, "webui")
    listen = ET.SubElement(webui, "listen")
    tcp = ET.SubElement(listen, "tcp")
    port = ET.SubElement(tcp, "port")
    port.text = kwargs.pop('port')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def annotation(args):
"""
%prog annotation blastfile > annotations
Create simple two column files from the first two coluns in blastfile. Use
--queryids and --subjectids to switch IDs or descriptions.
"""
from jcvi.formats.base import DictFile
p = OptionParser(annotation.__doc__)
p.add_option("--queryids", help="Query IDS file to switch [default: %default]")
p.add_option("--subjectids", help="Subject IDS file to switch [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
blastfile, = args
d = "\t"
qids = DictFile(opts.queryids, delimiter=d) if opts.queryids else None
sids = DictFile(opts.subjectids, delimiter=d) if opts.subjectids else None
blast = Blast(blastfile)
for b in blast:
query, subject = b.query, b.subject
if qids:
query = qids[query]
if sids:
subject = sids[subject]
print("\t".join((query, subject))) | %prog annotation blastfile > annotations
Create simple two column files from the first two coluns in blastfile. Use
--queryids and --subjectids to switch IDs or descriptions. | Below is the instruction that describes the task:
### Input:
%prog annotation blastfile > annotations
Create simple two column files from the first two coluns in blastfile. Use
--queryids and --subjectids to switch IDs or descriptions.
### Response:
def annotation(args):
    """
    %prog annotation blastfile > annotations
    Create simple two column files from the first two coluns in blastfile. Use
    --queryids and --subjectids to switch IDs or descriptions.
    """
    # NOTE: the docstring above is runtime help text (fed to OptionParser),
    # so it is kept verbatim, typo included.
    from jcvi.formats.base import DictFile
    p = OptionParser(annotation.__doc__)
    p.add_option("--queryids", help="Query IDS file to switch [default: %default]")
    p.add_option("--subjectids", help="Subject IDS file to switch [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    blastfile, = args
    delimiter = "\t"
    # Optional translation tables mapping original IDs to replacements.
    query_map = DictFile(opts.queryids, delimiter=delimiter) if opts.queryids else None
    subject_map = DictFile(opts.subjectids, delimiter=delimiter) if opts.subjectids else None
    for record in Blast(blastfile):
        query, subject = record.query, record.subject
        if query_map:
            query = query_map[query]
        if subject_map:
            subject = subject_map[subject]
        print("\t".join((query, subject)))
def stats_per100(self, kind='R', summary=False):
"""Returns a DataFrame of per-100-possession stats."""
return self._get_stats_table('per_poss', kind=kind, summary=summary) | Returns a DataFrame of per-100-possession stats. | Below is the instruction that describes the task:
### Input:
Returns a DataFrame of per-100-possession stats.
### Response:
def stats_per100(self, kind='R', summary=False):
    """Returns a DataFrame of per-100-possession stats.

    :param kind: season-type selector forwarded to the table fetch
        ('R' presumably = regular season -- TODO confirm in _get_stats_table).
    :param summary: forwarded flag; when True, the summary form of the
        'per_poss' table is requested instead of the full table.
    """
    return self._get_stats_table('per_poss', kind=kind, summary=summary)
def hincrby(self, key, field, increment=1):
"""Increment the integer value of a hash field by the given number."""
return self.execute(b'HINCRBY', key, field, increment) | Increment the integer value of a hash field by the given number. | Below is the instruction that describes the task:
### Input:
Increment the integer value of a hash field by the given number.
### Response:
def hincrby(self, key, field, increment=1):
    """Increment the integer value of a hash field by the given number.

    Issues the Redis ``HINCRBY`` command via ``self.execute``; per the Redis
    protocol the reply is the new value of the field after the increment.
    """
    return self.execute(b'HINCRBY', key, field, increment)
def createGWB(psr, Amp, gam, noCorr=False, seed=None, turnover=False,
clm=[N.sqrt(4.0*N.pi)], lmax=0, f0=1e-9, beta=1,
power=1, userSpec=None, npts=600, howml=10):
"""
Function to create GW-induced residuals from a stochastic GWB as defined
in Chamberlin, Creighton, Demorest, et al. (2014).
:param psr: pulsar object for single pulsar
:param Amp: Amplitude of red noise in GW units
:param gam: Red noise power law spectral index
:param noCorr: Add red noise with no spatial correlations
:param seed: Random number seed
:param turnover: Produce spectrum with turnover at frequency f0
:param clm: coefficients of spherical harmonic decomposition of GW power
:param lmax: maximum multipole of GW power decomposition
:param f0: Frequency of spectrum turnover
:param beta: Spectral index of power spectram for f << f0
:param power: Fudge factor for flatness of spectrum turnover
:param userSpec: User-supplied characteristic strain spectrum
(first column is freqs, second is spectrum)
:param npts: Number of points used in interpolation
:param howml: Lowest frequency is 1/(howml * T)
:returns: list of residuals for each pulsar
"""
if seed is not None:
N.random.seed(seed)
# number of pulsars
Npulsars = len(psr)
# gw start and end times for entire data set
start = N.min([p.toas().min()*86400 for p in psr]) - 86400
stop = N.max([p.toas().max()*86400 for p in psr]) + 86400
# duration of the signal
dur = stop - start
# get maximum number of points
if npts is None:
# default to cadence of 2 weeks
npts = dur/(86400*14)
# make a vector of evenly sampled data points
ut = N.linspace(start, stop, npts)
# time resolution in days
dt = dur/npts
# compute the overlap reduction function
if noCorr:
ORF = N.diag(N.ones(Npulsars)*2)
else:
psrlocs = N.zeros((Npulsars,2))
for ii in range(Npulsars):
if 'RAJ' and 'DECJ' in psr[ii].pars():
psrlocs[ii] = N.double(psr[ii]['RAJ'].val), N.double(psr[ii]['DECJ'].val)
elif 'ELONG' and 'ELAT' in psr[ii].pars():
fac = 180./N.pi
# check for B name
if 'B' in psr[ii].name:
epoch = '1950'
else:
epoch = '2000'
coords = ephem.Equatorial(ephem.Ecliptic(str(psr[ii]['ELONG'].val*fac),
str(psr[ii]['ELAT'].val*fac)),
epoch=epoch)
psrlocs[ii] = float(repr(coords.ra)), float(repr(coords.dec))
psrlocs[:,1] = N.pi/2. - psrlocs[:,1]
anisbasis = N.array(anis.CorrBasis(psrlocs,lmax))
ORF = sum(clm[kk]*anisbasis[kk] for kk in range(len(anisbasis)))
ORF *= 2.0
# Define frequencies spanning from DC to Nyquist.
# This is a vector spanning these frequencies in increments of 1/(dur*howml).
f = N.arange(0, 1/(2*dt), 1/(dur*howml))
f[0] = f[1] # avoid divide by 0 warning
Nf = len(f)
# Use Cholesky transform to take 'square root' of ORF
M = N.linalg.cholesky(ORF)
# Create random frequency series from zero mean, unit variance, Gaussian distributions
w = N.zeros((Npulsars, Nf), complex)
for ll in range(Npulsars):
w[ll,:] = N.random.randn(Nf) + 1j*N.random.randn(Nf)
# strain amplitude
if userSpec is None:
f1yr = 1/3.16e7
alpha = -0.5 * (gam-3)
hcf = Amp * (f/f1yr)**(alpha)
if turnover:
si = alpha - beta
hcf /= (1+(f/f0)**(power*si))**(1/power)
elif userSpec is not None:
freqs = userSpec[:,0]
if len(userSpec[:,0]) != len(freqs):
raise ValueError("Number of supplied spectral points does not match number of frequencies!")
else:
fspec_in = interp.interp1d(N.log10(freqs), N.log10(userSpec[:,1]), kind='linear')
fspec_ex = extrap1d(fspec_in)
hcf = 10.0**fspec_ex(N.log10(f))
C = 1 / 96 / N.pi**2 * hcf**2 / f**3 * dur * howml
### injection residuals in the frequency domain
Res_f = N.dot(M, w)
for ll in range(Npulsars):
Res_f[ll] = Res_f[ll] * C**(0.5) # rescale by frequency dependent factor
Res_f[ll,0] = 0 # set DC bin to zero to avoid infinities
Res_f[ll,-1] = 0 # set Nyquist bin to zero also
# Now fill in bins after Nyquist (for fft data packing) and take inverse FT
Res_f2 = N.zeros((Npulsars, 2*Nf-2), complex)
Res_t = N.zeros((Npulsars, 2*Nf-2))
Res_f2[:,0:Nf] = Res_f[:,0:Nf]
Res_f2[:, Nf:(2*Nf-2)] = N.conj(Res_f[:,(Nf-2):0:-1])
Res_t = N.real(N.fft.ifft(Res_f2)/dt)
# shorten data and interpolate onto TOAs
Res = N.zeros((Npulsars, npts))
res_gw = []
for ll in range(Npulsars):
Res[ll,:] = Res_t[ll, 10:(npts+10)]
f = interp.interp1d(ut, Res[ll,:], kind='linear')
res_gw.append(f(psr[ll].toas()*86400))
#return res_gw
ct = 0
for p in psr:
p.stoas[:] += res_gw[ct]/86400.0
ct += 1 | Function to create GW-induced residuals from a stochastic GWB as defined
in Chamberlin, Creighton, Demorest, et al. (2014).
:param psr: pulsar object for single pulsar
:param Amp: Amplitude of red noise in GW units
:param gam: Red noise power law spectral index
:param noCorr: Add red noise with no spatial correlations
:param seed: Random number seed
:param turnover: Produce spectrum with turnover at frequency f0
:param clm: coefficients of spherical harmonic decomposition of GW power
:param lmax: maximum multipole of GW power decomposition
:param f0: Frequency of spectrum turnover
:param beta: Spectral index of power spectram for f << f0
:param power: Fudge factor for flatness of spectrum turnover
:param userSpec: User-supplied characteristic strain spectrum
(first column is freqs, second is spectrum)
:param npts: Number of points used in interpolation
:param howml: Lowest frequency is 1/(howml * T)
:returns: list of residuals for each pulsar | Below is the instruction that describes the task:
### Input:
Function to create GW-induced residuals from a stochastic GWB as defined
in Chamberlin, Creighton, Demorest, et al. (2014).
:param psr: pulsar object for single pulsar
:param Amp: Amplitude of red noise in GW units
:param gam: Red noise power law spectral index
:param noCorr: Add red noise with no spatial correlations
:param seed: Random number seed
:param turnover: Produce spectrum with turnover at frequency f0
:param clm: coefficients of spherical harmonic decomposition of GW power
:param lmax: maximum multipole of GW power decomposition
:param f0: Frequency of spectrum turnover
:param beta: Spectral index of power spectram for f << f0
:param power: Fudge factor for flatness of spectrum turnover
:param userSpec: User-supplied characteristic strain spectrum
(first column is freqs, second is spectrum)
:param npts: Number of points used in interpolation
:param howml: Lowest frequency is 1/(howml * T)
:returns: list of residuals for each pulsar
### Response:
def createGWB(psr, Amp, gam, noCorr=False, seed=None, turnover=False,
              clm=[N.sqrt(4.0*N.pi)], lmax=0, f0=1e-9, beta=1,
              power=1, userSpec=None, npts=600, howml=10):
    """
    Function to create GW-induced residuals from a stochastic GWB as defined
    in Chamberlin, Creighton, Demorest, et al. (2014).
    :param psr: pulsar object for single pulsar
    :param Amp: Amplitude of red noise in GW units
    :param gam: Red noise power law spectral index
    :param noCorr: Add red noise with no spatial correlations
    :param seed: Random number seed
    :param turnover: Produce spectrum with turnover at frequency f0
    :param clm: coefficients of spherical harmonic decomposition of GW power
    :param lmax: maximum multipole of GW power decomposition
    :param f0: Frequency of spectrum turnover
    :param beta: Spectral index of power spectram for f << f0
    :param power: Fudge factor for flatness of spectrum turnover
    :param userSpec: User-supplied characteristic strain spectrum
                     (first column is freqs, second is spectrum)
    :param npts: Number of points used in interpolation
    :param howml: Lowest frequency is 1/(howml * T)
    :returns: list of residuals for each pulsar
    """
    # NOTE(review): `clm` uses a mutable (list) default argument -- shared
    # across calls if ever mutated; it appears to be read-only here, confirm.
    if seed is not None:
        N.random.seed(seed)
    # number of pulsars
    Npulsars = len(psr)
    # gw start and end times for entire data set
    start = N.min([p.toas().min()*86400 for p in psr]) - 86400
    stop = N.max([p.toas().max()*86400 for p in psr]) + 86400
    # duration of the signal
    dur = stop - start
    # get maximum number of points
    if npts is None:
        # default to cadence of 2 weeks
        npts = dur/(86400*14)
    # make a vector of evenly sampled data points
    ut = N.linspace(start, stop, npts)
    # time resolution in days
    dt = dur/npts
    # compute the overlap reduction function
    if noCorr:
        ORF = N.diag(N.ones(Npulsars)*2)
    else:
        psrlocs = N.zeros((Npulsars,2))
        for ii in range(Npulsars):
            # NOTE(review): `'RAJ' and 'DECJ' in ...` only tests 'DECJ'
            # membership ('RAJ' is just a truthy literal); likely meant
            # `'RAJ' in pars and 'DECJ' in pars` -- confirm. Same below.
            if 'RAJ' and 'DECJ' in psr[ii].pars():
                psrlocs[ii] = N.double(psr[ii]['RAJ'].val), N.double(psr[ii]['DECJ'].val)
            elif 'ELONG' and 'ELAT' in psr[ii].pars():
                fac = 180./N.pi
                # check for B name
                if 'B' in psr[ii].name:
                    epoch = '1950'
                else:
                    epoch = '2000'
                coords = ephem.Equatorial(ephem.Ecliptic(str(psr[ii]['ELONG'].val*fac),
                                                         str(psr[ii]['ELAT'].val*fac)),
                                          epoch=epoch)
                psrlocs[ii] = float(repr(coords.ra)), float(repr(coords.dec))
        # Convert declination to polar angle (colatitude).
        psrlocs[:,1] = N.pi/2. - psrlocs[:,1]
        anisbasis = N.array(anis.CorrBasis(psrlocs,lmax))
        ORF = sum(clm[kk]*anisbasis[kk] for kk in range(len(anisbasis)))
        ORF *= 2.0
    # Define frequencies spanning from DC to Nyquist.
    # This is a vector spanning these frequencies in increments of 1/(dur*howml).
    f = N.arange(0, 1/(2*dt), 1/(dur*howml))
    f[0] = f[1] # avoid divide by 0 warning
    Nf = len(f)
    # Use Cholesky transform to take 'square root' of ORF
    M = N.linalg.cholesky(ORF)
    # Create random frequency series from zero mean, unit variance, Gaussian distributions
    w = N.zeros((Npulsars, Nf), complex)
    for ll in range(Npulsars):
        w[ll,:] = N.random.randn(Nf) + 1j*N.random.randn(Nf)
    # strain amplitude
    if userSpec is None:
        f1yr = 1/3.16e7
        alpha = -0.5 * (gam-3)
        hcf = Amp * (f/f1yr)**(alpha)
        if turnover:
            si = alpha - beta
            hcf /= (1+(f/f0)**(power*si))**(1/power)
    elif userSpec is not None:
        freqs = userSpec[:,0]
        # NOTE(review): this check is vacuous -- `freqs` IS userSpec[:,0],
        # so the lengths always match; the intended comparison was probably
        # against the spectrum column or against `f`. Confirm.
        if len(userSpec[:,0]) != len(freqs):
            raise ValueError("Number of supplied spectral points does not match number of frequencies!")
        else:
            fspec_in = interp.interp1d(N.log10(freqs), N.log10(userSpec[:,1]), kind='linear')
            fspec_ex = extrap1d(fspec_in)
            hcf = 10.0**fspec_ex(N.log10(f))
    C = 1 / 96 / N.pi**2 * hcf**2 / f**3 * dur * howml
    ### injection residuals in the frequency domain
    Res_f = N.dot(M, w)
    for ll in range(Npulsars):
        Res_f[ll] = Res_f[ll] * C**(0.5) # rescale by frequency dependent factor
        Res_f[ll,0] = 0 # set DC bin to zero to avoid infinities
        Res_f[ll,-1] = 0 # set Nyquist bin to zero also
    # Now fill in bins after Nyquist (for fft data packing) and take inverse FT
    Res_f2 = N.zeros((Npulsars, 2*Nf-2), complex)
    Res_t = N.zeros((Npulsars, 2*Nf-2))
    Res_f2[:,0:Nf] = Res_f[:,0:Nf]
    Res_f2[:, Nf:(2*Nf-2)] = N.conj(Res_f[:,(Nf-2):0:-1])
    Res_t = N.real(N.fft.ifft(Res_f2)/dt)
    # shorten data and interpolate onto TOAs
    Res = N.zeros((Npulsars, npts))
    res_gw = []
    for ll in range(Npulsars):
        Res[ll,:] = Res_t[ll, 10:(npts+10)]
        # NOTE(review): `f` (the frequency array) is shadowed here by the
        # interpolator; harmless at this point but confusing -- rename?
        f = interp.interp1d(ut, Res[ll,:], kind='linear')
        res_gw.append(f(psr[ll].toas()*86400))
    #return res_gw
    # Inject the residuals directly into each pulsar's site TOAs (days).
    ct = 0
    for p in psr:
        p.stoas[:] += res_gw[ct]/86400.0
        ct += 1
def memory():
"""Determine the machine's memory specifications.
Returns
-------
mem_info : dictonary
Holds the current values for the total, free and used memory of the system.
"""
mem_info = {}
if platform.linux_distribution()[0]:
with open('/proc/meminfo') as file:
c = 0
for line in file:
lst = line.split()
if str(lst[0]) == 'MemTotal:':
mem_info['total'] = int(lst[1])
elif str(lst[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
c += int(lst[1])
mem_info['free'] = c
mem_info['used'] = (mem_info['total']) - c
elif platform.mac_ver()[0]:
ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0]
vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]
# Iterate processes
process_lines = ps.split('\n')
sep = re.compile('[\s]+')
rss_total = 0 # kB
for row in range(1, len(process_lines)):
row_text = process_lines[row].strip()
row_elements = sep.split(row_text)
try:
rss = float(row_elements[0]) * 1024
except:
rss = 0 # ignore...
rss_total += rss
# Process vm_stat
vm_lines = vm.split('\n')
sep = re.compile(':[\s]+')
vm_stats = {}
for row in range(1, len(vm_lines) - 2):
row_text = vm_lines[row].strip()
row_elements = sep.split(row_text)
vm_stats[(row_elements[0])] = int(row_elements[1].strip('\.')) * 4096
mem_info['total'] = rss_total
mem_info['used'] = vm_stats["Pages active"]
mem_info['free'] = vm_stats["Pages free"]
else:
raise('Unsupported Operating System.\n')
exit(1)
return mem_info | Determine the machine's memory specifications.
Returns
-------
mem_info : dictonary
Holds the current values for the total, free and used memory of the system. | Below is the instruction that describes the task:
### Input:
Determine the machine's memory specifications.
Returns
-------
mem_info : dictonary
Holds the current values for the total, free and used memory of the system.
### Response:
def memory():
    """Determine the machine's memory specifications.

    Returns
    -------
    mem_info : dictionary
        Holds the current values for the total, free and used memory of the
        system (kB on Linux; bytes on macOS -- see the branches below).

    Raises
    ------
    OSError
        If the operating system is neither Linux nor macOS.
    """
    mem_info = {}
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8;
    # on modern interpreters this line raises AttributeError -- confirm the
    # supported Python versions or switch to platform.system() == 'Linux'.
    if platform.linux_distribution()[0]:
        # Linux: parse /proc/meminfo (all values are in kB).
        with open('/proc/meminfo') as file:
            c = 0
            for line in file:
                lst = line.split()
                if str(lst[0]) == 'MemTotal:':
                    mem_info['total'] = int(lst[1])
                elif str(lst[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
                    # Buffers/cache are counted as reclaimable, hence "free".
                    c += int(lst[1])
            mem_info['free'] = c
            mem_info['used'] = (mem_info['total']) - c
    elif platform.mac_ver()[0]:
        # macOS: sum resident set sizes from `ps` and read page counters from
        # `vm_stat`. NOTE(review): on Python 3 these pipes yield bytes, so
        # .split('\n') would fail -- this branch assumes Python 2 text pipes.
        ps = subprocess.Popen(['ps', '-caxm', '-orss,comm'], stdout=subprocess.PIPE).communicate()[0]
        vm = subprocess.Popen(['vm_stat'], stdout=subprocess.PIPE).communicate()[0]
        # Iterate processes (row 0 is the header).
        process_lines = ps.split('\n')
        sep = re.compile(r'[\s]+')
        rss_total = 0  # kB
        for row in range(1, len(process_lines)):
            row_text = process_lines[row].strip()
            row_elements = sep.split(row_text)
            try:
                rss = float(row_elements[0]) * 1024
            except (ValueError, IndexError):
                # Non-numeric or empty RSS column (blank/header row): skip it.
                # (Was a bare `except:` that silently swallowed everything.)
                rss = 0
            rss_total += rss
        # Process vm_stat: "Pages xxx:  <count>." lines, 4096 bytes per page.
        vm_lines = vm.split('\n')
        sep = re.compile(r':[\s]+')
        vm_stats = {}
        for row in range(1, len(vm_lines) - 2):
            row_text = vm_lines[row].strip()
            row_elements = sep.split(row_text)
            vm_stats[(row_elements[0])] = int(row_elements[1].strip(r'\.')) * 4096
        mem_info['total'] = rss_total
        mem_info['used'] = vm_stats["Pages active"]
        mem_info['free'] = vm_stats["Pages free"]
    else:
        # Original code raised a *string* (itself a TypeError at runtime)
        # followed by an unreachable exit(1); raise a real exception instead.
        raise OSError('Unsupported Operating System.\n')
    return mem_info
def get_cluster_assignment(self):
"""Fetch the cluster layout in form of assignment from zookeeper"""
plan = self.get_cluster_plan()
assignment = {}
for elem in plan['partitions']:
assignment[
(elem['topic'], elem['partition'])
] = elem['replicas']
return assignment | Fetch the cluster layout in form of assignment from zookeeper | Below is the instruction that describes the task:
### Input:
Fetch the cluster layout in form of assignment from zookeeper
### Response:
def get_cluster_assignment(self):
    """Fetch the cluster layout in form of assignment from zookeeper"""
    plan = self.get_cluster_plan()
    # Map (topic, partition) -> replica list, one entry per partition.
    return {
        (entry['topic'], entry['partition']): entry['replicas']
        for entry in plan['partitions']
    }
def orthologs_iterator(fo, version):
"""Ortholog node and edge iterator"""
species_list = config["bel_resources"].get("species_list", [])
fo.seek(0)
with gzip.open(fo, "rt") as f:
for line in f:
edge = json.loads(line)
if "metadata" in edge:
source = edge["metadata"]["source"]
continue
if "ortholog" in edge:
edge = edge["ortholog"]
subj_tax_id = edge["subject"]["tax_id"]
obj_tax_id = edge["object"]["tax_id"]
# Skip if species not listed in species_list
if species_list and subj_tax_id and subj_tax_id not in species_list:
continue
if species_list and obj_tax_id and obj_tax_id not in species_list:
continue
# Converted to ArangoDB legal chars for _key
subj_key = arangodb.arango_id_to_key(edge["subject"]["id"])
subj_id = edge["subject"]["id"]
# Converted to ArangoDB legal chars for _key
obj_key = arangodb.arango_id_to_key(edge["object"]["id"])
obj_id = edge["object"]["id"]
# Subject node
yield (
arangodb.ortholog_nodes_name,
{
"_key": subj_key,
"name": subj_id,
"tax_id": edge["subject"]["tax_id"],
"source": source,
"version": version,
},
)
# Object node
yield (
arangodb.ortholog_nodes_name,
{
"_key": obj_key,
"name": obj_id,
"tax_id": edge["object"]["tax_id"],
"source": source,
"version": version,
},
)
arango_edge = {
"_from": f"{arangodb.ortholog_nodes_name}/{subj_key}",
"_to": f"{arangodb.ortholog_nodes_name}/{obj_key}",
"_key": bel.utils._create_hash(f"{subj_id}>>{obj_id}"),
"type": "ortholog_to",
"source": source,
"version": version,
}
yield (arangodb.ortholog_edges_name, arango_edge) | Ortholog node and edge iterator | Below is the instruction that describes the task:
### Input:
Ortholog node and edge iterator
### Response:
def orthologs_iterator(fo, version):
    """Ortholog node and edge iterator

    Streams (collection_name, document) tuples for ArangoDB: two ortholog
    node documents followed by one ortholog_to edge document per record.

    :param fo: open (binary) file object containing gzipped JSON-lines data;
        rewound and wrapped in a text-mode gzip reader here
    :param version: version string stamped onto every emitted document
    """
    species_list = config["bel_resources"].get("species_list", [])
    fo.seek(0)
    with gzip.open(fo, "rt") as f:
        for line in f:
            edge = json.loads(line)
            if "metadata" in edge:
                source = edge["metadata"]["source"]
                continue
            # NOTE(review): `source` is only bound by a preceding metadata
            # line -- a file whose first record is an ortholog would raise
            # NameError below; confirm the input format guarantees ordering.
            if "ortholog" in edge:
                edge = edge["ortholog"]
                subj_tax_id = edge["subject"]["tax_id"]
                obj_tax_id = edge["object"]["tax_id"]
                # Skip if species not listed in species_list
                if species_list and subj_tax_id and subj_tax_id not in species_list:
                    continue
                if species_list and obj_tax_id and obj_tax_id not in species_list:
                    continue
                # Converted to ArangoDB legal chars for _key
                subj_key = arangodb.arango_id_to_key(edge["subject"]["id"])
                subj_id = edge["subject"]["id"]
                # Converted to ArangoDB legal chars for _key
                obj_key = arangodb.arango_id_to_key(edge["object"]["id"])
                obj_id = edge["object"]["id"]
                # Subject node
                yield (
                    arangodb.ortholog_nodes_name,
                    {
                        "_key": subj_key,
                        "name": subj_id,
                        "tax_id": edge["subject"]["tax_id"],
                        "source": source,
                        "version": version,
                    },
                )
                # Object node
                yield (
                    arangodb.ortholog_nodes_name,
                    {
                        "_key": obj_key,
                        "name": obj_id,
                        "tax_id": edge["object"]["tax_id"],
                        "source": source,
                        "version": version,
                    },
                )
                # Directed edge subject -> object, keyed by a hash of the pair.
                arango_edge = {
                    "_from": f"{arangodb.ortholog_nodes_name}/{subj_key}",
                    "_to": f"{arangodb.ortholog_nodes_name}/{obj_key}",
                    "_key": bel.utils._create_hash(f"{subj_id}>>{obj_id}"),
                    "type": "ortholog_to",
                    "source": source,
                    "version": version,
                }
                yield (arangodb.ortholog_edges_name, arango_edge)
def extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=False):
"""
Take fastqfile and array of pair ID, extract adjacent pairs to outfile.
Perform check on numbers when done. p1fw, p2fw is a list of file handles,
each for one end. p is a Pairs instance.
"""
fp = open(fastqfile)
currentID = 0
npairs = nfrags = 0
for x, lib in izip(p.r1, p.libs):
while currentID != x:
fragsfw.writelines(islice(fp, 4)) # Exhaust the iterator
currentID += 1
nfrags += 1
a = list(islice(fp, 4))
b = list(islice(fp, 4))
if suffix:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
else:
b[0] = a[0] # Keep same read ID for pairs
p1fw[lib].writelines(a)
p2fw[lib].writelines(b)
currentID += 2
npairs += 2
# Write the remaining single reads
while True:
contents = list(islice(fp, 4))
if not contents:
break
fragsfw.writelines(contents)
nfrags += 1
logging.debug("A total of {0} paired reads written to `{1}`.".\
format(npairs, ",".join(x.name for x in p1fw + p2fw)))
logging.debug("A total of {0} single reads written to `{1}`.".\
format(nfrags, fragsfw.name))
# Validate the numbers
expected_pairs = 2 * p.npairs
expected_frags = p.nreads - 2 * p.npairs
assert npairs == expected_pairs, "Expect {0} paired reads, got {1} instead".\
format(expected_pairs, npairs)
assert nfrags == expected_frags, "Expect {0} single reads, got {1} instead".\
format(expected_frags, nfrags) | Take fastqfile and array of pair ID, extract adjacent pairs to outfile.
Perform check on numbers when done. p1fw, p2fw is a list of file handles,
each for one end. p is a Pairs instance. | Below is the instruction that describes the task:
### Input:
Take fastqfile and array of pair ID, extract adjacent pairs to outfile.
Perform check on numbers when done. p1fw, p2fw is a list of file handles,
each for one end. p is a Pairs instance.
### Response:
def extract_pairs(fastqfile, p1fw, p2fw, fragsfw, p, suffix=False):
    """
    Take fastqfile and array of pair ID, extract adjacent pairs to outfile.
    Perform check on numbers when done. p1fw, p2fw is a list of file handles,
    each for one end. p is a Pairs instance.

    Reads the FASTQ stream sequentially (4 lines per record): records whose
    index appears in p.r1 are paired with the immediately following record;
    everything else goes to fragsfw as single-end fragments.  If `suffix` is
    True, /1 and /2 are appended to the pair's read names; otherwise both
    mates keep the first mate's name line.
    """
    # NOTE(review): `fp` is never closed -- relies on interpreter cleanup.
    # (izip implies this is Python 2 code.)
    fp = open(fastqfile)
    currentID = 0
    npairs = nfrags = 0
    for x, lib in izip(p.r1, p.libs):
        # Everything before the next pair's first read is a single fragment.
        while currentID != x:
            fragsfw.writelines(islice(fp, 4)) # Exhaust the iterator
            currentID += 1
            nfrags += 1
        # The pair occupies two adjacent 4-line records.
        a = list(islice(fp, 4))
        b = list(islice(fp, 4))
        if suffix:
            name = a[0].rstrip()
            a[0] = name + "/1\n"
            b[0] = name + "/2\n"
        else:
            b[0] = a[0] # Keep same read ID for pairs
        p1fw[lib].writelines(a)
        p2fw[lib].writelines(b)
        currentID += 2
        npairs += 2
    # Write the remaining single reads
    while True:
        contents = list(islice(fp, 4))
        if not contents:
            break
        fragsfw.writelines(contents)
        nfrags += 1
    logging.debug("A total of {0} paired reads written to `{1}`.".\
                  format(npairs, ",".join(x.name for x in p1fw + p2fw)))
    logging.debug("A total of {0} single reads written to `{1}`.".\
                  format(nfrags, fragsfw.name))
    # Validate the numbers.  NOTE(review): asserts vanish under `python -O`;
    # if these checks matter in production, raise explicitly instead.
    expected_pairs = 2 * p.npairs
    expected_frags = p.nreads - 2 * p.npairs
    assert npairs == expected_pairs, "Expect {0} paired reads, got {1} instead".\
                  format(expected_pairs, npairs)
    assert nfrags == expected_frags, "Expect {0} single reads, got {1} instead".\
                  format(expected_frags, nfrags)
def join_images(img_files, out_file):
"""Join the list of images into the out file"""
images = [PIL.Image.open(f) for f in img_files]
joined = PIL.Image.new(
'RGB',
(sum(i.size[0] for i in images), max(i.size[1] for i in images))
)
left = 0
for img in images:
joined.paste(im=img, box=(left, 0))
left = left + img.size[0]
joined.save(out_file) | Join the list of images into the out file | Below is the instruction that describes the task:
### Input:
Join the list of images into the out file
### Response:
def join_images(img_files, out_file):
    """Join the list of images into the out file"""
    # Open every input, then paste them left-to-right onto one canvas
    # sized to the total width and the tallest image's height.
    opened = [PIL.Image.open(path) for path in img_files]
    total_width = sum(image.size[0] for image in opened)
    max_height = max(image.size[1] for image in opened)
    canvas = PIL.Image.new('RGB', (total_width, max_height))
    offset = 0
    for image in opened:
        canvas.paste(im=image, box=(offset, 0))
        offset += image.size[0]
    canvas.save(out_file)
def pass_allowedremoterelieve_v1(self):
"""Update the outlet link sequence |dam_outlets.R|."""
flu = self.sequences.fluxes.fastaccess
sen = self.sequences.senders.fastaccess
sen.r[0] += flu.allowedremoterelieve | Update the outlet link sequence |dam_outlets.R|. | Below is the instruction that describes the task:
### Input:
Update the outlet link sequence |dam_outlets.R|.
### Response:
def pass_allowedremoterelieve_v1(self):
    """Update the outlet link sequence |dam_outlets.R|.

    Adds the current `allowedremoterelieve` flux onto the first (and only)
    entry of the sender sequence R.
    """
    flu = self.sequences.fluxes.fastaccess
    sen = self.sequences.senders.fastaccess
    sen.r[0] += flu.allowedremoterelieve
def __add_handler_factory(self, svc_ref):
# type: (ServiceReference) -> None
"""
Stores a new handler factory
:param svc_ref: ServiceReference of the new handler factory
"""
with self.__handlers_lock:
# Get the handler ID
handler_id = svc_ref.get_property(handlers_const.PROP_HANDLER_ID)
if handler_id in self._handlers:
# Duplicated ID
_logger.warning("Already registered handler ID: %s", handler_id)
else:
# Store the service
self._handlers_refs.add(svc_ref)
self._handlers[handler_id] = self.__context.get_service(svc_ref)
# Try to instantiate waiting components
succeeded = set()
for (
name,
(context, instance),
) in self.__waiting_handlers.items():
if self.__try_instantiate(context, instance):
succeeded.add(name)
# Remove instantiated component from the waiting list
for name in succeeded:
del self.__waiting_handlers[name] | Stores a new handler factory
:param svc_ref: ServiceReference of the new handler factory | Below is the instruction that describes the task:
### Input:
Stores a new handler factory
:param svc_ref: ServiceReference of the new handler factory
### Response:
def __add_handler_factory(self, svc_ref):
    # type: (ServiceReference) -> None
    """
    Stores a new handler factory

    Registers the factory under its handler ID and immediately retries
    instantiation of any components that were waiting for a handler.

    :param svc_ref: ServiceReference of the new handler factory
    """
    with self.__handlers_lock:
        # Get the handler ID
        handler_id = svc_ref.get_property(handlers_const.PROP_HANDLER_ID)
        if handler_id in self._handlers:
            # Duplicated ID: keep the first registration, only log the clash.
            _logger.warning("Already registered handler ID: %s", handler_id)
        else:
            # Store the service (keep the reference so it can be released)
            self._handlers_refs.add(svc_ref)
            self._handlers[handler_id] = self.__context.get_service(svc_ref)
            # Try to instantiate waiting components
            succeeded = set()
            for (
                name,
                (context, instance),
            ) in self.__waiting_handlers.items():
                if self.__try_instantiate(context, instance):
                    succeeded.add(name)
            # Deletions are deferred to this second pass so the dict is not
            # mutated while it is being iterated above.
            for name in succeeded:
                del self.__waiting_handlers[name]
def compile_with_instrumentation(self,
container: Container,
verbose: bool = False
) -> CompilationOutcome:
"""
Attempts to compile the program inside a given container with
instrumentation enabled.
See: `Container.compile`
"""
bug = self.__installation.bugs[container.bug]
bug.compiler.clean(self, container, verbose=verbose) # TODO port
return bug.compiler.compile_with_coverage_instrumentation(self,
container,
verbose=verbose) | Attempts to compile the program inside a given container with
instrumentation enabled.
See: `Container.compile` | Below is the instruction that describes the task:
### Input:
Attempts to compile the program inside a given container with
instrumentation enabled.
See: `Container.compile`
### Response:
def compile_with_instrumentation(self,
container: Container,
verbose: bool = False
) -> CompilationOutcome:
"""
Attempts to compile the program inside a given container with
instrumentation enabled.
See: `Container.compile`
"""
bug = self.__installation.bugs[container.bug]
bug.compiler.clean(self, container, verbose=verbose) # TODO port
return bug.compiler.compile_with_coverage_instrumentation(self,
container,
verbose=verbose) |
def transform(self, transform, node):
    """
    Transforms a node following the transform specification

    :param transform: Transform node
    :type transform: lxml.etree.Element
    :param node: Element to transform
    :type node: str
    :return: Transformed node in a String
    :raises Exception: if the algorithm is not allowed for signature
        transforms, or is not one of the supported methods
    """
    method = transform.get('Algorithm')
    if method not in constants.TransformUsageDSigTransform:
        raise Exception('Method not allowed')
    # C14N methods are allowed: canonicalize the parsed node directly
    if method in constants.TransformUsageC14NMethod:
        return self.canonicalization(method, etree.fromstring(node))
    # Enveloped method removes the Signature Node from the element
    if method == constants.TransformEnveloped:
        tree = transform.getroottree()
        root = etree.fromstring(node)
        # Locate the Signature element (the great-great-grandparent of
        # this Transform node) in the freshly parsed copy by its path
        signature = root.find(
            tree.getelementpath(
                transform.getparent().getparent().getparent().getparent()
            )
        )
        root.remove(signature)
        return self.canonicalization(
            constants.TransformInclC14N, root)
    if method == constants.TransformBase64:
        # The input may be an XML element wrapping the base64 text, or
        # the raw base64 string itself; try the XML form first
        try:
            root = etree.fromstring(node)
            return base64.b64decode(root.text)
        except Exception:
            return base64.b64decode(node)
    raise Exception('Method not found')
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String | Below is the the instruction that describes the task:
### Input:
Transforms a node following the transform especification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
### Response:
def transform(self, transform, node):
"""
Transforms a node following the transform especification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found') |
def adjust(
    self,
    aba_parameters,
    boundary_indices,
    real_wave_mfcc,
    text_file,
    allow_arbitrary_shift=False
):
    """
    Adjust the boundaries of the text map
    using the algorithm and parameters
    specified in the constructor,
    storing the sync map fragment list internally.

    :param dict aba_parameters: a dictionary containing the algorithm and its parameters,
        as produced by ``aba_parameters()`` in ``TaskConfiguration``
    :param boundary_indices: the current boundary indices,
        with respect to the audio file full MFCCs
    :type boundary_indices: :class:`numpy.ndarray` (1D)
    :param real_wave_mfcc: the audio file MFCCs
    :type real_wave_mfcc: :class:`~aeneas.audiofilemfcc.AudioFileMFCC`
    :param text_file: the text file containing the text fragments associated
    :type text_file: :class:`~aeneas.textfile.TextFile`
    :param bool allow_arbitrary_shift: if ``True``, allow arbitrary shifts when adjusting zero length
    :rtype: list of :class:`~aeneas.syncmap.SyncMapFragmentList`
    """
    self.log(u"Called adjust")
    # Validate inputs before doing any work
    if boundary_indices is None:
        self.log_exc(u"boundary_indices is None", None, True, TypeError)
    if not isinstance(real_wave_mfcc, AudioFileMFCC):
        self.log_exc(u"real_wave_mfcc is not an AudioFileMFCC object", None, True, TypeError)
    if not isinstance(text_file, TextFile):
        self.log_exc(u"text_file is not a TextFile object", None, True, TypeError)
    nozero = aba_parameters["nozero"]
    ns_min, ns_string = aba_parameters["nonspeech"]
    algorithm, algo_parameters = aba_parameters["algorithm"]
    self.log(u" Converting boundary indices to fragment list...")
    # Convert boundary indices to time values via the mws factor,
    # bracketed by the begin/end of the audio's middle section
    begin = real_wave_mfcc.middle_begin * real_wave_mfcc.rconf.mws
    end = real_wave_mfcc.middle_end * real_wave_mfcc.rconf.mws
    time_values = [begin] + list(boundary_indices * self.mws) + [end]
    self.intervals_to_fragment_list(
        text_file=text_file,
        time_values=time_values
    )
    self.log(u" Converting boundary indices to fragment list... done")
    self.log(u" Processing fragments with zero length...")
    self._process_zero_length(nozero, allow_arbitrary_shift)
    self.log(u" Processing fragments with zero length... done")
    self.log(u" Processing nonspeech fragments...")
    self._process_long_nonspeech(ns_min, ns_string, real_wave_mfcc)
    self.log(u" Processing nonspeech fragments... done")
    self.log(u" Adjusting...")
    # Dispatch table: algorithm constant -> implementation
    ALGORITHM_MAP = {
        self.AFTERCURRENT: self._adjust_aftercurrent,
        self.AUTO: self._adjust_auto,
        self.BEFORENEXT: self._adjust_beforenext,
        self.OFFSET: self._adjust_offset,
        self.PERCENT: self._adjust_percent,
        self.RATE: self._adjust_rate,
        self.RATEAGGRESSIVE: self._adjust_rate_aggressive,
    }
    ALGORITHM_MAP[algorithm](real_wave_mfcc, algo_parameters)
    self.log(u" Adjusting... done")
    self.log(u" Smoothing...")
    self._smooth_fragment_list(real_wave_mfcc.audio_length, ns_string)
    self.log(u" Smoothing... done")
    return self.smflist
using the algorithm and parameters
specified in the constructor,
storing the sync map fragment list internally.
:param dict aba_parameters: a dictionary containing the algorithm and its parameters,
as produced by ``aba_parameters()`` in ``TaskConfiguration``
:param boundary_indices: the current boundary indices,
with respect to the audio file full MFCCs
:type boundary_indices: :class:`numpy.ndarray` (1D)
:param real_wave_mfcc: the audio file MFCCs
:type real_wave_mfcc: :class:`~aeneas.audiofilemfcc.AudioFileMFCC`
:param text_file: the text file containing the text fragments associated
:type text_file: :class:`~aeneas.textfile.TextFile`
:param bool allow_arbitrary_shift: if ``True``, allow arbitrary shifts when adjusting zero length
:rtype: list of :class:`~aeneas.syncmap.SyncMapFragmentList` | Below is the the instruction that describes the task:
### Input:
Adjust the boundaries of the text map
using the algorithm and parameters
specified in the constructor,
storing the sync map fragment list internally.
:param dict aba_parameters: a dictionary containing the algorithm and its parameters,
as produced by ``aba_parameters()`` in ``TaskConfiguration``
:param boundary_indices: the current boundary indices,
with respect to the audio file full MFCCs
:type boundary_indices: :class:`numpy.ndarray` (1D)
:param real_wave_mfcc: the audio file MFCCs
:type real_wave_mfcc: :class:`~aeneas.audiofilemfcc.AudioFileMFCC`
:param text_file: the text file containing the text fragments associated
:type text_file: :class:`~aeneas.textfile.TextFile`
:param bool allow_arbitrary_shift: if ``True``, allow arbitrary shifts when adjusting zero length
:rtype: list of :class:`~aeneas.syncmap.SyncMapFragmentList`
### Response:
def adjust(
self,
aba_parameters,
boundary_indices,
real_wave_mfcc,
text_file,
allow_arbitrary_shift=False
):
"""
Adjust the boundaries of the text map
using the algorithm and parameters
specified in the constructor,
storing the sync map fragment list internally.
:param dict aba_parameters: a dictionary containing the algorithm and its parameters,
as produced by ``aba_parameters()`` in ``TaskConfiguration``
:param boundary_indices: the current boundary indices,
with respect to the audio file full MFCCs
:type boundary_indices: :class:`numpy.ndarray` (1D)
:param real_wave_mfcc: the audio file MFCCs
:type real_wave_mfcc: :class:`~aeneas.audiofilemfcc.AudioFileMFCC`
:param text_file: the text file containing the text fragments associated
:type text_file: :class:`~aeneas.textfile.TextFile`
:param bool allow_arbitrary_shift: if ``True``, allow arbitrary shifts when adjusting zero length
:rtype: list of :class:`~aeneas.syncmap.SyncMapFragmentList`
"""
self.log(u"Called adjust")
if boundary_indices is None:
self.log_exc(u"boundary_indices is None", None, True, TypeError)
if not isinstance(real_wave_mfcc, AudioFileMFCC):
self.log_exc(u"real_wave_mfcc is not an AudioFileMFCC object", None, True, TypeError)
if not isinstance(text_file, TextFile):
self.log_exc(u"text_file is not a TextFile object", None, True, TypeError)
nozero = aba_parameters["nozero"]
ns_min, ns_string = aba_parameters["nonspeech"]
algorithm, algo_parameters = aba_parameters["algorithm"]
self.log(u" Converting boundary indices to fragment list...")
begin = real_wave_mfcc.middle_begin * real_wave_mfcc.rconf.mws
end = real_wave_mfcc.middle_end * real_wave_mfcc.rconf.mws
time_values = [begin] + list(boundary_indices * self.mws) + [end]
self.intervals_to_fragment_list(
text_file=text_file,
time_values=time_values
)
self.log(u" Converting boundary indices to fragment list... done")
self.log(u" Processing fragments with zero length...")
self._process_zero_length(nozero, allow_arbitrary_shift)
self.log(u" Processing fragments with zero length... done")
self.log(u" Processing nonspeech fragments...")
self._process_long_nonspeech(ns_min, ns_string, real_wave_mfcc)
self.log(u" Processing nonspeech fragments... done")
self.log(u" Adjusting...")
ALGORITHM_MAP = {
self.AFTERCURRENT: self._adjust_aftercurrent,
self.AUTO: self._adjust_auto,
self.BEFORENEXT: self._adjust_beforenext,
self.OFFSET: self._adjust_offset,
self.PERCENT: self._adjust_percent,
self.RATE: self._adjust_rate,
self.RATEAGGRESSIVE: self._adjust_rate_aggressive,
}
ALGORITHM_MAP[algorithm](real_wave_mfcc, algo_parameters)
self.log(u" Adjusting... done")
self.log(u" Smoothing...")
self._smooth_fragment_list(real_wave_mfcc.audio_length, ns_string)
self.log(u" Smoothing... done")
return self.smflist |
def copy(self, strip=None, deep='ref'):
    """Return a new instance of the same class carrying equal attributes.

    When ``deep`` requests it, attribute values themselves are copied
    rather than shared by reference.
    """
    snapshot = self.to_dict(strip=strip, deep=deep)
    return self.__class__(fromdict=snapshot)
If deep=True, all attributes themselves are also copies | Below is the the instruction that describes the task:
### Input:
Return another instance of the object, with the same attributes
If deep=True, all attributes themselves are also copies
### Response:
def copy(self, strip=None, deep='ref'):
""" Return another instance of the object, with the same attributes
If deep=True, all attributes themselves are also copies
"""
dd = self.to_dict(strip=strip, deep=deep)
return self.__class__(fromdict=dd) |
def generate_packer_filename(provider, region, builder):
    """Build the packer JSON filename for a provider/region/builder combo.

    Args:
        provider (str): Name of Spinnaker provider.
        region (str): Name of provider region to use.
        builder (str): Name of builder process type.

    Returns:
        str: ``"<provider>_<region>_<builder>.json"``.
    """
    return '%s_%s_%s.json' % (provider, region, builder)
Args:
provider (str): Name of Spinnaker provider.
region (str): Name of provider region to use.
builder (str): Name of builder process type.
Returns:
str: Generated filename based on parameters. | Below is the the instruction that describes the task:
### Input:
Generate a filename to be used by packer.
Args:
provider (str): Name of Spinnaker provider.
region (str): Name of provider region to use.
builder (str): Name of builder process type.
Returns:
str: Generated filename based on parameters.
### Response:
def generate_packer_filename(provider, region, builder):
"""Generate a filename to be used by packer.
Args:
provider (str): Name of Spinnaker provider.
region (str): Name of provider region to use.
builder (str): Name of builder process type.
Returns:
str: Generated filename based on parameters.
"""
filename = '{0}_{1}_{2}.json'.format(provider, region, builder)
return filename |
def format(self, soup):
    """format a BeautifulSoup document

    This will transform the block elements content from
    multi-lines text into single line.
    This allow us to avoid having to deal with further text
    rendering once this step has been done.

    NOTE(review): this is Python 2 / BeautifulSoup 3 style code
    (``unicode``, ``findAll``) -- confirm the runtime before porting.
    """
    # Remove all the newline characters before a closing tag.
    for node in soup.findAll(text=True):
        if node.rstrip(" ").endswith("\n"):
            node.replaceWith(node.rstrip(" ").rstrip("\n"))
    # Join the block elements lines into a single long line
    for tag in ['p', 'li']:
        for node in soup.findAll(tag):
            text = unicode(node)
            lines = [x.strip() for x in text.splitlines()]
            text = ' '.join(lines)
            node.replaceWith(BeautifulSoup.BeautifulSoup(text))
    # Re-parse so the replacements above become part of the tree
    soup = BeautifulSoup.BeautifulSoup(unicode(soup))
    # replace all <br/> tag by newline character
    for node in soup.findAll('br'):
        node.replaceWith("\n")
    soup = BeautifulSoup.BeautifulSoup(unicode(soup))
    return soup
This will transform the block elements content from
multi-lines text into single line.
This allow us to avoid having to deal with further text
rendering once this step has been done. | Below is the instruction that describes the task:
### Input:
format a BeautifulSoup document
This will transform the block elements content from
multi-lines text into single line.
This allow us to avoid having to deal with further text
rendering once this step has been done.
### Response:
def format(self, soup):
"""format a BeautifulSoup document
This will transform the block elements content from
multi-lines text into single line.
This allow us to avoid having to deal with further text
rendering once this step has been done.
"""
# Remove all the newline characters before a closing tag.
for node in soup.findAll(text=True):
if node.rstrip(" ").endswith("\n"):
node.replaceWith(node.rstrip(" ").rstrip("\n"))
# Join the block elements lines into a single long line
for tag in ['p', 'li']:
for node in soup.findAll(tag):
text = unicode(node)
lines = [x.strip() for x in text.splitlines()]
text = ' '.join(lines)
node.replaceWith(BeautifulSoup.BeautifulSoup(text))
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
# replace all <br/> tag by newline character
for node in soup.findAll('br'):
node.replaceWith("\n")
soup = BeautifulSoup.BeautifulSoup(unicode(soup))
return soup |
def _init_draw(self):
    """Initializes the drawing of the frames by setting the images to
    random colors.
    This function is called by TimedAnimation.
    """
    if self.original is not None:
        self.original.set_data(np.random.random((10, 10, 3)))
    # NOTE(review): self.processed is updated unconditionally while
    # self.original is guarded against None -- confirm that processed
    # is always set by the time this runs
    self.processed.set_data(np.random.random((10, 10, 3)))
random colors.
This function is called by TimedAnimation. | Below is the the instruction that describes the task:
### Input:
Initializes the drawing of the frames by setting the images to
random colors.
This function is called by TimedAnimation.
### Response:
def _init_draw(self):
"""Initializes the drawing of the frames by setting the images to
random colors.
This function is called by TimedAnimation.
"""
if self.original is not None:
self.original.set_data(np.random.random((10, 10, 3)))
self.processed.set_data(np.random.random((10, 10, 3))) |
def upload(self, thread_uuid, file_path, description=None):
    """Upload a file into the given thread via the LinShare REST API.

    The uuid of the uploaded document will be returned.
    """
    endpoint = "threads/%s/entries" % thread_uuid
    return self.core.upload(file_path, endpoint, description)
The uploaded document uuid will be returned | Below is the the instruction that describes the task:
### Input:
Upload a file to LinShare using its rest api.
The uploaded document uuid will be returned
### Response:
def upload(self, thread_uuid, file_path, description=None):
""" Upload a file to LinShare using its rest api.
The uploaded document uuid will be returned"""
url = "threads/%s/entries" % thread_uuid
return self.core.upload(file_path, url, description) |
def adapter_add_nio_binding(self, adapter_number, port_number, nio):
    """
    Adds an adapter NIO binding.

    :param adapter_number: adapter number
    :param port_number: port number
    :param nio: NIO instance to add to the adapter/port
    :raises IOUError: if the adapter or the port does not exist
    """
    try:
        adapter = self._adapters[adapter_number]
    except IndexError:
        raise IOUError('Adapter {adapter_number} does not exist for IOU "{name}"'.format(name=self._name,
                                                                                         adapter_number=adapter_number))
    if not adapter.port_exists(port_number):
        raise IOUError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter,
                                                                                       port_number=port_number))
    adapter.add_nio(port_number, nio)
    log.info('IOU "{name}" [{id}]: {nio} added to {adapter_number}/{port_number}'.format(name=self._name,
                                                                                        id=self._id,
                                                                                        nio=nio,
                                                                                        adapter_number=adapter_number,
                                                                                        port_number=port_number))
    if self.ubridge:
        # Plug the UDP NIO into the uBridge IOL bridge and apply any
        # configured packet filters (note: this is a "yield from"
        # generator-style coroutine)
        bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
        yield from self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name,
                                                                                                                           iol_id=self.application_id,
                                                                                                                           bay=adapter_number,
                                                                                                                           unit=port_number,
                                                                                                                           lport=nio.lport,
                                                                                                                           rhost=nio.rhost,
                                                                                                                           rport=nio.rport))
        yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters)
:param adapter_number: adapter number
:param port_number: port number
:param nio: NIO instance to add to the adapter/port | Below is the the instruction that describes the task:
### Input:
Adds a adapter NIO binding.
:param adapter_number: adapter number
:param port_number: port number
:param nio: NIO instance to add to the adapter/port
### Response:
def adapter_add_nio_binding(self, adapter_number, port_number, nio):
"""
Adds a adapter NIO binding.
:param adapter_number: adapter number
:param port_number: port number
:param nio: NIO instance to add to the adapter/port
"""
try:
adapter = self._adapters[adapter_number]
except IndexError:
raise IOUError('Adapter {adapter_number} does not exist for IOU "{name}"'.format(name=self._name,
adapter_number=adapter_number))
if not adapter.port_exists(port_number):
raise IOUError("Port {port_number} does not exist in adapter {adapter}".format(adapter=adapter,
port_number=port_number))
adapter.add_nio(port_number, nio)
log.info('IOU "{name}" [{id}]: {nio} added to {adapter_number}/{port_number}'.format(name=self._name,
id=self._id,
nio=nio,
adapter_number=adapter_number,
port_number=port_number))
if self.ubridge:
bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
yield from self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name,
iol_id=self.application_id,
bay=adapter_number,
unit=port_number,
lport=nio.lport,
rhost=nio.rhost,
rport=nio.rport))
yield from self._ubridge_apply_filters(adapter_number, port_number, nio.filters) |
def find_connectable_ip(host, port=None):
    """Resolve a hostname to an IP, preferring IPv4 addresses.

    We prefer IPv4 so that we don't change behavior from previous
    IPv4-only implementations, and because some drivers (e.g.,
    FirefoxDriver) do not support IPv6 connections.

    If the optional port number is provided, only IPs that listen on
    the given port are considered.

    :Args:
     - host - A hostname.
     - port - Optional port number.

    :Returns:
        A single IP address, as a string. If any IPv4 address is found,
        one is returned. Otherwise, if any IPv6 address is found, one is
        returned. If neither, then None is returned.
    """
    try:
        candidates = socket.getaddrinfo(host, None)
    except socket.gaierror:
        return None
    ipv6_fallback = None
    for family, _, _, _, sockaddr in candidates:
        address = sockaddr[0]
        # When a port is given, discard addresses that aren't listening
        if port and not is_connectable(port, address):
            continue
        if family == socket.AF_INET:
            # First reachable IPv4 wins immediately
            return address
        if family == socket.AF_INET6 and ipv6_fallback is None:
            ipv6_fallback = address
    return ipv6_fallback
We prefer IPv4 so that we don't change behavior from previous IPv4-only
implementations, and because some drivers (e.g., FirefoxDriver) do not
support IPv6 connections.
If the optional port number is provided, only IPs that listen on the given
port are considered.
:Args:
- host - A hostname.
- port - Optional port number.
:Returns:
A single IP address, as a string. If any IPv4 address is found, one is
returned. Otherwise, if any IPv6 address is found, one is returned. If
neither, then None is returned. | Below is the the instruction that describes the task:
### Input:
Resolve a hostname to an IP, preferring IPv4 addresses.
We prefer IPv4 so that we don't change behavior from previous IPv4-only
implementations, and because some drivers (e.g., FirefoxDriver) do not
support IPv6 connections.
If the optional port number is provided, only IPs that listen on the given
port are considered.
:Args:
- host - A hostname.
- port - Optional port number.
:Returns:
A single IP address, as a string. If any IPv4 address is found, one is
returned. Otherwise, if any IPv6 address is found, one is returned. If
neither, then None is returned.
### Response:
def find_connectable_ip(host, port=None):
"""Resolve a hostname to an IP, preferring IPv4 addresses.
We prefer IPv4 so that we don't change behavior from previous IPv4-only
implementations, and because some drivers (e.g., FirefoxDriver) do not
support IPv6 connections.
If the optional port number is provided, only IPs that listen on the given
port are considered.
:Args:
- host - A hostname.
- port - Optional port number.
:Returns:
A single IP address, as a string. If any IPv4 address is found, one is
returned. Otherwise, if any IPv6 address is found, one is returned. If
neither, then None is returned.
"""
try:
addrinfos = socket.getaddrinfo(host, None)
except socket.gaierror:
return None
ip = None
for family, _, _, _, sockaddr in addrinfos:
connectable = True
if port:
connectable = is_connectable(port, sockaddr[0])
if connectable and family == socket.AF_INET:
return sockaddr[0]
if connectable and not ip and family == socket.AF_INET6:
ip = sockaddr[0]
return ip |
def safe_logit(p: Union[float, int]) -> Optional[float]:
    r"""
    Returns the logit (log odds) of its input probability

    .. math::
        \alpha = logit(p) = log(p / (1 - p))

    Args:
        p: :math:`p`

    Returns:
        :math:`\alpha`, or ``None`` if ``p`` is not in the range [0, 1].
    """
    if p < 0 or p > 1:
        # logit is only defined on the closed interval [0, 1]
        return None
    if p == 0:
        return float("-inf")
    if p == 1:
        return float("inf")
    return math.log(p / (1 - p))
Returns the logit (log odds) of its input probability
.. math::
\alpha = logit(p) = log(x / (1 - x))
Args:
p: :math:`p`
Returns:
:math:`\alpha`, or ``None`` if ``x`` is not in the range [0, 1]. | Below is the the instruction that describes the task:
### Input:
r"""
Returns the logit (log odds) of its input probability
.. math::
\alpha = logit(p) = log(x / (1 - x))
Args:
p: :math:`p`
Returns:
:math:`\alpha`, or ``None`` if ``x`` is not in the range [0, 1].
### Response:
def safe_logit(p: Union[float, int]) -> Optional[float]:
r"""
Returns the logit (log odds) of its input probability
.. math::
\alpha = logit(p) = log(x / (1 - x))
Args:
p: :math:`p`
Returns:
:math:`\alpha`, or ``None`` if ``x`` is not in the range [0, 1].
"""
if p > 1 or p < 0:
return None # can't take log of negative number
if p == 1:
return float("inf")
if p == 0:
return float("-inf")
return math.log(p / (1 - p)) |
def runPowerNotificationsThread(self):
    """Main method of the spawned NSThread.

    Registers a run loop source for power-source notifications and spins
    the current NSRunLoop until the thread is cancelled.
    """
    # Autorelease pool scopes the Objective-C allocations on this thread
    pool = NSAutoreleasePool.alloc().init()
    @objc.callbackFor(IOPSNotificationCreateRunLoopSource)
    def on_power_source_notification(context):
        # Fan the notification out to every observer that is still alive
        # (observers are held via weak references)
        with self._lock:
            for weak_observer in self._weak_observers:
                observer = weak_observer()
                if observer:
                    observer.on_power_source_notification()
    self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
    CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
    # Keep servicing the run loop until the owning thread is cancelled
    while not NSThread.currentThread().isCancelled():
        NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
    del pool
### Input:
Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop.
### Response:
def runPowerNotificationsThread(self):
"""Main method of the spawned NSThread. Registers run loop source and runs current NSRunLoop."""
pool = NSAutoreleasePool.alloc().init()
@objc.callbackFor(IOPSNotificationCreateRunLoopSource)
def on_power_source_notification(context):
with self._lock:
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
observer.on_power_source_notification()
self._source = IOPSNotificationCreateRunLoopSource(on_power_source_notification, None)
CFRunLoopAddSource(NSRunLoop.currentRunLoop().getCFRunLoop(), self._source, kCFRunLoopDefaultMode)
while not NSThread.currentThread().isCancelled():
NSRunLoop.currentRunLoop().runMode_beforeDate_(NSDefaultRunLoopMode, NSDate.distantFuture())
del pool |
async def addFeedData(self, name, items, seqn=None):
    '''
    Add feed data to the cortex.

    Thin async proxy: forwards ``name``, ``items`` and ``seqn`` to the
    underlying core unchanged and returns its result.
    '''
    return await self.core.addFeedData(name, items, seqn)
### Input:
Add feed data to the cortex.
### Response:
async def addFeedData(self, name, items, seqn=None):
'''
Add feed data to the cortex.
'''
return await self.core.addFeedData(name, items, seqn) |
def parse_job_id(self, output):
    """Take the output of the submit command and return the job id."""
    match = self.job_id_regexp.search(output)
    if match is None:
        raise LauncherError("Job id couldn't be determined: %s" % output)
    job_id = match.group()
    self.job_id = job_id
    self.log.info('Job submitted with job id: %r', job_id)
    return job_id
### Input:
Take the output of the submit command and return the job id.
### Response:
def parse_job_id(self, output):
"""Take the output of the submit command and return the job id."""
m = self.job_id_regexp.search(output)
if m is not None:
job_id = m.group()
else:
raise LauncherError("Job id couldn't be determined: %s" % output)
self.job_id = job_id
self.log.info('Job submitted with job id: %r', job_id)
return job_id |
def tags(self, val: dict) -> None:
    """
    Setter for record tags (metadata).

    :param val: record tags as a flat str -> str mapping; a falsy value
        resets the tags to an empty dict
    :raises BadRecord: if ``val`` does not map strings to strings
    """
    if not StorageRecord.ok_tags(val):
        LOGGER.debug('StorageRecord.__init__ <!< Tags %s must map strings to strings', val)
        raise BadRecord('Tags {} must map strings to strings'.format(val))
    self._tags = val or {}
self._tags = val or {} | Accessor for record tags (metadata).
:param val: record tags | Below is the the instruction that describes the task:
### Input:
Accessor for record tags (metadata).
:param val: record tags
### Response:
def tags(self, val: str) -> None:
"""
Accessor for record tags (metadata).
:param val: record tags
"""
if not StorageRecord.ok_tags(val):
LOGGER.debug('StorageRecord.__init__ <!< Tags %s must map strings to strings', val)
raise BadRecord('Tags {} must map strings to strings'.format(val))
self._tags = val or {} |
def check_ups_input_frequency(the_session, the_helper, the_snmp_value):
    """
    OID .1.3.6.1.2.1.33.1.3.3.1.2.1

    MIB excerpt: the present input frequency.

    Converts the raw SNMP value to Hz, records it as a metric on the
    helper and sets the summary line. ``the_session`` is unused but kept
    to preserve the call signature.
    """
    hertz = calc_frequency_from_snmpvalue(the_snmp_value)
    the_helper.add_metric(label=the_helper.options.type,
                          value=hertz,
                          uom='Hz')
    the_helper.set_summary("Input Frequency is {} Hz".format(hertz))
the_helper.set_summary("Input Frequency is {} Hz".format(a_frequency)) | OID .1.3.6.1.2.1.33.1.3.3.1.2.1
MIB excerpt
The present input frequency. | Below is the the instruction that describes the task:
### Input:
OID .1.3.6.1.2.1.33.1.3.3.1.2.1
MIB excerpt
The present input frequency.
### Response:
def check_ups_input_frequency(the_session, the_helper, the_snmp_value):
"""
OID .1.3.6.1.2.1.33.1.3.3.1.2.1
MIB excerpt
The present input frequency.
"""
a_frequency = calc_frequency_from_snmpvalue(the_snmp_value)
the_helper.add_metric(
label=the_helper.options.type,
value=a_frequency,
uom='Hz')
the_helper.set_summary("Input Frequency is {} Hz".format(a_frequency)) |
def encode_endpoint_props(ed):
    """
    Encodes the properties of the given EndpointDescription

    :param ed: an EndpointDescription
    :return: a dict merging the ECF-specific properties with the
        endpoint's non-reserved properties
    """
    props = encode_osgi_props(ed)
    # ECF identifiers and timestamp are always serialized as strings
    props[ECF_RSVC_ID] = "{0}".format(ed.get_remoteservice_id()[1])
    props[ECF_ENDPOINT_ID] = "{0}".format(ed.get_container_id()[1])
    props[ECF_ENDPOINT_CONTAINERID_NAMESPACE] = "{0}".format(
        ed.get_container_id()[0]
    )
    props[ECF_ENDPOINT_TIMESTAMP] = "{0}".format(ed.get_timestamp())
    # Optional attributes below are only added when present/non-empty
    ctid = ed.get_connect_target_id()
    if ctid:
        props[ECF_ENDPOINT_CONNECTTARGET_ID] = "{0}".format(ctid)
    id_filters = ed.get_id_filters()
    if id_filters:
        # Space-separated list of the filter IDs (second tuple element)
        props[ECF_ENDPOINT_IDFILTER_IDS] = " ".join([x[1] for x in id_filters])
    rs_filter = ed.get_remoteservice_filter()
    if rs_filter:
        props[ECF_ENDPOINT_REMOTESERVICE_FILTER] = ed.get_remoteservice_filter()
    async_intfs = ed.get_async_interfaces()
    if async_intfs:
        props[ECF_SERVICE_EXPORTED_ASYNC_INTERFACES] = " ".join(async_intfs)
    # Keep every user property that is not a reserved/ECF-managed key
    all_props = ed.get_properties()
    other_props = {
        key: all_props[key]
        for key in all_props.keys()
        if not is_reserved_property(key)
    }
    return merge_dicts(props, other_props)
### Input:
Encodes the properties of the given EndpointDescription
### Response:
def encode_endpoint_props(ed):
"""
Encodes the properties of the given EndpointDescription
"""
props = encode_osgi_props(ed)
props[ECF_RSVC_ID] = "{0}".format(ed.get_remoteservice_id()[1])
props[ECF_ENDPOINT_ID] = "{0}".format(ed.get_container_id()[1])
props[ECF_ENDPOINT_CONTAINERID_NAMESPACE] = "{0}".format(
ed.get_container_id()[0]
)
props[ECF_ENDPOINT_TIMESTAMP] = "{0}".format(ed.get_timestamp())
ctid = ed.get_connect_target_id()
if ctid:
props[ECF_ENDPOINT_CONNECTTARGET_ID] = "{0}".format(ctid)
id_filters = ed.get_id_filters()
if id_filters:
props[ECF_ENDPOINT_IDFILTER_IDS] = " ".join([x[1] for x in id_filters])
rs_filter = ed.get_remoteservice_filter()
if rs_filter:
props[ECF_ENDPOINT_REMOTESERVICE_FILTER] = ed.get_remoteservice_filter()
async_intfs = ed.get_async_interfaces()
if async_intfs:
props[ECF_SERVICE_EXPORTED_ASYNC_INTERFACES] = " ".join(async_intfs)
all_props = ed.get_properties()
other_props = {
key: all_props[key]
for key in all_props.keys()
if not is_reserved_property(key)
}
return merge_dicts(props, other_props) |
def _handler(self, conn):
    """
    Connection handler thread. Takes care of communication with the client
    and running the proper task or applying a signal.

    The decoded request has the shape ``[task_name, args, kwargs]``. The
    response sent back is either ``['success', result]`` or
    ``['error', exc_class_name, exc_args]``.
    """
    incoming = self.recv(conn)
    self.log(DEBUG, incoming)
    # Pre-bind these names so the except/finally blocks cannot hit a
    # NameError when codec.decode() itself fails: previously "task" and
    # "res" were referenced before assignment in that case, and the
    # client never received any response.
    task = None
    res = None
    try:
        # E.g. ['twister', [7, 'invert'], {'guess_type': True}]
        task, args, kw = self.codec.decode(incoming)
        # OK, so we've received the information. Now to use it.
        self.log(INFO, 'Fulfilling task %r' % task)
        self.started_task()
        pass_backend = False
        obj = self.tasks[task]
        if _is_iter(obj):
            # (callable, bool)
            obj, pass_backend = obj
        if pass_backend:
            # Have to do this, since args is a list
            args = [self] + args
        # Get and package the result
        res = ['success', obj(*args, **kw)]
    except Exception as e:
        self.log(ERROR, 'Error while fullfilling task %r: %r' % (task, e))
        res = ['error', e.__class__.__name__, e.args]
        if self.tracebacks:
            show_err()
    else:
        self.log(INFO, 'Finished fulfilling task %r' % task)
    finally:
        self.send(conn, self.codec.encode(res))
        self.finished_task()
        conn.close()
and running the proper task or applying a signal. | Below is the the instruction that describes the task:
### Input:
Connection handler thread. Takes care of communication with the client
and running the proper task or applying a signal.
### Response:
def _handler(self, conn):
"""
Connection handler thread. Takes care of communication with the client
and running the proper task or applying a signal.
"""
incoming = self.recv(conn)
self.log(DEBUG, incoming)
try:
# E.g. ['twister', [7, 'invert'], {'guess_type': True}]
task, args, kw = self.codec.decode(incoming)
# OK, so we've received the information. Now to use it.
self.log(INFO, 'Fulfilling task %r' % task)
self.started_task()
pass_backend = False
obj = self.tasks[task]
if _is_iter(obj):
# (callable, bool)
obj, pass_backend = obj
if pass_backend:
# Have to do this, since args is a list
args = [self] + args
# Get and package the result
res = ['success', obj(*args, **kw)]
except Exception as e:
self.log(ERROR, 'Error while fullfilling task %r: %r' % (task, e))
res = ['error', e.__class__.__name__, e.args]
if self.tracebacks:
show_err()
else:
self.log(INFO, 'Finished fulfilling task %r' % task)
finally:
self.send(conn, self.codec.encode(res))
self.finished_task()
conn.close() |
def Regions(self,
skip_executable_regions=False,
skip_shared_regions=False,
skip_readonly_regions=False):
"""Iterates over the readable regions for this process.
We use mach_vm_region_recurse here to get a fine grained view of
the process' memory space. The algorithm is that for some regions,
the function returns is_submap=True which means that there are
actually subregions that we need to examine by increasing the
depth and calling the function again. For example, there are two
regions, addresses 1000-2000 and 2000-3000 where 1000-2000 has two
subregions, 1100-1200 and 1300-1400. In that case we would call:
mvrr(address=0, depth=0) -> (1000-2000, is_submap=True)
mvrr(address=0, depth=1) -> (1100-1200, is_submap=False)
mvrr(address=1200, depth=1) -> (1300-1400, is_submap=False)
mvrr(address=1400, depth=1) -> (2000-3000, is_submap=False)
At this point, we know we went out of the original submap which
ends at 2000. We need to recheck the region at 2000, it could be
submap = True at depth 0 so we call
mvrr(address=1400, depth=0) -> (2000-3000, is_submap=False)
Args:
skip_executable_regions: Skips executable sections.
skip_shared_regions: Skips shared sections. Includes mapped files.
skip_readonly_regions: Skips readonly sections.
Yields:
Pairs (address, length) for each identified region.
"""
address = ctypes.c_ulong(0)
mapsize = ctypes.c_ulong(0)
count = ctypes.c_uint32(submap_info_size)
sub_info = vm_region_submap_short_info_data_64()
depth = 0
depth_end_addresses = {}
while True:
c_depth = ctypes.c_uint32(depth)
r = libc.mach_vm_region_recurse(self.task, ctypes.pointer(address),
ctypes.pointer(mapsize),
ctypes.pointer(c_depth),
ctypes.pointer(sub_info),
ctypes.pointer(count))
# If we get told "invalid address", we have crossed into kernel land...
if r == 1:
break
if r != 0:
raise process_error.ProcessError("Error in mach_vm_region, ret=%s" % r)
if depth > 0 and address.value >= depth_end_addresses[depth]:
del depth_end_addresses[depth]
depth -= 1
continue
p = sub_info.protection
if skip_executable_regions and p & VM_PROT_EXECUTE:
address.value += mapsize.value
continue
if skip_shared_regions and sub_info.share_mode in [
SM_COW, SM_SHARED, SM_TRUESHARED
]:
address.value += mapsize.value
continue
if not p & VM_PROT_READ:
address.value += mapsize.value
continue
writable = p & VM_PROT_WRITE
if skip_readonly_regions and not writable:
address.value += mapsize.value
continue
if sub_info.is_submap:
depth += 1
depth_end_addresses[depth] = address.value + mapsize.value
else:
yield address.value, mapsize.value
address.value += mapsize.value | Iterates over the readable regions for this process.
We use mach_vm_region_recurse here to get a fine grained view of
the process' memory space. The algorithm is that for some regions,
the function returns is_submap=True which means that there are
actually subregions that we need to examine by increasing the
depth and calling the function again. For example, there are two
regions, addresses 1000-2000 and 2000-3000 where 1000-2000 has two
subregions, 1100-1200 and 1300-1400. In that case we would call:
mvrr(address=0, depth=0) -> (1000-2000, is_submap=True)
mvrr(address=0, depth=1) -> (1100-1200, is_submap=False)
mvrr(address=1200, depth=1) -> (1300-1400, is_submap=False)
mvrr(address=1400, depth=1) -> (2000-3000, is_submap=False)
At this point, we know we went out of the original submap which
ends at 2000. We need to recheck the region at 2000, it could be
submap = True at depth 0 so we call
mvrr(address=1400, depth=0) -> (2000-3000, is_submap=False)
Args:
skip_executable_regions: Skips executable sections.
skip_shared_regions: Skips shared sections. Includes mapped files.
skip_readonly_regions: Skips readonly sections.
Yields:
Pairs (address, length) for each identified region. | Below is the the instruction that describes the task:
### Input:
Iterates over the readable regions for this process.
We use mach_vm_region_recurse here to get a fine grained view of
the process' memory space. The algorithm is that for some regions,
the function returns is_submap=True which means that there are
actually subregions that we need to examine by increasing the
depth and calling the function again. For example, there are two
regions, addresses 1000-2000 and 2000-3000 where 1000-2000 has two
subregions, 1100-1200 and 1300-1400. In that case we would call:
mvrr(address=0, depth=0) -> (1000-2000, is_submap=True)
mvrr(address=0, depth=1) -> (1100-1200, is_submap=False)
mvrr(address=1200, depth=1) -> (1300-1400, is_submap=False)
mvrr(address=1400, depth=1) -> (2000-3000, is_submap=False)
At this point, we know we went out of the original submap which
ends at 2000. We need to recheck the region at 2000, it could be
submap = True at depth 0 so we call
mvrr(address=1400, depth=0) -> (2000-3000, is_submap=False)
Args:
skip_executable_regions: Skips executable sections.
skip_shared_regions: Skips shared sections. Includes mapped files.
skip_readonly_regions: Skips readonly sections.
Yields:
Pairs (address, length) for each identified region.
### Response:
def Regions(self,
skip_executable_regions=False,
skip_shared_regions=False,
skip_readonly_regions=False):
"""Iterates over the readable regions for this process.
We use mach_vm_region_recurse here to get a fine grained view of
the process' memory space. The algorithm is that for some regions,
the function returns is_submap=True which means that there are
actually subregions that we need to examine by increasing the
depth and calling the function again. For example, there are two
regions, addresses 1000-2000 and 2000-3000 where 1000-2000 has two
subregions, 1100-1200 and 1300-1400. In that case we would call:
mvrr(address=0, depth=0) -> (1000-2000, is_submap=True)
mvrr(address=0, depth=1) -> (1100-1200, is_submap=False)
mvrr(address=1200, depth=1) -> (1300-1400, is_submap=False)
mvrr(address=1400, depth=1) -> (2000-3000, is_submap=False)
At this point, we know we went out of the original submap which
ends at 2000. We need to recheck the region at 2000, it could be
submap = True at depth 0 so we call
mvrr(address=1400, depth=0) -> (2000-3000, is_submap=False)
Args:
skip_executable_regions: Skips executable sections.
skip_shared_regions: Skips shared sections. Includes mapped files.
skip_readonly_regions: Skips readonly sections.
Yields:
Pairs (address, length) for each identified region.
"""
address = ctypes.c_ulong(0)
mapsize = ctypes.c_ulong(0)
count = ctypes.c_uint32(submap_info_size)
sub_info = vm_region_submap_short_info_data_64()
depth = 0
depth_end_addresses = {}
while True:
c_depth = ctypes.c_uint32(depth)
r = libc.mach_vm_region_recurse(self.task, ctypes.pointer(address),
ctypes.pointer(mapsize),
ctypes.pointer(c_depth),
ctypes.pointer(sub_info),
ctypes.pointer(count))
# If we get told "invalid address", we have crossed into kernel land...
if r == 1:
break
if r != 0:
raise process_error.ProcessError("Error in mach_vm_region, ret=%s" % r)
if depth > 0 and address.value >= depth_end_addresses[depth]:
del depth_end_addresses[depth]
depth -= 1
continue
p = sub_info.protection
if skip_executable_regions and p & VM_PROT_EXECUTE:
address.value += mapsize.value
continue
if skip_shared_regions and sub_info.share_mode in [
SM_COW, SM_SHARED, SM_TRUESHARED
]:
address.value += mapsize.value
continue
if not p & VM_PROT_READ:
address.value += mapsize.value
continue
writable = p & VM_PROT_WRITE
if skip_readonly_regions and not writable:
address.value += mapsize.value
continue
if sub_info.is_submap:
depth += 1
depth_end_addresses[depth] = address.value + mapsize.value
else:
yield address.value, mapsize.value
address.value += mapsize.value |
def get_current_clementine():
"""
Get the current song from clementine.
"""
# mpris_version 2
try:
return get_info_mpris2('clementine')
except DBusErrorResponse:
bus_name = 'org.mpris.clementine'
path = '/Player'
interface = 'org.freedesktop.MediaPlayer'
return dbus_get_metadata(path, bus_name, interface) | Get the current song from clementine. | Below is the the instruction that describes the task:
### Input:
Get the current song from clementine.
### Response:
def get_current_clementine():
"""
Get the current song from clementine.
"""
# mpris_version 2
try:
return get_info_mpris2('clementine')
except DBusErrorResponse:
bus_name = 'org.mpris.clementine'
path = '/Player'
interface = 'org.freedesktop.MediaPlayer'
return dbus_get_metadata(path, bus_name, interface) |
def get_group_participants(self, appointment_group, **kwargs):
"""
List student group participants in this appointment group.
:calls: `GET /api/v1/appointment_groups/:id/groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
"""
from canvasapi.appointment_group import AppointmentGroup
from canvasapi.group import Group
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
return PaginatedList(
Group,
self.__requester,
'GET',
'appointment_groups/{}/groups'.format(appointment_group_id),
_kwargs=combine_kwargs(**kwargs)
) | List student group participants in this appointment group.
:calls: `GET /api/v1/appointment_groups/:id/groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group` | Below is the the instruction that describes the task:
### Input:
List student group participants in this appointment group.
:calls: `GET /api/v1/appointment_groups/:id/groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
### Response:
def get_group_participants(self, appointment_group, **kwargs):
"""
List student group participants in this appointment group.
:calls: `GET /api/v1/appointment_groups/:id/groups \
<https://canvas.instructure.com/doc/api/appointment_groups.html#method.appointment_groups.groups>`_
:param appointment_group: The object or ID of the appointment group.
:type appointment_group: :class:`canvasapi.appointment_group.AppointmentGroup` or int
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.group.Group`
"""
from canvasapi.appointment_group import AppointmentGroup
from canvasapi.group import Group
appointment_group_id = obj_or_id(
appointment_group, "appointment_group", (AppointmentGroup,)
)
return PaginatedList(
Group,
self.__requester,
'GET',
'appointment_groups/{}/groups'.format(appointment_group_id),
_kwargs=combine_kwargs(**kwargs)
) |
def parse_content(self, content):
"""
Called automatically to process the directory listing(s) contained in
the content.
"""
self.listings = ls_parser.parse(content, self.first_path)
# No longer need the first path found, if any.
delattr(self, 'first_path') | Called automatically to process the directory listing(s) contained in
the content. | Below is the the instruction that describes the task:
### Input:
Called automatically to process the directory listing(s) contained in
the content.
### Response:
def parse_content(self, content):
"""
Called automatically to process the directory listing(s) contained in
the content.
"""
self.listings = ls_parser.parse(content, self.first_path)
# No longer need the first path found, if any.
delattr(self, 'first_path') |
def write(self, process_tile, data):
"""
Write data from process tiles into PNG file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
"""
data = self._prepare_array(data)
if data.mask.all():
logger.debug("data empty, nothing to write")
else:
# in case of S3 output, create an boto3 resource
bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None
# Convert from process_tile to output_tiles and write
for tile in self.pyramid.intersecting(process_tile):
out_path = self.get_path(tile)
self.prepare_path(tile)
out_tile = BufferedTile(tile, self.pixelbuffer)
write_raster_window(
in_tile=process_tile,
in_data=data,
out_profile=self.profile(out_tile),
out_tile=out_tile,
out_path=out_path,
bucket_resource=bucket_resource
) | Write data from process tiles into PNG file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid`` | Below is the the instruction that describes the task:
### Input:
Write data from process tiles into PNG file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
### Response:
def write(self, process_tile, data):
"""
Write data from process tiles into PNG file(s).
Parameters
----------
process_tile : ``BufferedTile``
must be member of process ``TilePyramid``
"""
data = self._prepare_array(data)
if data.mask.all():
logger.debug("data empty, nothing to write")
else:
# in case of S3 output, create an boto3 resource
bucket_resource = get_boto3_bucket(self._bucket) if self._bucket else None
# Convert from process_tile to output_tiles and write
for tile in self.pyramid.intersecting(process_tile):
out_path = self.get_path(tile)
self.prepare_path(tile)
out_tile = BufferedTile(tile, self.pixelbuffer)
write_raster_window(
in_tile=process_tile,
in_data=data,
out_profile=self.profile(out_tile),
out_tile=out_tile,
out_path=out_path,
bucket_resource=bucket_resource
) |
def schema_org(builder):
# pylint: disable=line-too-long
"""Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata
"""
# pylint: enable=line-too-long
properties = [
(lambda x: x.name, SCHEMA_ORG_NAME),
(lambda x: x.description, SCHEMA_ORG_DESC),
(lambda x: x.name, SCHEMA_ORG_URL),
(lambda x: (x.urls and x.urls[0]) or "", SCHEMA_ORG_SAMEAS)
]
info = builder.info
out_str = SCHEMA_ORG_PRE
for extractor, template in properties:
val = extractor(info)
if val:
# We are using cgi module instead of html due to Python 2 compatibility
out_str += template.format(val=cgi.escape(val, quote=True).strip())
out_str += SCHEMA_ORG_POST
return out_str | Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata | Below is the the instruction that describes the task:
### Input:
Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata
### Response:
def schema_org(builder):
# pylint: disable=line-too-long
"""Builds schema.org microdata for DatasetSearch from DatasetBuilder.
Markup spec: https://developers.google.com/search/docs/data-types/dataset#dataset
Testing tool: https://search.google.com/structured-data/testing-tool
For Google Dataset Search: https://toolbox.google.com/datasetsearch
Microdata format was chosen over JSON-LD due to the fact that Markdown
rendering engines remove all <script> tags.
Args:
builder: `tfds.core.DatasetBuilder`
Returns:
HTML string with microdata
"""
# pylint: enable=line-too-long
properties = [
(lambda x: x.name, SCHEMA_ORG_NAME),
(lambda x: x.description, SCHEMA_ORG_DESC),
(lambda x: x.name, SCHEMA_ORG_URL),
(lambda x: (x.urls and x.urls[0]) or "", SCHEMA_ORG_SAMEAS)
]
info = builder.info
out_str = SCHEMA_ORG_PRE
for extractor, template in properties:
val = extractor(info)
if val:
# We are using cgi module instead of html due to Python 2 compatibility
out_str += template.format(val=cgi.escape(val, quote=True).strip())
out_str += SCHEMA_ORG_POST
return out_str |
def from_stashy(klass, repository, labor_hours=True):
"""
Handles crafting Code.gov Project for Bitbucket Server repositories
"""
# if not isinstance(repository, stashy.repos.Repository):
# raise TypeError('Repository must be a stashy Repository object')
if not isinstance(repository, dict):
raise TypeError('Repository must be a dict')
project = klass()
logger.debug(
'Stashy: project_key=%s repository_slug=%s',
repository['name'],
repository['project']['key'],
)
# -- REQUIRED FIELDS --
project['name'] = repository['name']
clone_urls = [clone['href'] for clone in repository['links']['clone']]
for url in clone_urls:
# Only rely on SSH Urls for repository urls
if url.startswith('ssh://'):
project['repositoryURL'] = url
break
description = repository['project'].get('description', '')
if description:
project['description'] = 'Project description: %s' % description
project['permissions']['licenses'] = None
web_url = repository['links']['self'][0]['href']
public_server = web_url.startswith('https://bitbucket.org')
if repository['public'] and public_server:
project['permissions']['usageType'] = 'openSource'
if labor_hours:
project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
project['tags'] = ['bitbucket']
project['contact']['email'] = ''
project['contact']['URL'] = repository['links']['self'][0]['href']
# -- OPTIONAL FIELDS --
# project['version'] = ''
# project['organization'] = organization.name
# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
project['status'] = 'Development'
project['vcs'] = repository['scmId']
project['homepageURL'] = repository['links']['self'][0]['href']
# project['downloadURL'] =
# project['languages'] =
# project['partners'] = []
# project['relatedCode'] = []
# project['reusedCode'] = []
# date: [object] A date object describing the release.
# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
# project['date'] = {
# 'created': repository.pushed_at.isoformat(),
# 'lastModified': repository.updated_at.isoformat(),
# 'metadataLastUpdated': '',
# }
_prune_dict_null_str(project)
return project | Handles crafting Code.gov Project for Bitbucket Server repositories | Below is the the instruction that describes the task:
### Input:
Handles crafting Code.gov Project for Bitbucket Server repositories
### Response:
def from_stashy(klass, repository, labor_hours=True):
"""
Handles crafting Code.gov Project for Bitbucket Server repositories
"""
# if not isinstance(repository, stashy.repos.Repository):
# raise TypeError('Repository must be a stashy Repository object')
if not isinstance(repository, dict):
raise TypeError('Repository must be a dict')
project = klass()
logger.debug(
'Stashy: project_key=%s repository_slug=%s',
repository['name'],
repository['project']['key'],
)
# -- REQUIRED FIELDS --
project['name'] = repository['name']
clone_urls = [clone['href'] for clone in repository['links']['clone']]
for url in clone_urls:
# Only rely on SSH Urls for repository urls
if url.startswith('ssh://'):
project['repositoryURL'] = url
break
description = repository['project'].get('description', '')
if description:
project['description'] = 'Project description: %s' % description
project['permissions']['licenses'] = None
web_url = repository['links']['self'][0]['href']
public_server = web_url.startswith('https://bitbucket.org')
if repository['public'] and public_server:
project['permissions']['usageType'] = 'openSource'
if labor_hours:
project['laborHours'] = labor_hours_from_url(project['repositoryURL'])
else:
project['laborHours'] = 0
project['tags'] = ['bitbucket']
project['contact']['email'] = ''
project['contact']['URL'] = repository['links']['self'][0]['href']
# -- OPTIONAL FIELDS --
# project['version'] = ''
# project['organization'] = organization.name
# TODO: Currently, can't be an empty string, see: https://github.com/GSA/code-gov-web/issues/370
project['status'] = 'Development'
project['vcs'] = repository['scmId']
project['homepageURL'] = repository['links']['self'][0]['href']
# project['downloadURL'] =
# project['languages'] =
# project['partners'] = []
# project['relatedCode'] = []
# project['reusedCode'] = []
# date: [object] A date object describing the release.
# created: [string] The date the release was originally created, in YYYY-MM-DD or ISO 8601 format.
# lastModified: [string] The date the release was modified, in YYYY-MM-DD or ISO 8601 format.
# metadataLastUpdated: [string] The date the metadata of the release was last updated, in YYYY-MM-DD or ISO 8601 format.
# project['date'] = {
# 'created': repository.pushed_at.isoformat(),
# 'lastModified': repository.updated_at.isoformat(),
# 'metadataLastUpdated': '',
# }
_prune_dict_null_str(project)
return project |
def find_element(self, by=By.ID, value=None, el_class=None):
"""
usages with ``'one string'`` selector:
- find_element(by: str) -> PageElement
- find_element(by: str, value: T <= PageElement) -> T
usages with ``'webdriver'`` By selector
- find_element(by: str, value: str) -> PageElement
- find_element(by: str, value: str, el_class: T <= PageElement) -> T
:type by: str
:param by:
:type value: str | T <= PageElement
:param value:
:type el_class: T <= PageElement
:param el_class:
:rtype: T <= PageElement
:return:
"""
el = self.child_element(by, value, el_class)
el.reload()
return el | usages with ``'one string'`` selector:
- find_element(by: str) -> PageElement
- find_element(by: str, value: T <= PageElement) -> T
usages with ``'webdriver'`` By selector
- find_element(by: str, value: str) -> PageElement
- find_element(by: str, value: str, el_class: T <= PageElement) -> T
:type by: str
:param by:
:type value: str | T <= PageElement
:param value:
:type el_class: T <= PageElement
:param el_class:
:rtype: T <= PageElement
:return: | Below is the the instruction that describes the task:
### Input:
usages with ``'one string'`` selector:
- find_element(by: str) -> PageElement
- find_element(by: str, value: T <= PageElement) -> T
usages with ``'webdriver'`` By selector
- find_element(by: str, value: str) -> PageElement
- find_element(by: str, value: str, el_class: T <= PageElement) -> T
:type by: str
:param by:
:type value: str | T <= PageElement
:param value:
:type el_class: T <= PageElement
:param el_class:
:rtype: T <= PageElement
:return:
### Response:
def find_element(self, by=By.ID, value=None, el_class=None):
"""
usages with ``'one string'`` selector:
- find_element(by: str) -> PageElement
- find_element(by: str, value: T <= PageElement) -> T
usages with ``'webdriver'`` By selector
- find_element(by: str, value: str) -> PageElement
- find_element(by: str, value: str, el_class: T <= PageElement) -> T
:type by: str
:param by:
:type value: str | T <= PageElement
:param value:
:type el_class: T <= PageElement
:param el_class:
:rtype: T <= PageElement
:return:
"""
el = self.child_element(by, value, el_class)
el.reload()
return el |
def login(self, login_type, **kwargs):
"""Perform /login.
Args:
login_type (str): The value for the 'type' key.
**kwargs: Additional key/values to add to the JSON submitted.
"""
content = {
"type": login_type
}
for key in kwargs:
if kwargs[key]:
content[key] = kwargs[key]
return self._send("POST", "/login", content) | Perform /login.
Args:
login_type (str): The value for the 'type' key.
**kwargs: Additional key/values to add to the JSON submitted. | Below is the the instruction that describes the task:
### Input:
Perform /login.
Args:
login_type (str): The value for the 'type' key.
**kwargs: Additional key/values to add to the JSON submitted.
### Response:
def login(self, login_type, **kwargs):
"""Perform /login.
Args:
login_type (str): The value for the 'type' key.
**kwargs: Additional key/values to add to the JSON submitted.
"""
content = {
"type": login_type
}
for key in kwargs:
if kwargs[key]:
content[key] = kwargs[key]
return self._send("POST", "/login", content) |
def multi_feat_match(template, image, options=None):
"""
Match template and image by extracting multiple features (specified) from it.
:param template: Template image
:param image: Search image
:param options: Options include
- features: List of options for each feature
:return:
"""
h, w = image.shape[:2]
scale = 1
if options is not None and 'features' in options:
heatmap = np.zeros((h, w), dtype=np.float64)
for foptions in options['features']:
f_hmap, _ = feature_match(template, image, foptions)
heatmap += cv.resize(f_hmap, (w, h), interpolation=cv.INTER_AREA)
heatmap /= len(options['features'])
else:
heatmap, scale = feature_match(template, image, options)
return heatmap, scale | Match template and image by extracting multiple features (specified) from it.
:param template: Template image
:param image: Search image
:param options: Options include
- features: List of options for each feature
:return: | Below is the the instruction that describes the task:
### Input:
Match template and image by extracting multiple features (specified) from it.
:param template: Template image
:param image: Search image
:param options: Options include
- features: List of options for each feature
:return:
### Response:
def multi_feat_match(template, image, options=None):
"""
Match template and image by extracting multiple features (specified) from it.
:param template: Template image
:param image: Search image
:param options: Options include
- features: List of options for each feature
:return:
"""
h, w = image.shape[:2]
scale = 1
if options is not None and 'features' in options:
heatmap = np.zeros((h, w), dtype=np.float64)
for foptions in options['features']:
f_hmap, _ = feature_match(template, image, foptions)
heatmap += cv.resize(f_hmap, (w, h), interpolation=cv.INTER_AREA)
heatmap /= len(options['features'])
else:
heatmap, scale = feature_match(template, image, options)
return heatmap, scale |
def instances_changed(self):
"""True if any instance has changed."""
value = bool(lib.EnvGetInstancesChanged(self._env))
lib.EnvSetInstancesChanged(self._env, int(False))
return value | True if any instance has changed. | Below is the the instruction that describes the task:
### Input:
True if any instance has changed.
### Response:
def instances_changed(self):
"""True if any instance has changed."""
value = bool(lib.EnvGetInstancesChanged(self._env))
lib.EnvSetInstancesChanged(self._env, int(False))
return value |
def set_active_vectors(self, name, preference='cell'):
"""Finds the vectors by name and appropriately sets it as active"""
_, field = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveVectors(name)
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveVectors(name)
else:
raise RuntimeError('Data field ({}) not useable'.format(field))
self._active_vectors_info = [field, name] | Finds the vectors by name and appropriately sets it as active | Below is the the instruction that describes the task:
### Input:
Finds the vectors by name and appropriately sets it as active
### Response:
def set_active_vectors(self, name, preference='cell'):
"""Finds the vectors by name and appropriately sets it as active"""
_, field = get_scalar(self, name, preference=preference, info=True)
if field == POINT_DATA_FIELD:
self.GetPointData().SetActiveVectors(name)
elif field == CELL_DATA_FIELD:
self.GetCellData().SetActiveVectors(name)
else:
raise RuntimeError('Data field ({}) not useable'.format(field))
self._active_vectors_info = [field, name] |
def pusher_connected(self, data):
"""Called when the pusherclient is connected
"""
# Inform user that pusher is done connecting
self.logger.info("Pusherclient connected")
# Bind the events we want to listen to
self.callback_client.bind("payment_authorized",
self.payment_authorized)
self.callback_client.bind("shortlink_scanned",
self.shortlink_scanned) | Called when the pusherclient is connected | Below is the the instruction that describes the task:
### Input:
Called when the pusherclient is connected
### Response:
def pusher_connected(self, data):
"""Called when the pusherclient is connected
"""
# Inform user that pusher is done connecting
self.logger.info("Pusherclient connected")
# Bind the events we want to listen to
self.callback_client.bind("payment_authorized",
self.payment_authorized)
self.callback_client.bind("shortlink_scanned",
self.shortlink_scanned) |
def construct_mapping(self, node, deep=False):
'''
Build the mapping for YAML
'''
if not isinstance(node, MappingNode):
raise ConstructorError(
None,
None,
'expected a mapping node, but found {0}'.format(node.id),
node.start_mark)
self.flatten_mapping(node)
context = 'while constructing a mapping'
mapping = self.dictclass()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError:
raise ConstructorError(
context,
node.start_mark,
"found unacceptable key {0}".format(key_node.value),
key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
if key in mapping:
raise ConstructorError(
context,
node.start_mark,
"found conflicting ID '{0}'".format(key),
key_node.start_mark)
mapping[key] = value
return mapping | Build the mapping for YAML | Below is the the instruction that describes the task:
### Input:
Build the mapping for YAML
### Response:
def construct_mapping(self, node, deep=False):
'''
Build the mapping for YAML
'''
if not isinstance(node, MappingNode):
raise ConstructorError(
None,
None,
'expected a mapping node, but found {0}'.format(node.id),
node.start_mark)
self.flatten_mapping(node)
context = 'while constructing a mapping'
mapping = self.dictclass()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError:
raise ConstructorError(
context,
node.start_mark,
"found unacceptable key {0}".format(key_node.value),
key_node.start_mark)
value = self.construct_object(value_node, deep=deep)
if key in mapping:
raise ConstructorError(
context,
node.start_mark,
"found conflicting ID '{0}'".format(key),
key_node.start_mark)
mapping[key] = value
return mapping |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.