code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def exportUsufy(data, ext, fileH):
"""
Method that exports the different structures onto different formats.
Args:
-----
data: Data to export.
ext: One of the following: csv, excel, json, ods.
fileH: Fileheader for the output files.
Returns:
--------
Performs the export as requested by parameter.
"""
if ext == "csv":
usufyToCsvExport(data, fileH+"."+ext)
elif ext == "gml":
usufyToGmlExport(data, fileH+"."+ext)
elif ext == "json":
usufyToJsonExport(data, fileH+"."+ext)
elif ext == "ods":
usufyToOdsExport(data, fileH+"."+ext)
elif ext == "png":
usufyToPngExport(data, fileH+"."+ext)
elif ext == "txt":
usufyToTextExport(data, fileH+"."+ext)
elif ext == "xls":
usufyToXlsExport(data, fileH+"."+ext)
elif ext == "xlsx":
usufyToXlsxExport(data, fileH+"."+ext) | Method that exports the different structures onto different formats.
Args:
-----
data: Data to export.
ext: One of the following: csv, excel, json, ods.
fileH: Fileheader for the output files.
Returns:
--------
Performs the export as requested by parameter. | Below is the instruction that describes the task:
### Input:
Method that exports the different structures onto different formats.
Args:
-----
data: Data to export.
ext: One of the following: csv, excel, json, ods.
fileH: Fileheader for the output files.
Returns:
--------
Performs the export as requested by parameter.
### Response:
def exportUsufy(data, ext, fileH):
"""
Method that exports the different structures onto different formats.
Args:
-----
data: Data to export.
ext: One of the following: csv, excel, json, ods.
fileH: Fileheader for the output files.
Returns:
--------
Performs the export as requested by parameter.
"""
if ext == "csv":
usufyToCsvExport(data, fileH+"."+ext)
elif ext == "gml":
usufyToGmlExport(data, fileH+"."+ext)
elif ext == "json":
usufyToJsonExport(data, fileH+"."+ext)
elif ext == "ods":
usufyToOdsExport(data, fileH+"."+ext)
elif ext == "png":
usufyToPngExport(data, fileH+"."+ext)
elif ext == "txt":
usufyToTextExport(data, fileH+"."+ext)
elif ext == "xls":
usufyToXlsExport(data, fileH+"."+ext)
elif ext == "xlsx":
usufyToXlsxExport(data, fileH+"."+ext) |
def _determine_representative_chains(self):
''' Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.'''
# todo: This logic should be moved into the FASTA class or a more general module (maybe a fast exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs.
equivalence_fiber = {}
matched_chains = set()
for chain_id, equivalent_chains in self.identical_sequences.iteritems():
matched_chains.add(chain_id)
equivalent_chain_ids = set()
for equivalent_chain in equivalent_chains:
assert(len(equivalent_chain) == 6)
assert((equivalent_chain[:5] == '%s_' % self.pdb_id) or (equivalent_chain[:5] == '%s:' % self.pdb_id)) # ClustalW changes e.g. 1KI1:A to 1KI1_A in its output
equivalent_chain_ids.add(equivalent_chain[5])
found = False
for equivalent_chain_id in equivalent_chain_ids:
if equivalence_fiber.get(equivalent_chain_id):
found = True
assert(equivalence_fiber[equivalent_chain_id] == equivalent_chain_ids.union(set([chain_id])))
break
if not found:
equivalence_fiber[chain_id] = set(equivalent_chain_ids)
equivalence_fiber[chain_id].add(chain_id)
for c in self.chains:
if c not in matched_chains:
equivalence_fiber[c] = set([c])
self.equivalence_fiber = equivalence_fiber
self.representative_chains = equivalence_fiber.keys() | Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping. | Below is the instruction that describes the task:
### Input:
Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.
### Response:
def _determine_representative_chains(self):
''' Quotient the chains to get equivalence classes of chains. These will be used for the actual mapping.'''
# todo: This logic should be moved into the FASTA class or a more general module (maybe a fast exists which uses a C/C++ library?) but at present it is easier to write here since we do not need to worry about other PDB IDs.
equivalence_fiber = {}
matched_chains = set()
for chain_id, equivalent_chains in self.identical_sequences.iteritems():
matched_chains.add(chain_id)
equivalent_chain_ids = set()
for equivalent_chain in equivalent_chains:
assert(len(equivalent_chain) == 6)
assert((equivalent_chain[:5] == '%s_' % self.pdb_id) or (equivalent_chain[:5] == '%s:' % self.pdb_id)) # ClustalW changes e.g. 1KI1:A to 1KI1_A in its output
equivalent_chain_ids.add(equivalent_chain[5])
found = False
for equivalent_chain_id in equivalent_chain_ids:
if equivalence_fiber.get(equivalent_chain_id):
found = True
assert(equivalence_fiber[equivalent_chain_id] == equivalent_chain_ids.union(set([chain_id])))
break
if not found:
equivalence_fiber[chain_id] = set(equivalent_chain_ids)
equivalence_fiber[chain_id].add(chain_id)
for c in self.chains:
if c not in matched_chains:
equivalence_fiber[c] = set([c])
self.equivalence_fiber = equivalence_fiber
self.representative_chains = equivalence_fiber.keys() |
def native_description(self):
""" todo document
"""
if self._session is None:
return None
res = self._session.res_info
if res:
return res.native_descr
else:
return None | todo document | Below is the instruction that describes the task:
### Input:
todo document
### Response:
def native_description(self):
""" todo document
"""
if self._session is None:
return None
res = self._session.res_info
if res:
return res.native_descr
else:
return None |
def _readString(self, length_fmt="H"):
"""
Reads a serialized string
:param length_fmt: Structure format of the string length (H or Q)
:return: The deserialized string
:raise RuntimeError: Unexpected end of stream
"""
(length,) = self._readStruct(">{0}".format(length_fmt))
ba = self.object_stream.read(length)
return to_unicode(ba) | Reads a serialized string
:param length_fmt: Structure format of the string length (H or Q)
:return: The deserialized string
:raise RuntimeError: Unexpected end of stream | Below is the instruction that describes the task:
### Input:
Reads a serialized string
:param length_fmt: Structure format of the string length (H or Q)
:return: The deserialized string
:raise RuntimeError: Unexpected end of stream
### Response:
def _readString(self, length_fmt="H"):
"""
Reads a serialized string
:param length_fmt: Structure format of the string length (H or Q)
:return: The deserialized string
:raise RuntimeError: Unexpected end of stream
"""
(length,) = self._readStruct(">{0}".format(length_fmt))
ba = self.object_stream.read(length)
return to_unicode(ba) |
def current_frame(self):
"""
Compute the number of the current frame (0-indexed)
"""
if not self._pause_level:
return (
int((self._clock() + self._offset) * self.frames_per_second)
% len(self._frames)
)
else:
return self._paused_frame | Compute the number of the current frame (0-indexed) | Below is the instruction that describes the task:
### Input:
Compute the number of the current frame (0-indexed)
### Response:
def current_frame(self):
"""
Compute the number of the current frame (0-indexed)
"""
if not self._pause_level:
return (
int((self._clock() + self._offset) * self.frames_per_second)
% len(self._frames)
)
else:
return self._paused_frame |
def _extract_id_token(id_token):
"""Extract the JSON payload from a JWT.
Does the extraction w/o checking the signature.
Args:
id_token: string or bytestring, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload.
"""
if type(id_token) == bytes:
segments = id_token.split(b'.')
else:
segments = id_token.split(u'.')
if len(segments) != 3:
raise VerifyJwtTokenError(
'Wrong number of segments in token: {0}'.format(id_token))
return json.loads(
_helpers._from_bytes(_helpers._urlsafe_b64decode(segments[1]))) | Extract the JSON payload from a JWT.
Does the extraction w/o checking the signature.
Args:
id_token: string or bytestring, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload. | Below is the instruction that describes the task:
### Input:
Extract the JSON payload from a JWT.
Does the extraction w/o checking the signature.
Args:
id_token: string or bytestring, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload.
### Response:
def _extract_id_token(id_token):
"""Extract the JSON payload from a JWT.
Does the extraction w/o checking the signature.
Args:
id_token: string or bytestring, OAuth 2.0 id_token.
Returns:
object, The deserialized JSON payload.
"""
if type(id_token) == bytes:
segments = id_token.split(b'.')
else:
segments = id_token.split(u'.')
if len(segments) != 3:
raise VerifyJwtTokenError(
'Wrong number of segments in token: {0}'.format(id_token))
return json.loads(
_helpers._from_bytes(_helpers._urlsafe_b64decode(segments[1]))) |
def run(self, data, runtime_dir, argv):
"""Select a concrete connector and run the process through it.
:param data: The :class:`~resolwe.flow.models.Data` object that
is to be run.
:param runtime_dir: The directory the executor is run from.
:param argv: The argument vector used to spawn the executor.
"""
process_scheduling = self.scheduling_class_map[data.process.scheduling_class]
if 'DISPATCHER_MAPPING' in getattr(settings, 'FLOW_MANAGER', {}):
class_name = settings.FLOW_MANAGER['DISPATCHER_MAPPING'][process_scheduling]
else:
class_name = getattr(settings, 'FLOW_MANAGER', {}).get('NAME', DEFAULT_CONNECTOR)
data.scheduled = now()
data.save(update_fields=['scheduled'])
async_to_sync(self.sync_counter.inc)('executor')
return self.connectors[class_name].submit(data, runtime_dir, argv) | Select a concrete connector and run the process through it.
:param data: The :class:`~resolwe.flow.models.Data` object that
is to be run.
:param runtime_dir: The directory the executor is run from.
:param argv: The argument vector used to spawn the executor. | Below is the instruction that describes the task:
### Input:
Select a concrete connector and run the process through it.
:param data: The :class:`~resolwe.flow.models.Data` object that
is to be run.
:param runtime_dir: The directory the executor is run from.
:param argv: The argument vector used to spawn the executor.
### Response:
def run(self, data, runtime_dir, argv):
"""Select a concrete connector and run the process through it.
:param data: The :class:`~resolwe.flow.models.Data` object that
is to be run.
:param runtime_dir: The directory the executor is run from.
:param argv: The argument vector used to spawn the executor.
"""
process_scheduling = self.scheduling_class_map[data.process.scheduling_class]
if 'DISPATCHER_MAPPING' in getattr(settings, 'FLOW_MANAGER', {}):
class_name = settings.FLOW_MANAGER['DISPATCHER_MAPPING'][process_scheduling]
else:
class_name = getattr(settings, 'FLOW_MANAGER', {}).get('NAME', DEFAULT_CONNECTOR)
data.scheduled = now()
data.save(update_fields=['scheduled'])
async_to_sync(self.sync_counter.inc)('executor')
return self.connectors[class_name].submit(data, runtime_dir, argv) |
def get_gan_loss(self, true_frames, gen_frames, name):
"""Get the discriminator + generator loss at every step.
This performs an 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
"""
# D - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
# G - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss | Get the discriminator + generator loss at every step.
This performs an 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss | Below is the instruction that describes the task:
### Input:
Get the discriminator + generator loss at every step.
This performs an 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
### Response:
def get_gan_loss(self, true_frames, gen_frames, name):
"""Get the discriminator + generator loss at every step.
This performs an 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
"""
# D - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
# G - STEP
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss |
def _set_sitematrix(self):
"""
capture API sitematrix data in data attribute
"""
data = self._load_response('sitematrix')
self.params.update({'title': self.COMMONS})
matrix = data.get('sitematrix')
if matrix:
self.data['sites'] = self._sitelist(matrix)
self.data['random'] = random.choice(self.data['sites']) | capture API sitematrix data in data attribute | Below is the instruction that describes the task:
### Input:
capture API sitematrix data in data attribute
### Response:
def _set_sitematrix(self):
"""
capture API sitematrix data in data attribute
"""
data = self._load_response('sitematrix')
self.params.update({'title': self.COMMONS})
matrix = data.get('sitematrix')
if matrix:
self.data['sites'] = self._sitelist(matrix)
self.data['random'] = random.choice(self.data['sites']) |
def float2json(value):
"""
CONVERT NUMBER TO JSON STRING, WITH BETTER CONTROL OVER ACCURACY
:param value: float, int, long, Decimal
:return: unicode
"""
if value == 0:
return u'0'
try:
sign = "-" if value < 0 else ""
value = abs(value)
sci = value.__format__(".15e")
mantissa, str_exp = sci.split("e")
digits, more_digits = _snap_to_base_10(mantissa)
int_exp = int(str_exp) + more_digits
if int_exp > 15:
return sign + digits[0] + '.' + (digits[1:].rstrip('0') or '0') + u"e" + text_type(int_exp)
elif int_exp >= 0:
return sign + (digits[:1 + int_exp] + '.' + digits[1 + int_exp:].rstrip('0')).rstrip('.')
elif -4 < int_exp:
digits = ("0" * (-int_exp)) + digits
return sign + (digits[:1] + '.' + digits[1:].rstrip('0')).rstrip('.')
else:
return sign + digits[0] + '.' + (digits[1:].rstrip('0') or '0') + u"e" + text_type(int_exp)
except Exception as e:
from mo_logs import Log
Log.error("not expected", e) | CONVERT NUMBER TO JSON STRING, WITH BETTER CONTROL OVER ACCURACY
:param value: float, int, long, Decimal
:return: unicode | Below is the instruction that describes the task:
### Input:
CONVERT NUMBER TO JSON STRING, WITH BETTER CONTROL OVER ACCURACY
:param value: float, int, long, Decimal
:return: unicode
### Response:
def float2json(value):
"""
CONVERT NUMBER TO JSON STRING, WITH BETTER CONTROL OVER ACCURACY
:param value: float, int, long, Decimal
:return: unicode
"""
if value == 0:
return u'0'
try:
sign = "-" if value < 0 else ""
value = abs(value)
sci = value.__format__(".15e")
mantissa, str_exp = sci.split("e")
digits, more_digits = _snap_to_base_10(mantissa)
int_exp = int(str_exp) + more_digits
if int_exp > 15:
return sign + digits[0] + '.' + (digits[1:].rstrip('0') or '0') + u"e" + text_type(int_exp)
elif int_exp >= 0:
return sign + (digits[:1 + int_exp] + '.' + digits[1 + int_exp:].rstrip('0')).rstrip('.')
elif -4 < int_exp:
digits = ("0" * (-int_exp)) + digits
return sign + (digits[:1] + '.' + digits[1:].rstrip('0')).rstrip('.')
else:
return sign + digits[0] + '.' + (digits[1:].rstrip('0') or '0') + u"e" + text_type(int_exp)
except Exception as e:
from mo_logs import Log
Log.error("not expected", e) |
def get_line_value(self, context_type):
"""
Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line
"""
if context_type.upper() == "ENV":
return self.line_envs
elif context_type.upper() == "LABEL":
return self.line_labels | Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line | Below is the instruction that describes the task:
### Input:
Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line
### Response:
def get_line_value(self, context_type):
"""
Get the values defined on this line.
:param context_type: "ENV" or "LABEL"
:return: values of given type defined on this line
"""
if context_type.upper() == "ENV":
return self.line_envs
elif context_type.upper() == "LABEL":
return self.line_labels |
def set_field(self, state, field_name, field_type, value):
"""
Sets an instance field.
"""
field_ref = SimSootValue_InstanceFieldRef.get_ref(state=state,
obj_alloc_id=self.heap_alloc_id,
field_class_name=self.type,
field_name=field_name,
field_type=field_type)
# store value in java memory
state.memory.store(field_ref, value) | Sets an instance field. | Below is the instruction that describes the task:
### Input:
Sets an instance field.
### Response:
def set_field(self, state, field_name, field_type, value):
"""
Sets an instance field.
"""
field_ref = SimSootValue_InstanceFieldRef.get_ref(state=state,
obj_alloc_id=self.heap_alloc_id,
field_class_name=self.type,
field_name=field_name,
field_type=field_type)
# store value in java memory
state.memory.store(field_ref, value) |
def rel_posterior_mass(logx, logl):
"""Calculate the relative posterior mass for some array of logx values
given the likelihood, prior and number of dimensions.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value.
"""
logw = logx + logl
w_rel = np.exp(logw - logw.max())
w_rel /= np.abs(np.trapz(w_rel, x=logx))
return w_rel | Calculate the relative posterior mass for some array of logx values
given the likelihood, prior and number of dimensions.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value. | Below is the instruction that describes the task:
### Input:
Calculate the relative posterior mass for some array of logx values
given the likelihood, prior and number of dimensions.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value.
### Response:
def rel_posterior_mass(logx, logl):
"""Calculate the relative posterior mass for some array of logx values
given the likelihood, prior and number of dimensions.
The posterior mass at each logX value is proportional to L(X)X, where L(X)
is the likelihood.
The weight is returned normalized so that the integral of the weight with
respect to logX is 1.
Parameters
----------
logx: 1d numpy array
Logx values at which to calculate posterior mass.
logl: 1d numpy array
Logl values corresponding to each logx (same shape as logx).
Returns
-------
w_rel: 1d numpy array
Relative posterior mass at each input logx value.
"""
logw = logx + logl
w_rel = np.exp(logw - logw.max())
w_rel /= np.abs(np.trapz(w_rel, x=logx))
return w_rel |
def get_roc_values(motif, fg_file, bg_file):
"""Calculate ROC AUC values for ROC plots."""
#print(calc_stats(motif, fg_file, bg_file, stats=["roc_values"], ncpus=1))
#["roc_values"])
try:
# fg_result = motif.pwm_scan_score(Fasta(fg_file), cutoff=0.0, nreport=1)
# fg_vals = [sorted(x)[-1] for x in fg_result.values()]
#
# bg_result = motif.pwm_scan_score(Fasta(bg_file), cutoff=0.0, nreport=1)
# bg_vals = [sorted(x)[-1] for x in bg_result.values()]
# (x, y) = roc_values(fg_vals, bg_vals)
stats = calc_stats(motif, fg_file, bg_file, stats=["roc_values"], ncpus=1)
(x,y) = list(stats.values())[0]["roc_values"]
return None,x,y
except Exception as e:
print(motif)
print(motif.id)
raise
error = e
return error,[],[] | Calculate ROC AUC values for ROC plots. | Below is the instruction that describes the task:
### Input:
Calculate ROC AUC values for ROC plots.
### Response:
def get_roc_values(motif, fg_file, bg_file):
"""Calculate ROC AUC values for ROC plots."""
#print(calc_stats(motif, fg_file, bg_file, stats=["roc_values"], ncpus=1))
#["roc_values"])
try:
# fg_result = motif.pwm_scan_score(Fasta(fg_file), cutoff=0.0, nreport=1)
# fg_vals = [sorted(x)[-1] for x in fg_result.values()]
#
# bg_result = motif.pwm_scan_score(Fasta(bg_file), cutoff=0.0, nreport=1)
# bg_vals = [sorted(x)[-1] for x in bg_result.values()]
# (x, y) = roc_values(fg_vals, bg_vals)
stats = calc_stats(motif, fg_file, bg_file, stats=["roc_values"], ncpus=1)
(x,y) = list(stats.values())[0]["roc_values"]
return None,x,y
except Exception as e:
print(motif)
print(motif.id)
raise
error = e
return error,[],[] |
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys | Return a set of keys filtered according to the given arguments. | Below is the instruction that describes the task:
### Input:
Return a set of keys filtered according to the given arguments.
### Response:
def filter_keys(self, **kwargs):
"Return a set of keys filtered according to the given arguments."
self._used_index = False
keys = set(self.data.keys())
for key_filter, v_filter in kwargs.items():
if key_filter in self.indexes:
self._used_index = True
if v_filter not in self.indexes[key_filter]:
keys = set([])
else:
keys = keys.intersection(
self.indexes[key_filter][v_filter])
else:
keys = keys.intersection(
self.simple_filter(key_filter, v_filter))
return keys |
def expand_folder(files):
"""Return a clone of file list files where all directories are recursively replaced with their contents."""
expfiles = []
for file in files:
if os.path.isdir(file):
for dirpath, dirnames, filenames in os.walk(file):
for filename in filenames:
expfiles.append(os.path.join(dirpath, filename))
else:
expfiles.append(file)
for path in expfiles:
if not os.path.exists(path):
sys.stderr.write('%s: No such file or directory\n' % path)
return expfiles | Return a clone of file list files where all directories are recursively replaced with their contents. | Below is the instruction that describes the task:
### Input:
Return a clone of file list files where all directories are recursively replaced with their contents.
### Response:
def expand_folder(files):
"""Return a clone of file list files where all directories are recursively replaced with their contents."""
expfiles = []
for file in files:
if os.path.isdir(file):
for dirpath, dirnames, filenames in os.walk(file):
for filename in filenames:
expfiles.append(os.path.join(dirpath, filename))
else:
expfiles.append(file)
for path in expfiles:
if not os.path.exists(path):
sys.stderr.write('%s: No such file or directory\n' % path)
return expfiles |
def predict(self, recording, result_format=None):
"""Predict the class of the given recording.
Parameters
----------
recording : string
Recording of a single handwritten dataset in JSON format.
result_format : string, optional
If it is 'LaTeX', then only the latex code will be returned
Returns
-------
list
"""
evaluate = utils.evaluate_model_single_recording_preloaded
results = evaluate(self.preprocessing_queue,
self.feature_list,
self.model,
self.output_semantics,
recording)
if result_format == 'LaTeX':
for i in range(len(results)):
results[i]['semantics'] = results[i]['semantics'].split(";")[1]
for i in range(len(results)):
splitted = results[i]['semantics'].split(";")
results[i]['complete_latex'] = splitted[1]
return results | Predict the class of the given recording.
Parameters
----------
recording : string
Recording of a single handwritten dataset in JSON format.
result_format : string, optional
If it is 'LaTeX', then only the latex code will be returned
Returns
-------
list | Below is the instruction that describes the task:
### Input:
Predict the class of the given recording.
Parameters
----------
recording : string
Recording of a single handwritten dataset in JSON format.
result_format : string, optional
If it is 'LaTeX', then only the latex code will be returned
Returns
-------
list
### Response:
def predict(self, recording, result_format=None):
"""Predict the class of the given recording.
Parameters
----------
recording : string
Recording of a single handwritten dataset in JSON format.
result_format : string, optional
If it is 'LaTeX', then only the latex code will be returned
Returns
-------
list
"""
evaluate = utils.evaluate_model_single_recording_preloaded
results = evaluate(self.preprocessing_queue,
self.feature_list,
self.model,
self.output_semantics,
recording)
if result_format == 'LaTeX':
for i in range(len(results)):
results[i]['semantics'] = results[i]['semantics'].split(";")[1]
for i in range(len(results)):
splitted = results[i]['semantics'].split(";")
results[i]['complete_latex'] = splitted[1]
return results |
def environ(on=os, **kw):
"""Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument.
"""
originals = list()
for key in kw:
originals.append((key, on.environ.get(key, None)))
on.environ[key] = kw[key]
yield
for key, value in originals:
if not value:
del on.environ[key]
continue
on.environ[key] = value | Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument. | Below is the instruction that describes the task:
### Input:
Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument.
### Response:
def environ(on=os, **kw):
"""Update one or more environment variables.
Preserves the previous environment variable (if available) and can be
applied to remote connections that offer an @environ@ attribute using the
@on@ argument.
"""
originals = list()
for key in kw:
originals.append((key, on.environ.get(key, None)))
on.environ[key] = kw[key]
yield
for key, value in originals:
if not value:
del on.environ[key]
continue
on.environ[key] = value |
def ekcii(table, cindex, lenout=_default_len_out):
"""
Return attribute information about a column belonging to a loaded
EK table, specifying the column by table and index.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekcii_c.html
:param table: Name of table containing column.
:type table: str
:param cindex: Index of column whose attributes are to be found.
:type cindex: int
:param lenout: Maximum allowed length of column name.
:return: Name of column, Column attribute descriptor.
:rtype: tuple
"""
table = stypes.stringToCharP(table)
cindex = ctypes.c_int(cindex)
lenout = ctypes.c_int(lenout)
column = stypes.stringToCharP(lenout)
attdsc = stypes.SpiceEKAttDsc()
libspice.ekcii_c(table, cindex, lenout, column, ctypes.byref(attdsc))
return stypes.toPythonString(column), attdsc | Return attribute information about a column belonging to a loaded
EK table, specifying the column by table and index.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekcii_c.html
:param table: Name of table containing column.
:type table: str
:param cindex: Index of column whose attributes are to be found.
:type cindex: int
:param lenout: Maximum allowed length of column name.
:return: Name of column, Column attribute descriptor.
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Return attribute information about a column belonging to a loaded
EK table, specifying the column by table and index.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekcii_c.html
:param table: Name of table containing column.
:type table: str
:param cindex: Index of column whose attributes are to be found.
:type cindex: int
:param lenout: Maximum allowed length of column name.
:return: Name of column, Column attribute descriptor.
:rtype: tuple
### Response:
def ekcii(table, cindex, lenout=_default_len_out):
"""
Return attribute information about a column belonging to a loaded
EK table, specifying the column by table and index.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekcii_c.html
:param table: Name of table containing column.
:type table: str
:param cindex: Index of column whose attributes are to be found.
:type cindex: int
:param lenout: Maximum allowed length of column name.
:return: Name of column, Column attribute descriptor.
:rtype: tuple
"""
table = stypes.stringToCharP(table)
cindex = ctypes.c_int(cindex)
lenout = ctypes.c_int(lenout)
column = stypes.stringToCharP(lenout)
attdsc = stypes.SpiceEKAttDsc()
libspice.ekcii_c(table, cindex, lenout, column, ctypes.byref(attdsc))
return stypes.toPythonString(column), attdsc |
def set_lang(prefix):
'''
Change the language of the API being requested.
Set `prefix` to one of the two letter prefixes found on the `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_.
After setting the language, the cache for ``search``, ``suggest``, and ``summary`` will be cleared.
.. note:: Make sure you search for page titles in the language that you have set.
'''
global API_URL
API_URL = 'http://' + prefix.lower() + '.wikipedia.org/w/api.php'
for cached_func in (search, suggest, summary):
cached_func.clear_cache() | Change the language of the API being requested.
Set `prefix` to one of the two letter prefixes found on the `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_.
After setting the language, the cache for ``search``, ``suggest``, and ``summary`` will be cleared.
.. note:: Make sure you search for page titles in the language that you have set. | Below is the instruction that describes the task:
### Input:
Change the language of the API being requested.
Set `prefix` to one of the two letter prefixes found on the `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_.
After setting the language, the cache for ``search``, ``suggest``, and ``summary`` will be cleared.
.. note:: Make sure you search for page titles in the language that you have set.
### Response:
def set_lang(prefix):
    """
    Change the language of the API being requested.

    Set `prefix` to one of the two letter prefixes found on the
    `list of all Wikipedias <http://meta.wikimedia.org/wiki/List_of_Wikipedias>`_.
    After setting the language, the cache for ``search``, ``suggest``, and
    ``summary`` will be cleared.

    .. note:: Make sure you search for page titles in the language that you have set.
    """
    global API_URL
    API_URL = 'http://{0}.wikipedia.org/w/api.php'.format(prefix.lower())
    # Cached results are language-specific, so drop them after switching.
    for fn in (search, suggest, summary):
        fn.clear_cache()
def split_address(address):
"""
Returns (host, port) with an integer port from the specified address
string. (None, None) is returned if the address is invalid.
"""
invalid = None, None
if not address and address != 0:
return invalid
components = str(address).split(':')
if len(components) > 2:
return invalid
if components[0] and not valid_hostname(components[0]):
return invalid
if len(components) == 2 and not valid_port(components[1]):
return invalid
if len(components) == 1:
components.insert(0 if valid_port(components[0]) else 1, None)
host, port = components
port = int(port) if port else None
return host, port | Returns (host, port) with an integer port from the specified address
string. (None, None) is returned if the address is invalid. | Below is the instruction that describes the task:
### Input:
Returns (host, port) with an integer port from the specified address
string. (None, None) is returned if the address is invalid.
### Response:
def split_address(address):
    """
    Returns (host, port) with an integer port from the specified address
    string. (None, None) is returned if the address is invalid.
    """
    bad = (None, None)
    # Reject empty/None addresses, but deliberately let the integer 0 through.
    if not address and address != 0:
        return bad
    parts = str(address).split(':')
    if len(parts) > 2:
        return bad
    if parts[0] and not valid_hostname(parts[0]):
        return bad
    if len(parts) == 2 and not valid_port(parts[1]):
        return bad
    if len(parts) == 1:
        # A lone component is a port if it looks like one, otherwise a host;
        # pad the other slot with None.
        parts.insert(0 if valid_port(parts[0]) else 1, None)
    host, port = parts
    return host, (int(port) if port else None)
def db_from_hass_config(path=None, **kwargs):
"""Initialize a database from HASS config."""
if path is None:
path = config.find_hass_config()
url = config.db_url_from_hass_config(path)
return HassDatabase(url, **kwargs) | Initialize a database from HASS config. | Below is the instruction that describes the task:
### Input:
Initialize a database from HASS config.
### Response:
def db_from_hass_config(path=None, **kwargs):
    """Initialize a database from HASS config.

    :param path: path to the Home Assistant configuration directory;
        auto-discovered when ``None``.
    :param kwargs: passed through to :class:`HassDatabase`.
    :return: a :class:`HassDatabase` built from the recorder database URL.
    """
    # Fall back to auto-discovering the Home Assistant config directory
    # when no explicit path is given.
    if path is None:
        path = config.find_hass_config()
    # Derive the recorder database URL from the configuration files.
    url = config.db_url_from_hass_config(path)
    return HassDatabase(url, **kwargs)
def render(self):
"""Render reply from Python object to XML string"""
tpl = '<xml>\n{data}\n</xml>'
nodes = []
msg_type = '<MsgType><![CDATA[{msg_type}]]></MsgType>'.format(
msg_type=self.type
)
nodes.append(msg_type)
for name, field in self._fields.items():
value = getattr(self, name, field.default)
node_xml = field.to_xml(value)
nodes.append(node_xml)
data = '\n'.join(nodes)
return tpl.format(data=data) | Render reply from Python object to XML string | Below is the the instruction that describes the task:
### Input:
Render reply from Python object to XML string
### Response:
def render(self):
    """Render reply from Python object to XML string"""
    # The MsgType node always comes first; field nodes follow in the
    # order the fields are declared.
    parts = ['<MsgType><![CDATA[{msg_type}]]></MsgType>'.format(msg_type=self.type)]
    for name, field in self._fields.items():
        # Missing attributes fall back to the field's declared default.
        parts.append(field.to_xml(getattr(self, name, field.default)))
    return '<xml>\n{data}\n</xml>'.format(data='\n'.join(parts))
def render_config(data,ctx):
"""Render the given config data using Django's template system.
This function takes a config data string and a dict of context variables,
renders the data through Django's template system, and returns the result.
"""
djsupervisor_tags.current_context = ctx
data = "{% load djsupervisor_tags %}" + data
t = template.Template(data)
c = template.Context(ctx)
return t.render(c).encode("ascii") | Render the given config data using Django's template system.
This function takes a config data string and a dict of context variables,
renders the data through Django's template system, and returns the result. | Below is the instruction that describes the task:
### Input:
Render the given config data using Django's template system.
This function takes a config data string and a dict of context variables,
renders the data through Django's template system, and returns the result.
### Response:
def render_config(data, ctx):
    """Render the given config data using Django's template system.

    This function takes a config data string and a dict of context variables,
    renders the data through Django's template system, and returns the result.
    """
    # Expose the context to the custom djsupervisor template tags before
    # rendering, since they read it from module state.
    djsupervisor_tags.current_context = ctx
    source = "{% load djsupervisor_tags %}" + data
    rendered = template.Template(source).render(template.Context(ctx))
    return rendered.encode("ascii")
def index_delete(self, index):
'''
Deletes the specified index
> search = ElasticSearch()
> search.index_delete('twitter')
{"ok" : True, "acknowledged" : True }
'''
request = self.session
url = 'http://%s:%s/%s' % (self.host, self.port, index)
response = request.delete(url)
return response | Deletes the specified index
> search = ElasticSearch()
> search.index_delete('twitter')
{"ok" : True, "acknowledged" : True } | Below is the the instruction that describes the task:
### Input:
Deletes the specified index
> search = ElasticSearch()
> search.index_delete('twitter')
{"ok" : True, "acknowledged" : True }
### Response:
def index_delete(self, index):
    """
    Deletes the specified index

    > search = ElasticSearch()
    > search.index_delete('twitter')
    {"ok" : True, "acknowledged" : True }
    """
    url = 'http://%s:%s/%s' % (self.host, self.port, index)
    # Issue the DELETE against the index endpoint via the shared session.
    return self.session.delete(url)
def delete_core_element_of_model(model, raise_exceptions=False, recursive=True, destroy=True, force=False):
"""Deletes respective core element of handed model of its state machine
If the model is one of state, data flow or transition, it is tried to delete that model together with its
data from the corresponding state machine.
:param model: The model of respective core element to delete
:param bool raise_exceptions: Whether to raise exceptions or only log errors in case of failures
:param bool destroy: Access the destroy flag of the core remove methods
:return: True if successful, False else
"""
if isinstance(model, AbstractStateModel) and model.state.is_root_state:
logger.warning("Deletion is not allowed. {0} is root state of state machine.".format(model.core_element))
return False
state_m = model.parent
if state_m is None:
msg = "Model has no parent from which it could be deleted from"
if raise_exceptions:
raise ValueError(msg)
logger.error(msg)
return False
if is_selection_inside_of_library_state(selected_elements=[model]):
logger.warning("Deletion is not allowed. Element {0} is inside of a library.".format(model.core_element))
return False
assert isinstance(state_m, StateModel)
state = state_m.state
core_element = model.core_element
try:
if core_element in state:
state.remove(core_element, recursive=recursive, destroy=destroy, force=force)
return True
return False
except (AttributeError, ValueError) as e:
if raise_exceptions:
raise
logger.error("The model '{}' for core element '{}' could not be deleted: {}".format(model, core_element, e))
return False | Deletes respective core element of handed model of its state machine
If the model is one of state, data flow or transition, it is tried to delete that model together with its
data from the corresponding state machine.
:param model: The model of respective core element to delete
:param bool raise_exceptions: Whether to raise exceptions or only log errors in case of failures
:param bool destroy: Access the destroy flag of the core remove methods
:return: True if successful, False else | Below is the instruction that describes the task:
### Input:
Deletes respective core element of handed model of its state machine
If the model is one of state, data flow or transition, it is tried to delete that model together with its
data from the corresponding state machine.
:param model: The model of respective core element to delete
:param bool raise_exceptions: Whether to raise exceptions or only log errors in case of failures
:param bool destroy: Access the destroy flag of the core remove methods
:return: True if successful, False else
### Response:
def delete_core_element_of_model(model, raise_exceptions=False, recursive=True, destroy=True, force=False):
    """Deletes respective core element of handed model of its state machine

    If the model is one of state, data flow or transition, it is tried to delete that model together with its
    data from the corresponding state machine.

    :param model: The model of respective core element to delete
    :param bool raise_exceptions: Whether to raise exceptions or only log errors in case of failures
    :param bool recursive: Forwarded to the core ``remove`` call
    :param bool destroy: Access the destroy flag of the core remove methods
    :param bool force: Forwarded to the core ``remove`` call
    :return: True if successful, False else
    """
    # The root state of a state machine must never be removed.
    if isinstance(model, AbstractStateModel) and model.state.is_root_state:
        logger.warning("Deletion is not allowed. {0} is root state of state machine.".format(model.core_element))
        return False
    # Removal goes through the parent state model; without a parent there is
    # nothing to remove the element from.
    state_m = model.parent
    if state_m is None:
        msg = "Model has no parent from which it could be deleted from"
        if raise_exceptions:
            raise ValueError(msg)
        logger.error(msg)
        return False
    # Elements inside a library state are read-only and must not be modified.
    if is_selection_inside_of_library_state(selected_elements=[model]):
        logger.warning("Deletion is not allowed. Element {0} is inside of a library.".format(model.core_element))
        return False
    assert isinstance(state_m, StateModel)
    state = state_m.state
    core_element = model.core_element
    try:
        # Only attempt removal if the parent state still contains the element.
        if core_element in state:
            state.remove(core_element, recursive=recursive, destroy=destroy, force=force)
            return True
        return False
    except (AttributeError, ValueError) as e:
        # Either re-raise for the caller or degrade to a logged failure,
        # mirroring the raise_exceptions contract above.
        if raise_exceptions:
            raise
        logger.error("The model '{}' for core element '{}' could not be deleted: {}".format(model, core_element, e))
        return False
def plot_phase_plane(self, indices=None, **kwargs):
""" Plots a phase portrait from last integration.
This method will be deprecated. Please use :meth:`Result.plot_phase_plane`.
See :func:`pyodesys.plotting.plot_phase_plane`
"""
return self._plot(plot_phase_plane, indices=indices, **kwargs) | Plots a phase portrait from last integration.
This method will be deprecated. Please use :meth:`Result.plot_phase_plane`.
See :func:`pyodesys.plotting.plot_phase_plane` | Below is the instruction that describes the task:
### Input:
Plots a phase portrait from last integration.
This method will be deprecated. Please use :meth:`Result.plot_phase_plane`.
See :func:`pyodesys.plotting.plot_phase_plane`
### Response:
def plot_phase_plane(self, indices=None, **kwargs):
    """ Plots a phase portrait from last integration.

    This method will be deprecated. Please use :meth:`Result.plot_phase_plane`.

    See :func:`pyodesys.plotting.plot_phase_plane`

    :param indices: forwarded to the plotting helper (selects which
        dependent variables are plotted against each other)
    :param kwargs: forwarded to :func:`pyodesys.plotting.plot_phase_plane`
    """
    # Delegate to the generic plot dispatcher with the module-level
    # plot_phase_plane function as the renderer.
    return self._plot(plot_phase_plane, indices=indices, **kwargs)
def ascii2h5(dat_fname, h5_fname):
"""
Converts from the original ASCII format of the Chen+ (2014) 3D dust map to
the HDF5 format.
Args:
dat_fname (:obj:`str`): Filename of the original ASCII .dat file.
h5_fname (:obj:`str`): Output filename to write the resulting HDF5 file to.
"""
table = np.loadtxt(dat_fname, skiprows=1, dtype='f4')
filter_kwargs = dict(
chunks=True,
compression='gzip',
compression_opts=3)
# Filter out pixels with all zeros
idx = ~np.all(table[:,2:32] < 1.e-5, axis=1)
with h5py.File(h5_fname, 'w') as f:
d = np.arange(0., 4.351, 0.15).astype('f4')
dset = f.create_dataset('dists', data=d, **filter_kwargs)
dset.attrs['description'] = 'Distances at which extinction is measured'
dset.attrs['units'] = 'kpc'
dset = f.create_dataset('pix_lb', data=table[idx,0:2], **filter_kwargs)
dset.attrs['description'] = 'Galactic (l, b) of each pixel'
dset.attrs['units'] = 'deg'
dset = f.create_dataset('A_r', data=table[idx,2:32], **filter_kwargs)
dset.attrs['description'] = 'Extinction'
dset.attrs['shape'] = '(pixel, distance)'
dset.attrs['band'] = 'r'
dset.attrs['units'] = 'mag'
dset = f.create_dataset('A_r_err', data=table[idx,32:], **filter_kwargs)
dset.attrs['description'] = 'Gaussian uncertainty in extinction'
dset.attrs['shape'] = '(pixel, distance)'
dset.attrs['band'] = 'r'
dset.attrs['units'] = 'mag' | Converts from the original ASCII format of the Chen+ (2014) 3D dust map to
the HDF5 format.
Args:
dat_fname (:obj:`str`): Filename of the original ASCII .dat file.
h5_fname (:obj:`str`): Output filename to write the resulting HDF5 file to. | Below is the instruction that describes the task:
### Input:
Converts from the original ASCII format of the Chen+ (2014) 3D dust map to
the HDF5 format.
Args:
dat_fname (:obj:`str`): Filename of the original ASCII .dat file.
h5_fname (:obj:`str`): Output filename to write the resulting HDF5 file to.
### Response:
def ascii2h5(dat_fname, h5_fname):
    """
    Converts from the original ASCII format of the Chen+ (2014) 3D dust map to
    the HDF5 format.

    Args:
        dat_fname (:obj:`str`): Filename of the original ASCII .dat file.
        h5_fname (:obj:`str`): Output filename to write the resulting HDF5 file to.
    """
    table = np.loadtxt(dat_fname, skiprows=1, dtype='f4')
    dset_kwargs = dict(
        chunks=True,
        compression='gzip',
        compression_opts=3)
    # Keep only pixels that carry any extinction signal at all
    # (drop rows whose A_r columns are essentially all zero).
    keep = ~np.all(table[:, 2:32] < 1.e-5, axis=1)
    with h5py.File(h5_fname, 'w') as f:
        dists = np.arange(0., 4.351, 0.15).astype('f4')
        d = f.create_dataset('dists', data=dists, **dset_kwargs)
        d.attrs['description'] = 'Distances at which extinction is measured'
        d.attrs['units'] = 'kpc'

        d = f.create_dataset('pix_lb', data=table[keep, 0:2], **dset_kwargs)
        d.attrs['description'] = 'Galactic (l, b) of each pixel'
        d.attrs['units'] = 'deg'

        d = f.create_dataset('A_r', data=table[keep, 2:32], **dset_kwargs)
        d.attrs['description'] = 'Extinction'
        d.attrs['shape'] = '(pixel, distance)'
        d.attrs['band'] = 'r'
        d.attrs['units'] = 'mag'

        d = f.create_dataset('A_r_err', data=table[keep, 32:], **dset_kwargs)
        d.attrs['description'] = 'Gaussian uncertainty in extinction'
        d.attrs['shape'] = '(pixel, distance)'
        d.attrs['band'] = 'r'
        d.attrs['units'] = 'mag'
def set_log_level(self, level, keep=True):
"""
Set the log level. If keep is True, then it will not change along with
global log changes.
"""
self._set_log_level(level)
self._log_level_set_explicitly = keep | Set the log level. If keep is True, then it will not change along with
global log changes. | Below is the instruction that describes the task:
### Input:
Set the log level. If keep is True, then it will not change along with
global log changes.
### Response:
def set_log_level(self, level, keep=True):
    """
    Set the log level. If keep is True, then it will not change along with
    global log changes.

    :param level: the new log level to apply
    :param keep: when True, mark the level as explicitly set so that later
        global log-level changes leave it untouched
    """
    self._set_log_level(level)
    # Remember whether the caller pinned this level explicitly.
    self._log_level_set_explicitly = keep
def create_virtualenv(venv=VENV, install_pip=False):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
install = ['virtualenv', '-q', venv]
run_command(install)
print 'done.'
print 'Installing pip in virtualenv...',
if install_pip and \
not run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']):
die("Failed to install pip.")
print 'done.' | Creates the virtual environment and installs PIP only into the
virtual environment | Below is the instruction that describes the task:
### Input:
Creates the virtual environment and installs PIP only into the
virtual environment
### Response:
def create_virtualenv(venv=VENV, install_pip=False):
    """Creates the virtual environment and installs PIP only into the
    virtual environment

    :param venv: target directory for the virtualenv (module default VENV)
    :param install_pip: when True, also install pip>1.0 inside the venv
    """
    # NOTE(review): Python 2 print statements — this module is py2-only
    # as written and will not run under Python 3 without changes.
    print 'Creating venv...',
    install = ['virtualenv', '-q', venv]
    run_command(install)
    print 'done.'
    print 'Installing pip in virtualenv...',
    # Run easy_install inside the venv via the helper script; abort the
    # whole setup if pip cannot be installed.
    if install_pip and \
            not run_command(['tools/with_venv.sh', 'easy_install',
                             'pip>1.0']):
        die("Failed to install pip.")
    print 'done.'
def content_create(self, key, model, contentid, meta, protected=False):
"""Creates a content entity bucket with the given `contentid`.
This method maps to
https://github.com/exosite/docs/tree/master/provision#post---create-content-entity.
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
meta:
protected: Whether or not this is restricted to certain device serial numbers only.
"""
params = {'id': contentid, 'meta': meta}
if protected is not False:
params['protected'] = 'true'
data = urlencode(params)
path = PROVISION_MANAGE_CONTENT + model + '/'
return self._request(path,
key, data, 'POST', self._manage_by_cik) | Creates a content entity bucket with the given `contentid`.
This method maps to
https://github.com/exosite/docs/tree/master/provision#post---create-content-entity.
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
meta:
protected: Whether or not this is restricted to certain device serial numbers only. | Below is the instruction that describes the task:
### Input:
Creates a content entity bucket with the given `contentid`.
This method maps to
https://github.com/exosite/docs/tree/master/provision#post---create-content-entity.
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
meta:
protected: Whether or not this is restricted to certain device serial numbers only.
### Response:
def content_create(self, key, model, contentid, meta, protected=False):
    """Creates a content entity bucket with the given `contentid`.

    This method maps to
    https://github.com/exosite/docs/tree/master/provision#post---create-content-entity.

    Args:
        key: The CIK or Token for the device
        model:
        contentid: The ID used to name the entity bucket
        meta:
        protected: Whether or not this is restricted to certain device serial numbers only.
    """
    form = {'id': contentid, 'meta': meta}
    # Anything other than the literal False marks the bucket as protected.
    if protected is not False:
        form['protected'] = 'true'
    path = PROVISION_MANAGE_CONTENT + model + '/'
    return self._request(path,
                         key, urlencode(form), 'POST', self._manage_by_cik)
def calculate_leaf_paths(self):
"""Build map of reverse xrefs then traverse backwards marking path to leaf for all leaves.
"""
reverse_xref = {}
leaves = set()
for v in self.value.values():
if v.leaf:
leaves.add(v)
for xref in v.value_xref:
reverse_xref.setdefault(xref, []).append(v.ident)
for leaf in leaves:
self.calculate_leaf_path(leaf, reverse_xref) | Build map of reverse xrefs then traverse backwards marking path to leaf for all leaves. | Below is the instruction that describes the task:
### Input:
Build map of reverse xrefs then traverse backwards marking path to leaf for all leaves.
### Response:
def calculate_leaf_paths(self):
    """Build map of reverse xrefs then traverse backwards marking path to leaf for all leaves."""
    back_refs = {}
    leaves = set()
    # Single pass: collect leaf values and invert the xref relation so we
    # can later walk from a value back to everything that references it.
    for val in self.value.values():
        if val.leaf:
            leaves.add(val)
        for target in val.value_xref:
            back_refs.setdefault(target, []).append(val.ident)
    # Mark the path back to each leaf.
    for leaf in leaves:
        self.calculate_leaf_path(leaf, back_refs)
def _set_sflow(self, v, load=False):
"""
Setter method for sflow, mapped from YANG variable /overlay_gateway/sflow (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sflow is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sflow() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("sflow_profile_name",sflow.sflow, yang_name="sflow", rest_name="sflow", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='sflow-profile-name', extensions={u'tailf-common': {u'info': u'Configure Sflow for the tunnels of this gateway', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'TunnelSflowCallpoint'}}), is_container='list', yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Sflow for the tunnels of this gateway', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'TunnelSflowCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sflow must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("sflow_profile_name",sflow.sflow, yang_name="sflow", rest_name="sflow", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='sflow-profile-name', extensions={u'tailf-common': {u'info': u'Configure Sflow for the tunnels of this gateway', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'TunnelSflowCallpoint'}}), is_container='list', yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Sflow for the tunnels of this gateway', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'TunnelSflowCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
})
self.__sflow = t
if hasattr(self, '_set'):
self._set() | Setter method for sflow, mapped from YANG variable /overlay_gateway/sflow (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sflow is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sflow() directly. | Below is the instruction that describes the task:
### Input:
Setter method for sflow, mapped from YANG variable /overlay_gateway/sflow (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_sflow is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sflow() directly.
### Response:
def _set_sflow(self, v, load=False):
    """
    Setter method for sflow, mapped from YANG variable /overlay_gateway/sflow (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_sflow is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_sflow() directly.

    NOTE: auto-generated (pyangbind-style) code — do not hand-edit the
    YANGDynClass argument lists below; regenerate from the YANG model.
    """
    # Unwrap values that carry their underlying YANG type before validating.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-validate the value against the sflow list type definition.
        t = YANGDynClass(v,base=YANGListType("sflow_profile_name",sflow.sflow, yang_name="sflow", rest_name="sflow", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='sflow-profile-name', extensions={u'tailf-common': {u'info': u'Configure Sflow for the tunnels of this gateway', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'TunnelSflowCallpoint'}}), is_container='list', yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Sflow for the tunnels of this gateway', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'TunnelSflowCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """sflow must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("sflow_profile_name",sflow.sflow, yang_name="sflow", rest_name="sflow", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='sflow-profile-name', extensions={u'tailf-common': {u'info': u'Configure Sflow for the tunnels of this gateway', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'TunnelSflowCallpoint'}}), is_container='list', yang_name="sflow", rest_name="sflow", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Sflow for the tunnels of this gateway', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'TunnelSflowCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
        })
    self.__sflow = t
    # Fire the change-notification hook when the containing class defines one.
    if hasattr(self, '_set'):
        self._set()
def coalesce(self):
"""Coalesce the segments for this flag.
This method does two things:
- `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and
`~DataQualityFlag.active` segment lists
- forces the `active` segments to be a proper subset of the `known`
segments
.. note::
this operations is performed in-place.
Returns
-------
self
a view of this flag, not a copy.
"""
self.known = self.known.coalesce()
self.active = self.active.coalesce()
self.active = (self.known & self.active).coalesce()
return self | Coalesce the segments for this flag.
This method does two things:
- `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and
`~DataQualityFlag.active` segment lists
- forces the `active` segments to be a proper subset of the `known`
segments
.. note::
this operations is performed in-place.
Returns
-------
self
a view of this flag, not a copy. | Below is the instruction that describes the task:
### Input:
Coalesce the segments for this flag.
This method does two things:
- `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and
`~DataQualityFlag.active` segment lists
- forces the `active` segments to be a proper subset of the `known`
segments
.. note::
this operations is performed in-place.
Returns
-------
self
a view of this flag, not a copy.
### Response:
def coalesce(self):
    """Coalesce the segments for this flag.

    This method does two things:

    - `coalesces <SegmentList.coalesce>` the `~DataQualityFlag.known` and
      `~DataQualityFlag.active` segment lists
    - forces the `active` segments to be a proper subset of the `known`
      segments

    .. note::

       this operations is performed in-place.

    Returns
    -------
    self
        a view of this flag, not a copy.
    """
    self.known = self.known.coalesce()
    self.active = self.active.coalesce()
    # Intersect with known so active is guaranteed to be a subset of it;
    # coalesce again since intersection can produce adjacent segments.
    self.active = (self.known & self.active).coalesce()
    return self
def parse_cookie(header, charset='utf-8', errors='ignore'):
"""Parse a cookie.
:param header: the header to be used to parse the cookie.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding.
"""
cookie = _ExtendedCookie()
if header:
cookie.load(header)
result = {}
# decode to unicode and skip broken items. Our extended morsel
# and extended cookie will catch CookieErrors and convert them to
# `None` items which we have to skip here.
for key, value in cookie.iteritems():
if value.value is not None:
result[key] = unquote_header_value(value.value) \
.decode(charset, errors)
return result | Parse a cookie.
:param header: the header to be used to parse the cookie.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding. | Below is the instruction that describes the task:
### Input:
Parse a cookie.
:param header: the header to be used to parse the cookie.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding.
### Response:
def parse_cookie(header, charset='utf-8', errors='ignore'):
    """Parse a cookie.

    :param header: the header to be used to parse the cookie.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    """
    jar = _ExtendedCookie()
    if header:
        jar.load(header)
    # Decode each morsel to unicode. The extended morsel/cookie classes
    # catch CookieErrors and turn broken items into `None` values, which
    # are skipped here.
    parsed = {}
    for name, morsel in jar.iteritems():
        raw = morsel.value
        if raw is not None:
            parsed[name] = unquote_header_value(raw).decode(charset, errors)
    return parsed
def blotto_game(h, t, rho, mu=0, random_state=None):
"""
Return a NormalFormGame instance of a 2-player non-zero sum Colonel
Blotto game (Hortala-Vallve and Llorente-Saguer, 2012), where the
players have an equal number `t` of troops to assign to `h` hills
(so that the number of actions for each player is equal to
(t+h-1) choose (h-1) = (t+h-1)!/(t!*(h-1)!)). Each player has a
value for each hill that he receives if he assigns strictly more
troops to the hill than his opponent (ties are broken uniformly at
random), where the values are drawn from a multivariate normal
distribution with covariance `rho`. Each player’s payoff is the sum
of the values of the hills won by that player.
Parameters
----------
h : scalar(int)
Number of hills.
t : scalar(int)
Number of troops.
rho : scalar(float)
Covariance of the players' values of each hill. Must be in
[-1, 1].
mu : scalar(float), optional(default=0)
Mean of the players' values of each hill.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = blotto_game(2, 3, 0.5, random_state=1234)
>>> g.players[0]
Player([[-0.44861083, -1.08443468, -1.08443468, -1.08443468],
[ 0.18721302, -0.44861083, -1.08443468, -1.08443468],
[ 0.18721302, 0.18721302, -0.44861083, -1.08443468],
[ 0.18721302, 0.18721302, 0.18721302, -0.44861083]])
>>> g.players[1]
Player([[-1.20042463, -1.39708658, -1.39708658, -1.39708658],
[-1.00376268, -1.20042463, -1.39708658, -1.39708658],
[-1.00376268, -1.00376268, -1.20042463, -1.39708658],
[-1.00376268, -1.00376268, -1.00376268, -1.20042463]])
"""
actions = simplex_grid(h, t)
n = actions.shape[0]
payoff_arrays = tuple(np.empty((n, n)) for i in range(2))
mean = np.array([mu, mu])
cov = np.array([[1, rho], [rho, 1]])
random_state = check_random_state(random_state)
values = random_state.multivariate_normal(mean, cov, h)
_populate_blotto_payoff_arrays(payoff_arrays, actions, values)
g = NormalFormGame(
[Player(payoff_array) for payoff_array in payoff_arrays]
)
return g | Return a NormalFormGame instance of a 2-player non-zero sum Colonel
Blotto game (Hortala-Vallve and Llorente-Saguer, 2012), where the
players have an equal number `t` of troops to assign to `h` hills
(so that the number of actions for each player is equal to
(t+h-1) choose (h-1) = (t+h-1)!/(t!*(h-1)!)). Each player has a
value for each hill that he receives if he assigns strictly more
troops to the hill than his opponent (ties are broken uniformly at
random), where the values are drawn from a multivariate normal
distribution with covariance `rho`. Each player’s payoff is the sum
of the values of the hills won by that player.
Parameters
----------
h : scalar(int)
Number of hills.
t : scalar(int)
Number of troops.
rho : scalar(float)
Covariance of the players' values of each hill. Must be in
[-1, 1].
mu : scalar(float), optional(default=0)
Mean of the players' values of each hill.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = blotto_game(2, 3, 0.5, random_state=1234)
>>> g.players[0]
Player([[-0.44861083, -1.08443468, -1.08443468, -1.08443468],
[ 0.18721302, -0.44861083, -1.08443468, -1.08443468],
[ 0.18721302, 0.18721302, -0.44861083, -1.08443468],
[ 0.18721302, 0.18721302, 0.18721302, -0.44861083]])
>>> g.players[1]
Player([[-1.20042463, -1.39708658, -1.39708658, -1.39708658],
[-1.00376268, -1.20042463, -1.39708658, -1.39708658],
[-1.00376268, -1.00376268, -1.20042463, -1.39708658],
[-1.00376268, -1.00376268, -1.00376268, -1.20042463]]) | Below is the the instruction that describes the task:
### Input:
Return a NormalFormGame instance of a 2-player non-zero sum Colonel
Blotto game (Hortala-Vallve and Llorente-Saguer, 2012), where the
players have an equal number `t` of troops to assign to `h` hills
(so that the number of actions for each player is equal to
(t+h-1) choose (h-1) = (t+h-1)!/(t!*(h-1)!)). Each player has a
value for each hill that he receives if he assigns strictly more
troops to the hill than his opponent (ties are broken uniformly at
random), where the values are drawn from a multivariate normal
distribution with covariance `rho`. Each player’s payoff is the sum
of the values of the hills won by that player.
Parameters
----------
h : scalar(int)
Number of hills.
t : scalar(int)
Number of troops.
rho : scalar(float)
Covariance of the players' values of each hill. Must be in
[-1, 1].
mu : scalar(float), optional(default=0)
Mean of the players' values of each hill.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = blotto_game(2, 3, 0.5, random_state=1234)
>>> g.players[0]
Player([[-0.44861083, -1.08443468, -1.08443468, -1.08443468],
[ 0.18721302, -0.44861083, -1.08443468, -1.08443468],
[ 0.18721302, 0.18721302, -0.44861083, -1.08443468],
[ 0.18721302, 0.18721302, 0.18721302, -0.44861083]])
>>> g.players[1]
Player([[-1.20042463, -1.39708658, -1.39708658, -1.39708658],
[-1.00376268, -1.20042463, -1.39708658, -1.39708658],
[-1.00376268, -1.00376268, -1.20042463, -1.39708658],
[-1.00376268, -1.00376268, -1.00376268, -1.20042463]])
### Response:
def blotto_game(h, t, rho, mu=0, random_state=None):
"""
Return a NormalFormGame instance of a 2-player non-zero sum Colonel
Blotto game (Hortala-Vallve and Llorente-Saguer, 2012), where the
players have an equal number `t` of troops to assign to `h` hills
(so that the number of actions for each player is equal to
(t+h-1) choose (h-1) = (t+h-1)!/(t!*(h-1)!)). Each player has a
value for each hill that he receives if he assigns strictly more
troops to the hill than his opponent (ties are broken uniformly at
random), where the values are drawn from a multivariate normal
distribution with covariance `rho`. Each player’s payoff is the sum
of the values of the hills won by that player.
Parameters
----------
h : scalar(int)
Number of hills.
t : scalar(int)
Number of troops.
rho : scalar(float)
Covariance of the players' values of each hill. Must be in
[-1, 1].
mu : scalar(float), optional(default=0)
Mean of the players' values of each hill.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to set
the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState is
used.
Returns
-------
g : NormalFormGame
Examples
--------
>>> g = blotto_game(2, 3, 0.5, random_state=1234)
>>> g.players[0]
Player([[-0.44861083, -1.08443468, -1.08443468, -1.08443468],
[ 0.18721302, -0.44861083, -1.08443468, -1.08443468],
[ 0.18721302, 0.18721302, -0.44861083, -1.08443468],
[ 0.18721302, 0.18721302, 0.18721302, -0.44861083]])
>>> g.players[1]
Player([[-1.20042463, -1.39708658, -1.39708658, -1.39708658],
[-1.00376268, -1.20042463, -1.39708658, -1.39708658],
[-1.00376268, -1.00376268, -1.20042463, -1.39708658],
[-1.00376268, -1.00376268, -1.00376268, -1.20042463]])
"""
actions = simplex_grid(h, t)
n = actions.shape[0]
payoff_arrays = tuple(np.empty((n, n)) for i in range(2))
mean = np.array([mu, mu])
cov = np.array([[1, rho], [rho, 1]])
random_state = check_random_state(random_state)
values = random_state.multivariate_normal(mean, cov, h)
_populate_blotto_payoff_arrays(payoff_arrays, actions, values)
g = NormalFormGame(
[Player(payoff_array) for payoff_array in payoff_arrays]
)
return g |
def git_commentchar():
""" Shortcut for retrieving comment char from git config """
commentchar = _git("config", "--get", "core.commentchar", _ok_code=[1])
# git will return an exit code of 1 if it can't find a config value, in this case we fall-back to # as commentchar
if hasattr(commentchar, 'exit_code') and commentchar.exit_code == 1: # pylint: disable=no-member
commentchar = "#"
return ustr(commentchar).replace(u"\n", u"") | Shortcut for retrieving comment char from git config | Below is the the instruction that describes the task:
### Input:
Shortcut for retrieving comment char from git config
### Response:
def git_commentchar():
""" Shortcut for retrieving comment char from git config """
commentchar = _git("config", "--get", "core.commentchar", _ok_code=[1])
# git will return an exit code of 1 if it can't find a config value, in this case we fall-back to # as commentchar
if hasattr(commentchar, 'exit_code') and commentchar.exit_code == 1: # pylint: disable=no-member
commentchar = "#"
return ustr(commentchar).replace(u"\n", u"") |
def generate_function(info, method=False):
"""Creates a Python callable for a GIFunctionInfo instance"""
assert isinstance(info, GIFunctionInfo)
arg_infos = list(info.get_args())
arg_types = [a.get_type() for a in arg_infos]
return_type = info.get_return_type()
func = None
messages = []
for backend in list_backends():
instance = backend()
try:
func = _generate_function(instance, info, arg_infos, arg_types,
return_type, method)
except NotImplementedError:
messages.append("%s: %s" % (backend.NAME, traceback.format_exc()))
else:
break
if func:
return func
raise NotImplementedError("\n".join(messages)) | Creates a Python callable for a GIFunctionInfo instance | Below is the the instruction that describes the task:
### Input:
Creates a Python callable for a GIFunctionInfo instance
### Response:
def generate_function(info, method=False):
"""Creates a Python callable for a GIFunctionInfo instance"""
assert isinstance(info, GIFunctionInfo)
arg_infos = list(info.get_args())
arg_types = [a.get_type() for a in arg_infos]
return_type = info.get_return_type()
func = None
messages = []
for backend in list_backends():
instance = backend()
try:
func = _generate_function(instance, info, arg_infos, arg_types,
return_type, method)
except NotImplementedError:
messages.append("%s: %s" % (backend.NAME, traceback.format_exc()))
else:
break
if func:
return func
raise NotImplementedError("\n".join(messages)) |
def read_cz_lsminfo(fh, byteorder, dtype, count, offsetsize):
"""Read CZ_LSMINFO tag from file and return as dict."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError('invalid CZ_LSMINFO structure')
fh.seek(-8, 1)
if structure_size < numpy.dtype(TIFF.CZ_LSMINFO).itemsize:
# adjust structure according to structure_size
lsminfo = []
size = 0
for name, dtype in TIFF.CZ_LSMINFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
lsminfo.append((name, dtype))
else:
lsminfo = TIFF.CZ_LSMINFO
lsminfo = fh.read_record(lsminfo, byteorder=byteorder)
lsminfo = recarray2dict(lsminfo)
# read LSM info subrecords at offsets
for name, reader in TIFF.CZ_LSMINFO_READERS.items():
if reader is None:
continue
offset = lsminfo.get('Offset' + name, 0)
if offset < 8:
continue
fh.seek(offset)
try:
lsminfo[name] = reader(fh)
except ValueError:
pass
return lsminfo | Read CZ_LSMINFO tag from file and return as dict. | Below is the the instruction that describes the task:
### Input:
Read CZ_LSMINFO tag from file and return as dict.
### Response:
def read_cz_lsminfo(fh, byteorder, dtype, count, offsetsize):
"""Read CZ_LSMINFO tag from file and return as dict."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError('invalid CZ_LSMINFO structure')
fh.seek(-8, 1)
if structure_size < numpy.dtype(TIFF.CZ_LSMINFO).itemsize:
# adjust structure according to structure_size
lsminfo = []
size = 0
for name, dtype in TIFF.CZ_LSMINFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
lsminfo.append((name, dtype))
else:
lsminfo = TIFF.CZ_LSMINFO
lsminfo = fh.read_record(lsminfo, byteorder=byteorder)
lsminfo = recarray2dict(lsminfo)
# read LSM info subrecords at offsets
for name, reader in TIFF.CZ_LSMINFO_READERS.items():
if reader is None:
continue
offset = lsminfo.get('Offset' + name, 0)
if offset < 8:
continue
fh.seek(offset)
try:
lsminfo[name] = reader(fh)
except ValueError:
pass
return lsminfo |
def _init_map(self):
"""call these all manually because non-cooperative"""
DecimalAnswerFormRecord._init_map(self)
DecimalValuesFormRecord._init_map(self)
TextAnswerFormRecord._init_map(self)
TextsFormRecord._init_map(self)
super(edXNumericResponseAnswerFormRecord, self)._init_map() | call these all manually because non-cooperative | Below is the the instruction that describes the task:
### Input:
call these all manually because non-cooperative
### Response:
def _init_map(self):
"""call these all manually because non-cooperative"""
DecimalAnswerFormRecord._init_map(self)
DecimalValuesFormRecord._init_map(self)
TextAnswerFormRecord._init_map(self)
TextsFormRecord._init_map(self)
super(edXNumericResponseAnswerFormRecord, self)._init_map() |
def stop_watching(self, cluster):
"""
Causes the thread that launched the watch of the cluster path
to end by setting the proper stop event found in `self.stop_events`.
"""
znode_path = "/".join([self.base_path, cluster.name])
if znode_path in self.stop_events:
self.stop_events[znode_path].set() | Causes the thread that launched the watch of the cluster path
to end by setting the proper stop event found in `self.stop_events`. | Below is the the instruction that describes the task:
### Input:
Causes the thread that launched the watch of the cluster path
to end by setting the proper stop event found in `self.stop_events`.
### Response:
def stop_watching(self, cluster):
"""
Causes the thread that launched the watch of the cluster path
to end by setting the proper stop event found in `self.stop_events`.
"""
znode_path = "/".join([self.base_path, cluster.name])
if znode_path in self.stop_events:
self.stop_events[znode_path].set() |
def get_stp_mst_detail_output_msti_port_rx_bpdu_count(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
rx_bpdu_count = ET.SubElement(port, "rx-bpdu-count")
rx_bpdu_count.text = kwargs.pop('rx_bpdu_count')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_stp_mst_detail_output_msti_port_rx_bpdu_count(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
rx_bpdu_count = ET.SubElement(port, "rx-bpdu-count")
rx_bpdu_count.text = kwargs.pop('rx_bpdu_count')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def do_exit(self, arg):
"""Exit the shell session."""
if self.current:
self.current.close()
self.resource_manager.close()
del self.resource_manager
return True | Exit the shell session. | Below is the the instruction that describes the task:
### Input:
Exit the shell session.
### Response:
def do_exit(self, arg):
"""Exit the shell session."""
if self.current:
self.current.close()
self.resource_manager.close()
del self.resource_manager
return True |
def add_user(self, username, email, **kwargs):
"""Create a new user with provided details.
Add user example:
.. code-block:: python
account_management_api = AccountManagementAPI()
# Add user
user = {
"username": "test_user",
"email": "test@gmail.com",
"phone_number": "0123456789"
}
new_user = account_management_api.add_user(**user)
:param str username: The unique username of the user (Required)
:param str email: The unique email of the user (Required)
:param str full_name: The full name of the user
:param list groups: List of group IDs (`str`) which this user belongs to
:param str password: The password string of the user
:param str phone_number: Phone number of the user
:param bool terms_accepted: 'General Terms & Conditions' have been accepted
:param bool marketing_accepted: Marketing Information opt-in
:returns: the new user object
:rtype: User
"""
api = self._get_api(iam.AccountAdminApi)
kwargs.update({'username': username, 'email': email})
user = User._create_request_map(kwargs)
body = iam.UserUpdateReq(**user)
return User(api.create_user(body)) | Create a new user with provided details.
Add user example:
.. code-block:: python
account_management_api = AccountManagementAPI()
# Add user
user = {
"username": "test_user",
"email": "test@gmail.com",
"phone_number": "0123456789"
}
new_user = account_management_api.add_user(**user)
:param str username: The unique username of the user (Required)
:param str email: The unique email of the user (Required)
:param str full_name: The full name of the user
:param list groups: List of group IDs (`str`) which this user belongs to
:param str password: The password string of the user
:param str phone_number: Phone number of the user
:param bool terms_accepted: 'General Terms & Conditions' have been accepted
:param bool marketing_accepted: Marketing Information opt-in
:returns: the new user object
:rtype: User | Below is the the instruction that describes the task:
### Input:
Create a new user with provided details.
Add user example:
.. code-block:: python
account_management_api = AccountManagementAPI()
# Add user
user = {
"username": "test_user",
"email": "test@gmail.com",
"phone_number": "0123456789"
}
new_user = account_management_api.add_user(**user)
:param str username: The unique username of the user (Required)
:param str email: The unique email of the user (Required)
:param str full_name: The full name of the user
:param list groups: List of group IDs (`str`) which this user belongs to
:param str password: The password string of the user
:param str phone_number: Phone number of the user
:param bool terms_accepted: 'General Terms & Conditions' have been accepted
:param bool marketing_accepted: Marketing Information opt-in
:returns: the new user object
:rtype: User
### Response:
def add_user(self, username, email, **kwargs):
"""Create a new user with provided details.
Add user example:
.. code-block:: python
account_management_api = AccountManagementAPI()
# Add user
user = {
"username": "test_user",
"email": "test@gmail.com",
"phone_number": "0123456789"
}
new_user = account_management_api.add_user(**user)
:param str username: The unique username of the user (Required)
:param str email: The unique email of the user (Required)
:param str full_name: The full name of the user
:param list groups: List of group IDs (`str`) which this user belongs to
:param str password: The password string of the user
:param str phone_number: Phone number of the user
:param bool terms_accepted: 'General Terms & Conditions' have been accepted
:param bool marketing_accepted: Marketing Information opt-in
:returns: the new user object
:rtype: User
"""
api = self._get_api(iam.AccountAdminApi)
kwargs.update({'username': username, 'email': email})
user = User._create_request_map(kwargs)
body = iam.UserUpdateReq(**user)
return User(api.create_user(body)) |
def get_version(cls, path, memo={}):
"""
Return a string describing the version of the repository at ``path`` if
possible, otherwise throws ``subprocess.CalledProcessError``.
(Note: memoizes the result in the ``memo`` parameter)
"""
if path not in memo:
memo[path] = subprocess.check_output(
"git describe --tags --dirty 2> /dev/null",
shell=True, cwd=path).strip().decode("utf-8")
v = re.search("-[0-9]+-", memo[path])
if v is not None:
# Replace -n- with -branchname-n-
branch = r"-{0}-\1-".format(cls.get_branch(path))
(memo[path], _) = re.subn("-([0-9]+)-", branch, memo[path], 1)
return memo[path] | Return a string describing the version of the repository at ``path`` if
possible, otherwise throws ``subprocess.CalledProcessError``.
(Note: memoizes the result in the ``memo`` parameter) | Below is the the instruction that describes the task:
### Input:
Return a string describing the version of the repository at ``path`` if
possible, otherwise throws ``subprocess.CalledProcessError``.
(Note: memoizes the result in the ``memo`` parameter)
### Response:
def get_version(cls, path, memo={}):
"""
Return a string describing the version of the repository at ``path`` if
possible, otherwise throws ``subprocess.CalledProcessError``.
(Note: memoizes the result in the ``memo`` parameter)
"""
if path not in memo:
memo[path] = subprocess.check_output(
"git describe --tags --dirty 2> /dev/null",
shell=True, cwd=path).strip().decode("utf-8")
v = re.search("-[0-9]+-", memo[path])
if v is not None:
# Replace -n- with -branchname-n-
branch = r"-{0}-\1-".format(cls.get_branch(path))
(memo[path], _) = re.subn("-([0-9]+)-", branch, memo[path], 1)
return memo[path] |
def attention_lm_ae_extended():
"""Experiment with the exp_factor params."""
hparams = attention_lm_moe_base_long_seq()
hparams.attention_layers = "eeee"
hparams.attention_local = True
# hparams.factored_logits=1 # Necessary when the number of expert grow bigger
hparams.attention_moe_k = 2
hparams.attention_exp_factor = 4
# hparams.attention_exp_inputdim = 128
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
return hparams | Experiment with the exp_factor params. | Below is the the instruction that describes the task:
### Input:
Experiment with the exp_factor params.
### Response:
def attention_lm_ae_extended():
"""Experiment with the exp_factor params."""
hparams = attention_lm_moe_base_long_seq()
hparams.attention_layers = "eeee"
hparams.attention_local = True
# hparams.factored_logits=1 # Necessary when the number of expert grow bigger
hparams.attention_moe_k = 2
hparams.attention_exp_factor = 4
# hparams.attention_exp_inputdim = 128
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
return hparams |
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key) | Update the index with the new key/values. | Below is the the instruction that describes the task:
### Input:
Update the index with the new key/values.
### Response:
def update_index(self, key, value):
"Update the index with the new key/values."
for k, v in value.items():
if k in self.indexes:
# A non-string index value switches it into a lazy one.
if not isinstance(v, six.string_types):
self.index_defs[k]['type'] = 'lazy'
if v not in self.indexes[k]:
self.indexes[k][v] = set([])
self.indexes[k][v].add(key) |
def is_invalid_params_py2(func, *args, **kwargs):
""" Check, whether function 'func' accepts parameters 'args', 'kwargs'.
NOTE: Method is called after funct(*args, **kwargs) generated TypeError,
it is aimed to destinguish TypeError because of invalid parameters from
TypeError from inside the function.
.. versionadded: 1.9.0
"""
funcargs, varargs, varkwargs, defaults = inspect.getargspec(func)
unexpected = set(kwargs.keys()) - set(funcargs)
if len(unexpected) > 0:
return True
params = [funcarg for funcarg in funcargs if funcarg not in kwargs]
funcargs_required = funcargs[:-len(defaults)] \
if defaults is not None \
else funcargs
params_required = [
funcarg for funcarg in funcargs_required
if funcarg not in kwargs
]
return not (len(params_required) <= len(args) <= len(params)) | Check, whether function 'func' accepts parameters 'args', 'kwargs'.
NOTE: Method is called after funct(*args, **kwargs) generated TypeError,
it is aimed to destinguish TypeError because of invalid parameters from
TypeError from inside the function.
.. versionadded: 1.9.0 | Below is the the instruction that describes the task:
### Input:
Check, whether function 'func' accepts parameters 'args', 'kwargs'.
NOTE: Method is called after funct(*args, **kwargs) generated TypeError,
it is aimed to destinguish TypeError because of invalid parameters from
TypeError from inside the function.
.. versionadded: 1.9.0
### Response:
def is_invalid_params_py2(func, *args, **kwargs):
""" Check, whether function 'func' accepts parameters 'args', 'kwargs'.
NOTE: Method is called after funct(*args, **kwargs) generated TypeError,
it is aimed to destinguish TypeError because of invalid parameters from
TypeError from inside the function.
.. versionadded: 1.9.0
"""
funcargs, varargs, varkwargs, defaults = inspect.getargspec(func)
unexpected = set(kwargs.keys()) - set(funcargs)
if len(unexpected) > 0:
return True
params = [funcarg for funcarg in funcargs if funcarg not in kwargs]
funcargs_required = funcargs[:-len(defaults)] \
if defaults is not None \
else funcargs
params_required = [
funcarg for funcarg in funcargs_required
if funcarg not in kwargs
]
return not (len(params_required) <= len(args) <= len(params)) |
def get_template(self, template_name):
'''
Retrieves a template object from the pattern "app_name/template.html".
This is one of the required methods of Django template engines.
Because DMP templates are always app-specific (Django only searches
a global set of directories), the template_name MUST be in the format:
"app_name/template.html" (even on Windows). DMP splits the template_name
string on the slash to get the app name and template name.
Template rendering can be limited to a specific def/block within the template
by specifying `#def_name`, e.g. `myapp/mytemplate.html#myblockname`.
'''
dmp = apps.get_app_config('django_mako_plus')
match = RE_TEMPLATE_NAME.match(template_name)
if match is None or match.group(1) is None or match.group(3) is None:
raise TemplateDoesNotExist('Invalid template_name format for a DMP template. This method requires that the template name be in app_name/template.html format (separated by slash).')
if not dmp.is_registered_app(match.group(1)):
raise TemplateDoesNotExist('Not a DMP app, so deferring to other template engines for this template')
return self.get_template_loader(match.group(1)).get_template(match.group(3), def_name=match.group(5)) | Retrieves a template object from the pattern "app_name/template.html".
This is one of the required methods of Django template engines.
Because DMP templates are always app-specific (Django only searches
a global set of directories), the template_name MUST be in the format:
"app_name/template.html" (even on Windows). DMP splits the template_name
string on the slash to get the app name and template name.
Template rendering can be limited to a specific def/block within the template
by specifying `#def_name`, e.g. `myapp/mytemplate.html#myblockname`. | Below is the the instruction that describes the task:
### Input:
Retrieves a template object from the pattern "app_name/template.html".
This is one of the required methods of Django template engines.
Because DMP templates are always app-specific (Django only searches
a global set of directories), the template_name MUST be in the format:
"app_name/template.html" (even on Windows). DMP splits the template_name
string on the slash to get the app name and template name.
Template rendering can be limited to a specific def/block within the template
by specifying `#def_name`, e.g. `myapp/mytemplate.html#myblockname`.
### Response:
def get_template(self, template_name):
'''
Retrieves a template object from the pattern "app_name/template.html".
This is one of the required methods of Django template engines.
Because DMP templates are always app-specific (Django only searches
a global set of directories), the template_name MUST be in the format:
"app_name/template.html" (even on Windows). DMP splits the template_name
string on the slash to get the app name and template name.
Template rendering can be limited to a specific def/block within the template
by specifying `#def_name`, e.g. `myapp/mytemplate.html#myblockname`.
'''
dmp = apps.get_app_config('django_mako_plus')
match = RE_TEMPLATE_NAME.match(template_name)
if match is None or match.group(1) is None or match.group(3) is None:
raise TemplateDoesNotExist('Invalid template_name format for a DMP template. This method requires that the template name be in app_name/template.html format (separated by slash).')
if not dmp.is_registered_app(match.group(1)):
raise TemplateDoesNotExist('Not a DMP app, so deferring to other template engines for this template')
return self.get_template_loader(match.group(1)).get_template(match.group(3), def_name=match.group(5)) |
def handle_termination(cls, pid, is_cancel=True):
'''
Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.
:param pid: the process id of the running the job.
:param is_cancel: flag showing whether this termination is caused by
instance's cancel_flag.
'''
try:
main_proc = psutil.Process(pid=pid)
child_procs = main_proc.children(recursive=True)
for child_proc in child_procs:
try:
os.kill(child_proc.pid, signal.SIGKILL)
except (TypeError, OSError):
pass
os.kill(main_proc.pid, signal.SIGKILL)
except (TypeError, psutil.Error, OSError):
try:
os.kill(pid, signal.SIGKILL)
except (OSError):
pass | Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.
:param pid: the process id of the running the job.
:param is_cancel: flag showing whether this termination is caused by
instance's cancel_flag. | Below is the the instruction that describes the task:
### Input:
Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.
:param pid: the process id of the running the job.
:param is_cancel: flag showing whether this termination is caused by
instance's cancel_flag.
### Response:
def handle_termination(cls, pid, is_cancel=True):
'''
Internal method to terminate a subprocess spawned by `pexpect` representing an invocation of runner.
:param pid: the process id of the running the job.
:param is_cancel: flag showing whether this termination is caused by
instance's cancel_flag.
'''
try:
main_proc = psutil.Process(pid=pid)
child_procs = main_proc.children(recursive=True)
for child_proc in child_procs:
try:
os.kill(child_proc.pid, signal.SIGKILL)
except (TypeError, OSError):
pass
os.kill(main_proc.pid, signal.SIGKILL)
except (TypeError, psutil.Error, OSError):
try:
os.kill(pid, signal.SIGKILL)
except (OSError):
pass |
def validate_args(api_key, *, rate="informers", **kwargs):
"Проверяет и формирует аргументы для запроса"
rate = Rate.validate(rate)
headers = {"X-Yandex-API-Key": api_key}
url = "https://api.weather.yandex.ru/v1/{}".format(rate)
if rate == "informers":
params = ARGS_SCHEMA(kwargs)
else:
params = ARGS_FORECAST_SCHEMA(kwargs)
return (url,), {"headers": headers, "params": params} | Проверяет и формирует аргументы для запроса | Below is the the instruction that describes the task:
### Input:
Проверяет и формирует аргументы для запроса
### Response:
def validate_args(api_key, *, rate="informers", **kwargs):
"Проверяет и формирует аргументы для запроса"
rate = Rate.validate(rate)
headers = {"X-Yandex-API-Key": api_key}
url = "https://api.weather.yandex.ru/v1/{}".format(rate)
if rate == "informers":
params = ARGS_SCHEMA(kwargs)
else:
params = ARGS_FORECAST_SCHEMA(kwargs)
return (url,), {"headers": headers, "params": params} |
def getElementsByTagName(self, tagName, root='root'):
'''
getElementsByTagName - Searches and returns all elements with a specific tag name.
@param tagName <lowercase str> - A lowercase string of the tag name.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if isFromRoot is True and root.tagName == tagName:
elements.append(root)
getElementsByTagName = self.getElementsByTagName
for child in root.children:
if child.tagName == tagName:
elements.append(child)
elements += getElementsByTagName(tagName, child)
return TagCollection(elements) | getElementsByTagName - Searches and returns all elements with a specific tag name.
@param tagName <lowercase str> - A lowercase string of the tag name.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used. | Below is the instruction that describes the task:
### Input:
getElementsByTagName - Searches and returns all elements with a specific tag name.
@param tagName <lowercase str> - A lowercase string of the tag name.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
### Response:
def getElementsByTagName(self, tagName, root='root'):
'''
getElementsByTagName - Searches and returns all elements with a specific tag name.
@param tagName <lowercase str> - A lowercase string of the tag name.
@param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.
'''
(root, isFromRoot) = self._handleRootArg(root)
elements = []
if isFromRoot is True and root.tagName == tagName:
elements.append(root)
getElementsByTagName = self.getElementsByTagName
for child in root.children:
if child.tagName == tagName:
elements.append(child)
elements += getElementsByTagName(tagName, child)
return TagCollection(elements) |
def responsive(self):
""" bool: Whether the server for this app is up and responsive. """
if self.server_thread and self.server_thread.join(0):
return False
try:
# Try to fetch the endpoint added by the middleware.
identify_url = "http://{0}:{1}/__identify__".format(self.host, self.port)
with closing(urlopen(identify_url)) as response:
body, status_code = response.read(), response.getcode()
if str(status_code)[0] == "2" or str(status_code)[0] == "3":
return decode_bytes(body) == str(id(self.app))
except URLError:
pass
return False | bool: Whether the server for this app is up and responsive. | Below is the instruction that describes the task:
### Input:
bool: Whether the server for this app is up and responsive.
### Response:
def responsive(self):
""" bool: Whether the server for this app is up and responsive. """
if self.server_thread and self.server_thread.join(0):
return False
try:
# Try to fetch the endpoint added by the middleware.
identify_url = "http://{0}:{1}/__identify__".format(self.host, self.port)
with closing(urlopen(identify_url)) as response:
body, status_code = response.read(), response.getcode()
if str(status_code)[0] == "2" or str(status_code)[0] == "3":
return decode_bytes(body) == str(id(self.app))
except URLError:
pass
return False |
def random_chain(generators):
"""Generator to generate a set of keys from
from a set of generators, each generator is selected
at random and consumed to exhaustion.
"""
while generators:
g = random.choice(generators)
try:
v = g.next()
if v is None:
continue
yield v
except StopIteration:
generators.remove(g) | Generator to generate a set of keys from
from a set of generators, each generator is selected
at random and consumed to exhaustion. | Below is the instruction that describes the task:
### Input:
Generator to generate a set of keys from
from a set of generators, each generator is selected
at random and consumed to exhaustion.
### Response:
def random_chain(generators):
"""Generator to generate a set of keys from
from a set of generators, each generator is selected
at random and consumed to exhaustion.
"""
while generators:
g = random.choice(generators)
try:
v = g.next()
if v is None:
continue
yield v
except StopIteration:
generators.remove(g) |
def set_public_domain(self, public_domain):
"""Sets the public domain flag.
arg: public_domain (boolean): the public domain status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_public_domain_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(public_domain):
raise errors.InvalidArgument()
self._my_map['publicDomain'] = public_domain | Sets the public domain flag.
arg: public_domain (boolean): the public domain status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Sets the public domain flag.
arg: public_domain (boolean): the public domain status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_public_domain(self, public_domain):
"""Sets the public domain flag.
arg: public_domain (boolean): the public domain status
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.set_group_template
if self.get_public_domain_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_boolean(public_domain):
raise errors.InvalidArgument()
self._my_map['publicDomain'] = public_domain |
def _selectView( self ):
"""
Matches the view selection to the trees selection.
"""
scene = self.uiGanttVIEW.scene()
scene.blockSignals(True)
scene.clearSelection()
for item in self.uiGanttTREE.selectedItems():
item.viewItem().setSelected(True)
scene.blockSignals(False)
curr_item = self.uiGanttTREE.currentItem()
vitem = curr_item.viewItem()
if vitem:
self.uiGanttVIEW.centerOn(vitem) | Matches the view selection to the trees selection. | Below is the the instruction that describes the task:
### Input:
Matches the view selection to the trees selection.
### Response:
def _selectView( self ):
"""
Matches the view selection to the trees selection.
"""
scene = self.uiGanttVIEW.scene()
scene.blockSignals(True)
scene.clearSelection()
for item in self.uiGanttTREE.selectedItems():
item.viewItem().setSelected(True)
scene.blockSignals(False)
curr_item = self.uiGanttTREE.currentItem()
vitem = curr_item.viewItem()
if vitem:
self.uiGanttVIEW.centerOn(vitem) |
def common(self, other):
'''
Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree.
'''
if self.multiplicand == other.multiplicand:
return mult(self.multiplicand, self.multiplier.common(other.multiplier))
# Multiplicands disagree, no common part at all.
return mult(nothing, zero) | Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree. | Below is the instruction that describes the task:
### Input:
Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree.
### Response:
def common(self, other):
'''
Return the common part of these two mults. This is the largest mult
which can be safely subtracted from both the originals. The multiplier
on this mult could be zero: this is the case if, for example, the
multiplicands disagree.
'''
if self.multiplicand == other.multiplicand:
return mult(self.multiplicand, self.multiplier.common(other.multiplier))
# Multiplicands disagree, no common part at all.
return mult(nothing, zero) |
async def encrypt(self, message: bytes, authn: bool = False, recip: str = None) -> bytes:
"""
Encrypt plaintext for owner of DID or verification key, anonymously or via
authenticated encryption scheme. If given DID, first check wallet and then pool
for corresponding verification key.
Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
:param message: plaintext, as bytes
:param authn: whether to use authenticated encryption scheme
:param recip: DID or verification key of recipient, None for anchor's own
:return: ciphertext, as bytes
"""
LOGGER.debug('BaseAnchor.encrypt >>> message: %s, authn: %s, recip: %s', message, authn, recip)
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.encrypt <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await self.wallet.encrypt(message, authn, await self._verkey_for(recip))
LOGGER.debug('BaseAnchor.auth_encrypt <<< %s', rv)
return rv | Encrypt plaintext for owner of DID or verification key, anonymously or via
authenticated encryption scheme. If given DID, first check wallet and then pool
for corresponding verification key.
Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
:param message: plaintext, as bytes
:param authn: whether to use authenticated encryption scheme
:param recip: DID or verification key of recipient, None for anchor's own
:return: ciphertext, as bytes | Below is the instruction that describes the task:
### Input:
Encrypt plaintext for owner of DID or verification key, anonymously or via
authenticated encryption scheme. If given DID, first check wallet and then pool
for corresponding verification key.
Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
:param message: plaintext, as bytes
:param authn: whether to use authenticated encryption scheme
:param recip: DID or verification key of recipient, None for anchor's own
:return: ciphertext, as bytes
### Response:
async def encrypt(self, message: bytes, authn: bool = False, recip: str = None) -> bytes:
"""
Encrypt plaintext for owner of DID or verification key, anonymously or via
authenticated encryption scheme. If given DID, first check wallet and then pool
for corresponding verification key.
Raise WalletState if the wallet is closed. Given a recipient DID not in the wallet,
raise AbsentPool if the instance has no pool or ClosedPool if its pool is closed.
:param message: plaintext, as bytes
:param authn: whether to use authenticated encryption scheme
:param recip: DID or verification key of recipient, None for anchor's own
:return: ciphertext, as bytes
"""
LOGGER.debug('BaseAnchor.encrypt >>> message: %s, authn: %s, recip: %s', message, authn, recip)
if not self.wallet.handle:
LOGGER.debug('BaseAnchor.encrypt <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await self.wallet.encrypt(message, authn, await self._verkey_for(recip))
LOGGER.debug('BaseAnchor.auth_encrypt <<< %s', rv)
return rv |
def list_databases(self, name):
'''
List the SQL databases defined on the specified server name
'''
response = self._perform_get(self._get_list_databases_path(name),
None)
return _MinidomXmlToObject.parse_service_resources_response(
response, Database) | List the SQL databases defined on the specified server name | Below is the instruction that describes the task:
### Input:
List the SQL databases defined on the specified server name
### Response:
def list_databases(self, name):
'''
List the SQL databases defined on the specified server name
'''
response = self._perform_get(self._get_list_databases_path(name),
None)
return _MinidomXmlToObject.parse_service_resources_response(
response, Database) |
def regex_in_package_file(regex, filename, package_name, return_match=False):
""" Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean
"""
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match) | Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean | Below is the instruction that describes the task:
### Input:
Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean
### Response:
def regex_in_package_file(regex, filename, package_name, return_match=False):
""" Search for a regex in a file contained within the package directory
If return_match is True, return the found object instead of a boolean
"""
filepath = package_file_path(filename, package_name)
return regex_in_file(regex, filepath, return_match=return_match) |
def get_instance(self, payload):
"""
Build an instance of AssistantInitiationActionsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
:rtype: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
"""
return AssistantInitiationActionsInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
) | Build an instance of AssistantInitiationActionsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
:rtype: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance | Below is the instruction that describes the task:
### Input:
Build an instance of AssistantInitiationActionsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
:rtype: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
### Response:
def get_instance(self, payload):
"""
Build an instance of AssistantInitiationActionsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
:rtype: twilio.rest.preview.understand.assistant.assistant_initiation_actions.AssistantInitiationActionsInstance
"""
return AssistantInitiationActionsInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
) |
def bootstrapping_dtrajs(dtrajs, lag, N_full, nbs=10000, active_set=None):
"""
Perform trajectory based re-sampling.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
lag time
N_full : int
Number of states in discrete trajectories.
nbs : int, optional
Number of bootstrapping samples
active_set : ndarray
Indices of active set, all count matrices will be restricted
to active set.
Returns
-------
smean : ndarray(N,)
mean values of singular values
sdev : ndarray(N,)
standard deviations of singular values
"""
# Get the number of simulations:
Q = len(dtrajs)
# Get the number of states in the active set:
if active_set is not None:
N = active_set.size
else:
N = N_full
# Build up a matrix of count matrices for each simulation. Size is Q*N^2:
traj_ind = []
state1 = []
state2 = []
q = 0
for traj in dtrajs:
traj_ind.append(q*np.ones(traj[:-lag].size))
state1.append(traj[:-lag])
state2.append(traj[lag:])
q += 1
traj_inds = np.concatenate(traj_ind)
pairs = N_full * np.concatenate(state1) + np.concatenate(state2)
data = np.ones(pairs.size)
Ct_traj = scipy.sparse.coo_matrix((data, (traj_inds, pairs)), shape=(Q, N_full*N_full))
Ct_traj = Ct_traj.tocsr()
# Perform re-sampling:
svals = np.zeros((nbs, N))
for s in range(nbs):
# Choose selection:
sel = np.random.choice(Q, Q, replace=True)
# Compute count matrix for selection:
Ct_sel = Ct_traj[sel, :].sum(axis=0)
Ct_sel = np.asarray(Ct_sel).reshape((N_full, N_full))
if active_set is not None:
from pyemma.util.linalg import submatrix
Ct_sel = submatrix(Ct_sel, active_set)
svals[s, :] = scl.svdvals(Ct_sel)
# Compute mean and uncertainties:
smean = np.mean(svals, axis=0)
sdev = np.std(svals, axis=0)
return smean, sdev | Perform trajectory based re-sampling.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
lag time
N_full : int
Number of states in discrete trajectories.
nbs : int, optional
Number of bootstrapping samples
active_set : ndarray
Indices of active set, all count matrices will be restricted
to active set.
Returns
-------
smean : ndarray(N,)
mean values of singular values
sdev : ndarray(N,)
standard deviations of singular values | Below is the instruction that describes the task:
### Input:
Perform trajectory based re-sampling.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
lag time
N_full : int
Number of states in discrete trajectories.
nbs : int, optional
Number of bootstrapping samples
active_set : ndarray
Indices of active set, all count matrices will be restricted
to active set.
Returns
-------
smean : ndarray(N,)
mean values of singular values
sdev : ndarray(N,)
standard deviations of singular values
### Response:
def bootstrapping_dtrajs(dtrajs, lag, N_full, nbs=10000, active_set=None):
"""
Perform trajectory based re-sampling.
Parameters
----------
dtrajs : list of discrete trajectories
lag : int
lag time
N_full : int
Number of states in discrete trajectories.
nbs : int, optional
Number of bootstrapping samples
active_set : ndarray
Indices of active set, all count matrices will be restricted
to active set.
Returns
-------
smean : ndarray(N,)
mean values of singular values
sdev : ndarray(N,)
standard deviations of singular values
"""
# Get the number of simulations:
Q = len(dtrajs)
# Get the number of states in the active set:
if active_set is not None:
N = active_set.size
else:
N = N_full
# Build up a matrix of count matrices for each simulation. Size is Q*N^2:
traj_ind = []
state1 = []
state2 = []
q = 0
for traj in dtrajs:
traj_ind.append(q*np.ones(traj[:-lag].size))
state1.append(traj[:-lag])
state2.append(traj[lag:])
q += 1
traj_inds = np.concatenate(traj_ind)
pairs = N_full * np.concatenate(state1) + np.concatenate(state2)
data = np.ones(pairs.size)
Ct_traj = scipy.sparse.coo_matrix((data, (traj_inds, pairs)), shape=(Q, N_full*N_full))
Ct_traj = Ct_traj.tocsr()
# Perform re-sampling:
svals = np.zeros((nbs, N))
for s in range(nbs):
# Choose selection:
sel = np.random.choice(Q, Q, replace=True)
# Compute count matrix for selection:
Ct_sel = Ct_traj[sel, :].sum(axis=0)
Ct_sel = np.asarray(Ct_sel).reshape((N_full, N_full))
if active_set is not None:
from pyemma.util.linalg import submatrix
Ct_sel = submatrix(Ct_sel, active_set)
svals[s, :] = scl.svdvals(Ct_sel)
# Compute mean and uncertainties:
smean = np.mean(svals, axis=0)
sdev = np.std(svals, axis=0)
return smean, sdev |
def extract_source_geom(dstore, srcidxs):
"""
Extract the geometry of a given sources
Example:
http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3
"""
for i in srcidxs.split(','):
rec = dstore['source_info'][int(i)]
geom = dstore['source_geom'][rec['gidx1']:rec['gidx2']]
yield rec['source_id'], geom | Extract the geometry of a given sources
Example:
http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3 | Below is the instruction that describes the task:
### Input:
Extract the geometry of a given sources
Example:
http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3
### Response:
def extract_source_geom(dstore, srcidxs):
"""
Extract the geometry of a given sources
Example:
http://127.0.0.1:8800/v1/calc/30/extract/source_geom/1,2,3
"""
for i in srcidxs.split(','):
rec = dstore['source_info'][int(i)]
geom = dstore['source_geom'][rec['gidx1']:rec['gidx2']]
yield rec['source_id'], geom |
def resolve_blocks(template, context):
'''
Return a BlockContext instance of all the {% block %} tags in the template.
If template is a string, it will be resolved through get_template
'''
try:
blocks = context.render_context[BLOCK_CONTEXT_KEY]
except KeyError:
blocks = context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
# If it's just the name, resolve into template
if isinstance(template, six.string_types):
template = get_template(template)
# For Django 1.8 compatibility
template = getattr(template, 'template', template)
# Add this templates blocks as the first
local_blocks = {
block.name: block
for block in template.nodelist.get_nodes_by_type(BlockNode)
}
blocks.add_blocks(local_blocks)
# Do we extend a parent template?
extends = template.nodelist.get_nodes_by_type(ExtendsNode)
if extends:
# Can only have one extends in a template
extends_node = extends[0]
# Get the parent, and recurse
parent_template = extends_node.get_parent(context)
resolve_blocks(parent_template, context)
return blocks | Return a BlockContext instance of all the {% block %} tags in the template.
If template is a string, it will be resolved through get_template | Below is the instruction that describes the task:
### Input:
Return a BlockContext instance of all the {% block %} tags in the template.
If template is a string, it will be resolved through get_template
### Response:
def resolve_blocks(template, context):
'''
Return a BlockContext instance of all the {% block %} tags in the template.
If template is a string, it will be resolved through get_template
'''
try:
blocks = context.render_context[BLOCK_CONTEXT_KEY]
except KeyError:
blocks = context.render_context[BLOCK_CONTEXT_KEY] = BlockContext()
# If it's just the name, resolve into template
if isinstance(template, six.string_types):
template = get_template(template)
# For Django 1.8 compatibility
template = getattr(template, 'template', template)
# Add this templates blocks as the first
local_blocks = {
block.name: block
for block in template.nodelist.get_nodes_by_type(BlockNode)
}
blocks.add_blocks(local_blocks)
# Do we extend a parent template?
extends = template.nodelist.get_nodes_by_type(ExtendsNode)
if extends:
# Can only have one extends in a template
extends_node = extends[0]
# Get the parent, and recurse
parent_template = extends_node.get_parent(context)
resolve_blocks(parent_template, context)
return blocks |
def rpc(ctx, call, arguments, api):
""" Construct RPC call directly
\b
You can specify which API to send the call to:
peerplays rpc --api bookie get_matched_bets_for_bettor 1.2.0
You can also specify lists using
peerplays rpc get_objects "['2.0.0', '2.1.0']"
"""
try:
data = list(eval(d) for d in arguments)
except:
data = arguments
ret = getattr(ctx.peerplays.rpc, call)(*data, api=api)
pprint(ret) | Construct RPC call directly
\b
You can specify which API to send the call to:
peerplays rpc --api bookie get_matched_bets_for_bettor 1.2.0
You can also specify lists using
peerplays rpc get_objects "['2.0.0', '2.1.0']" | Below is the instruction that describes the task:
### Input:
Construct RPC call directly
\b
You can specify which API to send the call to:
peerplays rpc --api bookie get_matched_bets_for_bettor 1.2.0
You can also specify lists using
peerplays rpc get_objects "['2.0.0', '2.1.0']"
### Response:
def rpc(ctx, call, arguments, api):
""" Construct RPC call directly
\b
You can specify which API to send the call to:
peerplays rpc --api bookie get_matched_bets_for_bettor 1.2.0
You can also specify lists using
peerplays rpc get_objects "['2.0.0', '2.1.0']"
"""
try:
data = list(eval(d) for d in arguments)
except:
data = arguments
ret = getattr(ctx.peerplays.rpc, call)(*data, api=api)
pprint(ret) |
def process_service_check_result(self, service, return_code, plugin_output):
"""Process service check result
Format of the line that triggers function call::
PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output>
:param service: service to process check to
:type service: alignak.objects.service.Service
:param return_code: exit code of plugin
:type return_code: int
:param plugin_output: plugin output
:type plugin_output: str
:return: None
"""
now = time.time()
cls = service.__class__
# If globally disabled OR service disabled, do not launch..
if not cls.accept_passive_checks or not service.passive_checks_enabled:
return
try:
plugin_output = plugin_output.decode('utf8', 'ignore')
logger.debug('%s > Passive service check plugin output: %s',
service.get_full_name(), plugin_output)
except AttributeError:
# Python 3 will raise an exception
pass
except UnicodeError:
pass
# Maybe the check is just too old, if so, bail out!
if self.current_timestamp < service.last_chk:
logger.debug('%s > Passive service check is too old (%d seconds). '
'Ignoring, check output: %s',
service.get_full_name(), self.current_timestamp < service.last_chk,
plugin_output)
return
# Create a check object from the external command
chk = service.launch_check(now, self.hosts, self.services, self.timeperiods,
self.daemon.macromodulations, self.daemon.checkmodulations,
self.daemon.checks, force=True)
# Should not be possible to not find the check, but if so, don't crash
if not chk:
logger.error('%s > Passive service check failed. None check launched !?',
service.get_full_name())
return
# Now we 'transform the check into a result'
# So exit_status, output and status is eaten by the service
chk.exit_status = return_code
chk.get_outputs(plugin_output, service.max_plugins_output_length)
logger.debug('%s > Passive service check output: %s',
service.get_full_name(), chk.output)
chk.status = ACT_STATUS_WAIT_CONSUME
chk.check_time = self.current_timestamp # we are using the external command timestamps
# Set the corresponding service's check type to passive
chk.set_type_passive()
# self.daemon.nb_check_received += 1
self.send_an_element(chk)
# Ok now this result will be read by the scheduler the next loop
# raise a passive check log only if needed
if self.my_conf.log_passive_checks:
log_level = 'info'
if return_code == 1: # WARNING
log_level = 'warning'
if return_code == 2: # CRITICAL
log_level = 'error'
self.send_an_element(make_monitoring_log(
log_level, 'PASSIVE SERVICE CHECK: %s;%s;%d;%s;%s;%s' % (
self.hosts[service.host].get_name(), service.get_name(),
return_code, chk.output, chk.long_output, chk.perf_data))) | Process service check result
Format of the line that triggers function call::
PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output>
:param service: service to process check to
:type service: alignak.objects.service.Service
:param return_code: exit code of plugin
:type return_code: int
:param plugin_output: plugin output
:type plugin_output: str
:return: None | Below is the instruction that describes the task:
### Input:
Process service check result
Format of the line that triggers function call::
PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output>
:param service: service to process check to
:type service: alignak.objects.service.Service
:param return_code: exit code of plugin
:type return_code: int
:param plugin_output: plugin output
:type plugin_output: str
:return: None
### Response:
def process_service_check_result(self, service, return_code, plugin_output):
"""Process service check result
Format of the line that triggers function call::
PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output>
:param service: service to process check to
:type service: alignak.objects.service.Service
:param return_code: exit code of plugin
:type return_code: int
:param plugin_output: plugin output
:type plugin_output: str
:return: None
"""
now = time.time()
cls = service.__class__
# If globally disabled OR service disabled, do not launch..
if not cls.accept_passive_checks or not service.passive_checks_enabled:
return
try:
plugin_output = plugin_output.decode('utf8', 'ignore')
logger.debug('%s > Passive service check plugin output: %s',
service.get_full_name(), plugin_output)
except AttributeError:
# Python 3 will raise an exception
pass
except UnicodeError:
pass
# Maybe the check is just too old, if so, bail out!
if self.current_timestamp < service.last_chk:
logger.debug('%s > Passive service check is too old (%d seconds). '
'Ignoring, check output: %s',
service.get_full_name(), self.current_timestamp < service.last_chk,
plugin_output)
return
# Create a check object from the external command
chk = service.launch_check(now, self.hosts, self.services, self.timeperiods,
self.daemon.macromodulations, self.daemon.checkmodulations,
self.daemon.checks, force=True)
# Should not be possible to not find the check, but if so, don't crash
if not chk:
logger.error('%s > Passive service check failed. None check launched !?',
service.get_full_name())
return
# Now we 'transform the check into a result'
# So exit_status, output and status is eaten by the service
chk.exit_status = return_code
chk.get_outputs(plugin_output, service.max_plugins_output_length)
logger.debug('%s > Passive service check output: %s',
service.get_full_name(), chk.output)
chk.status = ACT_STATUS_WAIT_CONSUME
chk.check_time = self.current_timestamp # we are using the external command timestamps
# Set the corresponding service's check type to passive
chk.set_type_passive()
# self.daemon.nb_check_received += 1
self.send_an_element(chk)
# Ok now this result will be read by the scheduler the next loop
# raise a passive check log only if needed
if self.my_conf.log_passive_checks:
log_level = 'info'
if return_code == 1: # WARNING
log_level = 'warning'
if return_code == 2: # CRITICAL
log_level = 'error'
self.send_an_element(make_monitoring_log(
log_level, 'PASSIVE SERVICE CHECK: %s;%s;%d;%s;%s;%s' % (
self.hosts[service.host].get_name(), service.get_name(),
return_code, chk.output, chk.long_output, chk.perf_data))) |
def _request_tls(self):
"""Request a TLS-encrypted connection.
[initiating entity only]"""
self.requested = True
element = ElementTree.Element(STARTTLS_TAG)
self.stream.write_element(element) | Request a TLS-encrypted connection.
[initiating entity only] | Below is the instruction that describes the task:
### Input:
Request a TLS-encrypted connection.
[initiating entity only]
### Response:
def _request_tls(self):
"""Request a TLS-encrypted connection.
[initiating entity only]"""
self.requested = True
element = ElementTree.Element(STARTTLS_TAG)
self.stream.write_element(element) |
def serialize_args(self):
"""Returns (args, kwargs) to be used when deserializing this parameter."""
args, kwargs = super(MultiParameter, self).serialize_args()
args.insert(0, [[t.id, t.serialize_args()] for t in self.types])
return args, kwargs | Returns (args, kwargs) to be used when deserializing this parameter. | Below is the instruction that describes the task:
### Input:
Returns (args, kwargs) to be used when deserializing this parameter.
### Response:
def serialize_args(self):
"""Returns (args, kwargs) to be used when deserializing this parameter."""
args, kwargs = super(MultiParameter, self).serialize_args()
args.insert(0, [[t.id, t.serialize_args()] for t in self.types])
return args, kwargs |
def get_root(w):
"""
Simple method to access root for a widget
"""
next_level = w
while next_level.master:
next_level = next_level.master
return next_level | Simple method to access root for a widget | Below is the instruction that describes the task:
### Input:
Simple method to access root for a widget
### Response:
def get_root(w):
"""
Simple method to access root for a widget
"""
next_level = w
while next_level.master:
next_level = next_level.master
return next_level |
def tag_fig_ordinal(tag):
"""
Meant for finding the position of fig tags with respect to whether
they are for a main figure or a child figure
"""
tag_count = 0
if 'specific-use' not in tag.attrs:
# Look for tags with no "specific-use" attribute
return len(list(filter(lambda tag: 'specific-use' not in tag.attrs,
tag.find_all_previous(tag.name)))) + 1 | Meant for finding the position of fig tags with respect to whether
they are for a main figure or a child figure | Below is the instruction that describes the task:
### Input:
Meant for finding the position of fig tags with respect to whether
they are for a main figure or a child figure
### Response:
def tag_fig_ordinal(tag):
"""
Meant for finding the position of fig tags with respect to whether
they are for a main figure or a child figure
"""
tag_count = 0
if 'specific-use' not in tag.attrs:
# Look for tags with no "specific-use" attribute
return len(list(filter(lambda tag: 'specific-use' not in tag.attrs,
tag.find_all_previous(tag.name)))) + 1 |
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self | Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right | Below is the instruction that describes the task:
### Input:
Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
### Response:
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self |
def line_width(default_width=DEFAULT_LINE_WIDTH, max_width=MAX_LINE_WIDTH):
"""
Return the ideal column width for the output from :func:`see.see`, taking
the terminal width into account to avoid wrapping.
"""
width = term_width()
if width: # pragma: no cover (no terminal info in Travis CI)
return min(width, max_width)
else:
return default_width | Return the ideal column width for the output from :func:`see.see`, taking
the terminal width into account to avoid wrapping. | Below is the instruction that describes the task:
### Input:
Return the ideal column width for the output from :func:`see.see`, taking
the terminal width into account to avoid wrapping.
### Response:
def line_width(default_width=DEFAULT_LINE_WIDTH, max_width=MAX_LINE_WIDTH):
"""
Return the ideal column width for the output from :func:`see.see`, taking
the terminal width into account to avoid wrapping.
"""
width = term_width()
if width: # pragma: no cover (no terminal info in Travis CI)
return min(width, max_width)
else:
return default_width |
def parse_value_refarray(self, tup_tree):
"""
Parse a VALUE.REFARRAY element and return the array of instance paths
or class paths it represents as a list of CIMInstanceName or
CIMClassName objects, respectively.
::
<!ELEMENT VALUE.REFARRAY (VALUE.REFERENCE | VALUE.NULL)*>
"""
self.check_node(tup_tree, 'VALUE.REFARRAY')
children = self.list_of_various(tup_tree,
('VALUE.REFERENCE', 'VALUE.NULL'))
return children | Parse a VALUE.REFARRAY element and return the array of instance paths
or class paths it represents as a list of CIMInstanceName or
CIMClassName objects, respectively.
::
<!ELEMENT VALUE.REFARRAY (VALUE.REFERENCE | VALUE.NULL)*> | Below is the instruction that describes the task:
### Input:
Parse a VALUE.REFARRAY element and return the array of instance paths
or class paths it represents as a list of CIMInstanceName or
CIMClassName objects, respectively.
::
<!ELEMENT VALUE.REFARRAY (VALUE.REFERENCE | VALUE.NULL)*>
### Response:
def parse_value_refarray(self, tup_tree):
"""
Parse a VALUE.REFARRAY element and return the array of instance paths
or class paths it represents as a list of CIMInstanceName or
CIMClassName objects, respectively.
::
<!ELEMENT VALUE.REFARRAY (VALUE.REFERENCE | VALUE.NULL)*>
"""
self.check_node(tup_tree, 'VALUE.REFARRAY')
children = self.list_of_various(tup_tree,
('VALUE.REFERENCE', 'VALUE.NULL'))
return children |
def parse_args(self, ap_mac, ssid, passphrase,
channel=None,
# KRACK attack options
double_3handshake=True,
encrypt_3handshake=True,
wait_3handshake=0,
double_gtk_refresh=True,
arp_target_ip=None,
arp_source_ip=None,
wait_gtk=10,
**kwargs):
"""
Mandatory arguments:
@iface: interface to use (must be in monitor mode)
@ap_mac: AP's MAC
@ssid: AP's SSID
@passphrase: AP's Passphrase (min 8 char.)
Optional arguments:
@channel: used by the interface. Default 6, autodetected on windows
Krack attacks options:
- Msg 3/4 handshake replay:
double_3handshake: double the 3/4 handshake message
encrypt_3handshake: encrypt the second 3/4 handshake message
wait_3handshake: time to wait (in sec.) before sending the second 3/4
- double GTK rekeying:
double_gtk_refresh: double the 1/2 GTK rekeying message
wait_gtk: time to wait (in sec.) before sending the GTK rekeying
arp_target_ip: Client IP to use in ARP req. (to detect attack success)
If None, use a DHCP server
arp_source_ip: Server IP to use in ARP req. (to detect attack success)
If None, use the DHCP server gateway address
"""
super(KrackAP, self).parse_args(**kwargs)
# Main AP options
self.mac = ap_mac
self.ssid = ssid
self.passphrase = passphrase
if channel is None:
if WINDOWS:
try:
channel = kwargs.get("iface", conf.iface).channel()
except (Scapy_Exception, AttributeError):
channel = 6
else:
channel = 6
self.channel = channel
# Internal structures
self.last_iv = None
self.client = None
self.seq_num = count()
self.replay_counter = count()
self.time_handshake_end = None
self.dhcp_server = DHCPOverWPA(send_func=self.send_ether_over_wpa,
pool=Net("192.168.42.128/25"),
network="192.168.42.0/24",
gw="192.168.42.1")
self.arp_sent = []
self.arp_to_send = 0
self.arp_retry = 0
# Bit 0: 3way handshake sent
# Bit 1: GTK rekeying sent
# Bit 2: ARP response obtained
self.krack_state = 0
# Krack options
self.double_3handshake = double_3handshake
self.encrypt_3handshake = encrypt_3handshake
self.wait_3handshake = wait_3handshake
self.double_gtk_refresh = double_gtk_refresh
self.arp_target_ip = arp_target_ip
if arp_source_ip is None:
# Use the DHCP server Gateway address
arp_source_ip = self.dhcp_server.gw
self.arp_source_ip = arp_source_ip
self.wait_gtk = wait_gtk
# May take several seconds
self.install_PMK() | Mandatory arguments:
@iface: interface to use (must be in monitor mode)
@ap_mac: AP's MAC
@ssid: AP's SSID
@passphrase: AP's Passphrase (min 8 char.)
Optional arguments:
@channel: used by the interface. Default 6, autodetected on windows
Krack attacks options:
- Msg 3/4 handshake replay:
double_3handshake: double the 3/4 handshake message
encrypt_3handshake: encrypt the second 3/4 handshake message
wait_3handshake: time to wait (in sec.) before sending the second 3/4
- double GTK rekeying:
double_gtk_refresh: double the 1/2 GTK rekeying message
wait_gtk: time to wait (in sec.) before sending the GTK rekeying
arp_target_ip: Client IP to use in ARP req. (to detect attack success)
If None, use a DHCP server
arp_source_ip: Server IP to use in ARP req. (to detect attack success)
If None, use the DHCP server gateway address | Below is the instruction that describes the task:
### Input:
Mandatory arguments:
@iface: interface to use (must be in monitor mode)
@ap_mac: AP's MAC
@ssid: AP's SSID
@passphrase: AP's Passphrase (min 8 char.)
Optional arguments:
@channel: used by the interface. Default 6, autodetected on windows
Krack attacks options:
- Msg 3/4 handshake replay:
double_3handshake: double the 3/4 handshake message
encrypt_3handshake: encrypt the second 3/4 handshake message
wait_3handshake: time to wait (in sec.) before sending the second 3/4
- double GTK rekeying:
double_gtk_refresh: double the 1/2 GTK rekeying message
wait_gtk: time to wait (in sec.) before sending the GTK rekeying
arp_target_ip: Client IP to use in ARP req. (to detect attack success)
If None, use a DHCP server
arp_source_ip: Server IP to use in ARP req. (to detect attack success)
If None, use the DHCP server gateway address
### Response:
def parse_args(self, ap_mac, ssid, passphrase,
channel=None,
# KRACK attack options
double_3handshake=True,
encrypt_3handshake=True,
wait_3handshake=0,
double_gtk_refresh=True,
arp_target_ip=None,
arp_source_ip=None,
wait_gtk=10,
**kwargs):
"""
Mandatory arguments:
@iface: interface to use (must be in monitor mode)
@ap_mac: AP's MAC
@ssid: AP's SSID
@passphrase: AP's Passphrase (min 8 char.)
Optional arguments:
@channel: used by the interface. Default 6, autodetected on windows
Krack attacks options:
- Msg 3/4 handshake replay:
double_3handshake: double the 3/4 handshake message
encrypt_3handshake: encrypt the second 3/4 handshake message
wait_3handshake: time to wait (in sec.) before sending the second 3/4
- double GTK rekeying:
double_gtk_refresh: double the 1/2 GTK rekeying message
wait_gtk: time to wait (in sec.) before sending the GTK rekeying
arp_target_ip: Client IP to use in ARP req. (to detect attack success)
If None, use a DHCP server
arp_source_ip: Server IP to use in ARP req. (to detect attack success)
If None, use the DHCP server gateway address
"""
super(KrackAP, self).parse_args(**kwargs)
# Main AP options
self.mac = ap_mac
self.ssid = ssid
self.passphrase = passphrase
if channel is None:
if WINDOWS:
try:
channel = kwargs.get("iface", conf.iface).channel()
except (Scapy_Exception, AttributeError):
channel = 6
else:
channel = 6
self.channel = channel
# Internal structures
self.last_iv = None
self.client = None
self.seq_num = count()
self.replay_counter = count()
self.time_handshake_end = None
self.dhcp_server = DHCPOverWPA(send_func=self.send_ether_over_wpa,
pool=Net("192.168.42.128/25"),
network="192.168.42.0/24",
gw="192.168.42.1")
self.arp_sent = []
self.arp_to_send = 0
self.arp_retry = 0
# Bit 0: 3way handshake sent
# Bit 1: GTK rekeying sent
# Bit 2: ARP response obtained
self.krack_state = 0
# Krack options
self.double_3handshake = double_3handshake
self.encrypt_3handshake = encrypt_3handshake
self.wait_3handshake = wait_3handshake
self.double_gtk_refresh = double_gtk_refresh
self.arp_target_ip = arp_target_ip
if arp_source_ip is None:
# Use the DHCP server Gateway address
arp_source_ip = self.dhcp_server.gw
self.arp_source_ip = arp_source_ip
self.wait_gtk = wait_gtk
# May take several seconds
self.install_PMK() |
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response.
"""
samples = data[0].shape[1]
subjects = len(data)
np.random.seed(self.rand_seed)
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject, and trace_xtx with
# the ||X_i||_F^2 of each subject.
w, voxels = _init_w_transforms(data, self.features)
x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
shared_response = np.zeros((self.features, samples))
sigma_s = np.identity(self.features)
# Main loop of the algorithm (run
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# E-step:
# Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W
rho0 = (1 / rho2).sum()
# Invert Sigma_s using Cholesky factorization
(chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
sigma_s, check_finite=False)
inv_sigma_s = scipy.linalg.cho_solve(
(chol_sigma_s, lower_sigma_s), np.identity(self.features),
check_finite=False)
# Invert (Sigma_s + rho_0 * I) using Cholesky factorization
sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
(chol_sigma_s_rhos, lower_sigma_s_rhos) = scipy.linalg.cho_factor(
sigma_s_rhos, check_finite=False)
inv_sigma_s_rhos = scipy.linalg.cho_solve(
(chol_sigma_s_rhos, lower_sigma_s_rhos),
np.identity(self.features), check_finite=False)
# Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
# of X_i^T * rho_i^-2 * X_i
wt_invpsi_x = np.zeros((self.features, samples))
trace_xt_invsigma2_x = 0.0
for subject in range(subjects):
wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject]
trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]
log_det_psi = np.sum(np.log(rho2) * voxels)
# Update the shared response
shared_response = sigma_s.dot(
np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot(
wt_invpsi_x)
# M-step
# Update Sigma_s and compute its trace
sigma_s = (inv_sigma_s_rhos
+ shared_response.dot(shared_response.T) / samples)
trace_sigma_s = samples * np.trace(sigma_s)
# Update each subject's mapping transform W_i and error variance
# rho_i^2
for subject in range(subjects):
a_subject = x[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, s_subject, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
rho2[subject] = trace_xtx[subject]
rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum()
rho2[subject] += trace_sigma_s
rho2[subject] /= samples * voxels[subject]
if logger.isEnabledFor(logging.INFO):
# Calculate and log the current log-likelihood for checking
# convergence
loglike = self._likelihood(
chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples)
logger.info('Objective function %f' % loglike)
return sigma_s, w, mu, rho2, shared_response | Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response. | Below is the instruction that describes the task:
### Input:
Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response.
### Response:
def _srm(self, data):
"""Expectation-Maximization algorithm for fitting the probabilistic SRM.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
Returns
-------
sigma_s : array, shape=[features, features]
The covariance :math:`\\Sigma_s` of the shared response Normal
distribution.
w : list of array, element i has shape=[voxels_i, features]
The orthogonal transforms (mappings) :math:`W_i` for each subject.
mu : list of array, element i has shape=[voxels_i]
The voxel means :math:`\\mu_i` over the samples for each subject.
rho2 : array, shape=[subjects]
The estimated noise variance :math:`\\rho_i^2` for each subject
s : array, shape=[features, samples]
The shared response.
"""
samples = data[0].shape[1]
subjects = len(data)
np.random.seed(self.rand_seed)
# Initialization step: initialize the outputs with initial values,
# voxels with the number of voxels in each subject, and trace_xtx with
# the ||X_i||_F^2 of each subject.
w, voxels = _init_w_transforms(data, self.features)
x, mu, rho2, trace_xtx = self._init_structures(data, subjects)
shared_response = np.zeros((self.features, samples))
sigma_s = np.identity(self.features)
# Main loop of the algorithm (run
for iteration in range(self.n_iter):
logger.info('Iteration %d' % (iteration + 1))
# E-step:
# Sum the inverted the rho2 elements for computing W^T * Psi^-1 * W
rho0 = (1 / rho2).sum()
# Invert Sigma_s using Cholesky factorization
(chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor(
sigma_s, check_finite=False)
inv_sigma_s = scipy.linalg.cho_solve(
(chol_sigma_s, lower_sigma_s), np.identity(self.features),
check_finite=False)
# Invert (Sigma_s + rho_0 * I) using Cholesky factorization
sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0
(chol_sigma_s_rhos, lower_sigma_s_rhos) = scipy.linalg.cho_factor(
sigma_s_rhos, check_finite=False)
inv_sigma_s_rhos = scipy.linalg.cho_solve(
(chol_sigma_s_rhos, lower_sigma_s_rhos),
np.identity(self.features), check_finite=False)
# Compute the sum of W_i^T * rho_i^-2 * X_i, and the sum of traces
# of X_i^T * rho_i^-2 * X_i
wt_invpsi_x = np.zeros((self.features, samples))
trace_xt_invsigma2_x = 0.0
for subject in range(subjects):
wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject]
trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject]
log_det_psi = np.sum(np.log(rho2) * voxels)
# Update the shared response
shared_response = sigma_s.dot(
np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot(
wt_invpsi_x)
# M-step
# Update Sigma_s and compute its trace
sigma_s = (inv_sigma_s_rhos
+ shared_response.dot(shared_response.T) / samples)
trace_sigma_s = samples * np.trace(sigma_s)
# Update each subject's mapping transform W_i and error variance
# rho_i^2
for subject in range(subjects):
a_subject = x[subject].dot(shared_response.T)
perturbation = np.zeros(a_subject.shape)
np.fill_diagonal(perturbation, 0.001)
u_subject, s_subject, v_subject = np.linalg.svd(
a_subject + perturbation, full_matrices=False)
w[subject] = u_subject.dot(v_subject)
rho2[subject] = trace_xtx[subject]
rho2[subject] += -2 * np.sum(w[subject] * a_subject).sum()
rho2[subject] += trace_sigma_s
rho2[subject] /= samples * voxels[subject]
if logger.isEnabledFor(logging.INFO):
# Calculate and log the current log-likelihood for checking
# convergence
loglike = self._likelihood(
chol_sigma_s_rhos, log_det_psi, chol_sigma_s,
trace_xt_invsigma2_x, inv_sigma_s_rhos, wt_invpsi_x,
samples)
logger.info('Objective function %f' % loglike)
return sigma_s, w, mu, rho2, shared_response |
def H9(self):
"Entropy."
if not hasattr(self, '_H9'):
self._H9 = -(self.P * np.log(self.P + self.eps)).sum(2).sum(1)
return self._H9 | Entropy. | Below is the instruction that describes the task:
### Input:
Entropy.
### Response:
def H9(self):
"Entropy."
if not hasattr(self, '_H9'):
self._H9 = -(self.P * np.log(self.P + self.eps)).sum(2).sum(1)
return self._H9 |
def as_sql(self, compiler, connection):
"""Compiles this expression into SQL."""
sql, params = super().as_sql(compiler, connection)
return 'EXTRACT(epoch FROM {})'.format(sql), params | Compiles this expression into SQL. | Below is the instruction that describes the task:
### Input:
Compiles this expression into SQL.
### Response:
def as_sql(self, compiler, connection):
"""Compiles this expression into SQL."""
sql, params = super().as_sql(compiler, connection)
return 'EXTRACT(epoch FROM {})'.format(sql), params |
def write_record(self, event_str):
"""Writes a serialized event to file."""
header = struct.pack('Q', len(event_str))
header += struct.pack('I', masked_crc32c(header))
footer = struct.pack('I', masked_crc32c(event_str))
self._writer.write(header + event_str + footer) | Writes a serialized event to file. | Below is the instruction that describes the task:
### Input:
Writes a serialized event to file.
### Response:
def write_record(self, event_str):
"""Writes a serialized event to file."""
header = struct.pack('Q', len(event_str))
header += struct.pack('I', masked_crc32c(header))
footer = struct.pack('I', masked_crc32c(event_str))
self._writer.write(header + event_str + footer) |
def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
except:
warning = 'Warning: unable to connect to ' + db_uri + "\n"
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name] | Adds a collections index entries to the cache if not present | Below is the instruction that describes the task:
### Input:
Adds a collections index entries to the cache if not present
### Response:
def _ensure_index_cache(self, db_uri, db_name, collection_name):
"""Adds a collections index entries to the cache if not present"""
if not self._check_indexes or db_uri is None:
return {'indexes': None}
if db_name not in self.get_cache():
self._internal_map[db_name] = {}
if collection_name not in self._internal_map[db_name]:
indexes = []
try:
if self._index_cache_connection is None:
self._index_cache_connection = pymongo.MongoClient(db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
db = self._index_cache_connection[db_name]
indexes = db[collection_name].index_information()
except:
warning = 'Warning: unable to connect to ' + db_uri + "\n"
else:
internal_map_entry = {'indexes': indexes}
self.get_cache()[db_name][collection_name] = internal_map_entry
return self.get_cache()[db_name][collection_name] |
def _build_parser(self):
"""Build command line argument parser.
Returns:
:class:`argparse.ArgumentParser`: the command line argument parser.
You probably won't need to use it directly. To parse command line
arguments and update the :class:`ConfigurationManager` instance
accordingly, use the :meth:`parse_args` method.
"""
main_parser = argparse.ArgumentParser(description=self.common.help,
prefix_chars='-+')
self._add_options_to_parser(self._opt_bare, main_parser)
main_parser.set_defaults(**self.common.defaults)
if self.bare is not None:
main_parser.set_defaults(**self.bare.defaults)
subparsers = main_parser.add_subparsers(dest='loam_sub_name')
for cmd_name, meta in self.subcmds.items():
kwargs = {'prefix_chars': '+-', 'help': meta.help}
dummy_parser = subparsers.add_parser(cmd_name, **kwargs)
self._add_options_to_parser(self._opt_cmds[cmd_name], dummy_parser)
dummy_parser.set_defaults(**meta.defaults)
return main_parser | Build command line argument parser.
Returns:
:class:`argparse.ArgumentParser`: the command line argument parser.
You probably won't need to use it directly. To parse command line
arguments and update the :class:`ConfigurationManager` instance
accordingly, use the :meth:`parse_args` method. | Below is the instruction that describes the task:
### Input:
Build command line argument parser.
Returns:
:class:`argparse.ArgumentParser`: the command line argument parser.
You probably won't need to use it directly. To parse command line
arguments and update the :class:`ConfigurationManager` instance
accordingly, use the :meth:`parse_args` method.
### Response:
def _build_parser(self):
"""Build command line argument parser.
Returns:
:class:`argparse.ArgumentParser`: the command line argument parser.
You probably won't need to use it directly. To parse command line
arguments and update the :class:`ConfigurationManager` instance
accordingly, use the :meth:`parse_args` method.
"""
main_parser = argparse.ArgumentParser(description=self.common.help,
prefix_chars='-+')
self._add_options_to_parser(self._opt_bare, main_parser)
main_parser.set_defaults(**self.common.defaults)
if self.bare is not None:
main_parser.set_defaults(**self.bare.defaults)
subparsers = main_parser.add_subparsers(dest='loam_sub_name')
for cmd_name, meta in self.subcmds.items():
kwargs = {'prefix_chars': '+-', 'help': meta.help}
dummy_parser = subparsers.add_parser(cmd_name, **kwargs)
self._add_options_to_parser(self._opt_cmds[cmd_name], dummy_parser)
dummy_parser.set_defaults(**meta.defaults)
return main_parser |
def initialize_ui(self):
"""
Initializes the Component ui.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__))
self.__Port_spinBox_set_ui()
self.__Address_lineEdit_set_ui()
self.__File_Command_lineEdit_set_ui()
self.__Connection_End_lineEdit_set_ui()
self.__add_actions()
# Signals / Slots.
self.Port_spinBox.valueChanged.connect(self.__Port_spinBox__valueChanged)
self.Address_lineEdit.editingFinished.connect(self.__Address_lineEdit__editFinished)
self.File_Command_lineEdit.editingFinished.connect(self.__File_Command_lineEdit__editFinished)
self.Connection_End_lineEdit.editingFinished.connect(self.__Connection_End_lineEdit__editFinished)
self.initialized_ui = True
return True | Initializes the Component ui.
:return: Method success.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Initializes the Component ui.
:return: Method success.
:rtype: bool
### Response:
def initialize_ui(self):
"""
Initializes the Component ui.
:return: Method success.
:rtype: bool
"""
LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__))
self.__Port_spinBox_set_ui()
self.__Address_lineEdit_set_ui()
self.__File_Command_lineEdit_set_ui()
self.__Connection_End_lineEdit_set_ui()
self.__add_actions()
# Signals / Slots.
self.Port_spinBox.valueChanged.connect(self.__Port_spinBox__valueChanged)
self.Address_lineEdit.editingFinished.connect(self.__Address_lineEdit__editFinished)
self.File_Command_lineEdit.editingFinished.connect(self.__File_Command_lineEdit__editFinished)
self.Connection_End_lineEdit.editingFinished.connect(self.__Connection_End_lineEdit__editFinished)
self.initialized_ui = True
return True |
def get_post(post_id, username, password):
"""
metaWeblog.getPost(post_id, username, password)
=> post structure
"""
user = authenticate(username, password)
site = Site.objects.get_current()
return post_structure(Entry.objects.get(id=post_id, authors=user), site) | metaWeblog.getPost(post_id, username, password)
=> post structure | Below is the instruction that describes the task:
### Input:
metaWeblog.getPost(post_id, username, password)
=> post structure
### Response:
def get_post(post_id, username, password):
"""
metaWeblog.getPost(post_id, username, password)
=> post structure
"""
user = authenticate(username, password)
site = Site.objects.get_current()
return post_structure(Entry.objects.get(id=post_id, authors=user), site) |
def get_resources(cls):
"""Returns Ext Resources."""
job_controller = JobsController(
directory.get_plugin())
resources = []
resources.append(extensions.ResourceExtension(
Jobs.get_alias(),
job_controller))
return resources | Returns Ext Resources. | Below is the instruction that describes the task:
### Input:
Returns Ext Resources.
### Response:
def get_resources(cls):
"""Returns Ext Resources."""
job_controller = JobsController(
directory.get_plugin())
resources = []
resources.append(extensions.ResourceExtension(
Jobs.get_alias(),
job_controller))
return resources |
def extended_cigar(aligned_template, aligned_query):
''' Convert mutation annotations to extended cigar format
https://github.com/lh3/minimap2#the-cs-optional-tag
USAGE:
>>> template = 'CGATCGATAAATAGAGTAG---GAATAGCA'
>>> query = 'CGATCG---AATAGAGTAGGTCGAATtGCA'
>>> extended_cigar(template, query) == ':6-ata:10+gtc:4*at:3'
True
'''
# - Go through each position in the alignment
insertion = []
deletion = []
matches = []
cigar = []
for r_aa, q_aa in zip(aligned_template.lower(), aligned_query.lower()):
gap_ref = r_aa == '-'
gap_que = q_aa == '-'
match = r_aa == q_aa
if matches and not match:
# End match block
cigar.append(":%s"%len(matches))
matches = []
if insertion and not gap_ref:
# End insertion
cigar.append("+%s"%''.join(insertion))
insertion = []
elif deletion and not gap_que:
# End deletion
cigar.append("-%s"%''.join(deletion))
deletion = []
if gap_ref:
if insertion:
# Extend insertion
insertion.append(q_aa)
else:
# Start insertion
insertion = [q_aa]
elif gap_que:
if deletion:
# Extend deletion
deletion.append(r_aa)
else:
# Start deletion
deletion = [r_aa]
elif match:
if matches:
# Extend match block
matches.append(r_aa)
else:
# Start match block
matches = [r_aa]
else:
# Add SNP annotation
cigar.append("*%s%s"%(r_aa, q_aa))
if matches:
cigar.append(":%s"%len(matches))
del matches
if insertion:
# End insertion
cigar.append("+%s"%''.join(insertion))
del insertion
elif deletion:
# End deletion
cigar.append("-%s"%''.join(deletion))
del deletion
return ''.join(cigar) | Convert mutation annotations to extended cigar format
https://github.com/lh3/minimap2#the-cs-optional-tag
USAGE:
>>> template = 'CGATCGATAAATAGAGTAG---GAATAGCA'
>>> query = 'CGATCG---AATAGAGTAGGTCGAATtGCA'
>>> extended_cigar(template, query) == ':6-ata:10+gtc:4*at:3'
True | Below is the instruction that describes the task:
### Input:
Convert mutation annotations to extended cigar format
https://github.com/lh3/minimap2#the-cs-optional-tag
USAGE:
>>> template = 'CGATCGATAAATAGAGTAG---GAATAGCA'
>>> query = 'CGATCG---AATAGAGTAGGTCGAATtGCA'
>>> extended_cigar(template, query) == ':6-ata:10+gtc:4*at:3'
True
### Response:
def extended_cigar(aligned_template, aligned_query):
''' Convert mutation annotations to extended cigar format
https://github.com/lh3/minimap2#the-cs-optional-tag
USAGE:
>>> template = 'CGATCGATAAATAGAGTAG---GAATAGCA'
>>> query = 'CGATCG---AATAGAGTAGGTCGAATtGCA'
>>> extended_cigar(template, query) == ':6-ata:10+gtc:4*at:3'
True
'''
# - Go through each position in the alignment
insertion = []
deletion = []
matches = []
cigar = []
for r_aa, q_aa in zip(aligned_template.lower(), aligned_query.lower()):
gap_ref = r_aa == '-'
gap_que = q_aa == '-'
match = r_aa == q_aa
if matches and not match:
# End match block
cigar.append(":%s"%len(matches))
matches = []
if insertion and not gap_ref:
# End insertion
cigar.append("+%s"%''.join(insertion))
insertion = []
elif deletion and not gap_que:
# End deletion
cigar.append("-%s"%''.join(deletion))
deletion = []
if gap_ref:
if insertion:
# Extend insertion
insertion.append(q_aa)
else:
# Start insertion
insertion = [q_aa]
elif gap_que:
if deletion:
# Extend deletion
deletion.append(r_aa)
else:
# Start deletion
deletion = [r_aa]
elif match:
if matches:
# Extend match block
matches.append(r_aa)
else:
# Start match block
matches = [r_aa]
else:
# Add SNP annotation
cigar.append("*%s%s"%(r_aa, q_aa))
if matches:
cigar.append(":%s"%len(matches))
del matches
if insertion:
# End insertion
cigar.append("+%s"%''.join(insertion))
del insertion
elif deletion:
# End deletion
cigar.append("-%s"%''.join(deletion))
del deletion
return ''.join(cigar) |
def intf_up(self, interface):
'''
Can be called when an interface is put in service.
FIXME: not currently used; more needs to be done to
correctly put a new intf into service.
'''
if interface.name not in self._devinfo:
self._devinfo[interface.name] = interface
if self._devupdown_callback:
self._devupdown_callback(interface, 'up')
else:
raise ValueError("Interface already registered") | Can be called when an interface is put in service.
FIXME: not currently used; more needs to be done to
correctly put a new intf into service. | Below is the the instruction that describes the task:
### Input:
Can be called when an interface is put in service.
FIXME: not currently used; more needs to be done to
correctly put a new intf into service.
### Response:
def intf_up(self, interface):
'''
Can be called when an interface is put in service.
FIXME: not currently used; more needs to be done to
correctly put a new intf into service.
'''
if interface.name not in self._devinfo:
self._devinfo[interface.name] = interface
if self._devupdown_callback:
self._devupdown_callback(interface, 'up')
else:
raise ValueError("Interface already registered") |
def bind(self, *targets):
"""
Tries to bind the PV to one of the supplied targets. Targets are inspected according to
the order in which they are supplied.
:param targets: Objects to inspect from.
:return: BoundPV instance with the PV bound to the target property.
"""
self.property = 'value'
self.meta_data_property = 'meta'
return BoundPV(self,
self._get_target(self.property, *targets),
self._get_target(self.meta_data_property, *targets)) | Tries to bind the PV to one of the supplied targets. Targets are inspected according to
the order in which they are supplied.
:param targets: Objects to inspect from.
:return: BoundPV instance with the PV bound to the target property. | Below is the the instruction that describes the task:
### Input:
Tries to bind the PV to one of the supplied targets. Targets are inspected according to
the order in which they are supplied.
:param targets: Objects to inspect from.
:return: BoundPV instance with the PV bound to the target property.
### Response:
def bind(self, *targets):
"""
Tries to bind the PV to one of the supplied targets. Targets are inspected according to
the order in which they are supplied.
:param targets: Objects to inspect from.
:return: BoundPV instance with the PV bound to the target property.
"""
self.property = 'value'
self.meta_data_property = 'meta'
return BoundPV(self,
self._get_target(self.property, *targets),
self._get_target(self.meta_data_property, *targets)) |
def get_annotation(cls, fn):
"""Find the _schema_annotation attribute for the given function.
This will descend through decorators until it finds something that has
the attribute. If it doesn't find it anywhere, it will return None.
:param func fn: Find the attribute on this function.
:returns: an instance of
:class:`~doctor.resource.ResourceSchemaAnnotation` or
None.
"""
while fn is not None:
if hasattr(fn, '_schema_annotation'):
return fn._schema_annotation
fn = getattr(fn, 'im_func', fn)
closure = getattr(fn, '__closure__', None)
fn = closure[0].cell_contents if closure is not None else None
return None | Find the _schema_annotation attribute for the given function.
This will descend through decorators until it finds something that has
the attribute. If it doesn't find it anywhere, it will return None.
:param func fn: Find the attribute on this function.
:returns: an instance of
:class:`~doctor.resource.ResourceSchemaAnnotation` or
None. | Below is the the instruction that describes the task:
### Input:
Find the _schema_annotation attribute for the given function.
This will descend through decorators until it finds something that has
the attribute. If it doesn't find it anywhere, it will return None.
:param func fn: Find the attribute on this function.
:returns: an instance of
:class:`~doctor.resource.ResourceSchemaAnnotation` or
None.
### Response:
def get_annotation(cls, fn):
"""Find the _schema_annotation attribute for the given function.
This will descend through decorators until it finds something that has
the attribute. If it doesn't find it anywhere, it will return None.
:param func fn: Find the attribute on this function.
:returns: an instance of
:class:`~doctor.resource.ResourceSchemaAnnotation` or
None.
"""
while fn is not None:
if hasattr(fn, '_schema_annotation'):
return fn._schema_annotation
fn = getattr(fn, 'im_func', fn)
closure = getattr(fn, '__closure__', None)
fn = closure[0].cell_contents if closure is not None else None
return None |
def _compute_value(power, wg):
"""Return the weight corresponding to single power."""
if power not in wg:
p1, p2 = power
# y power
if p1 == 0:
yy = wg[(0, -1)]
wg[power] = numpy.power(yy, p2 / 2).sum() / len(yy)
# x power
else:
xx = wg[(-1, 0)]
wg[power] = numpy.power(xx, p1 / 2).sum() / len(xx)
return wg[power] | Return the weight corresponding to single power. | Below is the the instruction that describes the task:
### Input:
Return the weight corresponding to single power.
### Response:
def _compute_value(power, wg):
"""Return the weight corresponding to single power."""
if power not in wg:
p1, p2 = power
# y power
if p1 == 0:
yy = wg[(0, -1)]
wg[power] = numpy.power(yy, p2 / 2).sum() / len(yy)
# x power
else:
xx = wg[(-1, 0)]
wg[power] = numpy.power(xx, p1 / 2).sum() / len(xx)
return wg[power] |
def read_stats(self):
""" Read current statistics from chassis.
:return: dictionary {stream: {tx: {stat name: stat value}} rx: {tpld: {stat group {stat name: value}}}}
"""
self.tx_statistics = TgnObjectsDict()
for port in self.session.ports.values():
for stream in port.streams.values():
self.tx_statistics[stream] = stream.read_stats()
tpld_statistics = XenaTpldsStats(self.session).read_stats()
self.statistics = TgnObjectsDict()
for stream, stream_stats in self.tx_statistics.items():
self.statistics[stream] = OrderedDict()
self.statistics[stream]['tx'] = stream_stats
self.statistics[stream]['rx'] = OrderedDict()
stream_tpld = stream.get_attribute('ps_tpldid')
for tpld, tpld_stats in tpld_statistics.items():
if tpld.id == stream_tpld:
self.statistics[stream]['rx'][tpld] = tpld_stats
return self.statistics | Read current statistics from chassis.
:return: dictionary {stream: {tx: {stat name: stat value}} rx: {tpld: {stat group {stat name: value}}}} | Below is the the instruction that describes the task:
### Input:
Read current statistics from chassis.
:return: dictionary {stream: {tx: {stat name: stat value}} rx: {tpld: {stat group {stat name: value}}}}
### Response:
def read_stats(self):
""" Read current statistics from chassis.
:return: dictionary {stream: {tx: {stat name: stat value}} rx: {tpld: {stat group {stat name: value}}}}
"""
self.tx_statistics = TgnObjectsDict()
for port in self.session.ports.values():
for stream in port.streams.values():
self.tx_statistics[stream] = stream.read_stats()
tpld_statistics = XenaTpldsStats(self.session).read_stats()
self.statistics = TgnObjectsDict()
for stream, stream_stats in self.tx_statistics.items():
self.statistics[stream] = OrderedDict()
self.statistics[stream]['tx'] = stream_stats
self.statistics[stream]['rx'] = OrderedDict()
stream_tpld = stream.get_attribute('ps_tpldid')
for tpld, tpld_stats in tpld_statistics.items():
if tpld.id == stream_tpld:
self.statistics[stream]['rx'][tpld] = tpld_stats
return self.statistics |
def stem(self, word):
"""Return CLEF German stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = CLEFGerman()
>>> stmr.stem('lesen')
'lese'
>>> stmr.stem('graues')
'grau'
>>> stmr.stem('buchstabieren')
'buchstabier'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# remove umlauts
word = word.translate(self._umlauts)
# remove plurals
wlen = len(word) - 1
if wlen > 3:
if wlen > 5:
if word[-3:] == 'nen':
return word[:-3]
if wlen > 4:
if word[-2:] in {'en', 'se', 'es', 'er'}:
return word[:-2]
if word[-1] in {'e', 'n', 'r', 's'}:
return word[:-1]
return word | Return CLEF German stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = CLEFGerman()
>>> stmr.stem('lesen')
'lese'
>>> stmr.stem('graues')
'grau'
>>> stmr.stem('buchstabieren')
'buchstabier' | Below is the the instruction that describes the task:
### Input:
Return CLEF German stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = CLEFGerman()
>>> stmr.stem('lesen')
'lese'
>>> stmr.stem('graues')
'grau'
>>> stmr.stem('buchstabieren')
'buchstabier'
### Response:
def stem(self, word):
"""Return CLEF German stem.
Parameters
----------
word : str
The word to stem
Returns
-------
str
Word stem
Examples
--------
>>> stmr = CLEFGerman()
>>> stmr.stem('lesen')
'lese'
>>> stmr.stem('graues')
'grau'
>>> stmr.stem('buchstabieren')
'buchstabier'
"""
# lowercase, normalize, and compose
word = normalize('NFC', text_type(word.lower()))
# remove umlauts
word = word.translate(self._umlauts)
# remove plurals
wlen = len(word) - 1
if wlen > 3:
if wlen > 5:
if word[-3:] == 'nen':
return word[:-3]
if wlen > 4:
if word[-2:] in {'en', 'se', 'es', 'er'}:
return word[:-2]
if word[-1] in {'e', 'n', 'r', 's'}:
return word[:-1]
return word |
def _fused_batch_norm_op(self, input_batch, mean, variance, use_batch_stats):
"""Creates a fused batch normalization op."""
# Store the original shape of the mean and variance.
mean_shape = mean.get_shape()
variance_shape = variance.get_shape()
# The fused batch norm expects the mean, variance, gamma and beta
# tensors to have dimension 1, so we flatten them to remove the
# extra dimensions. In addition, it expects the input_batch to have
# dimension 4, so we reshape it accordingly.
gamma_flatten = tf.reshape(self._gamma, shape=(self._num_channels,))
beta_flatten = tf.reshape(self._beta, shape=(self._num_channels,))
flatten_mean = tf.reshape(mean, shape=(self._num_channels,))
flatten_variance = tf.reshape(variance, shape=(self._num_channels,))
use_batch_stats = tf.convert_to_tensor(use_batch_stats)
input_shape = input_batch.get_shape()
output_shape = [-1] + input_shape.as_list()[1:]
flat_image_size = np.prod(self._image_shape, dtype=np.int32)
if len(self._data_format) == 4:
fusable_data_format = self._data_format
fusable_batch = input_batch
elif self._channel_index == 1 and self._image_shape:
fusable_data_format = "NCHW"
fusable_batch = tf.reshape(
input_batch,
shape=(-1, self._num_channels, 1, flat_image_size))
else:
# The CPU implementation of FusedBatchNorm only supports NHWC tensor
# format for now.
fusable_data_format = "NHWC"
fusable_batch = tf.reshape(
input_batch,
shape=(-1, 1, flat_image_size, self._num_channels))
common_args = {
"scale": gamma_flatten,
"offset": beta_flatten,
"epsilon": self._eps,
"data_format": fusable_data_format,
"name": "batch_norm"
}
def use_batch_stats_fused_batch_norm():
return tf.nn.fused_batch_norm(
fusable_batch,
mean=None,
variance=None,
is_training=True,
**common_args)
def moving_average_fused_batch_norm():
return tf.nn.fused_batch_norm(
fusable_batch,
mean=flatten_mean,
variance=flatten_variance,
is_training=False,
**common_args)
batch_norm_op, mean, variance = utils.smart_cond(
use_batch_stats, use_batch_stats_fused_batch_norm,
moving_average_fused_batch_norm)
if len(self._data_format) != 4:
batch_norm_op = tf.reshape(batch_norm_op, output_shape)
mean = tf.reshape(mean, mean_shape)
variance = tf.reshape(variance, variance_shape)
return batch_norm_op, mean, variance | Creates a fused batch normalization op. | Below is the the instruction that describes the task:
### Input:
Creates a fused batch normalization op.
### Response:
def _fused_batch_norm_op(self, input_batch, mean, variance, use_batch_stats):
"""Creates a fused batch normalization op."""
# Store the original shape of the mean and variance.
mean_shape = mean.get_shape()
variance_shape = variance.get_shape()
# The fused batch norm expects the mean, variance, gamma and beta
# tensors to have dimension 1, so we flatten them to remove the
# extra dimensions. In addition, it expects the input_batch to have
# dimension 4, so we reshape it accordingly.
gamma_flatten = tf.reshape(self._gamma, shape=(self._num_channels,))
beta_flatten = tf.reshape(self._beta, shape=(self._num_channels,))
flatten_mean = tf.reshape(mean, shape=(self._num_channels,))
flatten_variance = tf.reshape(variance, shape=(self._num_channels,))
use_batch_stats = tf.convert_to_tensor(use_batch_stats)
input_shape = input_batch.get_shape()
output_shape = [-1] + input_shape.as_list()[1:]
flat_image_size = np.prod(self._image_shape, dtype=np.int32)
if len(self._data_format) == 4:
fusable_data_format = self._data_format
fusable_batch = input_batch
elif self._channel_index == 1 and self._image_shape:
fusable_data_format = "NCHW"
fusable_batch = tf.reshape(
input_batch,
shape=(-1, self._num_channels, 1, flat_image_size))
else:
# The CPU implementation of FusedBatchNorm only supports NHWC tensor
# format for now.
fusable_data_format = "NHWC"
fusable_batch = tf.reshape(
input_batch,
shape=(-1, 1, flat_image_size, self._num_channels))
common_args = {
"scale": gamma_flatten,
"offset": beta_flatten,
"epsilon": self._eps,
"data_format": fusable_data_format,
"name": "batch_norm"
}
def use_batch_stats_fused_batch_norm():
return tf.nn.fused_batch_norm(
fusable_batch,
mean=None,
variance=None,
is_training=True,
**common_args)
def moving_average_fused_batch_norm():
return tf.nn.fused_batch_norm(
fusable_batch,
mean=flatten_mean,
variance=flatten_variance,
is_training=False,
**common_args)
batch_norm_op, mean, variance = utils.smart_cond(
use_batch_stats, use_batch_stats_fused_batch_norm,
moving_average_fused_batch_norm)
if len(self._data_format) != 4:
batch_norm_op = tf.reshape(batch_norm_op, output_shape)
mean = tf.reshape(mean, mean_shape)
variance = tf.reshape(variance, variance_shape)
return batch_norm_op, mean, variance |
def _get_param_types_maxint(params):
"""
Returns characteristics of parameters
:param params: dictionary of pairs
it must have parameter_name:list of possible values:
params = {"kernel": ["rbf"],
"C" : [1,2,3,4,5,6,7,8],
"gamma" : np.logspace(-9, 9, num=25, base=10)}
:return: name_values pairs - list of (name,possible_values) tuples for each parameter
types - list of types for each parameter
maxints - list of maximum integer for each particular gene in chromosome
"""
name_values = list(params.items())
types = []
for _, possible_values in name_values:
if isinstance(possible_values[0], float):
types.append(param_types.Numerical)
else:
types.append(param_types.Categorical)
maxints = [len(possible_values) - 1 for _, possible_values in name_values]
return name_values, types, maxints | Returns characteristics of parameters
:param params: dictionary of pairs
it must have parameter_name:list of possible values:
params = {"kernel": ["rbf"],
"C" : [1,2,3,4,5,6,7,8],
"gamma" : np.logspace(-9, 9, num=25, base=10)}
:return: name_values pairs - list of (name,possible_values) tuples for each parameter
types - list of types for each parameter
maxints - list of maximum integer for each particular gene in chromosome | Below is the the instruction that describes the task:
### Input:
Returns characteristics of parameters
:param params: dictionary of pairs
it must have parameter_name:list of possible values:
params = {"kernel": ["rbf"],
"C" : [1,2,3,4,5,6,7,8],
"gamma" : np.logspace(-9, 9, num=25, base=10)}
:return: name_values pairs - list of (name,possible_values) tuples for each parameter
types - list of types for each parameter
maxints - list of maximum integer for each particular gene in chromosome
### Response:
def _get_param_types_maxint(params):
"""
Returns characteristics of parameters
:param params: dictionary of pairs
it must have parameter_name:list of possible values:
params = {"kernel": ["rbf"],
"C" : [1,2,3,4,5,6,7,8],
"gamma" : np.logspace(-9, 9, num=25, base=10)}
:return: name_values pairs - list of (name,possible_values) tuples for each parameter
types - list of types for each parameter
maxints - list of maximum integer for each particular gene in chromosome
"""
name_values = list(params.items())
types = []
for _, possible_values in name_values:
if isinstance(possible_values[0], float):
types.append(param_types.Numerical)
else:
types.append(param_types.Categorical)
maxints = [len(possible_values) - 1 for _, possible_values in name_values]
return name_values, types, maxints |
def get_minkowski_red(structure):
"""
Get a minkowski reduced structure
"""
output = run_aconvasp_command(["aconvasp", "--kpath"], structure)
started = False
poscar_string = ""
if "ERROR" in output[1]:
raise AconvaspError(output[1])
for line in output[0].split("\n"):
if started or line.find("KPOINTS TO RUN") != -1:
poscar_string = poscar_string + line + "\n"
if line.find("STRUCTURE TO RUN") != -1:
started = True
if line.find("KPOINTS TO RUN") != -1:
started = False
return Poscar.from_string(poscar_string).structure | Get a minkowski reduced structure | Below is the the instruction that describes the task:
### Input:
Get a minkowski reduced structure
### Response:
def get_minkowski_red(structure):
"""
Get a minkowski reduced structure
"""
output = run_aconvasp_command(["aconvasp", "--kpath"], structure)
started = False
poscar_string = ""
if "ERROR" in output[1]:
raise AconvaspError(output[1])
for line in output[0].split("\n"):
if started or line.find("KPOINTS TO RUN") != -1:
poscar_string = poscar_string + line + "\n"
if line.find("STRUCTURE TO RUN") != -1:
started = True
if line.find("KPOINTS TO RUN") != -1:
started = False
return Poscar.from_string(poscar_string).structure |
def _set_clock_foreign_masters(self, v, load=False):
"""
Setter method for clock_foreign_masters, mapped from YANG variable /ptp_state/clock_foreign_masters (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_clock_foreign_masters is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clock_foreign_masters() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=clock_foreign_masters.clock_foreign_masters, is_container='container', presence=False, yang_name="clock-foreign-masters", rest_name="clock-foreign-masters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-clock-foreign-masters', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """clock_foreign_masters must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=clock_foreign_masters.clock_foreign_masters, is_container='container', presence=False, yang_name="clock-foreign-masters", rest_name="clock-foreign-masters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-clock-foreign-masters', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)""",
})
self.__clock_foreign_masters = t
if hasattr(self, '_set'):
self._set() | Setter method for clock_foreign_masters, mapped from YANG variable /ptp_state/clock_foreign_masters (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_clock_foreign_masters is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clock_foreign_masters() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for clock_foreign_masters, mapped from YANG variable /ptp_state/clock_foreign_masters (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_clock_foreign_masters is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clock_foreign_masters() directly.
### Response:
def _set_clock_foreign_masters(self, v, load=False):
"""
Setter method for clock_foreign_masters, mapped from YANG variable /ptp_state/clock_foreign_masters (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_clock_foreign_masters is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_clock_foreign_masters() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=clock_foreign_masters.clock_foreign_masters, is_container='container', presence=False, yang_name="clock-foreign-masters", rest_name="clock-foreign-masters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-clock-foreign-masters', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """clock_foreign_masters must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=clock_foreign_masters.clock_foreign_masters, is_container='container', presence=False, yang_name="clock-foreign-masters", rest_name="clock-foreign-masters", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-clock-foreign-masters', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)""",
})
self.__clock_foreign_masters = t
if hasattr(self, '_set'):
self._set() |
def predict_w(self, data3d, voxelsize_mm, weight, label0=0, label1=1):
"""
segmentation with weight factor
:param data3d:
:param voxelsize_mm:
:param weight:
:return:
"""
scores = self.scores(data3d, voxelsize_mm)
out = scores[label1] > (weight * scores[label0])
return out | segmentation with weight factor
:param data3d:
:param voxelsize_mm:
:param weight:
:return: | Below is the the instruction that describes the task:
### Input:
segmentation with weight factor
:param data3d:
:param voxelsize_mm:
:param weight:
:return:
### Response:
def predict_w(self, data3d, voxelsize_mm, weight, label0=0, label1=1):
"""
segmentation with weight factor
:param data3d:
:param voxelsize_mm:
:param weight:
:return:
"""
scores = self.scores(data3d, voxelsize_mm)
out = scores[label1] > (weight * scores[label0])
return out |
def transform_system(principal_vec, principal_default, other_vecs,
matrix=None):
"""Transform vectors with either ``matrix`` or based on ``principal_vec``.
The logic of this function is as follows:
- If ``matrix`` is not ``None``, transform ``principal_vec`` and
all vectors in ``other_vecs`` by ``matrix``, ignoring
``principal_default``.
- If ``matrix`` is ``None``, compute the rotation matrix from
``principal_default`` to ``principal_vec``, not including the
dilation. Apply that rotation to all vectors in ``other_vecs``.
**Note:** All vectors must have the same shape and match the shape
of ``matrix`` if given.
Parameters
----------
principal_vec : `array-like`, shape ``(ndim,)``
Vector that defines the transformation if ``matrix`` is not
provided.
principal_default : `array-like`, shape ``(ndim,)``
Default value for ``principal_vec``. The deviation from this
determines the transformation.
If ``matrix`` is given, this has no effect.
other_vecs : sequence of ``None`` or `array-like`'s with shape ``(ndim,)``
The other vectors that should be transformed. ``None`` entries
are just appended as-is.
matrix : `array-like`, shape ``(ndim, ndim)``, optional
Explicit transformation matrix to be applied to the vectors.
It is allowed to include a constant scaling but shouldn't have
strongly varying directional scaling (bad condition).
Returns
-------
transformed_vecs : tuple of `numpy.ndarray`, shape ``(ndim,)``
The transformed vectors. The first entry is (the transformed)
``principal_vec``, followed by the transformed ``other_vecs``.
Thus the length of the tuple is ``len(other_vecs) + 1``.
"""
transformed_vecs = []
principal_vec = np.asarray(principal_vec, dtype=float)
ndim = principal_vec.shape[0]
if matrix is None:
# Separate into dilation and rotation. The dilation is only used
# for comparison, not in the final matrix.
principal_default = np.asarray(principal_default, dtype=float)
pr_norm = np.linalg.norm(principal_vec)
pr_default_norm = np.linalg.norm(principal_default)
if pr_default_norm == 0.0 and pr_norm != 0.0:
raise ValueError('no transformation from {} to {}'
''.format(principal_default, principal_vec))
elif pr_norm == 0.0 and pr_default_norm != 0.0:
raise ValueError('transformation from {} to {} is singular'
''.format(principal_default, principal_vec))
elif pr_norm == 0.0 and pr_default_norm == 0.0:
dilation = 1.0
else:
dilation = (np.linalg.norm(principal_vec) /
np.linalg.norm(principal_default))
# Determine the rotation part
if np.allclose(principal_vec, dilation * principal_default):
# Dilation only
matrix = np.eye(ndim)
else:
matrix = rotation_matrix_from_to(principal_default, principal_vec)
# This one goes straight in
transformed_vecs.append(principal_vec)
else:
matrix = np.asarray(matrix, dtype=float)
if matrix.shape != (ndim, ndim):
raise ValueError('matrix shape must be {}, got {}'
''.format((ndim, ndim), matrix.shape))
# Check matrix condition
svals = np.linalg.svd(matrix, compute_uv=False)
condition = np.inf if 0.0 in svals else svals[0] / svals[-1]
if condition > 1e6:
raise np.linalg.LinAlgError(
'matrix is badly conditioned: condition number is {}'
''.format(condition))
transformed_vecs.append(matrix.dot(principal_vec))
for vec in other_vecs:
if vec is None:
transformed_vecs.append(None)
else:
transformed_vecs.append(matrix.dot(vec))
return tuple(transformed_vecs) | Transform vectors with either ``matrix`` or based on ``principal_vec``.
The logic of this function is as follows:
- If ``matrix`` is not ``None``, transform ``principal_vec`` and
all vectors in ``other_vecs`` by ``matrix``, ignoring
``principal_default``.
- If ``matrix`` is ``None``, compute the rotation matrix from
``principal_default`` to ``principal_vec``, not including the
dilation. Apply that rotation to all vectors in ``other_vecs``.
**Note:** All vectors must have the same shape and match the shape
of ``matrix`` if given.
Parameters
----------
principal_vec : `array-like`, shape ``(ndim,)``
Vector that defines the transformation if ``matrix`` is not
provided.
principal_default : `array-like`, shape ``(ndim,)``
Default value for ``principal_vec``. The deviation from this
determines the transformation.
If ``matrix`` is given, this has no effect.
other_vecs : sequence of ``None`` or `array-like`'s with shape ``(ndim,)``
The other vectors that should be transformed. ``None`` entries
are just appended as-is.
matrix : `array-like`, shape ``(ndim, ndim)``, optional
Explicit transformation matrix to be applied to the vectors.
It is allowed to include a constant scaling but shouldn't have
strongly varying directional scaling (bad condition).
Returns
-------
transformed_vecs : tuple of `numpy.ndarray`, shape ``(ndim,)``
The transformed vectors. The first entry is (the transformed)
``principal_vec``, followed by the transformed ``other_vecs``.
Thus the length of the tuple is ``len(other_vecs) + 1``. | Below is the the instruction that describes the task:
### Input:
Transform vectors with either ``matrix`` or based on ``principal_vec``.
The logic of this function is as follows:
- If ``matrix`` is not ``None``, transform ``principal_vec`` and
all vectors in ``other_vecs`` by ``matrix``, ignoring
``principal_default``.
- If ``matrix`` is ``None``, compute the rotation matrix from
``principal_default`` to ``principal_vec``, not including the
dilation. Apply that rotation to all vectors in ``other_vecs``.
**Note:** All vectors must have the same shape and match the shape
of ``matrix`` if given.
Parameters
----------
principal_vec : `array-like`, shape ``(ndim,)``
Vector that defines the transformation if ``matrix`` is not
provided.
principal_default : `array-like`, shape ``(ndim,)``
Default value for ``principal_vec``. The deviation from this
determines the transformation.
If ``matrix`` is given, this has no effect.
other_vecs : sequence of ``None`` or `array-like`'s with shape ``(ndim,)``
The other vectors that should be transformed. ``None`` entries
are just appended as-is.
matrix : `array-like`, shape ``(ndim, ndim)``, optional
Explicit transformation matrix to be applied to the vectors.
It is allowed to include a constant scaling but shouldn't have
strongly varying directional scaling (bad condition).
Returns
-------
transformed_vecs : tuple of `numpy.ndarray`, shape ``(ndim,)``
The transformed vectors. The first entry is (the transformed)
``principal_vec``, followed by the transformed ``other_vecs``.
Thus the length of the tuple is ``len(other_vecs) + 1``.
### Response:
def transform_system(principal_vec, principal_default, other_vecs,
matrix=None):
"""Transform vectors with either ``matrix`` or based on ``principal_vec``.
The logic of this function is as follows:
- If ``matrix`` is not ``None``, transform ``principal_vec`` and
all vectors in ``other_vecs`` by ``matrix``, ignoring
``principal_default``.
- If ``matrix`` is ``None``, compute the rotation matrix from
``principal_default`` to ``principal_vec``, not including the
dilation. Apply that rotation to all vectors in ``other_vecs``.
**Note:** All vectors must have the same shape and match the shape
of ``matrix`` if given.
Parameters
----------
principal_vec : `array-like`, shape ``(ndim,)``
Vector that defines the transformation if ``matrix`` is not
provided.
principal_default : `array-like`, shape ``(ndim,)``
Default value for ``principal_vec``. The deviation from this
determines the transformation.
If ``matrix`` is given, this has no effect.
other_vecs : sequence of ``None`` or `array-like`'s with shape ``(ndim,)``
The other vectors that should be transformed. ``None`` entries
are just appended as-is.
matrix : `array-like`, shape ``(ndim, ndim)``, optional
Explicit transformation matrix to be applied to the vectors.
It is allowed to include a constant scaling but shouldn't have
strongly varying directional scaling (bad condition).
Returns
-------
transformed_vecs : tuple of `numpy.ndarray`, shape ``(ndim,)``
The transformed vectors. The first entry is (the transformed)
``principal_vec``, followed by the transformed ``other_vecs``.
Thus the length of the tuple is ``len(other_vecs) + 1``.
"""
transformed_vecs = []
principal_vec = np.asarray(principal_vec, dtype=float)
ndim = principal_vec.shape[0]
if matrix is None:
# Separate into dilation and rotation. The dilation is only used
# for comparison, not in the final matrix.
principal_default = np.asarray(principal_default, dtype=float)
pr_norm = np.linalg.norm(principal_vec)
pr_default_norm = np.linalg.norm(principal_default)
if pr_default_norm == 0.0 and pr_norm != 0.0:
raise ValueError('no transformation from {} to {}'
''.format(principal_default, principal_vec))
elif pr_norm == 0.0 and pr_default_norm != 0.0:
raise ValueError('transformation from {} to {} is singular'
''.format(principal_default, principal_vec))
elif pr_norm == 0.0 and pr_default_norm == 0.0:
dilation = 1.0
else:
dilation = (np.linalg.norm(principal_vec) /
np.linalg.norm(principal_default))
# Determine the rotation part
if np.allclose(principal_vec, dilation * principal_default):
# Dilation only
matrix = np.eye(ndim)
else:
matrix = rotation_matrix_from_to(principal_default, principal_vec)
# This one goes straight in
transformed_vecs.append(principal_vec)
else:
matrix = np.asarray(matrix, dtype=float)
if matrix.shape != (ndim, ndim):
raise ValueError('matrix shape must be {}, got {}'
''.format((ndim, ndim), matrix.shape))
# Check matrix condition
svals = np.linalg.svd(matrix, compute_uv=False)
condition = np.inf if 0.0 in svals else svals[0] / svals[-1]
if condition > 1e6:
raise np.linalg.LinAlgError(
'matrix is badly conditioned: condition number is {}'
''.format(condition))
transformed_vecs.append(matrix.dot(principal_vec))
for vec in other_vecs:
if vec is None:
transformed_vecs.append(None)
else:
transformed_vecs.append(matrix.dot(vec))
return tuple(transformed_vecs) |
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None):
    """Train model on given training examples and return the list of costs after each minibatch is processed.
    Args:
        trX (list) -- Inputs
        trY (list) -- Outputs
        batch_size (int, optional) -- number of examples in a minibatch (default 64)
        n_epochs (int, optional) -- number of epochs to train for (default 1)
        len_filter (object, optional) -- object to filter training example by length (default LenFilter())
        snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)
        path (str, optional) -- prefix of path where model snapshots are saved.
            If None, no snapshots are saved (default None)
    Returns:
        list -- costs of model after processing each minibatch
    """
    # NOTE(review): batch_size is accepted but never used in this body --
    # presumably the minibatch size is fixed by self.iterator; confirm.
    if len_filter is not None:
        trX, trY = len_filter.filter(trX, trY)
    trY = standardize_targets(trY, cost=self.cost)
    n = 0.  # running count of examples seen (float so the rate math stays float)
    t = time()
    costs = []
    for e in range(n_epochs):
        epoch_costs = []
        for xmb, ymb in self.iterator.iterXY(trX, trY):
            c = self._train(xmb, ymb)
            epoch_costs.append(c)
            n += len(ymb)
            if self.verbose >= 2:
                # Rough ETA: average throughput so far vs. samples left this epoch.
                n_per_sec = n / (time() - t)
                n_left = len(trY) - n % len(trY)
                time_left = n_left/n_per_sec
                sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time_left))
                sys.stdout.flush()
        costs.extend(epoch_costs)
        # Avg cost is over the last 250 minibatches only, matching the live display.
        status = "Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds" % (e, n, np.mean(epoch_costs[-250:]), time() - t)
        if self.verbose >= 2:
            sys.stdout.write("\r"+status)
            sys.stdout.flush()
            sys.stdout.write("\n")
        elif self.verbose == 1:
            print(status)
        if path and e % snapshot_freq == 0:
            save(self, "{0}.{1}".format(path, e))
return costs | Train model on given training examples and return the list of costs after each minibatch is processed.
Args:
trX (list) -- Inputs
trY (list) -- Outputs
batch_size (int, optional) -- number of examples in a minibatch (default 64)
n_epochs (int, optional) -- number of epochs to train for (default 1)
len_filter (object, optional) -- object to filter training example by length (default LenFilter())
snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)
path (str, optional) -- prefix of path where model snapshots are saved.
If None, no snapshots are saved (default None)
Returns:
list -- costs of model after processing each minibatch | Below is the instruction that describes the task:
### Input:
Train model on given training examples and return the list of costs after each minibatch is processed.
Args:
trX (list) -- Inputs
trY (list) -- Outputs
batch_size (int, optional) -- number of examples in a minibatch (default 64)
n_epochs (int, optional) -- number of epochs to train for (default 1)
len_filter (object, optional) -- object to filter training example by length (default LenFilter())
snapshot_freq (int, optional) -- number of epochs between saving model snapshots (default 1)
path (str, optional) -- prefix of path where model snapshots are saved.
If None, no snapshots are saved (default None)
Returns:
list -- costs of model after processing each minibatch
### Response:
def fit(self, trX, trY, batch_size=64, n_epochs=1, len_filter=LenFilter(), snapshot_freq=1, path=None):
    """Run minibatch training and return the per-minibatch cost history.

    Args:
        trX (list): training inputs.
        trY (list): training targets.
        batch_size (int, optional): examples per minibatch (default 64).
        n_epochs (int, optional): number of passes over the data (default 1).
        len_filter (object, optional): filters examples by length before
            training (default LenFilter()); pass None to disable filtering.
        snapshot_freq (int, optional): epochs between model snapshots (default 1).
        path (str, optional): snapshot filename prefix; None disables snapshots.

    Returns:
        list: cost of every minibatch processed, in order.
    """
    if len_filter is not None:
        trX, trY = len_filter.filter(trX, trY)
    trY = standardize_targets(trY, cost=self.cost)

    seen = 0.  # total examples processed so far (float for rate arithmetic)
    start = time()
    costs = []
    for epoch in range(n_epochs):
        epoch_costs = []
        for xmb, ymb in self.iterator.iterXY(trX, trY):
            epoch_costs.append(self._train(xmb, ymb))
            seen += len(ymb)
            if self.verbose >= 2:
                # Live progress line: throughput so far vs. samples left this epoch.
                rate = seen / (time() - start)
                remaining = (len(trY) - seen % len(trY)) / rate
                sys.stdout.write("\rEpoch %d Seen %d samples Avg cost %0.4f Time left %d seconds" % (epoch, seen, np.mean(epoch_costs[-250:]), remaining))
                sys.stdout.flush()
        costs.extend(epoch_costs)

        # End-of-epoch summary; avg cost covers the last 250 minibatches only.
        status = "Epoch %d Seen %d samples Avg cost %0.4f Time elapsed %d seconds" % (epoch, seen, np.mean(epoch_costs[-250:]), time() - start)
        if self.verbose >= 2:
            sys.stdout.write("\r" + status)
            sys.stdout.flush()
            sys.stdout.write("\n")
        elif self.verbose == 1:
            print(status)

        if path and epoch % snapshot_freq == 0:
            save(self, "{0}.{1}".format(path, epoch))
    return costs
def open_icmp_firewall(host):
    """Temporarily open the ICMP firewall. Tricks Windows into allowing
    ICMP packets for a short period of time (~ 1 minute)"""
    # We call ping with a timeout of 1ms: will return instantly
    # NOTE(review): `host` is interpolated into a shell command (shell=True);
    # confirm callers never pass untrusted input here.
    with open(os.devnull, 'wb') as DEVNULL:
        return subprocess.Popen("ping -4 -w 1 -n 1 %s" % host,
                                shell=True,
                                stdout=DEVNULL,
stderr=DEVNULL).wait() | Temporarily open the ICMP firewall. Tricks Windows into allowing
ICMP packets for a short period of time (~ 1 minute) | Below is the instruction that describes the task:
### Input:
Temporarily open the ICMP firewall. Tricks Windows into allowing
ICMP packets for a short period of time (~ 1 minute)
### Response:
def open_icmp_firewall(host):
    """Temporarily open the ICMP firewall. Tricks Windows into allowing
    ICMP packets for a short period of time (~ 1 minute)"""
    # A single ping with a 1 ms timeout returns essentially immediately;
    # the act of launching it is what pokes the firewall open.
    command = "ping -4 -w 1 -n 1 %s" % host
    with open(os.devnull, 'wb') as devnull:
        proc = subprocess.Popen(command, shell=True, stdout=devnull, stderr=devnull)
        return proc.wait()
def normalize_jr(jr, url=None):
    """ normalize JSON reference, also fix
    implicit reference of JSON pointer.
    input:
    - #/definitions/User
    - http://test.com/swagger.json#/definitions/User
    output:
    - http://test.com/swagger.json#/definitions/User
    input:
    - some_folder/User.json
    output:
    - http://test.com/some_folder/User.json
    """
    if jr == None:  # NOTE(review): prefer `jr is None` (PEP 8); behavior unchanged
        return jr
    # Split "path#json-pointer" into its two halves; jp is None when no '#'.
    idx = jr.find('#')
    path, jp = (jr[:idx], jr[idx+1:]) if idx != -1 else (jr, None)
    if len(path) > 0:
        p = six.moves.urllib.parse.urlparse(path)
        if p.scheme == '' and url:
            p = six.moves.urllib.parse.urlparse(url)
            # it's the path of relative file
            path = six.moves.urllib.parse.urlunparse(p[:2]+('/'.join([os.path.dirname(p.path), path]),)+p[3:])
            path = derelativise_url(path)
    else:
        # No path part: the reference is relative to the containing document.
        path = url
    if path:
        return ''.join([path, '#', jp]) if jp else path
    else:
return '#' + jp | normalize JSON reference, also fix
implicit reference of JSON pointer.
input:
- #/definitions/User
- http://test.com/swagger.json#/definitions/User
output:
- http://test.com/swagger.json#/definitions/User
input:
- some_folder/User.json
output:
- http://test.com/some_folder/User.json | Below is the instruction that describes the task:
### Input:
normalize JSON reference, also fix
implicit reference of JSON pointer.
input:
- #/definitions/User
- http://test.com/swagger.json#/definitions/User
output:
- http://test.com/swagger.json#/definitions/User
input:
- some_folder/User.json
output:
- http://test.com/some_folder/User.json
### Response:
def normalize_jr(jr, url=None):
    """ normalize JSON reference, also fix
    implicit reference of JSON pointer.
    input:
    - #/definitions/User
    - http://test.com/swagger.json#/definitions/User
    output:
    - http://test.com/swagger.json#/definitions/User
    input:
    - some_folder/User.json
    output:
    - http://test.com/some_folder/User.json

    Args:
        jr (str): JSON reference, possibly relative; may be None.
        url (str): URL of the document containing the reference, used to
            resolve relative paths; may be None.

    Returns:
        str: the normalized (absolute where possible) reference, or None
        when jr is None.
    """
    # Fix: identity comparison with None (was `jr == None`, PEP 8 E711).
    if jr is None:
        return jr
    # Split "path#json-pointer"; jp is None when there is no fragment.
    idx = jr.find('#')
    path, jp = (jr[:idx], jr[idx+1:]) if idx != -1 else (jr, None)
    if len(path) > 0:
        p = six.moves.urllib.parse.urlparse(path)
        if p.scheme == '' and url:
            p = six.moves.urllib.parse.urlparse(url)
            # it's the path of relative file
            path = six.moves.urllib.parse.urlunparse(p[:2]+('/'.join([os.path.dirname(p.path), path]),)+p[3:])
            path = derelativise_url(path)
    else:
        # No path part: the reference is relative to the containing document.
        path = url
    if path:
        return ''.join([path, '#', jp]) if jp else path
    else:
        # NOTE(review): if both path and jp are empty (jr == "" with url None)
        # this raises TypeError; confirm callers never pass that combination.
        return '#' + jp
def extend(self, iterable):
    """
    Add each item from iterable to the end of the list
    """
    # Hold the lock across the whole batch so other threads never observe a
    # partially-extended list; each insert is delegated to self.append.
    # NOTE(review): assumes self.append does not re-acquire self.lock
    # (or that self.lock is re-entrant) -- confirm.
    with self.lock:
        for item in iterable:
            self.append(item) | Add each item from iterable to the end of the list | Below is the instruction that describes the task:
### Input:
Add each item from iterable to the end of the list
### Response:
def extend(self, iterable):
    """
    Append every element of ``iterable`` to the end of the list.

    The internal lock is held for the duration of the whole batch, so the
    elements are added atomically with respect to other threads.
    """
    with self.lock:
        for element in iterable:
            self.append(element)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.