code
stringlengths 64
7.01k
| docstring
stringlengths 2
15.8k
| text
stringlengths 144
19.2k
|
|---|---|---|
#vtb
def increment(version):
    """Return an incremented version string derived from *version*.

    A fully explicit version in the RELEASE_VERSION environment variable
    wins outright; otherwise the component named by RELEASE_TYPE
    (default "micro") is bumped via the VersionUtils.process_* helpers.

    NOTE(review): at least one string literal in this block appears to
    have been stripped during extraction (the ``msg =`` line), so the
    code as shown is not syntactically valid Python -- restore it from
    the original source before use.
    """
    release_version = os.environ.get("RELEASE_VERSION", None)
    if release_version is not None:
        return release_version
    # Legacy (non PEP 440) versions cannot be incremented reliably.
    if isinstance(version, LegacyVersion):
        # NOTE(review): the message literal was stripped from this line.
        msg =
        raise Exception(msg.format(version))
    release_type = os.environ.get("RELEASE_TYPE", "micro")
    # _version is the parsed tuple of a packaging Version -- presumably
    # (epoch, release, dev, pre, post, local); confirm against packaging.
    v = version._version
    epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, "!")
    pre_name, pre = VersionUtils.get_version_number(v, 3, None, "pre")
    post_name, post = VersionUtils.get_version_number(v, 4, None, "post")
    dev_name, dev = VersionUtils.get_version_number(v, 2, None, "dev")
    _, major = VersionUtils.get_version_number(v[1], 0, 0)
    _, minor = VersionUtils.get_version_number(v[1], 1, None)
    _, micro = VersionUtils.get_version_number(v[1], 2, None)
    # Exactly one of the following branches applies per release type.
    if release_type == "pre":
        micro, post, pre = VersionUtils.process_pre(micro, post, pre)
    if release_type == "post":
        dev, post = VersionUtils.process_post(dev, post)
    if release_type == "dev":
        dev = VersionUtils.process_dev(dev)
    if release_type == "micro":
        dev, micro, minor, post, pre = VersionUtils.process_micro(
            dev, micro, minor, post, pre
        )
    if release_type == "minor":
        dev, micro, minor, post, pre = VersionUtils.process_minor(
            dev, micro, minor, post, pre
        )
    if release_type == "major":
        dev, major, micro, minor, post, pre = VersionUtils.process_major(
            dev, major, micro, minor, post, pre
        )
    if release_type == "epoch":
        dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(
            dev, epoch, major, micro, minor, post, pre
        )
    # v[5] holds the local version segments; join them back if present.
    local = "".join(v[5] or []) or None
    version_list = [major, minor, micro]
    # For dev/post releases keep any extra release-tuple components.
    if release_type not in ["epoch", "major", "minor", "micro", "pre"]:
        version_list += list(v[1][3:])
    version_string = ".".join([str(x) for x in version_list if x or x == 0])
    if epoch:
        version_string = str(epoch) + epoch_name + version_string
    if pre is not None:
        version_string = VersionUtils.calc_pre_version_string(
            pre, pre_name, version_string
        )
    if post is not None:
        version_string += "." + post_name + str(post)
    if dev is not None:
        version_string += "." + dev_name + str(dev)
    if local is not None:
        version_string += "." + str(local)
    return version_string
|
Return an incremented version string.
|
### Input:
Return an incremented version string.
### Response:
#vtb
def increment(version):
release_version = os.environ.get("RELEASE_VERSION", None)
if release_version is not None:
return release_version
if isinstance(version, LegacyVersion):
msg =
raise Exception(msg.format(version))
release_type = os.environ.get("RELEASE_TYPE", "micro")
v = version._version
epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, "!")
pre_name, pre = VersionUtils.get_version_number(v, 3, None, "pre")
post_name, post = VersionUtils.get_version_number(v, 4, None, "post")
dev_name, dev = VersionUtils.get_version_number(v, 2, None, "dev")
_, major = VersionUtils.get_version_number(v[1], 0, 0)
_, minor = VersionUtils.get_version_number(v[1], 1, None)
_, micro = VersionUtils.get_version_number(v[1], 2, None)
if release_type == "pre":
micro, post, pre = VersionUtils.process_pre(micro, post, pre)
if release_type == "post":
dev, post = VersionUtils.process_post(dev, post)
if release_type == "dev":
dev = VersionUtils.process_dev(dev)
if release_type == "micro":
dev, micro, minor, post, pre = VersionUtils.process_micro(
dev, micro, minor, post, pre
)
if release_type == "minor":
dev, micro, minor, post, pre = VersionUtils.process_minor(
dev, micro, minor, post, pre
)
if release_type == "major":
dev, major, micro, minor, post, pre = VersionUtils.process_major(
dev, major, micro, minor, post, pre
)
if release_type == "epoch":
dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(
dev, epoch, major, micro, minor, post, pre
)
local = "".join(v[5] or []) or None
version_list = [major, minor, micro]
if release_type not in ["epoch", "major", "minor", "micro", "pre"]:
version_list += list(v[1][3:])
version_string = ".".join([str(x) for x in version_list if x or x == 0])
if epoch:
version_string = str(epoch) + epoch_name + version_string
if pre is not None:
version_string = VersionUtils.calc_pre_version_string(
pre, pre_name, version_string
)
if post is not None:
version_string += "." + post_name + str(post)
if dev is not None:
version_string += "." + dev_name + str(dev)
if local is not None:
version_string += "." + str(local)
return version_string
|
#vtb
def convert_batchnorm(node, **kwargs):
    """Map MXNet's BatchNorm operator attributes to ONNX's
    BatchNormalization operator and return the created node in a
    single-element list."""
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Mirror the MXNet defaults when the attribute is absent.
    mom = float(attrs.get("momentum", 0.9))
    epsilon = float(attrs.get("eps", 0.001))

    return [
        onnx.helper.make_node(
            "BatchNormalization",
            input_nodes,
            [name],
            name=name,
            epsilon=epsilon,
            momentum=mom,
            spatial=0,
        )
    ]
|
Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator
and return the created node.
|
### Input:
Map MXNet's BatchNorm operator attributes to onnx's BatchNormalization operator
and return the created node.
### Response:
#vtb
def convert_batchnorm(node, **kwargs):
name, input_nodes, attrs = get_inputs(node, kwargs)
momentum = float(attrs.get("momentum", 0.9))
eps = float(attrs.get("eps", 0.001))
bn_node = onnx.helper.make_node(
"BatchNormalization",
input_nodes,
[name],
name=name,
epsilon=eps,
momentum=momentum,
spatial=0
)
return [bn_node]
|
#vtb
def discover_all_plugins(self):
    """Load all plugins advertised via setuptools entry points and call
    each loaded module's ``setup(self)`` registration hook.

    NOTE(review): ``pkg_resources.iter_entry_points`` requires a ``group``
    argument; calling it with no arguments as written raises a TypeError.
    The entry-point group name (presumably a dgit-specific group) appears
    to have been stripped -- confirm against the original source.
    """
    for v in pkg_resources.iter_entry_points():
        m = v.load()
        # Each plugin module registers itself with this manager.
        m.setup(self)
|
Load all plugins from dgit extension
|
### Input:
Load all plugins from dgit extension
### Response:
#vtb
def discover_all_plugins(self):
for v in pkg_resources.iter_entry_points():
m = v.load()
m.setup(self)
|
#vtb
def align_unaligned_seqs(seqs, moltype=DNA, params=None):
    """Return an Alignment object from *seqs* using the Muscle controller.

    seqs: SequenceCollection object, or data that can be used to build one.
    moltype: a MolType object (DNA, RNA, or PROTEIN).
    params: dict of parameters to pass in to the Muscle app controller.

    NOTE(review): several string literals appear to have been stripped
    during extraction (the params.update key, the InputHandler name, and
    the result key), so this code is not syntactically valid as shown --
    restore from the original source before use.
    """
    if not params:
        params = {}
    # Map sequence names to integer keys so Muscle sees safe identifiers.
    seq_collection = SequenceCollection(seqs,MolType=moltype)
    int_map, int_keys = seq_collection.getIntMap()
    int_map = SequenceCollection(int_map,MolType=moltype)
    # NOTE(review): output-file parameter key literal stripped here.
    params.update({:get_tmp_filename()})
    app = Muscle(InputHandler=,\
                 params=params, WorkingDir=tempfile.gettempdir())
    res = app(int_map.toFasta())
    alignment = dict(parse_fasta(res[]))
    # Translate the integer keys back to the original sequence names.
    new_alignment = {}
    for k,v in alignment.items():
        new_alignment[int_keys[k]]=v
    new_alignment = Alignment(new_alignment,MolType=moltype)
    # Remove temporary files created by the app controller.
    res.cleanUp()
    del(seq_collection,int_map,int_keys,app,res,alignment,params)
    return new_alignment
|
Returns an Alignment object from seqs.
seqs: SequenceCollection object, or data that can be used to build one.
moltype: a MolType object. DNA, RNA, or PROTEIN.
params: dict of parameters to pass in to the Muscle app controller.
Result will be an Alignment object.
|
### Input:
Returns an Alignment object from seqs.
seqs: SequenceCollection object, or data that can be used to build one.
moltype: a MolType object. DNA, RNA, or PROTEIN.
params: dict of parameters to pass in to the Muscle app controller.
Result will be an Alignment object.
### Response:
#vtb
def align_unaligned_seqs(seqs, moltype=DNA, params=None):
if not params:
params = {}
seq_collection = SequenceCollection(seqs,MolType=moltype)
int_map, int_keys = seq_collection.getIntMap()
int_map = SequenceCollection(int_map,MolType=moltype)
params.update({:get_tmp_filename()})
app = Muscle(InputHandler=,\
params=params, WorkingDir=tempfile.gettempdir())
res = app(int_map.toFasta())
alignment = dict(parse_fasta(res[]))
new_alignment = {}
for k,v in alignment.items():
new_alignment[int_keys[k]]=v
new_alignment = Alignment(new_alignment,MolType=moltype)
res.cleanUp()
del(seq_collection,int_map,int_keys,app,res,alignment,params)
return new_alignment
|
#vtb
def dilated_attention_1d(x,
                         hparams,
                         attention_type="masked_dilated_1d",
                         q_padding="VALID",
                         kv_padding="VALID",
                         gap_size=2):
    """Dilated 1d self attention.

    Flattens a 4-D input to 3-D if necessary, applies masked dilated
    multihead self-attention, then restores the original shape.

    Args:
      x: input tensor, 3-D or 4-D.
      hparams: hyperparameters supplying channel sizes, head count,
        dropout and block geometry.
      attention_type: attention variant passed to multihead_attention.
      q_padding: padding mode for the query transform.
      kv_padding: padding mode for the key/value transforms.
      gap_size: dilation gap between attended memory blocks.

    Returns:
      A tensor with hparams.hidden_size channels, same leading shape as x.
    """
    x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
    with tf.variable_scope("masked_dilated_1d"):
        y = common_attention.multihead_attention(
            x,
            None,  # no memory antecedent => self-attention
            None,  # presumably the bias argument -- confirm signature
            hparams.attention_key_channels or hparams.hidden_size,
            hparams.attention_value_channels or hparams.hidden_size,
            hparams.hidden_size,
            hparams.num_heads,
            hparams.attention_dropout,
            attention_type=attention_type,
            block_width=hparams.block_width,
            block_length=hparams.block_length,
            q_padding=q_padding,
            kv_padding=kv_padding,
            q_filter_width=hparams.q_filter_width,
            kv_filter_width=hparams.kv_filter_width,
            gap_size=gap_size,
            num_memory_blocks=hparams.num_memory_blocks,
            name="self_attention")
    if is_4d:
        # Restore the original 4-D shape and re-assert the channel dim.
        y = tf.reshape(y, x_shape)
        y.set_shape([None, None, None, hparams.hidden_size])
    return y
|
Dilated 1d self attention.
|
### Input:
Dilated 1d self attention.
### Response:
#vtb
def dilated_attention_1d(x,
hparams,
attention_type="masked_dilated_1d",
q_padding="VALID",
kv_padding="VALID",
gap_size=2):
x, x_shape, is_4d = maybe_reshape_4d_to_3d(x)
with tf.variable_scope("masked_dilated_1d"):
y = common_attention.multihead_attention(
x,
None,
None,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=attention_type,
block_width=hparams.block_width,
block_length=hparams.block_length,
q_padding=q_padding,
kv_padding=kv_padding,
q_filter_width=hparams.q_filter_width,
kv_filter_width=hparams.kv_filter_width,
gap_size=gap_size,
num_memory_blocks=hparams.num_memory_blocks,
name="self_attention")
if is_4d:
y = tf.reshape(y, x_shape)
y.set_shape([None, None, None, hparams.hidden_size])
return y
|
#vtb
def hasDependencyRecursively(self, name, target=None, test_dependencies=False):
    """Return True if *name* occurs anywhere in this module's recursive
    dependency graph, optionally restricted to *target* and optionally
    including test dependencies.

    Note: if recursive dependencies are not installed this may yield a
    false negative, since uninstalled branches cannot be inspected.
    """
    found = self.getDependenciesRecursive(
        target=target,
        test=test_dependencies,
    )
    return name in found
|
Check if this module, or any of its dependencies, have a
dependencies with the specified name in their dependencies, or in
their targetDependencies corresponding to the specified target.
Note that if recursive dependencies are not installed, this test
may return a false-negative.
|
### Input:
Check if this module, or any of its dependencies, have a
dependencies with the specified name in their dependencies, or in
their targetDependencies corresponding to the specified target.
Note that if recursive dependencies are not installed, this test
may return a false-negative.
### Response:
#vtb
def hasDependencyRecursively(self, name, target=None, test_dependencies=False):
dependencies = self.getDependenciesRecursive(
target = target,
test = test_dependencies
)
return (name in dependencies)
|
#vtb
def raw_conf_process_pyramid(raw_conf):
    """Load the process pyramid of a raw configuration.

    Parameters
    ----------
    raw_conf : dict
        Raw mapchete configuration as dictionary.

    Returns
    -------
    BufferedTilePyramid
    """
    pyramid_conf = raw_conf["pyramid"]
    return BufferedTilePyramid(
        pyramid_conf["grid"],
        metatiling=pyramid_conf.get("metatiling", 1),
        pixelbuffer=pyramid_conf.get("pixelbuffer", 0),
    )
|
Loads the process pyramid of a raw configuration.
Parameters
----------
raw_conf : dict
Raw mapchete configuration as dictionary.
Returns
-------
BufferedTilePyramid
|
### Input:
Loads the process pyramid of a raw configuration.
Parameters
----------
raw_conf : dict
Raw mapchete configuration as dictionary.
Returns
-------
BufferedTilePyramid
### Response:
#vtb
def raw_conf_process_pyramid(raw_conf):
return BufferedTilePyramid(
raw_conf["pyramid"]["grid"],
metatiling=raw_conf["pyramid"].get("metatiling", 1),
pixelbuffer=raw_conf["pyramid"].get("pixelbuffer", 0)
)
|
#vtb
def tag(collector, image, artifact, **kwargs):
    """Tag a locally cached docker image as *artifact* and push it.

    Raises BadOption when no artifact tag or image_index is supplied, or
    when the source tag is not present in the local docker cache.
    """
    if artifact in (None, "", NotSpecified):
        raise BadOption("Please specify a tag using the artifact option")
    if image.image_index in (None, "", NotSpecified):
        raise BadOption("Please specify an image with an image_index option")
    # Source tag: the configured harpoon tag, or ":latest" by default.
    tag = image.image_name
    if collector.configuration["harpoon"].tag is not NotSpecified:
        tag = "{0}:{1}".format(tag, collector.configuration["harpoon"].tag)
    else:
        tag = "{0}:latest".format(tag)
    images = image.harpoon.docker_api.images()
    # Dangling images can report RepoTags = None; skip those entries.
    current_tags = chain.from_iterable(image_conf["RepoTags"] for image_conf in images if image_conf["RepoTags"] is not None)
    if tag not in current_tags:
        raise BadOption("Please build or pull the image down to your local cache before tagging it")
    # The guard above guarantees a match, so image_id is always bound here.
    for image_conf in images:
        if image_conf["RepoTags"] is not None:
            if tag in image_conf["RepoTags"]:
                image_id = image_conf["Id"]
                break
    log.info("Tagging {0} ({1}) as {2}".format(image_id, image.image_name, artifact))
    image.harpoon.docker_api.tag(image_id, repository=image.image_name, tag=artifact, force=True)
    image.tag = artifact
    # Push the freshly tagged image to the registry.
    Syncer().push(image)
|
Tag an image!
|
### Input:
Tag an image!
### Response:
#vtb
def tag(collector, image, artifact, **kwargs):
if artifact in (None, "", NotSpecified):
raise BadOption("Please specify a tag using the artifact option")
if image.image_index in (None, "", NotSpecified):
raise BadOption("Please specify an image with an image_index option")
tag = image.image_name
if collector.configuration["harpoon"].tag is not NotSpecified:
tag = "{0}:{1}".format(tag, collector.configuration["harpoon"].tag)
else:
tag = "{0}:latest".format(tag)
images = image.harpoon.docker_api.images()
current_tags = chain.from_iterable(image_conf["RepoTags"] for image_conf in images if image_conf["RepoTags"] is not None)
if tag not in current_tags:
raise BadOption("Please build or pull the image down to your local cache before tagging it")
for image_conf in images:
if image_conf["RepoTags"] is not None:
if tag in image_conf["RepoTags"]:
image_id = image_conf["Id"]
break
log.info("Tagging {0} ({1}) as {2}".format(image_id, image.image_name, artifact))
image.harpoon.docker_api.tag(image_id, repository=image.image_name, tag=artifact, force=True)
image.tag = artifact
Syncer().push(image)
|
#vtb
def get(self, name, default=None):
    """Return the value of the requested parameter, or *default* if unset.

    Accessing a parameter also records it as processed.
    """
    result = self.parameters.get(name)
    self._processed_parameters.append(name)
    if result is None:
        result = default
    return result
|
Return the value of the requested parameter or `default` if None.
|
### Input:
Return the value of the requested parameter or `default` if None.
### Response:
#vtb
def get(self, name, default=None):
value = self.parameters.get(name)
self._processed_parameters.append(name)
if value is None:
return default
return value
|
#vtb
def _zeropad(sig, N, axis=0):
sig = np.moveaxis(sig, axis, 0)
out = np.zeros((sig.shape[0] + N,) + sig.shape[1:])
out[:sig.shape[0], ...] = sig
out = np.moveaxis(out, 0, axis)
return out
|
pads with N zeros at the end of the signal, along given axis
|
### Input:
pads with N zeros at the end of the signal, along given axis
### Response:
#vtb
def _zeropad(sig, N, axis=0):
sig = np.moveaxis(sig, axis, 0)
out = np.zeros((sig.shape[0] + N,) + sig.shape[1:])
out[:sig.shape[0], ...] = sig
out = np.moveaxis(out, 0, axis)
return out
|
#vtb
def instantiate(repo, validator_name=None, filename=None, rulesfiles=None):
    """Instantiate the validation specification for *repo*.

    Builds a mapping of validator name -> configuration, resolving file
    and rules-file patterns into concrete matching files.

    NOTE(review): the dictionary-key string literals throughout this
    block appear to have been stripped during extraction (e.g.
    ``validators[v][] = ...`` and ``if not in validators[v]``), so the
    code as shown is not syntactically valid Python -- restore from the
    original source before use.
    """
    default_validators = repo.options.get(, {})
    validators = {}
    if validator_name is not None:
        # Use the named validator if configured, else a blank skeleton.
        if validator_name in default_validators:
            validators = {
                validator_name : default_validators[validator_name]
            }
        else:
            validators = {
                validator_name : {
                    : [],
                    : {},
                    : []
                }
            }
    else:
        validators = default_validators
    # Resolve the file patterns for each validator.
    if filename is not None:
        matching_files = repo.find_matching_files([filename])
        if len(matching_files) == 0:
            print("Filename could not be found", filename)
            raise Exception("Invalid filename pattern")
        for v in validators:
            validators[v][] = matching_files
    else:
        for v in validators:
            if not in validators[v]:
                validators[v][] = []
            elif len(validators[v][]) > 0:
                matching_files = repo.find_matching_files(validators[v][])
                validators[v][] = matching_files
    # Resolve the rules-file patterns the same way.
    if rulesfiles is not None:
        matching_files = repo.find_matching_files([rulesfiles])
        if len(matching_files) == 0:
            print("Could not find matching rules files ({}) for {}".format(rulesfiles,v))
            raise Exception("Invalid rules")
        for v in validators:
            validators[v][] = matching_files
    else:
        for v in validators:
            if not in validators[v]:
                validators[v][] = []
            else:
                rulesfiles = validators[v][]
                matching_files = repo.find_matching_files(rulesfiles)
                validators[v][] = matching_files
    return validators
|
Instantiate the validation specification
|
### Input:
Instantiate the validation specification
### Response:
#vtb
def instantiate(repo, validator_name=None, filename=None, rulesfiles=None):
default_validators = repo.options.get(, {})
validators = {}
if validator_name is not None:
if validator_name in default_validators:
validators = {
validator_name : default_validators[validator_name]
}
else:
validators = {
validator_name : {
: [],
: {},
: []
}
}
else:
validators = default_validators
if filename is not None:
matching_files = repo.find_matching_files([filename])
if len(matching_files) == 0:
print("Filename could not be found", filename)
raise Exception("Invalid filename pattern")
for v in validators:
validators[v][] = matching_files
else:
for v in validators:
if not in validators[v]:
validators[v][] = []
elif len(validators[v][]) > 0:
matching_files = repo.find_matching_files(validators[v][])
validators[v][] = matching_files
if rulesfiles is not None:
matching_files = repo.find_matching_files([rulesfiles])
if len(matching_files) == 0:
print("Could not find matching rules files ({}) for {}".format(rulesfiles,v))
raise Exception("Invalid rules")
for v in validators:
validators[v][] = matching_files
else:
for v in validators:
if not in validators[v]:
validators[v][] = []
else:
rulesfiles = validators[v][]
matching_files = repo.find_matching_files(rulesfiles)
validators[v][] = matching_files
return validators
|
#vtb
def erosion(mapfile, dilated):
    """Erode the previously dilated polygon and restyle the map layers.

    We continue to work with the modified Mapfile; to start from scratch
    we could simply reread it.

    NOTE(review): the two trailing style assignments end in unterminated
    strings -- the colour literals (presumably "#rrggbb" hex values,
    whose leading '#' caused them to be stripped) must be restored from
    the original source.
    """
    # Hide the line layer for this rendering.
    ll = mappyfile.find(mapfile["layers"], "name", "line")
    ll["status"] = "OFF"
    pl = mappyfile.find(mapfile["layers"], "name", "polygon")
    # Clone the polygon layer so the eroded shape gets its own layer.
    pl2 = deepcopy(pl)
    pl2["name"] = "newpolygon"
    mapfile["layers"].append(pl2)
    # A negative buffer erodes the previously dilated geometry.
    dilated = dilated.buffer(-0.3)
    pl2["features"][0]["wkt"] = dilated.wkt
    style = pl["classes"][0]["styles"][0]
    style["color"] = "
    style["outlinecolor"] = "
|
We will continue to work with the modified Mapfile
If we wanted to start from scratch we could simply reread it
|
### Input:
We will continue to work with the modified Mapfile
If we wanted to start from scratch we could simply reread it
### Response:
#vtb
def erosion(mapfile, dilated):
ll = mappyfile.find(mapfile["layers"], "name", "line")
ll["status"] = "OFF"
pl = mappyfile.find(mapfile["layers"], "name", "polygon")
pl2 = deepcopy(pl)
pl2["name"] = "newpolygon"
mapfile["layers"].append(pl2)
dilated = dilated.buffer(-0.3)
pl2["features"][0]["wkt"] = dilated.wkt
style = pl["classes"][0]["styles"][0]
style["color"] = "
style["outlinecolor"] = "
|
#vtb
def set_emission_scenario_setup(self, scenario, config_dict):
    """Set the emissions flags correctly.

    Parameters
    ----------
    scenario : :obj:`pymagicc.io.MAGICCData`
        Scenario to run.
    config_dict : dict
        Dictionary with current input configurations which is to be
        validated and updated where necessary.

    Returns
    -------
    dict
        Updated configuration
    """
    # Persist the scenario, then point the configuration at the file.
    self.write(scenario, self._scen_file_name)
    config_dict["file_emissionscenario"] = self._scen_file_name
    return self._fix_any_backwards_emissions_scen_key_in_config(config_dict)
|
Set the emissions flags correctly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run.
config_dict : dict
Dictionary with current input configurations which is to be validated and
updated where necessary.
Returns
-------
dict
Updated configuration
|
### Input:
Set the emissions flags correctly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run.
config_dict : dict
Dictionary with current input configurations which is to be validated and
updated where necessary.
Returns
-------
dict
Updated configuration
### Response:
#vtb
def set_emission_scenario_setup(self, scenario, config_dict):
self.write(scenario, self._scen_file_name)
config_dict["file_emissionscenario"] = self._scen_file_name
config_dict = self._fix_any_backwards_emissions_scen_key_in_config(config_dict)
return config_dict
|
#vtb
def _make_child_iterator(node, with_links, current_depth=0):
cdp1 = current_depth + 1
if with_links:
iterator = ((cdp1, x[0], x[1]) for x in node._children.items())
else:
leaves = ((cdp1, x[0], x[1]) for x in node._leaves.items())
groups = ((cdp1, y[0], y[1]) for y in node._groups.items())
iterator = itools.chain(groups, leaves)
return iterator
|
Returns an iterator over a node's children.
In case of using a trajectory as a run (setting 'v_crun') some sub branches
that do not belong to the run are blinded out.
|
### Input:
Returns an iterator over a node's children.
In case of using a trajectory as a run (setting 'v_crun') some sub branches
that do not belong to the run are blinded out.
### Response:
#vtb
def _make_child_iterator(node, with_links, current_depth=0):
cdp1 = current_depth + 1
if with_links:
iterator = ((cdp1, x[0], x[1]) for x in node._children.items())
else:
leaves = ((cdp1, x[0], x[1]) for x in node._leaves.items())
groups = ((cdp1, y[0], y[1]) for y in node._groups.items())
iterator = itools.chain(groups, leaves)
return iterator
|
#vtb
def date_to_um_date(date):
    """Convert a midnight date object to the UM list form
    [year, month, day, 0, 0, 0].

    The time-of-day fields must already be zero; this is guarded by the
    assert (matching the original contract).
    """
    assert date.hour == 0 and date.minute == 0 and date.second == 0
    return [date.year, date.month, date.day] + [0, 0, 0]
|
Convert a date object to 'year, month, day, hour, minute, second.'
|
### Input:
Convert a date object to 'year, month, day, hour, minute, second.'
### Response:
#vtb
def date_to_um_date(date):
assert date.hour == 0 and date.minute == 0 and date.second == 0
return [date.year, date.month, date.day, 0, 0, 0]
|
#vtb
def _shift2boolean(self,
q_mesh_shift,
is_gamma_center=False,
tolerance=1e-5):
if q_mesh_shift is None:
shift = np.zeros(3, dtype=)
else:
shift = np.array(q_mesh_shift, dtype=)
diffby2 = np.abs(shift * 2 - np.rint(shift * 2))
if (diffby2 < 0.01).all():
diff = np.abs(shift - np.rint(shift))
if is_gamma_center:
is_shift = list(diff > 0.1)
else:
is_shift = list(np.logical_xor((diff > 0.1),
(self._mesh % 2 == 0)) * 1)
else:
is_shift = None
return is_shift
|
Tolerance is used to judge zero/half gird shift.
This value is not necessary to be changed usually.
|
### Input:
Tolerance is used to judge zero/half gird shift.
This value is not necessary to be changed usually.
### Response:
#vtb
def _shift2boolean(self,
q_mesh_shift,
is_gamma_center=False,
tolerance=1e-5):
if q_mesh_shift is None:
shift = np.zeros(3, dtype=)
else:
shift = np.array(q_mesh_shift, dtype=)
diffby2 = np.abs(shift * 2 - np.rint(shift * 2))
if (diffby2 < 0.01).all():
diff = np.abs(shift - np.rint(shift))
if is_gamma_center:
is_shift = list(diff > 0.1)
else:
is_shift = list(np.logical_xor((diff > 0.1),
(self._mesh % 2 == 0)) * 1)
else:
is_shift = None
return is_shift
|
#vtb
def next(self):
    """Return the next input from this input reader, a record.

    Lazily opens the current GCS file handle and a RecordsReader over
    it; on EOFError the handles are reset and the loop advances to the
    next file.

    Returns:
      The next input from this input reader in the form of a record read
      from an LevelDB file.

    Raises:
      StopIteration: The ordered set of records has been exhausted
        (propagated from the superclass when no files remain).
    """
    while True:
        if not hasattr(self, "_cur_handle") or self._cur_handle is None:
            # Advance to the next file; raises StopIteration when done.
            self._cur_handle = super(GCSRecordInputReader, self).next()
        if not hasattr(self, "_record_reader") or self._record_reader is None:
            self._record_reader = records.RecordsReader(self._cur_handle)
        try:
            start_time = time.time()
            content = self._record_reader.read()
            self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
            # Bug fix: scale elapsed seconds to msec *before* truncating.
            # The original did int(elapsed) * 1000, which records 0 for
            # every sub-second read.
            self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
                                 int((time.time() - start_time) * 1000))
            return content
        except EOFError:
            # Current file exhausted -- reset and open the next one.
            self._cur_handle = None
            self._record_reader = None
|
Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
an LevelDB file.
Raises:
StopIteration: The ordered set records has been exhausted.
|
### Input:
Returns the next input from this input reader, a record.
Returns:
The next input from this input reader in the form of a record read from
an LevelDB file.
Raises:
StopIteration: The ordered set records has been exhausted.
### Response:
#vtb
def next(self):
while True:
if not hasattr(self, "_cur_handle") or self._cur_handle is None:
self._cur_handle = super(GCSRecordInputReader, self).next()
if not hasattr(self, "_record_reader") or self._record_reader is None:
self._record_reader = records.RecordsReader(self._cur_handle)
try:
start_time = time.time()
content = self._record_reader.read()
self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
int(time.time() - start_time) * 1000)
return content
except EOFError:
self._cur_handle = None
self._record_reader = None
|
#vtb
def rpoplpush(self, src, dst):
    """RPOP a value off of the ``src`` list and atomically LPUSH it
    on to the ``dst`` list.

    Returns a Future that resolves to the decoded moved value once the
    pipeline executes.
    """
    with self.pipe as pipe:
        f = Future()
        res = pipe.rpoplpush(self.redis_key(src), self.redis_key(dst))
        def cb():
            # Decode the raw redis reply after the pipeline has run.
            f.set(self.valueparse.decode(res.result))
        pipe.on_execute(cb)
        return f
|
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
|
### Input:
RPOP a value off of the ``src`` list and atomically LPUSH it
on to the ``dst`` list. Returns the value.
### Response:
#vtb
def rpoplpush(self, src, dst):
with self.pipe as pipe:
f = Future()
res = pipe.rpoplpush(self.redis_key(src), self.redis_key(dst))
def cb():
f.set(self.valueparse.decode(res.result))
pipe.on_execute(cb)
return f
|
#vtb
def load_srm(filename):
    """Load a Project from an ``.srm`` file.

    :param filename: the name of the file from which to load
    :rtype: :py:class:`pylsdj.Project`

    NOTE(review): the body that actually reads the file appears to have
    been stripped -- ``size_in_blocks`` and ``raw_data`` are referenced
    but never defined, and ``filename`` is never read.  Restore from the
    original source before use.
    """
    name = "SRMLOAD"
    version = 0
    return Project(name, version, size_in_blocks, raw_data)
|
Load a Project from an ``.srm`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project`
|
### Input:
Load a Project from an ``.srm`` file.
:param filename: the name of the file from which to load
:rtype: :py:class:`pylsdj.Project`
### Response:
#vtb
def load_srm(filename):
name = "SRMLOAD"
version = 0
return Project(name, version, size_in_blocks, raw_data)
|
#vtb
def equiv(self, other):
    """Return True if *other* is an equivalent weighting.

    Returns
    -------
    equivalent : bool
        ``True`` if ``other`` is a `Weighting` instance with the same
        exponent that yields the same result as this weighting for any
        input, ``False`` otherwise.  Checked by entry-wise comparison of
        arrays/constants.
    """
    if self == other:
        return True
    # Short-circuit keeps us from touching other.exponent on non-Weightings.
    comparable = isinstance(other, Weighting) and self.exponent == other.exponent
    if not comparable:
        return False
    if isinstance(other, MatrixWeighting):
        # Matrix weightings implement the more general comparison.
        return other.equiv(self)
    if isinstance(other, ConstWeighting):
        return np.array_equiv(self.array, other.const)
    return np.array_equal(self.array, other.array)
|
Return True if other is an equivalent weighting.
Returns
-------
equivalent : bool
``True`` if ``other`` is a `Weighting` instance with the same
`Weighting.impl`, which yields the same result as this
weighting for any input, ``False`` otherwise. This is checked
by entry-wise comparison of arrays/constants.
|
### Input:
Return True if other is an equivalent weighting.
Returns
-------
equivalent : bool
``True`` if ``other`` is a `Weighting` instance with the same
`Weighting.impl`, which yields the same result as this
weighting for any input, ``False`` otherwise. This is checked
by entry-wise comparison of arrays/constants.
### Response:
#vtb
def equiv(self, other):
if self == other:
return True
elif (not isinstance(other, Weighting) or
self.exponent != other.exponent):
return False
elif isinstance(other, MatrixWeighting):
return other.equiv(self)
elif isinstance(other, ConstWeighting):
return np.array_equiv(self.array, other.const)
else:
return np.array_equal(self.array, other.array)
|
#vtb
def __value_compare(self, target):
if self.expectation == "__ANY__":
return True
elif self.expectation == "__DEFINED__":
return True if target is not None else False
elif self.expectation == "__TYPE__":
return True if type(target) == self.target_type else False
elif self.expectation == "__INSTANCE__":
return True if isinstance(target, self.target_type.__class__) else False
else:
return True if target == self.expectation else False
|
Comparing result based on expectation if arg_type is "VALUE"
Args: Anything
Return: Boolean
|
### Input:
Comparing result based on expectation if arg_type is "VALUE"
Args: Anything
Return: Boolean
### Response:
#vtb
def __value_compare(self, target):
if self.expectation == "__ANY__":
return True
elif self.expectation == "__DEFINED__":
return True if target is not None else False
elif self.expectation == "__TYPE__":
return True if type(target) == self.target_type else False
elif self.expectation == "__INSTANCE__":
return True if isinstance(target, self.target_type.__class__) else False
else:
return True if target == self.expectation else False
|
#vtb
def is_active(self):
    """Determine whether this plugin is active.

    Active iff a gRPC port is configured, an event multiplexer exists,
    and it holds debugger-plugin content for at least one run.

    Returns:
      A boolean. Whether this plugin is active.
    """
    if self._grpc_port is None:
        return False
    if not self._event_multiplexer:
        return False
    return bool(self._event_multiplexer.PluginRunToTagToContent(
        constants.DEBUGGER_PLUGIN_NAME))
|
Determines whether this plugin is active.
This plugin is active if any health pills information is present for any
run.
Returns:
A boolean. Whether this plugin is active.
|
### Input:
Determines whether this plugin is active.
This plugin is active if any health pills information is present for any
run.
Returns:
A boolean. Whether this plugin is active.
### Response:
#vtb
def is_active(self):
return bool(
self._grpc_port is not None and
self._event_multiplexer and
self._event_multiplexer.PluginRunToTagToContent(
constants.DEBUGGER_PLUGIN_NAME))
|
#vtb
def writexlsx(self, path, sheetname="default"):
    """Write this table to an .xlsx workbook at *path*.

    The data goes onto a single sheet named *sheetname*, with the field
    names as the header row.  To write one workbook with several
    DataTables on separate sheets, use the `excel` helper instead.
    """
    workbook = ExcelRW.UnicodeWriter(path)
    workbook.set_active_sheet(sheetname)
    # Header row first, then every data row of the table itself.
    workbook.writerow(self.fields)
    workbook.writerows(self)
    workbook.save()
|
Writes this table to an .xlsx file at the specified path.
If you'd like to specify a sheetname, you may do so.
If you'd like to write one workbook with different DataTables
for each sheet, import the `excel` function from acrylic. You
can see that code in `utils.py`.
Note that the outgoing file is an .xlsx file, so it'd make sense to
name that way.
|
### Input:
Writes this table to an .xlsx file at the specified path.
If you'd like to specify a sheetname, you may do so.
If you'd like to write one workbook with different DataTables
for each sheet, import the `excel` function from acrylic. You
can see that code in `utils.py`.
Note that the outgoing file is an .xlsx file, so it'd make sense to
name that way.
### Response:
#vtb
def writexlsx(self, path, sheetname="default"):
writer = ExcelRW.UnicodeWriter(path)
writer.set_active_sheet(sheetname)
writer.writerow(self.fields)
writer.writerows(self)
writer.save()
|
#vtb
def process_bind_param(self, value, dialect):
    """Collapse a list of usage-mask enums into the integer bitmask that
    is stored in the database.

    Args:
        value: list of enums contributing to the usage mask.
        dialect: SQL dialect (unused).

    Returns:
        int: the combined bitmask.
    """
    mask = 0
    for flag in value:
        mask |= flag.value
    return mask
|
Returns the integer value of the usage mask bitmask. This value is
stored in the database.
Args:
value(list<enums.CryptographicUsageMask>): list of enums in the
usage mask
dialect(string): SQL dialect
|
### Input:
Returns the integer value of the usage mask bitmask. This value is
stored in the database.
Args:
value(list<enums.CryptographicUsageMask>): list of enums in the
usage mask
dialect(string): SQL dialect
### Response:
#vtb
def process_bind_param(self, value, dialect):
bitmask = 0x00
for e in value:
bitmask = bitmask | e.value
return bitmask
|
#vtb
def list_packages(conn=None):
    """Return all rows fetched from the package database.

    Opens (and later closes) its own connection when *conn* is None.

    NOTE(review): the SQL string passed to ``conn.execute()`` appears to
    have been stripped, so the call fails as written.  The original
    docstring ("List files for an installed package") also disagrees
    with the function name -- confirm intent against the original source.
    """
    close = False
    if conn is None:
        # No connection supplied: open one and remember to close it.
        close = True
        conn = init()
    ret = []
    data = conn.execute()
    for pkg in data.fetchall():
        ret.append(pkg)
    if close:
        conn.close()
    return ret
|
List files for an installed package
|
### Input:
List files for an installed package
### Response:
#vtb
def list_packages(conn=None):
close = False
if conn is None:
close = True
conn = init()
ret = []
data = conn.execute()
for pkg in data.fetchall():
ret.append(pkg)
if close:
conn.close()
return ret
|
#vtb
def _ReadPartial(self, length):
chunk = self.offset // self.chunksize
chunk_offset = self.offset % self.chunksize
if chunk > self.last_chunk:
return ""
available_to_read = min(length, self.chunksize - chunk_offset)
fd = self._GetChunkForReading(chunk)
fd.seek(chunk_offset)
result = fd.read(available_to_read)
self.offset += len(result)
return result
|
Read as much as possible, but not more than length.
|
### Input:
Read as much as possible, but not more than length.
### Response:
#vtb
def _ReadPartial(self, length):
chunk = self.offset // self.chunksize
chunk_offset = self.offset % self.chunksize
if chunk > self.last_chunk:
return ""
available_to_read = min(length, self.chunksize - chunk_offset)
fd = self._GetChunkForReading(chunk)
fd.seek(chunk_offset)
result = fd.read(available_to_read)
self.offset += len(result)
return result
|
#vtb
def store_atomic(self, value, ptr, ordering, align):
    """Build an atomic store of *value* through *ptr*: ``*ptr = value``.

    Args:
        value: the value to store; its type must match ptr's pointee type.
        ptr: the pointer to store through.
        ordering: memory ordering for the atomic store.
        align: guaranteed alignment of the store.

    Returns:
        The inserted StoreAtomicInstr.

    Raises:
        TypeError: if ptr is not a pointer, or its pointee type does not
            match the type of value.
    """
    if not isinstance(ptr.type, types.PointerType):
        raise TypeError("cannot store to value of type %s (%r): not a pointer"
                        % (ptr.type, str(ptr)))
    if ptr.type.pointee != value.type:
        raise TypeError("cannot store %s to %s: mismatching types"
                        % (value.type, ptr.type))
    st = instructions.StoreAtomicInstr(self.block, value, ptr, ordering, align)
    self._insert(st)
    return st
|
Store value to pointer, with optional guaranteed alignment:
*ptr = name
|
### Input:
Store value to pointer, with optional guaranteed alignment:
*ptr = name
### Response:
#vtb
def store_atomic(self, value, ptr, ordering, align):
if not isinstance(ptr.type, types.PointerType):
raise TypeError("cannot store to value of type %s (%r): not a pointer"
% (ptr.type, str(ptr)))
if ptr.type.pointee != value.type:
raise TypeError("cannot store %s to %s: mismatching types"
% (value.type, ptr.type))
st = instructions.StoreAtomicInstr(self.block, value, ptr, ordering, align)
self._insert(st)
return st
|
#vtb
async def verify_credentials(self):
_, public_key = self.srp.initialize()
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b,
tlv8.TLV_PUBLIC_KEY: public_key})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
resp = _get_pairing_data(resp)
session_pub_key = resp[tlv8.TLV_PUBLIC_KEY]
encrypted = resp[tlv8.TLV_ENCRYPTED_DATA]
log_binary(_LOGGER,
,
Public=self.credentials.ltpk,
Encrypted=encrypted)
encrypted_data = self.srp.verify1(
self.credentials, session_pub_key, encrypted)
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b,
tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
self._output_key, self._input_key = self.srp.verify2()
|
Verify credentials with device.
|
### Input:
Verify credentials with device.
### Response:
#vtb
async def verify_credentials(self):
_, public_key = self.srp.initialize()
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b,
tlv8.TLV_PUBLIC_KEY: public_key})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
resp = _get_pairing_data(resp)
session_pub_key = resp[tlv8.TLV_PUBLIC_KEY]
encrypted = resp[tlv8.TLV_ENCRYPTED_DATA]
log_binary(_LOGGER,
,
Public=self.credentials.ltpk,
Encrypted=encrypted)
encrypted_data = self.srp.verify1(
self.credentials, session_pub_key, encrypted)
msg = messages.crypto_pairing({
tlv8.TLV_SEQ_NO: b,
tlv8.TLV_ENCRYPTED_DATA: encrypted_data})
resp = await self.protocol.send_and_receive(
msg, generate_identifier=False)
self._output_key, self._input_key = self.srp.verify2()
|
#vtb
def normalize_range(e, n):
if e.step > 0:
count = max(0, (e.stop - e.start - 1) // e.step + 1)
else:
count = max(0, (e.start - e.stop - 1) // -e.step + 1)
if count == 0:
return (0, 0, e.step)
start = e.start
finish = e.start + (count - 1) * e.step
if start >= 0:
if start >= n or finish < 0 or finish >= n:
return None
else:
start += n
finish += n
if start < 0 or start >= n or finish < 0 or finish >= n:
return None
assert count >= 0
return (start, count, e.step)
|
Return the range tuple normalized for an ``n``-element object.
The semantics of a range is slightly different than that of a slice.
In particular, a range is similar to a list in meaning (and on Py2 it was
eagerly expanded into a list). Thus we do not allow the range to generate
indices that would be invalid for an ``n``-array. Furthermore, we restrict
the range to produce only positive or only negative indices. For example,
``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing
to treat the last "-1" as the last element in the list.
:param e: a range object representing a selector
:param n: number of elements in a sequence to which ``e`` is applied
:returns: tuple ``(start, count, step)`` derived from ``e``, or None
if the range is invalid.
|
### Input:
Return the range tuple normalized for an ``n``-element object.
The semantics of a range is slightly different than that of a slice.
In particular, a range is similar to a list in meaning (and on Py2 it was
eagerly expanded into a list). Thus we do not allow the range to generate
indices that would be invalid for an ``n``-array. Furthermore, we restrict
the range to produce only positive or only negative indices. For example,
``range(2, -2, -1)`` expands into ``[2, 1, 0, -1]``, and it is confusing
to treat the last "-1" as the last element in the list.
:param e: a range object representing a selector
:param n: number of elements in a sequence to which ``e`` is applied
:returns: tuple ``(start, count, step)`` derived from ``e``, or None
if the range is invalid.
### Response:
#vtb
def normalize_range(e, n):
if e.step > 0:
count = max(0, (e.stop - e.start - 1) // e.step + 1)
else:
count = max(0, (e.start - e.stop - 1) // -e.step + 1)
if count == 0:
return (0, 0, e.step)
start = e.start
finish = e.start + (count - 1) * e.step
if start >= 0:
if start >= n or finish < 0 or finish >= n:
return None
else:
start += n
finish += n
if start < 0 or start >= n or finish < 0 or finish >= n:
return None
assert count >= 0
return (start, count, e.step)
|
#vtb
def _control_longitude(self):
if self.lonm < 0.0:
self.lonm = 360.0 + self.lonm
if self.lonM < 0.0:
self.lonM = 360.0 + self.lonM
if self.lonm > 360.0:
self.lonm = self.lonm - 360.0
if self.lonM > 360.0:
self.lonM = self.lonM - 360.0
|
Control on longitude values
|
### Input:
Control on longitude values
### Response:
#vtb
def _control_longitude(self):
if self.lonm < 0.0:
self.lonm = 360.0 + self.lonm
if self.lonM < 0.0:
self.lonM = 360.0 + self.lonM
if self.lonm > 360.0:
self.lonm = self.lonm - 360.0
if self.lonM > 360.0:
self.lonM = self.lonM - 360.0
|
#vtb
def cylinder(radius=1.0,
height=1.0,
sections=32,
segment=None,
transform=None,
**kwargs):
if segment is not None:
segment = np.asanyarray(segment, dtype=np.float64)
if segment.shape != (2, 3):
raise ValueError()
vector = segment[1] - segment[0]
height = np.linalg.norm(vector)
midpoint = segment[0] + (vector * 0.5)
rotation = align_vectors([0, 0, 1], vector)
translation = transformations.translation_matrix(midpoint)
transform = np.dot(translation, rotation)
theta = np.linspace(0, np.pi * 2, sections)
vertices = np.column_stack((np.sin(theta),
np.cos(theta))) * radius
vertices[0] = [0, 0]
index = np.arange(1, len(vertices) + 1).reshape((-1, 1))
index[-1] = 1
faces = np.tile(index, (1, 2)).reshape(-1)[1:-1].reshape((-1, 2))
faces = np.column_stack((np.zeros(len(faces), dtype=np.int), faces))
cylinder = extrude_triangulation(vertices=vertices,
faces=faces,
height=height,
**kwargs)
cylinder.vertices[:, 2] -= height * .5
if transform is not None:
cylinder.apply_transform(transform)
return cylinder
|
Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float
The height of the cylinder
sections : int
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder
|
### Input:
Create a mesh of a cylinder along Z centered at the origin.
Parameters
----------
radius : float
The radius of the cylinder
height : float
The height of the cylinder
sections : int
How many pie wedges should the cylinder have
segment : (2, 3) float
Endpoints of axis, overrides transform and height
transform : (4, 4) float
Transform to apply
**kwargs:
passed to Trimesh to create cylinder
Returns
----------
cylinder: trimesh.Trimesh
Resulting mesh of a cylinder
### Response:
#vtb
def cylinder(radius=1.0,
height=1.0,
sections=32,
segment=None,
transform=None,
**kwargs):
if segment is not None:
segment = np.asanyarray(segment, dtype=np.float64)
if segment.shape != (2, 3):
raise ValueError()
vector = segment[1] - segment[0]
height = np.linalg.norm(vector)
midpoint = segment[0] + (vector * 0.5)
rotation = align_vectors([0, 0, 1], vector)
translation = transformations.translation_matrix(midpoint)
transform = np.dot(translation, rotation)
theta = np.linspace(0, np.pi * 2, sections)
vertices = np.column_stack((np.sin(theta),
np.cos(theta))) * radius
vertices[0] = [0, 0]
index = np.arange(1, len(vertices) + 1).reshape((-1, 1))
index[-1] = 1
faces = np.tile(index, (1, 2)).reshape(-1)[1:-1].reshape((-1, 2))
faces = np.column_stack((np.zeros(len(faces), dtype=np.int), faces))
cylinder = extrude_triangulation(vertices=vertices,
faces=faces,
height=height,
**kwargs)
cylinder.vertices[:, 2] -= height * .5
if transform is not None:
cylinder.apply_transform(transform)
return cylinder
|
#vtb
def map_element(self, obj, name, event):
canvas = self.diagram.diagram_canvas
parser = XDotParser()
for element in event.added:
logger.debug("Mapping new element [%s] to diagram node" % element)
for node_mapping in self.nodes:
ct = name[:-6]
if node_mapping.containment_trait == ct:
dot_attrs = node_mapping.dot_node
dot = Dot()
graph_node = Node(str(id(element)))
self._style_node(graph_node, dot_attrs)
dot.add_node(graph_node)
xdot = graph_from_dot_data(dot.create(self.program,"xdot"))
diagram_nodes = parser.parse_nodes(xdot)
for dn in diagram_nodes:
if dn is not None:
dn.element = element
for tool in node_mapping.tools:
dn.tools.append(tool(dn))
canvas.add(dn)
canvas.request_redraw()
for element in event.removed:
logger.debug("Unmapping element [%s] from diagram" % element)
for component in canvas.components:
if element == component.element:
canvas.remove(component)
canvas.request_redraw()
break
|
Handles mapping elements to diagram components
|
### Input:
Handles mapping elements to diagram components
### Response:
#vtb
def map_element(self, obj, name, event):
canvas = self.diagram.diagram_canvas
parser = XDotParser()
for element in event.added:
logger.debug("Mapping new element [%s] to diagram node" % element)
for node_mapping in self.nodes:
ct = name[:-6]
if node_mapping.containment_trait == ct:
dot_attrs = node_mapping.dot_node
dot = Dot()
graph_node = Node(str(id(element)))
self._style_node(graph_node, dot_attrs)
dot.add_node(graph_node)
xdot = graph_from_dot_data(dot.create(self.program,"xdot"))
diagram_nodes = parser.parse_nodes(xdot)
for dn in diagram_nodes:
if dn is not None:
dn.element = element
for tool in node_mapping.tools:
dn.tools.append(tool(dn))
canvas.add(dn)
canvas.request_redraw()
for element in event.removed:
logger.debug("Unmapping element [%s] from diagram" % element)
for component in canvas.components:
if element == component.element:
canvas.remove(component)
canvas.request_redraw()
break
|
#vtb
def _process_glsl_template(template, colors):
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = % tuple(color)
template = template.replace( % i, vec4_color)
return template
|
Replace $color_i by color #i in the GLSL template.
|
### Input:
Replace $color_i by color #i in the GLSL template.
### Response:
#vtb
def _process_glsl_template(template, colors):
for i in range(len(colors) - 1, -1, -1):
color = colors[i]
assert len(color) == 4
vec4_color = % tuple(color)
template = template.replace( % i, vec4_color)
return template
|
#vtb
def _bumpUpWeakColumns(self):
weakColumns = numpy.where(self._overlapDutyCycles
< self._minOverlapDutyCycles)[0]
for columnIndex in weakColumns:
perm = self._permanences[columnIndex].astype(realDType)
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
perm[maskPotential] += self._synPermBelowStimulusInc
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False)
|
This method increases the permanence values of synapses of columns whose
activity level has been too low. Such columns are identified by having an
overlap duty cycle that drops too much below those of their peers. The
permanence values for such columns are increased.
|
### Input:
This method increases the permanence values of synapses of columns whose
activity level has been too low. Such columns are identified by having an
overlap duty cycle that drops too much below those of their peers. The
permanence values for such columns are increased.
### Response:
#vtb
def _bumpUpWeakColumns(self):
weakColumns = numpy.where(self._overlapDutyCycles
< self._minOverlapDutyCycles)[0]
for columnIndex in weakColumns:
perm = self._permanences[columnIndex].astype(realDType)
maskPotential = numpy.where(self._potentialPools[columnIndex] > 0)[0]
perm[maskPotential] += self._synPermBelowStimulusInc
self._updatePermanencesForColumn(perm, columnIndex, raisePerm=False)
|
#vtb
def set_state(key, value, namespace=None, table_name=None, environment=None,
layer=None, stage=None, shard_id=None, consistent=True,
serializer=json.dumps, wait_exponential_multiplier=500,
wait_exponential_max=5000, stop_max_delay=10000, ttl=None):
if table_name is None:
table_name = _state_table_name(environment=environment, layer=layer,
stage=stage)
if not table_name:
msg = ("Can{}{}{}'".format(resp))
return resp
|
Set Lambda state value.
|
### Input:
Set Lambda state value.
### Response:
#vtb
def set_state(key, value, namespace=None, table_name=None, environment=None,
layer=None, stage=None, shard_id=None, consistent=True,
serializer=json.dumps, wait_exponential_multiplier=500,
wait_exponential_max=5000, stop_max_delay=10000, ttl=None):
if table_name is None:
table_name = _state_table_name(environment=environment, layer=layer,
stage=stage)
if not table_name:
msg = ("Can{}{}{}'".format(resp))
return resp
|
#vtb
def add_instance(self, inst, index=None):
if index is None:
self.__append_instance(inst.jobject)
else:
self.__insert_instance(index, inst.jobject)
|
Adds the specified instance to the dataset.
:param inst: the Instance to add
:type inst: Instance
:param index: the 0-based index where to add the Instance
:type index: int
|
### Input:
Adds the specified instance to the dataset.
:param inst: the Instance to add
:type inst: Instance
:param index: the 0-based index where to add the Instance
:type index: int
### Response:
#vtb
def add_instance(self, inst, index=None):
if index is None:
self.__append_instance(inst.jobject)
else:
self.__insert_instance(index, inst.jobject)
|
#vtb
def add_mixl_specific_results_to_estimation_res(estimator, results_dict):
prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"],
estimator.choice_vector,
estimator.rows_to_mixers,
return_type=)
results_dict["simulated_sequence_probs"] = prob_res[0]
results_dict["expanded_sequence_probs"] = prob_res[1]
return results_dict
|
Stores particular items in the results dictionary that are unique to mixed
logit-type models. In particular, this function calculates and adds
`sequence_probs` and `expanded_sequence_probs` to the results dictionary.
The `constrained_pos` object is also stored to the results_dict.
Parameters
----------
estimator : an instance of the MixedEstimator class.
Should contain a `choice_vector` attribute that is a 1D ndarray
representing the choices made for this model's dataset. Should also
contain a `rows_to_mixers` attribute that maps each row of the long
format data to a unit of observation that the mixing is being performed
over.
results_dict : dict.
This dictionary should be the dictionary returned from
scipy.optimize.minimize. In particular, it should have the following
`long_probs` key.
Returns
-------
results_dict.
|
### Input:
Stores particular items in the results dictionary that are unique to mixed
logit-type models. In particular, this function calculates and adds
`sequence_probs` and `expanded_sequence_probs` to the results dictionary.
The `constrained_pos` object is also stored to the results_dict.
Parameters
----------
estimator : an instance of the MixedEstimator class.
Should contain a `choice_vector` attribute that is a 1D ndarray
representing the choices made for this model's dataset. Should also
contain a `rows_to_mixers` attribute that maps each row of the long
format data to a unit of observation that the mixing is being performed
over.
results_dict : dict.
This dictionary should be the dictionary returned from
scipy.optimize.minimize. In particular, it should have the following
`long_probs` key.
Returns
-------
results_dict.
### Response:
#vtb
def add_mixl_specific_results_to_estimation_res(estimator, results_dict):
prob_res = mlc.calc_choice_sequence_probs(results_dict["long_probs"],
estimator.choice_vector,
estimator.rows_to_mixers,
return_type=)
results_dict["simulated_sequence_probs"] = prob_res[0]
results_dict["expanded_sequence_probs"] = prob_res[1]
return results_dict
|
#vtb
def measure_all(fbasename=None, log=None, ml_version=ml_version):
ml_script1_file =
if ml_version == :
file_out =
else:
file_out = None
ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version)
compute.measure_geometry(ml_script1)
compute.measure_topology(ml_script1)
ml_script1.save_to_file(ml_script1_file)
ml_script1.run_script(log=log, script_file=ml_script1_file)
geometry = ml_script1.geometry
topology = ml_script1.topology
if ml_version == :
if log is not None:
log_file = open(log, )
log_file.write(
%
fbasename)
log_file.close()
aabb = measure_aabb(file_out, log)
else:
aabb = geometry[]
return aabb, geometry, topology
|
Measures mesh geometry, aabb and topology.
|
### Input:
Measures mesh geometry, aabb and topology.
### Response:
#vtb
def measure_all(fbasename=None, log=None, ml_version=ml_version):
ml_script1_file =
if ml_version == :
file_out =
else:
file_out = None
ml_script1 = mlx.FilterScript(file_in=fbasename, file_out=file_out, ml_version=ml_version)
compute.measure_geometry(ml_script1)
compute.measure_topology(ml_script1)
ml_script1.save_to_file(ml_script1_file)
ml_script1.run_script(log=log, script_file=ml_script1_file)
geometry = ml_script1.geometry
topology = ml_script1.topology
if ml_version == :
if log is not None:
log_file = open(log, )
log_file.write(
%
fbasename)
log_file.close()
aabb = measure_aabb(file_out, log)
else:
aabb = geometry[]
return aabb, geometry, topology
|
#vtb
def get_fqhostname():
fqdn = None
try:
addrinfo = socket.getaddrinfo(
socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM,
socket.SOL_TCP, socket.AI_CANONNAME
)
for info in addrinfo:
if len(info) > 3 and info[3]:
fqdn = info[3]
break
except socket.gaierror:
pass
except socket.error as err:
log.debug(, err)
if fqdn is None:
fqdn = socket.getfqdn()
return fqdn
|
Returns the fully qualified hostname
|
### Input:
Returns the fully qualified hostname
### Response:
#vtb
def get_fqhostname():
fqdn = None
try:
addrinfo = socket.getaddrinfo(
socket.gethostname(), 0, socket.AF_UNSPEC, socket.SOCK_STREAM,
socket.SOL_TCP, socket.AI_CANONNAME
)
for info in addrinfo:
if len(info) > 3 and info[3]:
fqdn = info[3]
break
except socket.gaierror:
pass
except socket.error as err:
log.debug(, err)
if fqdn is None:
fqdn = socket.getfqdn()
return fqdn
|
#vtb
def delete_events(
self,
project_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "delete_events" not in self._inner_api_calls:
self._inner_api_calls[
"delete_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_events,
default_retry=self._method_configs["DeleteEvents"].retry,
default_timeout=self._method_configs["DeleteEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["delete_events"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
### Input:
Deletes all error events of a given project.
Example:
>>> from google.cloud import errorreporting_v1beta1
>>>
>>> client = errorreporting_v1beta1.ErrorStatsServiceClient()
>>>
>>> project_name = client.project_path('[PROJECT]')
>>>
>>> response = client.delete_events(project_name)
Args:
project_name (str): [Required] The resource name of the Google Cloud Platform project.
Written as ``projects/`` plus the `Google Cloud Platform project
ID <https://support.google.com/cloud/answer/6158840>`__. Example:
``projects/my-project-123``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.errorreporting_v1beta1.types.DeleteEventsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
#vtb
def delete_events(
self,
project_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
if "delete_events" not in self._inner_api_calls:
self._inner_api_calls[
"delete_events"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_events,
default_retry=self._method_configs["DeleteEvents"].retry,
default_timeout=self._method_configs["DeleteEvents"].timeout,
client_info=self._client_info,
)
request = error_stats_service_pb2.DeleteEventsRequest(project_name=project_name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_name", project_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["delete_events"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
#vtb
def size_of_varint(value):
value = (value << 1) ^ (value >> 63)
if value <= 0x7f:
return 1
if value <= 0x3fff:
return 2
if value <= 0x1fffff:
return 3
if value <= 0xfffffff:
return 4
if value <= 0x7ffffffff:
return 5
if value <= 0x3ffffffffff:
return 6
if value <= 0x1ffffffffffff:
return 7
if value <= 0xffffffffffffff:
return 8
if value <= 0x7fffffffffffffff:
return 9
return 10
|
Number of bytes needed to encode an integer in variable-length format.
|
### Input:
Number of bytes needed to encode an integer in variable-length format.
### Response:
#vtb
def size_of_varint(value):
value = (value << 1) ^ (value >> 63)
if value <= 0x7f:
return 1
if value <= 0x3fff:
return 2
if value <= 0x1fffff:
return 3
if value <= 0xfffffff:
return 4
if value <= 0x7ffffffff:
return 5
if value <= 0x3ffffffffff:
return 6
if value <= 0x1ffffffffffff:
return 7
if value <= 0xffffffffffffff:
return 8
if value <= 0x7fffffffffffffff:
return 9
return 10
|
#vtb
def next_chunk(self):
raise NotImplementedError("%s not implemented for %s" % (self.next_chunk.__func__.__name__,
self.__class__.__name__))
|
Returns the chunk immediately following (and adjacent to) this one.
|
### Input:
Returns the chunk immediately following (and adjacent to) this one.
### Response:
#vtb
def next_chunk(self):
raise NotImplementedError("%s not implemented for %s" % (self.next_chunk.__func__.__name__,
self.__class__.__name__))
|
#vtb
def get_cell_length(flow_model):
assert flow_model.lower() in FlowModelConst.d8_lens
return FlowModelConst.d8_lens.get(flow_model.lower())
|
Get flow direction induced cell length dict.
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
|
### Input:
Get flow direction induced cell length dict.
Args:
flow_model: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
### Response:
#vtb
def get_cell_length(flow_model):
assert flow_model.lower() in FlowModelConst.d8_lens
return FlowModelConst.d8_lens.get(flow_model.lower())
|
#vtb
def turbulent_Nunner(Re, Pr, fd, fd_smooth):
r
return Re*Pr*fd/8./(1 + 1.5*Re**-0.125*Pr**(-1/6.)*(Pr*fd/fd_smooth - 1.))
|
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [2]_ as shown in [1]_.
.. math::
Nu = \frac{RePr(f/8)}{1 + 1.5Re^{-1/8}Pr^{-1/6}[Pr(f/f_s)-1]}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
fd : float
Darcy friction factor [-]
fd_smooth : float
Darcy friction factor of a smooth pipe [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Valid for Pr ≅ 0.7; bad results for Pr > 1.
Examples
--------
>>> turbulent_Nunner(Re=1E5, Pr=0.7, fd=0.0185, fd_smooth=0.005)
101.15841010919947
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] W. Nunner, "Warmeiibergang und Druckabfall in Rauhen Rohren,"
VDI-Forschungsheft 445, ser. B,(22): 5-39, 1956
|
### Input:
r'''Calculates internal convection Nusselt number for turbulent flows
in pipe according to [2]_ as shown in [1]_.
.. math::
Nu = \frac{RePr(f/8)}{1 + 1.5Re^{-1/8}Pr^{-1/6}[Pr(f/f_s)-1]}
Parameters
----------
Re : float
Reynolds number, [-]
Pr : float
Prandtl number, [-]
fd : float
Darcy friction factor [-]
fd_smooth : float
Darcy friction factor of a smooth pipe [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Valid for Pr ≅ 0.7; bad results for Pr > 1.
Examples
--------
>>> turbulent_Nunner(Re=1E5, Pr=0.7, fd=0.0185, fd_smooth=0.005)
101.15841010919947
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] W. Nunner, "Warmeiibergang und Druckabfall in Rauhen Rohren,"
VDI-Forschungsheft 445, ser. B,(22): 5-39, 1956
### Response:
#vtb
def turbulent_Nunner(Re, Pr, fd, fd_smooth):
r
return Re*Pr*fd/8./(1 + 1.5*Re**-0.125*Pr**(-1/6.)*(Pr*fd/fd_smooth - 1.))
|
#vtb
def match_config(filters, device, kind, default):
if device is None:
return default
matches = (f.value(kind, device)
for f in filters
if f.has_value(kind) and f.match(device))
return next(matches, default)
|
Matches devices against multiple :class:`DeviceFilter`s.
:param list filters: device filters
:param Device device: device to be mounted
:param str kind: value kind
:param default: default value
:returns: value of the first matching filter
|
### Input:
Matches devices against multiple :class:`DeviceFilter`s.
:param list filters: device filters
:param Device device: device to be mounted
:param str kind: value kind
:param default: default value
:returns: value of the first matching filter
### Response:
#vtb
def match_config(filters, device, kind, default):
if device is None:
return default
matches = (f.value(kind, device)
for f in filters
if f.has_value(kind) and f.match(device))
return next(matches, default)
|
#vtb
def set_lic_text(self, doc, text):
if self.has_extr_lic(doc):
if not self.extr_text_set:
self.extr_text_set = True
if validations.validate_is_free_form_text(text):
self.extr_lic(doc).text = str_from_text(text)
return True
else:
raise SPDXValueError()
else:
raise CardinalityError()
else:
raise OrderError()
|
Sets license extracted text.
Raises SPDXValueError if text is not free form text.
Raises OrderError if no license ID defined.
|
### Input:
Sets license extracted text.
Raises SPDXValueError if text is not free form text.
Raises OrderError if no license ID defined.
### Response:
#vtb
def set_lic_text(self, doc, text):
if self.has_extr_lic(doc):
if not self.extr_text_set:
self.extr_text_set = True
if validations.validate_is_free_form_text(text):
self.extr_lic(doc).text = str_from_text(text)
return True
else:
raise SPDXValueError()
else:
raise CardinalityError()
else:
raise OrderError()
|
#vtb
def show_inputs(client, workflow):
for input_ in workflow.inputs:
click.echo(
.format(
id=input_.id,
default=_format_default(client, input_.default),
)
)
sys.exit(0)
|
Show workflow inputs and exit.
|
### Input:
Show workflow inputs and exit.
### Response:
#vtb
def show_inputs(client, workflow):
for input_ in workflow.inputs:
click.echo(
.format(
id=input_.id,
default=_format_default(client, input_.default),
)
)
sys.exit(0)
|
#vtb
def get_folder_contents_iter(self, uri):
resource = self.get_resource_by_uri(uri)
if not isinstance(resource, Folder):
raise NotAFolderError(uri)
folder_key = resource[]
for item in self._folder_get_content_iter(folder_key):
if in item:
if ".patch." in item[]:
continue
yield File(item)
elif in item:
yield Folder(item)
|
Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
print(item)
|
### Input:
Return iterator for directory contents.
uri -- mediafire URI
Example:
for item in get_folder_contents_iter('mf:///Documents'):
print(item)
### Response:
#vtb
def get_folder_contents_iter(self, uri):
resource = self.get_resource_by_uri(uri)
if not isinstance(resource, Folder):
raise NotAFolderError(uri)
folder_key = resource[]
for item in self._folder_get_content_iter(folder_key):
if in item:
if ".patch." in item[]:
continue
yield File(item)
elif in item:
yield Folder(item)
|
#vtb
async def create_collection(db, model_class: MongoCollectionMixin):
s ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
indicesnamename_fieldsuniqueuniquesparsesparseexpireAfterSecondsexpireAfterSecondsbackgroundpartialFilterExpressionpartialFilterExpressionpartialFilterExpressionfields'],
**index_kwargs
)
except OperationFailure as ex:
pass
return coll
return None
|
Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
|
### Input:
Creates a MongoDB collection and all the declared indices in the model's ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
### Response:
#vtb
async def create_collection(db, model_class: MongoCollectionMixin):
s ``Meta`` class
:param db:
A database handle
:type db:
motor.motor_asyncio.AsyncIOMotorClient
:param model_class:
The model to create
:type model_class:
Subclass of ``Model`` mixed with ``MongoCollectionMixin``
indicesnamename_fieldsuniqueuniquesparsesparseexpireAfterSecondsexpireAfterSecondsbackgroundpartialFilterExpressionpartialFilterExpressionpartialFilterExpressionfields'],
**index_kwargs
)
except OperationFailure as ex:
pass
return coll
return None
|
#vtb
def remove_sister(self, sister=None):
sisters = self.get_sisters()
if len(sisters) > 0:
if sister is None:
sister = sisters.pop(0)
return self.up.remove_child(sister)
|
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
|
### Input:
Removes a sister node. It has the same effect as
**`TreeNode.up.remove_child(sister)`**
If a sister node is not supplied, the first sister will be deleted
and returned.
:argument sister: A node instance
:return: The node removed
### Response:
#vtb
def remove_sister(self, sister=None):
sisters = self.get_sisters()
if len(sisters) > 0:
if sister is None:
sister = sisters.pop(0)
return self.up.remove_child(sister)
|
#vtb
def get_study_items(self):
study_items = set()
for rec in self.goea_results:
study_items |= rec.study_items
return study_items
|
Get all study items (e.g., geneids).
|
### Input:
Get all study items (e.g., geneids).
### Response:
#vtb
def get_study_items(self):
study_items = set()
for rec in self.goea_results:
study_items |= rec.study_items
return study_items
|
#vtb
def _save_message(self, stack, type_, message, context=None,
from_merge=False):
uid = uuid.uuid4().hex
message[] = uid
if message[]:
if not self.supports_version(message[]):
if self.instant:
print
self._print_message(type_, message, verbose=True)
return
elif self.version_requirements:
message[] = self.version_requirements
stack.append(message)
if message[] is None:
message[] = self.tier
if message[] and not from_merge:
self.compat_summary[ % message[]] += 1
if message[]:
tree = self.message_tree
last_id = None
for eid in message[]:
if last_id is not None:
tree = tree[last_id]
if eid not in tree:
tree[eid] = {: 0,
: 0,
: 0,
: []}
tree[eid][ % type_] += 1
last_id = eid
tree[last_id][].append(uid)
if self.instant:
self._print_message(type_, message, verbose=True)
|
Stores a message in the appropriate message stack.
|
### Input:
Stores a message in the appropriate message stack.
### Response:
#vtb
def _save_message(self, stack, type_, message, context=None,
from_merge=False):
uid = uuid.uuid4().hex
message[] = uid
if message[]:
if not self.supports_version(message[]):
if self.instant:
print
self._print_message(type_, message, verbose=True)
return
elif self.version_requirements:
message[] = self.version_requirements
stack.append(message)
if message[] is None:
message[] = self.tier
if message[] and not from_merge:
self.compat_summary[ % message[]] += 1
if message[]:
tree = self.message_tree
last_id = None
for eid in message[]:
if last_id is not None:
tree = tree[last_id]
if eid not in tree:
tree[eid] = {: 0,
: 0,
: 0,
: []}
tree[eid][ % type_] += 1
last_id = eid
tree[last_id][].append(uid)
if self.instant:
self._print_message(type_, message, verbose=True)
|
#vtb
async def receive_events(self, request: HttpRequest):
body = await request.read()
s = self.settings()
try:
content = ujson.loads(body)
except ValueError:
return json_response({
: True,
:
}, status=400)
secret = s[]
actual_sig = request.headers[]
expected_sig = sign_message(body, secret)
if not hmac.compare_digest(actual_sig, expected_sig):
return json_response({
: True,
: ,
}, status=401)
for entry in content[]:
for raw_message in entry.get(, []):
message = FacebookMessage(raw_message, self)
await self.handle_event(message)
return json_response({
: True,
})
|
Events received from Facebook
|
### Input:
Events received from Facebook
### Response:
#vtb
async def receive_events(self, request: HttpRequest):
body = await request.read()
s = self.settings()
try:
content = ujson.loads(body)
except ValueError:
return json_response({
: True,
:
}, status=400)
secret = s[]
actual_sig = request.headers[]
expected_sig = sign_message(body, secret)
if not hmac.compare_digest(actual_sig, expected_sig):
return json_response({
: True,
: ,
}, status=401)
for entry in content[]:
for raw_message in entry.get(, []):
message = FacebookMessage(raw_message, self)
await self.handle_event(message)
return json_response({
: True,
})
|
#vtb
def param(name, help=""):
def decorator(func):
params = getattr(func, "params", [])
_param = Param(name, help)
params.insert(0, _param)
func.params = params
return func
return decorator
|
Decorator that add a parameter to the wrapped command or function.
|
### Input:
Decorator that add a parameter to the wrapped command or function.
### Response:
#vtb
def param(name, help=""):
def decorator(func):
params = getattr(func, "params", [])
_param = Param(name, help)
params.insert(0, _param)
func.params = params
return func
return decorator
|
#vtb
def assemble_tlg_author_filepaths():
plaintext_dir_rel =
plaintext_dir = os.path.expanduser(plaintext_dir_rel)
filepaths = [os.path.join(plaintext_dir, x + ) for x in TLG_INDEX]
return filepaths
|
Reads TLG index and builds a list of absolute filepaths.
|
### Input:
Reads TLG index and builds a list of absolute filepaths.
### Response:
#vtb
def assemble_tlg_author_filepaths():
plaintext_dir_rel =
plaintext_dir = os.path.expanduser(plaintext_dir_rel)
filepaths = [os.path.join(plaintext_dir, x + ) for x in TLG_INDEX]
return filepaths
|
#vtb
def create_crop(self, name, file_obj,
x=None, x2=None, y=None, y2=None):
if name not in self._registry:
return
file_obj.seek(0)
im = Image.open(file_obj)
config = self._registry[name]
if x is not None and x2 and y is not None and y2 and not config.editable:
return
im = config.rotate_by_exif(im)
crop_spec = config.get_crop_spec(im, x=x, x2=x2, y=y, y2=y2)
image = config.process_image(im, crop_spec=crop_spec)
if image:
crop_name = utils.get_size_filename(file_obj.name, name)
self._save_file(image, crop_name)
return crop_spec
|
Generate Version for an Image.
value has to be a serverpath relative to MEDIA_ROOT.
Returns the spec for the crop that was created.
|
### Input:
Generate Version for an Image.
value has to be a serverpath relative to MEDIA_ROOT.
Returns the spec for the crop that was created.
### Response:
#vtb
def create_crop(self, name, file_obj,
x=None, x2=None, y=None, y2=None):
if name not in self._registry:
return
file_obj.seek(0)
im = Image.open(file_obj)
config = self._registry[name]
if x is not None and x2 and y is not None and y2 and not config.editable:
return
im = config.rotate_by_exif(im)
crop_spec = config.get_crop_spec(im, x=x, x2=x2, y=y, y2=y2)
image = config.process_image(im, crop_spec=crop_spec)
if image:
crop_name = utils.get_size_filename(file_obj.name, name)
self._save_file(image, crop_name)
return crop_spec
|
#vtb
def _swap_bytes(data):
a, b = data[1::2], data[::2]
data = bytearray().join(bytearray(x) for x in zip(a, b))
if len(b) > len(a):
data += b[-1:]
return bytes(data)
|
swaps bytes for 16 bit, leaves remaining trailing bytes alone
|
### Input:
swaps bytes for 16 bit, leaves remaining trailing bytes alone
### Response:
#vtb
def _swap_bytes(data):
a, b = data[1::2], data[::2]
data = bytearray().join(bytearray(x) for x in zip(a, b))
if len(b) > len(a):
data += b[-1:]
return bytes(data)
|
#vtb
def walk(self, start, end):
s = start.path
e = end.path
if start.root != end.root:
msg = "%r and %r are not part of the same tree." % (start, end)
raise WalkError(msg)
c = Walker.__calc_common(s, e)
assert c[0] is start.root
len_c = len(c)
if start is c[-1]:
up = tuple()
else:
up = tuple(reversed(s[len_c:]))
if end is c[-1]:
down = tuple()
else:
down = e[len_c:]
return up, c[-1], down
|
Walk from `start` node to `end` node.
Returns:
(upwards, common, downwards): `upwards` is a list of nodes to go upward to.
`common` top node. `downwards` is a list of nodes to go downward to.
Raises:
WalkError: on no common root node.
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
| |-- Node('/f/b/a')
| +-- Node('/f/b/d')
| |-- Node('/f/b/d/c')
| +-- Node('/f/b/d/e')
+-- Node('/f/g')
+-- Node('/f/g/i')
+-- Node('/f/g/i/h')
Create a walker:
>>> w = Walker()
This class is made for walking:
>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))
For a proper walking the nodes need to be part of the same tree:
>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
...
anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
|
### Input:
Walk from `start` node to `end` node.
Returns:
(upwards, common, downwards): `upwards` is a list of nodes to go upward to.
`common` top node. `downwards` is a list of nodes to go downward to.
Raises:
WalkError: on no common root node.
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()))
Node('/f')
|-- Node('/f/b')
| |-- Node('/f/b/a')
| +-- Node('/f/b/d')
| |-- Node('/f/b/d/c')
| +-- Node('/f/b/d/e')
+-- Node('/f/g')
+-- Node('/f/g/i')
+-- Node('/f/g/i/h')
Create a walker:
>>> w = Walker()
This class is made for walking:
>>> w.walk(f, f)
((), Node('/f'), ())
>>> w.walk(f, b)
((), Node('/f'), (Node('/f/b'),))
>>> w.walk(b, f)
((Node('/f/b'),), Node('/f'), ())
>>> w.walk(h, e)
((Node('/f/g/i/h'), Node('/f/g/i'), Node('/f/g')), Node('/f'), (Node('/f/b'), Node('/f/b/d'), Node('/f/b/d/e')))
>>> w.walk(d, e)
((), Node('/f/b/d'), (Node('/f/b/d/e'),))
For a proper walking the nodes need to be part of the same tree:
>>> w.walk(Node("a"), Node("b"))
Traceback (most recent call last):
...
anytree.walker.WalkError: Node('/a') and Node('/b') are not part of the same tree.
### Response:
#vtb
def walk(self, start, end):
s = start.path
e = end.path
if start.root != end.root:
msg = "%r and %r are not part of the same tree." % (start, end)
raise WalkError(msg)
c = Walker.__calc_common(s, e)
assert c[0] is start.root
len_c = len(c)
if start is c[-1]:
up = tuple()
else:
up = tuple(reversed(s[len_c:]))
if end is c[-1]:
down = tuple()
else:
down = e[len_c:]
return up, c[-1], down
|
#vtb
def input(self, data):
self.data = data
self.lexer.input(data)
|
Set the input text data.
|
### Input:
Set the input text data.
### Response:
#vtb
def input(self, data):
self.data = data
self.lexer.input(data)
|
#vtb
def subprocess_run(*popenargs, input=None, timeout=None, check=False, **kwargs):
if input is not None:
if in kwargs:
raise ValueError()
kwargs[] = subprocess.PIPE
with subprocess.Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
raise subprocess.TimeoutExpired(process.args, timeout, output=stdout,
stderr=stderr)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise subprocess.CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
return CompletedProcess(process.args, retcode, stdout, stderr)
|
Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
|
### Input:
Run command with arguments and return a CompletedProcess instance.
The returned instance will have attributes args, returncode, stdout and
stderr. By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If check is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those streams
were captured.
If timeout is given, and the process takes too long, a TimeoutExpired
exception will be raised.
There is an optional argument "input", allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's "stdin" argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the "input" argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
### Response:
#vtb
def subprocess_run(*popenargs, input=None, timeout=None, check=False, **kwargs):
if input is not None:
if in kwargs:
raise ValueError()
kwargs[] = subprocess.PIPE
with subprocess.Popen(*popenargs, **kwargs) as process:
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except subprocess.TimeoutExpired:
process.kill()
stdout, stderr = process.communicate()
raise subprocess.TimeoutExpired(process.args, timeout, output=stdout,
stderr=stderr)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise subprocess.CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
return CompletedProcess(process.args, retcode, stdout, stderr)
|
#vtb
def join_event_view(request, id):
event = get_object_or_404(Event, id=id)
if request.method == "POST":
if not event.show_attending:
return redirect("events")
if "attending" in request.POST:
attending = request.POST.get("attending")
attending = (attending == "true")
if attending:
event.attending.add(request.user)
else:
event.attending.remove(request.user)
return redirect("events")
context = {"event": event, "is_events_admin": request.user.has_admin_permission()}
return render(request, "events/join_event.html", context)
|
Join event page. If a POST request, actually add or remove the attendance of the current
user. Otherwise, display a page with confirmation.
id: event id
|
### Input:
Join event page. If a POST request, actually add or remove the attendance of the current
user. Otherwise, display a page with confirmation.
id: event id
### Response:
#vtb
def join_event_view(request, id):
event = get_object_or_404(Event, id=id)
if request.method == "POST":
if not event.show_attending:
return redirect("events")
if "attending" in request.POST:
attending = request.POST.get("attending")
attending = (attending == "true")
if attending:
event.attending.add(request.user)
else:
event.attending.remove(request.user)
return redirect("events")
context = {"event": event, "is_events_admin": request.user.has_admin_permission()}
return render(request, "events/join_event.html", context)
|
#vtb
def OnSelectReader(self, reader):
SimpleSCardAppEventObserver.OnSelectReader(self, reader)
self.feedbacktext.SetLabel( + repr(reader))
self.transmitbutton.Disable()
|
Called when a reader is selected by clicking on the
reader tree control or toolbar.
|
### Input:
Called when a reader is selected by clicking on the
reader tree control or toolbar.
### Response:
#vtb
def OnSelectReader(self, reader):
SimpleSCardAppEventObserver.OnSelectReader(self, reader)
self.feedbacktext.SetLabel( + repr(reader))
self.transmitbutton.Disable()
|
#vtb
def fill_parameters(self, path, blocks, exclude_free_params=False, check_parameters=False):
if not os.path.exists(path):
raise Exception("model {} does not exist".format(path))
normal_params = sum([nn.parameters for nn in blocks], [])
all_params = sum([nn.all_parameters for nn in blocks], [])
if path.endswith(".gz"):
opener = gzip.open if path.lower().endswith() else open
handle = opener(path, )
saved_params = pickle.load(handle)
handle.close()
if len(all_params) != len(saved_params):
logging.warning(
"parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params),
len(saved_params)))
for target, source in zip(all_params, saved_params):
if not exclude_free_params or target not in normal_params:
target.set_value(source)
elif path.endswith(".npz"):
arrs = np.load(path)
if len(all_params) != len(arrs.keys()):
logging.warning(
"parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params),
len(arrs.keys())))
for target, idx in zip(all_params, range(len(arrs.keys()))):
if not exclude_free_params or target not in normal_params:
source = arrs[ % idx]
target.set_value(source)
else:
raise Exception("File format of %s is not supported, use or or " % path)
|
Load parameters from file to fill all blocks sequentially.
:type blocks: list of deepy.layers.Block
|
### Input:
Load parameters from file to fill all blocks sequentially.
:type blocks: list of deepy.layers.Block
### Response:
#vtb
def fill_parameters(self, path, blocks, exclude_free_params=False, check_parameters=False):
if not os.path.exists(path):
raise Exception("model {} does not exist".format(path))
normal_params = sum([nn.parameters for nn in blocks], [])
all_params = sum([nn.all_parameters for nn in blocks], [])
if path.endswith(".gz"):
opener = gzip.open if path.lower().endswith() else open
handle = opener(path, )
saved_params = pickle.load(handle)
handle.close()
if len(all_params) != len(saved_params):
logging.warning(
"parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params),
len(saved_params)))
for target, source in zip(all_params, saved_params):
if not exclude_free_params or target not in normal_params:
target.set_value(source)
elif path.endswith(".npz"):
arrs = np.load(path)
if len(all_params) != len(arrs.keys()):
logging.warning(
"parameters in the network: {}, parameters in the dumped model: {}".format(len(all_params),
len(arrs.keys())))
for target, idx in zip(all_params, range(len(arrs.keys()))):
if not exclude_free_params or target not in normal_params:
source = arrs[ % idx]
target.set_value(source)
else:
raise Exception("File format of %s is not supported, use or or " % path)
|
#vtb
def crc16(cmd, use_byte=False):
crc = 0xFFFF
if hasattr(cmd, ):
cmd = bytes.fromhex(cmd)
for _ in cmd:
c = _ & 0x00FF
crc ^= c
for i in range(8):
if crc & 0x0001 > 0:
crc >>= 1
crc ^= 0xA001
else:
crc >>= 1
t = [(crc & 0x00FF), (crc >> 8 & 0xFF)]
crc = % (t[0], t[1])
if use_byte:
crc = bytes.fromhex(crc)
return crc
|
CRC16 检验
- 启用``use_byte`` 则返回 bytes 类型.
:param cmd: 无crc检验的指令
:type cmd:
:param use_byte: 是否返回byte类型
:type use_byte:
:return: 返回crc值
:rtype:
|
### Input:
CRC16 检验
- 启用``use_byte`` 则返回 bytes 类型.
:param cmd: 无crc检验的指令
:type cmd:
:param use_byte: 是否返回byte类型
:type use_byte:
:return: 返回crc值
:rtype:
### Response:
#vtb
def crc16(cmd, use_byte=False):
crc = 0xFFFF
if hasattr(cmd, ):
cmd = bytes.fromhex(cmd)
for _ in cmd:
c = _ & 0x00FF
crc ^= c
for i in range(8):
if crc & 0x0001 > 0:
crc >>= 1
crc ^= 0xA001
else:
crc >>= 1
t = [(crc & 0x00FF), (crc >> 8 & 0xFF)]
crc = % (t[0], t[1])
if use_byte:
crc = bytes.fromhex(crc)
return crc
|
#vtb
def entities(self, entity_ids):
url = % self.url
for entity_id in entity_ids:
url += % _get_path(entity_id)
url = url[:-1]
data = self._get(url)
return data.json()
|
Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references.
|
### Input:
Get the default data for entities.
@param entity_ids A list of entity ids either as strings or references.
### Response:
#vtb
def entities(self, entity_ids):
url = % self.url
for entity_id in entity_ids:
url += % _get_path(entity_id)
url = url[:-1]
data = self._get(url)
return data.json()
|
#vtb
def remove_dups(head):
hashset = set()
prev = Node()
while head:
if head.val in hashset:
prev.next = head.next
else:
hashset.add(head.val)
prev = head
head = head.next
|
Time Complexity: O(N)
Space Complexity: O(N)
|
### Input:
Time Complexity: O(N)
Space Complexity: O(N)
### Response:
#vtb
def remove_dups(head):
hashset = set()
prev = Node()
while head:
if head.val in hashset:
prev.next = head.next
else:
hashset.add(head.val)
prev = head
head = head.next
|
#vtb
def select(self, comp_name, options=None):
self._logger.info("select comp for block (options: %s)" % (comp_name, self._name, options))
if comp_name not in self._components:
raise ValueError(" has no component (components are: %s)"\
% (self._name, comp_name, ", ".join(self.component_names())))
if options is None:
options = {}
component = self._components[comp_name]
if not isinstance(component, Optionable) and len(options):
raise ValueError("the component %s is not optionable you can't provide options..." % comp_name)
if comp_name not in self._selected:
if not self.multiple and len(self._selected):
assert len(self._selected) == 1
self._selected[0] = comp_name
else:
self._selected.append(comp_name)
else:
pass
if isinstance(component, Optionable):
component.set_options_values(options, parse=True, strict=True)
|
Select the components that will by played (with given options).
`options` will be passed to :func:`.Optionable.parse_options` if the
component is a subclass of :class:`Optionable`.
.. Warning:: this function also setup the options (if given) of the
selected component. Use :func:`clear_selections` to restore both
selection and component's options.
This method may be call at play 'time', before to call :func:`play` to
run all selected components.
:param name: name of the component to select
:type comp_name: str
:param options: options to set to the components
:type options: dict
|
### Input:
Select the components that will by played (with given options).
`options` will be passed to :func:`.Optionable.parse_options` if the
component is a subclass of :class:`Optionable`.
.. Warning:: this function also setup the options (if given) of the
selected component. Use :func:`clear_selections` to restore both
selection and component's options.
This method may be call at play 'time', before to call :func:`play` to
run all selected components.
:param name: name of the component to select
:type comp_name: str
:param options: options to set to the components
:type options: dict
### Response:
#vtb
def select(self, comp_name, options=None):
self._logger.info("select comp for block (options: %s)" % (comp_name, self._name, options))
if comp_name not in self._components:
raise ValueError(" has no component (components are: %s)"\
% (self._name, comp_name, ", ".join(self.component_names())))
if options is None:
options = {}
component = self._components[comp_name]
if not isinstance(component, Optionable) and len(options):
raise ValueError("the component %s is not optionable you can't provide options..." % comp_name)
if comp_name not in self._selected:
if not self.multiple and len(self._selected):
assert len(self._selected) == 1
self._selected[0] = comp_name
else:
self._selected.append(comp_name)
else:
pass
if isinstance(component, Optionable):
component.set_options_values(options, parse=True, strict=True)
|
#vtb
def to_javascript_(self, table_name: str="data") -> str:
try:
renderer = pytablewriter.JavaScriptTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to javascript code")
|
Convert the main dataframe to javascript code
:param table_name: javascript variable name, defaults to "data"
:param table_name: str, optional
:return: a javascript constant with the data
:rtype: str
:example: ``ds.to_javastript_("myconst")``
|
### Input:
Convert the main dataframe to javascript code
:param table_name: javascript variable name, defaults to "data"
:param table_name: str, optional
:return: a javascript constant with the data
:rtype: str
:example: ``ds.to_javastript_("myconst")``
### Response:
#vtb
def to_javascript_(self, table_name: str="data") -> str:
try:
renderer = pytablewriter.JavaScriptTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to javascript code")
|
#vtb
def rmtree (self, errors=):
import shutil
if errors == :
ignore_errors = True
onerror = None
elif errors == :
ignore_errors = False
from .cli import warn
def onerror (func, path, exc_info):
warn (t rmtree %s: in %s of %s: %sunexpected "errors" keyword %r' % (errors,))
shutil.rmtree (text_type (self), ignore_errors=ignore_errors, onerror=onerror)
return self
|
Recursively delete this directory and its contents. The *errors* keyword
specifies how errors are handled:
"warn" (the default)
Print a warning to standard error.
"ignore"
Ignore errors.
|
### Input:
Recursively delete this directory and its contents. The *errors* keyword
specifies how errors are handled:
"warn" (the default)
Print a warning to standard error.
"ignore"
Ignore errors.
### Response:
#vtb
def rmtree (self, errors=):
import shutil
if errors == :
ignore_errors = True
onerror = None
elif errors == :
ignore_errors = False
from .cli import warn
def onerror (func, path, exc_info):
warn (t rmtree %s: in %s of %s: %sunexpected "errors" keyword %r' % (errors,))
shutil.rmtree (text_type (self), ignore_errors=ignore_errors, onerror=onerror)
return self
|
#vtb
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
deferred = Deferred()
def wrapped_work():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
worker.do(wrapped_work)
return deferred
|
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
|
### Input:
Run a task in a worker, delivering the result as a ``Deferred`` in the
reactor thread.
### Response:
#vtb
def _defer_to_worker(deliver, worker, work, *args, **kwargs):
deferred = Deferred()
def wrapped_work():
try:
result = work(*args, **kwargs)
except BaseException:
f = Failure()
deliver(lambda: deferred.errback(f))
else:
deliver(lambda: deferred.callback(result))
worker.do(wrapped_work)
return deferred
|
#vtb
def ldap_server_host_use_vrf(self, **kwargs):
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop()
use_vrf = ET.SubElement(host, "use-vrf")
use_vrf.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config)
|
Auto Generated Code
|
### Input:
Auto Generated Code
### Response:
#vtb
def ldap_server_host_use_vrf(self, **kwargs):
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop()
use_vrf = ET.SubElement(host, "use-vrf")
use_vrf.text = kwargs.pop()
callback = kwargs.pop(, self._callback)
return callback(config)
|
#vtb
def _is_sub_intrinsic(data):
return isinstance(data, dict) and len(data) == 1 and LambdaUri._FN_SUB in data
|
Is this input data a Fn::Sub intrinsic function
Parameters
----------
data
Data to check
Returns
-------
bool
True if the data Fn::Sub intrinsic function
|
### Input:
Is this input data a Fn::Sub intrinsic function
Parameters
----------
data
Data to check
Returns
-------
bool
True if the data Fn::Sub intrinsic function
### Response:
#vtb
def _is_sub_intrinsic(data):
return isinstance(data, dict) and len(data) == 1 and LambdaUri._FN_SUB in data
|
#vtb
def parse_date_range_arguments(options: dict, default_range=) -> (datetime, datetime, list):
begin, end = get_date_range_by_name(default_range)
for range_name in TIME_RANGE_NAMES:
if options.get(range_name):
begin, end = get_date_range_by_name(range_name)
if options.get():
t = parse(options[], default=datetime(2000, 1, 1))
begin = pytz.utc.localize(t)
end = now()
if options.get():
end = pytz.utc.localize(parse(options[], default=datetime(2000, 1, 1)))
step_type = None
after_end = end
for step_name in TIME_STEP_NAMES:
if options.get(step_name):
step_type = getattr(rrule, step_name.upper())
if rrule.DAILY == step_type:
after_end += timedelta(days=1)
if rrule.WEEKLY == step_type:
after_end += timedelta(days=7)
if rrule.MONTHLY == step_type:
after_end += timedelta(days=31)
steps = None
if step_type:
begins = [t for t in rrule.rrule(step_type, dtstart=begin, until=after_end)]
steps = [(begins[i], begins[i+1]) for i in range(len(begins)-1)]
if steps is None:
steps = [(begin, end)]
return begin, end, steps
|
:param options:
:param default_range: Default datetime range to return if no other selected
:return: begin, end, [(begin1,end1), (begin2,end2), ...]
|
### Input:
:param options:
:param default_range: Default datetime range to return if no other selected
:return: begin, end, [(begin1,end1), (begin2,end2), ...]
### Response:
#vtb
def parse_date_range_arguments(options: dict, default_range=) -> (datetime, datetime, list):
begin, end = get_date_range_by_name(default_range)
for range_name in TIME_RANGE_NAMES:
if options.get(range_name):
begin, end = get_date_range_by_name(range_name)
if options.get():
t = parse(options[], default=datetime(2000, 1, 1))
begin = pytz.utc.localize(t)
end = now()
if options.get():
end = pytz.utc.localize(parse(options[], default=datetime(2000, 1, 1)))
step_type = None
after_end = end
for step_name in TIME_STEP_NAMES:
if options.get(step_name):
step_type = getattr(rrule, step_name.upper())
if rrule.DAILY == step_type:
after_end += timedelta(days=1)
if rrule.WEEKLY == step_type:
after_end += timedelta(days=7)
if rrule.MONTHLY == step_type:
after_end += timedelta(days=31)
steps = None
if step_type:
begins = [t for t in rrule.rrule(step_type, dtstart=begin, until=after_end)]
steps = [(begins[i], begins[i+1]) for i in range(len(begins)-1)]
if steps is None:
steps = [(begin, end)]
return begin, end, steps
|
#vtb
def decode_value(stream):
length = decode_length(stream)
(value,) = unpack_value(">{:d}s".format(length), stream)
return value
|
Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes
|
### Input:
Decode the contents of a value from a serialized stream.
:param stream: Source data stream
:type stream: io.BytesIO
:returns: Decoded value
:rtype: bytes
### Response:
#vtb
def decode_value(stream):
length = decode_length(stream)
(value,) = unpack_value(">{:d}s".format(length), stream)
return value
|
#vtb
def posterior_covariance_between_points(self, X1, X2):
return self.posterior.covariance_between_points(self.kern, self.X, X1, X2)
|
Computes the posterior covariance between points.
:param X1: some input observations
:param X2: other input observations
|
### Input:
Computes the posterior covariance between points.
:param X1: some input observations
:param X2: other input observations
### Response:
#vtb
def posterior_covariance_between_points(self, X1, X2):
return self.posterior.covariance_between_points(self.kern, self.X, X1, X2)
|
#vtb
def addView(self, viewType):
if not viewType:
return None
view = viewType.createInstance(self, self.viewWidget())
self.addTab(view, view.windowTitle())
return view
|
Adds a new view of the inputed view type.
:param viewType | <subclass of XView>
:return <XView> || None
|
### Input:
Adds a new view of the inputed view type.
:param viewType | <subclass of XView>
:return <XView> || None
### Response:
#vtb
def addView(self, viewType):
if not viewType:
return None
view = viewType.createInstance(self, self.viewWidget())
self.addTab(view, view.windowTitle())
return view
|
#vtb
def dump_hash_prefix_values(self):
q =
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
|
Export all hash prefix values.
Returns a list of known hash prefix values
|
### Input:
Export all hash prefix values.
Returns a list of known hash prefix values
### Response:
#vtb
def dump_hash_prefix_values(self):
q =
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
|
#vtb
def is_domain(value, **kwargs):
try:
value = validators.domain(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True
|
Indicate whether ``value`` is a valid domain.
.. caution::
This validator does not verify that ``value`` **exists** as a domain. It
merely verifies that its contents *might* exist as a domain.
.. note::
This validator checks to validate that ``value`` resembles a valid
domain name. It is - generally - compliant with
`RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
`RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
in a number of key ways:
* Including authentication (e.g. ``username:password@domain.dev``) will
fail validation.
* Including a path (e.g. ``domain.dev/path/to/file``) will fail validation.
* Including a port (e.g. ``domain.dev:8080``) will fail validation.
If you are hoping to validate a more complete URL, we recommend that you
see :func:`url <validator_collection.validators.url>`.
:param value: The value to evaluate.
:param allow_ips: If ``True``, will succeed when validating IP addresses,
If ``False``, will fail if ``value`` is an IP address. Defaults to ``False``.
:type allow_ips: :class:`bool <python:bool>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
|
### Input:
Indicate whether ``value`` is a valid domain.
.. caution::
This validator does not verify that ``value`` **exists** as a domain. It
merely verifies that its contents *might* exist as a domain.
.. note::
This validator checks to validate that ``value`` resembles a valid
domain name. It is - generally - compliant with
`RFC 1035 <https://tools.ietf.org/html/rfc1035>`_ and
`RFC 6761 <https://tools.ietf.org/html/rfc6761>`_, however it diverges
in a number of key ways:
* Including authentication (e.g. ``username:password@domain.dev``) will
fail validation.
* Including a path (e.g. ``domain.dev/path/to/file``) will fail validation.
* Including a port (e.g. ``domain.dev:8080``) will fail validation.
If you are hoping to validate a more complete URL, we recommend that you
see :func:`url <validator_collection.validators.url>`.
:param value: The value to evaluate.
:param allow_ips: If ``True``, will succeed when validating IP addresses,
If ``False``, will fail if ``value`` is an IP address. Defaults to ``False``.
:type allow_ips: :class:`bool <python:bool>`
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
### Response:
#vtb
def is_domain(value, **kwargs):
try:
value = validators.domain(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True
|
#vtb
def pipe_to_process(self, payload):
message = payload[]
key = payload[]
if not self.process_handler.is_running(key):
return {: ,
: }
self.process_handler.send_to_process(message, key)
return {: ,
: }
|
Send something to stdin of a specific process.
|
### Input:
Send something to stdin of a specific process.
### Response:
#vtb
def pipe_to_process(self, payload):
message = payload[]
key = payload[]
if not self.process_handler.is_running(key):
return {: ,
: }
self.process_handler.send_to_process(message, key)
return {: ,
: }
|
#vtb
def search(self, q=None, has_geo=False, callback=None, errback=None):
if not self.data:
raise ZoneException()
return self._rest.search(self.zone, q, has_geo, callback, errback)
|
Search within a zone for specific metadata. Zone must already be loaded.
|
### Input:
Search within a zone for specific metadata. Zone must already be loaded.
### Response:
#vtb
def search(self, q=None, has_geo=False, callback=None, errback=None):
if not self.data:
raise ZoneException()
return self._rest.search(self.zone, q, has_geo, callback, errback)
|
#vtb
def make_folium_polyline(edge, edge_color, edge_width, edge_opacity, popup_attribute=None):
if not folium:
raise ImportError()
locations = list([(lat, lon) for lon, lat in edge[].coords])
if popup_attribute is None:
popup = None
else:
popup_text = json.dumps(edge[popup_attribute])
popup = folium.Popup(html=popup_text)
pl = folium.PolyLine(locations=locations, popup=popup,
color=edge_color, weight=edge_width, opacity=edge_opacity)
return pl
|
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine
|
### Input:
Turn a row from the gdf_edges GeoDataFrame into a folium PolyLine with
attributes.
Parameters
----------
edge : GeoSeries
a row from the gdf_edges GeoDataFrame
edge_color : string
color of the edge lines
edge_width : numeric
width of the edge lines
edge_opacity : numeric
opacity of the edge lines
popup_attribute : string
edge attribute to display in a pop-up when an edge is clicked, if None,
no popup
Returns
-------
pl : folium.PolyLine
### Response:
#vtb
def make_folium_polyline(edge, edge_color, edge_width, edge_opacity, popup_attribute=None):
if not folium:
raise ImportError()
locations = list([(lat, lon) for lon, lat in edge[].coords])
if popup_attribute is None:
popup = None
else:
popup_text = json.dumps(edge[popup_attribute])
popup = folium.Popup(html=popup_text)
pl = folium.PolyLine(locations=locations, popup=popup,
color=edge_color, weight=edge_width, opacity=edge_opacity)
return pl
|
#vtb
def print_vessel_errors(retdict):
ERROR_RESPONSES = {
"Node Manager error ": {
: "You lack sufficient permissions to perform this action.",
: "Did you release the resource(s) by accident?"},
: {
:},
"file not found": {
: "The specified file(s) could not be found.",
: "Please check the filename."},
"Node Manager error ": {
: "Requested platform is not supported by the target vessel."},
}
error_longnames = {}
for longname in retdict:
if not retdict[longname][0]:
matches = []
for error_string in ERROR_RESPONSES:
if error_string.lower() in retdict[longname][1].lower():
if not matches:
matches = [error_string]
else:
if len(error_string) > len(matches[0]):
matches = [error_string]
elif len(error_string) == len(matches[0]):
matches.append(error_string)
if errorid not in error_longnames:
error_longnames[errorid] = []
error_longnames[errorid].append(longname)
for errorid in error_longnames:
nodestring =
for node in error_longnames[errorid]:
if node == error_longnames[errorid][0]:
divider =
elif node != error_longnames[errorid][-1]:
divider =
else:
divider = " and "
if len(error_longnames[errorid]) > 2:
divider = + divider
nodestring += divider + node
if errorid in ERROR_RESPONSES:
print ERROR_RESPONSES[errorid][],
if in ERROR_RESPONSES[errorid]:
print ERROR_RESPONSES[errorid][]
else:
print
else:
print "An error occurred: " + errorid
print "Affected vessels:", nodestring +
|
<Purpose>
Prints out any errors that occurred while performing an action on vessels,
in a human readable way.
Errors will be printed out in the following format:
description [reason]
Affected vessels: nodelist
To define a new error, add the following entry to ERROR_RESPONSES in this
function:
'error_identifier': {
'error': 'description for the error',
'reason': 'reason for the error' (optional).
'error_identifier'
This is the substring of the error that can be used to identify it.
Longer identifiers will have a higher priority over shorter identifiers.
For example, authentication errors could be identified using the string
'Insufficient Permissions'.
'error'
This is where you put the description for the error to show to the user.
'reason' (optional)
This is where you put clarification for the error to ease the user.
Additionally, you may put things that they could do to fix the error here,
if applicable. If you don't want to show a reason, don't include this key
in the dictionary.
Examples when you would not put a reason is if you received a timeout,
since the user can't do anything to fix them.
<Arguments>
retdict:
A list of longnames mapped against a tuple (Success?, Message/Errortext).
<Side Effects>
Prints error messages onto the screen. See documentation for ERROR_RESPONSES
for more information.
<Exceptions>
Exception
<Return>
None
|
### Input:
<Purpose>
Prints out any errors that occurred while performing an action on vessels,
in a human readable way.
Errors will be printed out in the following format:
description [reason]
Affected vessels: nodelist
To define a new error, add the following entry to ERROR_RESPONSES in this
function:
'error_identifier': {
'error': 'description for the error',
'reason': 'reason for the error' (optional).
'error_identifier'
This is the substring of the error that can be used to identify it.
Longer identifiers will have a higher priority over shorter identifiers.
For example, authentication errors could be identified using the string
'Insufficient Permissions'.
'error'
This is where you put the description for the error to show to the user.
'reason' (optional)
This is where you put clarification for the error to ease the user.
Additionally, you may put things that they could do to fix the error here,
if applicable. If you don't want to show a reason, don't include this key
in the dictionary.
Examples when you would not put a reason is if you received a timeout,
since the user can't do anything to fix them.
<Arguments>
retdict:
A list of longnames mapped against a tuple (Success?, Message/Errortext).
<Side Effects>
Prints error messages onto the screen. See documentation for ERROR_RESPONSES
for more information.
<Exceptions>
Exception
<Return>
None
### Response:
#vtb
def print_vessel_errors(retdict):
ERROR_RESPONSES = {
"Node Manager error ": {
: "You lack sufficient permissions to perform this action.",
: "Did you release the resource(s) by accident?"},
: {
:},
"file not found": {
: "The specified file(s) could not be found.",
: "Please check the filename."},
"Node Manager error ": {
: "Requested platform is not supported by the target vessel."},
}
error_longnames = {}
for longname in retdict:
if not retdict[longname][0]:
matches = []
for error_string in ERROR_RESPONSES:
if error_string.lower() in retdict[longname][1].lower():
if not matches:
matches = [error_string]
else:
if len(error_string) > len(matches[0]):
matches = [error_string]
elif len(error_string) == len(matches[0]):
matches.append(error_string)
if errorid not in error_longnames:
error_longnames[errorid] = []
error_longnames[errorid].append(longname)
for errorid in error_longnames:
nodestring =
for node in error_longnames[errorid]:
if node == error_longnames[errorid][0]:
divider =
elif node != error_longnames[errorid][-1]:
divider =
else:
divider = " and "
if len(error_longnames[errorid]) > 2:
divider = + divider
nodestring += divider + node
if errorid in ERROR_RESPONSES:
print ERROR_RESPONSES[errorid][],
if in ERROR_RESPONSES[errorid]:
print ERROR_RESPONSES[errorid][]
else:
print
else:
print "An error occurred: " + errorid
print "Affected vessels:", nodestring +
|
#vtb
def _get_tau_vector(self, tau_mean, tau_std, imt_list):
self.magnitude_limits = MAG_LIMS_KEYS[self.tau_model]["mag"]
self.tau_keys = MAG_LIMS_KEYS[self.tau_model]["keys"]
t_bar = {}
t_std = {}
for imt in imt_list:
t_bar[imt] = []
t_std[imt] = []
for mag, key in zip(self.magnitude_limits, self.tau_keys):
t_bar[imt].append(
TAU_EXECUTION[self.tau_model](imt, mag, tau_mean))
t_std[imt].append(
TAU_EXECUTION[self.tau_model](imt, mag, tau_std))
return t_bar, t_std
|
Gets the vector of mean and variance of tau values corresponding to
the specific model and returns them as dictionaries
|
### Input:
Gets the vector of mean and variance of tau values corresponding to
the specific model and returns them as dictionaries
### Response:
#vtb
def _get_tau_vector(self, tau_mean, tau_std, imt_list):
self.magnitude_limits = MAG_LIMS_KEYS[self.tau_model]["mag"]
self.tau_keys = MAG_LIMS_KEYS[self.tau_model]["keys"]
t_bar = {}
t_std = {}
for imt in imt_list:
t_bar[imt] = []
t_std[imt] = []
for mag, key in zip(self.magnitude_limits, self.tau_keys):
t_bar[imt].append(
TAU_EXECUTION[self.tau_model](imt, mag, tau_mean))
t_std[imt].append(
TAU_EXECUTION[self.tau_model](imt, mag, tau_std))
return t_bar, t_std
|
#vtb
def transform(src, dst, converter,
overwrite=False, stream=True, chunksize=1024**2, **kwargs):
if not overwrite:
if Path(dst).exists():
raise EnvironmentError(" already exists!" % dst)
with open(src, "rb") as f_input:
with open(dst, "wb") as f_output:
if stream:
if chunksize > 1024 ** 2 * 10:
chunksize = 1024 ** 2 * 10
elif chunksize < 1024 ** 2:
chunksize = 1024 ** 2
while 1:
content = f_input.read(chunksize)
if content:
f_output.write(converter(content, **kwargs))
else:
break
else:
f_output.write(converter(f_input.read(), **kwargs))
|
A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB
|
### Input:
A file stream transform IO utility function.
:param src: original file path
:param dst: destination file path
:param converter: binary content converter function
:param overwrite: default False,
:param stream: default True, if True, use stream IO mode, chunksize has to
be specified.
:param chunksize: default 1MB
### Response:
#vtb
def transform(src, dst, converter,
overwrite=False, stream=True, chunksize=1024**2, **kwargs):
if not overwrite:
if Path(dst).exists():
raise EnvironmentError(" already exists!" % dst)
with open(src, "rb") as f_input:
with open(dst, "wb") as f_output:
if stream:
if chunksize > 1024 ** 2 * 10:
chunksize = 1024 ** 2 * 10
elif chunksize < 1024 ** 2:
chunksize = 1024 ** 2
while 1:
content = f_input.read(chunksize)
if content:
f_output.write(converter(content, **kwargs))
else:
break
else:
f_output.write(converter(f_input.read(), **kwargs))
|
#vtb
def assign_descriptors(mol):
topology.recognize(mol)
descriptor.assign_valence(mol)
descriptor.assign_rotatable(mol)
topology.minify_ring(mol)
descriptor.assign_aromatic(mol)
|
Throws:
RuntimeError: if minify_ring failed
|
### Input:
Throws:
RuntimeError: if minify_ring failed
### Response:
#vtb
def assign_descriptors(mol):
topology.recognize(mol)
descriptor.assign_valence(mol)
descriptor.assign_rotatable(mol)
topology.minify_ring(mol)
descriptor.assign_aromatic(mol)
|
#vtb
def get_members(self, selector):
members = []
for member in self.get_member_list():
if selector.select(member):
members.append(member)
return members
|
Returns the members that satisfy the given selector.
:param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
:return: (List), List of members.
|
### Input:
Returns the members that satisfy the given selector.
:param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
:return: (List), List of members.
### Response:
#vtb
def get_members(self, selector):
members = []
for member in self.get_member_list():
if selector.select(member):
members.append(member)
return members
|
#vtb
def get_private_key_from_wif(wif: str) -> bytes:
if wif is None or wif is "":
raise Exception("none wif")
data = base58.b58decode(wif)
if len(data) != 38 or data[0] != 0x80 or data[33] != 0x01:
raise Exception("wif wrong")
checksum = Digest.hash256(data[0:34])
for i in range(4):
if data[len(data) - 4 + i] != checksum[i]:
raise Exception("wif wrong")
return data[1:33]
|
This interface is used to decode a WIF encode ECDSA private key.
:param wif: a WIF encode private key.
:return: a ECDSA private key in the form of bytes.
|
### Input:
This interface is used to decode a WIF encode ECDSA private key.
:param wif: a WIF encode private key.
:return: a ECDSA private key in the form of bytes.
### Response:
#vtb
def get_private_key_from_wif(wif: str) -> bytes:
if wif is None or wif is "":
raise Exception("none wif")
data = base58.b58decode(wif)
if len(data) != 38 or data[0] != 0x80 or data[33] != 0x01:
raise Exception("wif wrong")
checksum = Digest.hash256(data[0:34])
for i in range(4):
if data[len(data) - 4 + i] != checksum[i]:
raise Exception("wif wrong")
return data[1:33]
|
#vtb
def filter_to_pass_and_reject(in_file, paired, out_dir=None):
from bcbio.heterogeneity import bubbletree
out_file = "%s-prfilter.vcf.gz" % utils.splitext_plus(in_file)[0]
if out_dir:
out_file = os.path.join(out_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
max_depth = bubbletree.max_normal_germline_depth(in_file, bubbletree.PARAMS, paired)
tx_out_plain = tx_out_file.replace(".vcf.gz", ".vcf")
with contextlib.closing(cyvcf2.VCF(in_file)) as reader:
reader = _add_db_to_header(reader)
with contextlib.closing(cyvcf2.Writer(tx_out_plain, reader)) as writer:
for rec in reader:
filters = rec.FILTER.split(";") if rec.FILTER else []
other_filters = [x for x in filters if x not in ["PASS", ".", "REJECT"]]
if len(other_filters) == 0 or bubbletree.is_info_germline(rec):
if "REJECT" in filters or bubbletree.is_info_germline(rec):
stats = bubbletree._is_possible_loh(rec, reader, bubbletree.PARAMS, paired,
use_status=True, max_normal_depth=max_depth)
if stats:
rec.FILTER = "PASS"
rec.INFO["DB"] = True
writer.write_record(rec)
else:
writer.write_record(rec)
vcfutils.bgzip_and_index(tx_out_plain, paired.tumor_data["config"])
return out_file
|
Filter VCF to only those with a strict PASS/REJECT: somatic + germline.
Removes low quality calls filtered but also labeled with REJECT.
|
### Input:
Filter VCF to only those with a strict PASS/REJECT: somatic + germline.
Removes low quality calls filtered but also labeled with REJECT.
### Response:
#vtb
def filter_to_pass_and_reject(in_file, paired, out_dir=None):
from bcbio.heterogeneity import bubbletree
out_file = "%s-prfilter.vcf.gz" % utils.splitext_plus(in_file)[0]
if out_dir:
out_file = os.path.join(out_dir, os.path.basename(out_file))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(paired.tumor_data, out_file) as tx_out_file:
max_depth = bubbletree.max_normal_germline_depth(in_file, bubbletree.PARAMS, paired)
tx_out_plain = tx_out_file.replace(".vcf.gz", ".vcf")
with contextlib.closing(cyvcf2.VCF(in_file)) as reader:
reader = _add_db_to_header(reader)
with contextlib.closing(cyvcf2.Writer(tx_out_plain, reader)) as writer:
for rec in reader:
filters = rec.FILTER.split(";") if rec.FILTER else []
other_filters = [x for x in filters if x not in ["PASS", ".", "REJECT"]]
if len(other_filters) == 0 or bubbletree.is_info_germline(rec):
if "REJECT" in filters or bubbletree.is_info_germline(rec):
stats = bubbletree._is_possible_loh(rec, reader, bubbletree.PARAMS, paired,
use_status=True, max_normal_depth=max_depth)
if stats:
rec.FILTER = "PASS"
rec.INFO["DB"] = True
writer.write_record(rec)
else:
writer.write_record(rec)
vcfutils.bgzip_and_index(tx_out_plain, paired.tumor_data["config"])
return out_file
|
#vtb
def sitetree_tree(parser, token):
tokens = token.split_contents()
use_template = detect_clause(parser, , tokens)
tokens_num = len(tokens)
if tokens_num in (3, 5):
tree_alias = parser.compile_filter(tokens[2])
return sitetree_treeNode(tree_alias, use_template)
else:
raise template.TemplateSyntaxError(
% tokens[0])
|
Parses sitetree tag parameters.
Two notation types are possible:
1. Two arguments:
{% sitetree_tree from "mytree" %}
Used to render tree for "mytree" site tree.
2. Four arguments:
{% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
Used to render tree for "mytree" site tree using specific
template "sitetree/mytree.html"
|
### Input:
Parses sitetree tag parameters.
Two notation types are possible:
1. Two arguments:
{% sitetree_tree from "mytree" %}
Used to render tree for "mytree" site tree.
2. Four arguments:
{% sitetree_tree from "mytree" template "sitetree/mytree.html" %}
Used to render tree for "mytree" site tree using specific
template "sitetree/mytree.html"
### Response:
#vtb
def sitetree_tree(parser, token):
tokens = token.split_contents()
use_template = detect_clause(parser, , tokens)
tokens_num = len(tokens)
if tokens_num in (3, 5):
tree_alias = parser.compile_filter(tokens[2])
return sitetree_treeNode(tree_alias, use_template)
else:
raise template.TemplateSyntaxError(
% tokens[0])
|
#vtb
def compute_tls13_resumption_secret(self):
if self.connection_end == "server":
hkdf = self.prcs.hkdf
elif self.connection_end == "client":
hkdf = self.pwcs.hkdf
rs = hkdf.derive_secret(self.tls13_master_secret,
b"resumption master secret",
b"".join(self.handshake_messages))
self.tls13_derived_secrets["resumption_secret"] = rs
|
self.handshake_messages should be ClientHello...ClientFinished.
|
### Input:
self.handshake_messages should be ClientHello...ClientFinished.
### Response:
#vtb
def compute_tls13_resumption_secret(self):
if self.connection_end == "server":
hkdf = self.prcs.hkdf
elif self.connection_end == "client":
hkdf = self.pwcs.hkdf
rs = hkdf.derive_secret(self.tls13_master_secret,
b"resumption master secret",
b"".join(self.handshake_messages))
self.tls13_derived_secrets["resumption_secret"] = rs
|
#vtb
def init_app(self, app):
app.cli.add_command(upgrader_cmd)
app.extensions[] = self
|
Flask application initialization.
|
### Input:
Flask application initialization.
### Response:
#vtb
def init_app(self, app):
app.cli.add_command(upgrader_cmd)
app.extensions[] = self
|
#vtb
def express_route_connections(self):
api_version = self._get_api_version()
if api_version == :
from .v2018_08_01.operations import ExpressRouteConnectionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
|
Instance depends on the API version:
* 2018-08-01: :class:`ExpressRouteConnectionsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations>`
|
### Input:
Instance depends on the API version:
* 2018-08-01: :class:`ExpressRouteConnectionsOperations<azure.mgmt.network.v2018_08_01.operations.ExpressRouteConnectionsOperations>`
### Response:
#vtb
def express_route_connections(self):
api_version = self._get_api_version()
if api_version == :
from .v2018_08_01.operations import ExpressRouteConnectionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
|
#vtb
def upcoming_viewings(self):
upcoming_viewings = []
try:
if self._data_from_search:
viewings = self._data_from_search.find_all(
, {: })
else:
viewings = []
except Exception as e:
if self._debug:
logging.error(
"Error getting upcoming_viewings. Error message: " + e.args[0])
return
for viewing in viewings:
upcoming_viewings.append(viewing.text.strip())
return upcoming_viewings
|
Returns an array of upcoming viewings for a property.
:return:
|
### Input:
Returns an array of upcoming viewings for a property.
:return:
### Response:
#vtb
def upcoming_viewings(self):
upcoming_viewings = []
try:
if self._data_from_search:
viewings = self._data_from_search.find_all(
, {: })
else:
viewings = []
except Exception as e:
if self._debug:
logging.error(
"Error getting upcoming_viewings. Error message: " + e.args[0])
return
for viewing in viewings:
upcoming_viewings.append(viewing.text.strip())
return upcoming_viewings
|
#vtb
def main(ylib: str = None, path: str = None,
scope: ValidationScope = ValidationScope.all,
ctype: ContentType = ContentType.config, set_id: bool = False,
tree: bool = False, no_types: bool = False,
digest: bool = False, validate: str = None) -> int:
if ylib is None:
parser = argparse.ArgumentParser(
prog="yangson",
description="Validate JSON data against a YANG data model.")
parser.add_argument(
"-V", "--version", action="version",
version=f"%(prog)s {pkg_resources.get_distribution().version}")
parser.add_argument(
"ylib", metavar="YLIB",
help=("name of the file with description of the data model"
" in JSON-encoded YANG library format [RFC 7895]"))
parser.add_argument(
"-p", "--path",
help=("colon-separated list of directories to search"
" for YANG modules"))
grp = parser.add_mutually_exclusive_group()
grp.add_argument(
"-i", "--id", action="store_true",
help="print module set id")
grp.add_argument(
"-t", "--tree", action="store_true",
help="print schema tree as ASCII art")
grp.add_argument(
"-d", "--digest", action="store_true",
help="print schema digest in JSON format")
grp.add_argument(
"-v", "--validate", metavar="INST",
help="name of the file with JSON-encoded instance data")
parser.add_argument(
"-s", "--scope", choices=["syntax", "semantics", "all"],
default="all", help="validation scope (default: %(default)s)")
parser.add_argument(
"-c", "--ctype", type=str, choices=["config", "nonconfig", "all"],
default="config",
help="content type of the data instance (default: %(default)s)")
parser.add_argument(
"-n", "--no-types", action="store_true",
help="suppress type info in tree output")
args = parser.parse_args()
ylib: str = args.ylib
path: Optional[str] = args.path
scope = ValidationScope[args.scope]
ctype = ContentType[args.ctype]
set_id: bool = args.id
tree: bool = args.tree
no_types = args.no_types
digest: bool = args.digest
validate: str = args.validate
try:
with open(ylib, encoding="utf-8") as infile:
yl = infile.read()
except (FileNotFoundError, PermissionError,
json.decoder.JSONDecodeError) as e:
print("YANG library:", str(e), file=sys.stderr)
return 1
sp = path if path else os.environ.get("YANG_MODPATH", ".")
try:
dm = DataModel(yl, tuple(sp.split(":")))
except BadYangLibraryData as e:
print("Invalid YANG library:", str(e), file=sys.stderr)
return 2
except FeaturePrerequisiteError as e:
print("Unsupported pre-requisite feature:", str(e), file=sys.stderr)
return 2
except MultipleImplementedRevisions as e:
print("Multiple implemented revisions:", str(e), file=sys.stderr)
return 2
except ModuleNotFound as e:
print("Module not found:", str(e), file=sys.stderr)
return 2
except ModuleNotRegistered as e:
print("Module not registered:", str(e), file=sys.stderr)
return 2
if set_id:
print(dm.module_set_id())
return 0
if tree:
print(dm.ascii_tree(no_types))
return 0
if digest:
print(dm.schema_digest())
return 0
if not validate:
return 0
try:
with open(validate, encoding="utf-8") as infile:
itxt = json.load(infile)
except (FileNotFoundError, PermissionError,
json.decoder.JSONDecodeError) as e:
print("Instance data:", str(e), file=sys.stderr)
return 1
try:
i = dm.from_raw(itxt)
except RawMemberError as e:
print("Illegal object member:", str(e), file=sys.stderr)
return 3
except RawTypeError as e:
print("Invalid type:", str(e), file=sys.stderr)
return 3
try:
i.validate(scope, ctype)
except SchemaError as e:
print("Schema error:", str(e), file=sys.stderr)
return 3
except SemanticError as e:
print("Semantic error:", str(e), file=sys.stderr)
return 3
except YangTypeError as e:
print("Invalid type:", str(e), file=sys.stderr)
return 3
return 0
|
Entry-point for a validation script.
Args:
ylib: Name of the file with YANG library
path: Colon-separated list of directories to search for YANG modules.
scope: Validation scope (syntax, semantics or all).
ctype: Content type of the data instance (config, nonconfig or all)
set_id: If `True`, print module set id.
tree: If `True`, print schema tree.
no_types: If `True`, don't print types in schema tree.
digest: If `True`, print schema digest.
validate: Name of file to validate against the schema.
Returns:
Numeric return code (0=no error, 2=YANG error, 1=other)
|
### Input:
Entry-point for a validation script.
Args:
ylib: Name of the file with YANG library
path: Colon-separated list of directories to search for YANG modules.
scope: Validation scope (syntax, semantics or all).
ctype: Content type of the data instance (config, nonconfig or all)
set_id: If `True`, print module set id.
tree: If `True`, print schema tree.
no_types: If `True`, don't print types in schema tree.
digest: If `True`, print schema digest.
validate: Name of file to validate against the schema.
Returns:
Numeric return code (0=no error, 2=YANG error, 1=other)
### Response:
#vtb
def main(ylib: str = None, path: str = None,
scope: ValidationScope = ValidationScope.all,
ctype: ContentType = ContentType.config, set_id: bool = False,
tree: bool = False, no_types: bool = False,
digest: bool = False, validate: str = None) -> int:
if ylib is None:
parser = argparse.ArgumentParser(
prog="yangson",
description="Validate JSON data against a YANG data model.")
parser.add_argument(
"-V", "--version", action="version",
version=f"%(prog)s {pkg_resources.get_distribution().version}")
parser.add_argument(
"ylib", metavar="YLIB",
help=("name of the file with description of the data model"
" in JSON-encoded YANG library format [RFC 7895]"))
parser.add_argument(
"-p", "--path",
help=("colon-separated list of directories to search"
" for YANG modules"))
grp = parser.add_mutually_exclusive_group()
grp.add_argument(
"-i", "--id", action="store_true",
help="print module set id")
grp.add_argument(
"-t", "--tree", action="store_true",
help="print schema tree as ASCII art")
grp.add_argument(
"-d", "--digest", action="store_true",
help="print schema digest in JSON format")
grp.add_argument(
"-v", "--validate", metavar="INST",
help="name of the file with JSON-encoded instance data")
parser.add_argument(
"-s", "--scope", choices=["syntax", "semantics", "all"],
default="all", help="validation scope (default: %(default)s)")
parser.add_argument(
"-c", "--ctype", type=str, choices=["config", "nonconfig", "all"],
default="config",
help="content type of the data instance (default: %(default)s)")
parser.add_argument(
"-n", "--no-types", action="store_true",
help="suppress type info in tree output")
args = parser.parse_args()
ylib: str = args.ylib
path: Optional[str] = args.path
scope = ValidationScope[args.scope]
ctype = ContentType[args.ctype]
set_id: bool = args.id
tree: bool = args.tree
no_types = args.no_types
digest: bool = args.digest
validate: str = args.validate
try:
with open(ylib, encoding="utf-8") as infile:
yl = infile.read()
except (FileNotFoundError, PermissionError,
json.decoder.JSONDecodeError) as e:
print("YANG library:", str(e), file=sys.stderr)
return 1
sp = path if path else os.environ.get("YANG_MODPATH", ".")
try:
dm = DataModel(yl, tuple(sp.split(":")))
except BadYangLibraryData as e:
print("Invalid YANG library:", str(e), file=sys.stderr)
return 2
except FeaturePrerequisiteError as e:
print("Unsupported pre-requisite feature:", str(e), file=sys.stderr)
return 2
except MultipleImplementedRevisions as e:
print("Multiple implemented revisions:", str(e), file=sys.stderr)
return 2
except ModuleNotFound as e:
print("Module not found:", str(e), file=sys.stderr)
return 2
except ModuleNotRegistered as e:
print("Module not registered:", str(e), file=sys.stderr)
return 2
if set_id:
print(dm.module_set_id())
return 0
if tree:
print(dm.ascii_tree(no_types))
return 0
if digest:
print(dm.schema_digest())
return 0
if not validate:
return 0
try:
with open(validate, encoding="utf-8") as infile:
itxt = json.load(infile)
except (FileNotFoundError, PermissionError,
json.decoder.JSONDecodeError) as e:
print("Instance data:", str(e), file=sys.stderr)
return 1
try:
i = dm.from_raw(itxt)
except RawMemberError as e:
print("Illegal object member:", str(e), file=sys.stderr)
return 3
except RawTypeError as e:
print("Invalid type:", str(e), file=sys.stderr)
return 3
try:
i.validate(scope, ctype)
except SchemaError as e:
print("Schema error:", str(e), file=sys.stderr)
return 3
except SemanticError as e:
print("Semantic error:", str(e), file=sys.stderr)
return 3
except YangTypeError as e:
print("Invalid type:", str(e), file=sys.stderr)
return 3
return 0
|
#vtb
def queryProxy(self, query):
    """Override Qt method.

    Return the list of QNetworkProxy instances to use for *query*,
    derived from the configured ``self.proxy_servers`` mapping.

    NOTE(review): the string literals in this block were lost in an
    extraction step (empty ``.format()``, ``key == and`` comparisons).
    The ``'http'``/``'https'`` keys and the ``'{0}://{1}'`` template
    below are reconstructions -- confirm against the original source.
    """
    valid_proxies = []
    query_scheme = query.url().scheme()
    query_host = query.url().host()
    # Key used for per-host proxy overrides, e.g. 'https://example.com'.
    query_scheme_host = '{0}://{1}'.format(query_scheme, query_host)
    proxy_servers = process_proxy_servers(self.proxy_servers)
    if proxy_servers:
        for key in proxy_servers:
            proxy_settings = proxy_servers[key]
            if key == 'http' and query_scheme == 'http':
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)
            elif key == 'https' and query_scheme == 'https':
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)
            # A scheme+host key always wins in addition to the scheme match.
            if key == query_scheme_host:
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)
    else:
        # No configured servers: fall back to the application default proxy.
        valid_proxies.append(QNetworkProxy(QNetworkProxy.DefaultProxy))
    return valid_proxies
|
Override Qt method.
|
### Input:
Override Qt method.
### Response:
#vtb
def queryProxy(self, query):
    """Override Qt method.

    Return the list of QNetworkProxy instances to use for *query*,
    derived from the configured ``self.proxy_servers`` mapping.

    NOTE(review): the string literals in this block were lost in an
    extraction step (empty ``.format()``, ``key == and`` comparisons).
    The ``'http'``/``'https'`` keys and the ``'{0}://{1}'`` template
    below are reconstructions -- confirm against the original source.
    """
    valid_proxies = []
    query_scheme = query.url().scheme()
    query_host = query.url().host()
    # Key used for per-host proxy overrides, e.g. 'https://example.com'.
    query_scheme_host = '{0}://{1}'.format(query_scheme, query_host)
    proxy_servers = process_proxy_servers(self.proxy_servers)
    if proxy_servers:
        for key in proxy_servers:
            proxy_settings = proxy_servers[key]
            if key == 'http' and query_scheme == 'http':
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)
            elif key == 'https' and query_scheme == 'https':
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)
            # A scheme+host key always wins in addition to the scheme match.
            if key == query_scheme_host:
                proxy = self._create_proxy(proxy_settings)
                valid_proxies.append(proxy)
    else:
        # No configured servers: fall back to the application default proxy.
        valid_proxies.append(QNetworkProxy(QNetworkProxy.DefaultProxy))
    return valid_proxies
|
#vtb
def get_channel(self, name):
    """Details about an individual channel.

    :param name: The channel name
    :type name: str
    :returns: whatever ``self._api_get`` yields for the channel endpoint.

    NOTE(review): the path-template literal was lost in extraction; the
    RabbitMQ management API endpoint ``'/api/channels/{0}'`` below is a
    reconstruction -- confirm against the original source.
    """
    # quote_plus: channel names may contain spaces/special characters.
    return self._api_get('/api/channels/{0}'.format(
        urllib.parse.quote_plus(name)
    ))
|
Details about an individual channel.
:param name: The channel name
:type name: str
|
### Input:
Details about an individual channel.
:param name: The channel name
:type name: str
### Response:
#vtb
def get_channel(self, name):
    """Details about an individual channel.

    :param name: The channel name
    :type name: str
    :returns: whatever ``self._api_get`` yields for the channel endpoint.

    NOTE(review): the path-template literal was lost in extraction; the
    RabbitMQ management API endpoint ``'/api/channels/{0}'`` below is a
    reconstruction -- confirm against the original source.
    """
    # quote_plus: channel names may contain spaces/special characters.
    return self._api_get('/api/channels/{0}'.format(
        urllib.parse.quote_plus(name)
    ))
|
#vtb
def generatorInit(self, U0):
    """Compute initial generator conditions from the bus voltages *U0*.

    Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
    Katholieke Universiteit Leuven.

    @rtype: tuple
    @return: (Efd0, Xgen0) -- initial field voltages and the generator
        state matrix (rows: delta, omega, Eq' [, Ed' for 4th-order]).
    """
    j = 0 + 1j  # local alias for the imaginary unit
    generators = self.dyn_generators
    Efd0 = zeros(len(generators))
    Xgen0 = zeros((len(generators), 4))
    # Partition generator indices by dynamic model.
    typ1 = [g._i for g in generators if g.model == CLASSICAL]
    typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]
    # --- Classical-model initialisation ---------------------------------
    x_tr = array([g.x_tr for g in generators])
    omega0 = ones(len(typ1)) * 2 * pi * self.freq
    # Complex power injections; divided by base_mva below to get p.u.
    Sg = array([g.p + j * g.q for g in generators])
    Ia0 = conj(Sg[typ1]) / conj(U0) / self.base_mva
    # Voltage behind transient reactance gives angle and magnitude.
    Eq_tr0 = U0[typ1] + j * x_tr * Ia0
    delta0 = angle(Eq_tr0)
    Eq_tr0 = abs(Eq_tr0)
    # NOTE(review): this assigns 3 columns into a 4-column slice --
    # looks like a latent shape mismatch; confirm against upstream.
    Xgen0[typ1, :] = c_[delta0, omega0, Eq_tr0]
    # --- Fourth-order-model initialisation ------------------------------
    xd = array([g.xd for g in generators])
    xq = array([g.xq for g in generators])
    xd_tr = array([g.xd_tr for g in generators])
    xq_tr = array([g.xq_tr for g in generators])
    omega0 = ones(len(typ2)) * 2 * pi * self.freq
    Ia0 = conj(Sg[typ2]) / conj(U0[typ2]) / self.base_mva
    phi0 = angle(Ia0)
    # Voltage behind Xq locates the rotor angle delta0.
    Eq0 = U0[typ2] + j * xq * Ia0
    delta0 = angle(Eq0)
    # d/q-axis current components in the rotor reference frame.
    Id0 = -abs(Ia0) * sin(delta0 - phi0)
    Iq0 = abs(Ia0) * cos(delta0 - phi0)
    Efd0[typ2] = abs(Eq0) - (xd - xq) * Id0
    Eq_tr0 = Efd0[typ2] + (xd - xd_tr) * Id0
    Ed_tr0 = -(xq - xq_tr) * Iq0
    Xgen0[typ2, :] = c_[delta0, omega0, Eq_tr0, Ed_tr0]
    return Efd0, Xgen0
|
Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.
@rtype: tuple
@return: Initial generator conditions.
|
### Input:
Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
Katholieke Universiteit Leuven. See U{http://www.esat.kuleuven.be/
electa/teaching/matdyn/} for more information.
@rtype: tuple
@return: Initial generator conditions.
### Response:
#vtb
def generatorInit(self, U0):
    """Compute initial generator conditions from the bus voltages *U0*.

    Based on GeneratorInit.m from MatDyn by Stijn Cole, developed at
    Katholieke Universiteit Leuven.

    @rtype: tuple
    @return: (Efd0, Xgen0) -- initial field voltages and the generator
        state matrix (rows: delta, omega, Eq' [, Ed' for 4th-order]).
    """
    j = 0 + 1j  # local alias for the imaginary unit
    generators = self.dyn_generators
    Efd0 = zeros(len(generators))
    Xgen0 = zeros((len(generators), 4))
    # Partition generator indices by dynamic model.
    typ1 = [g._i for g in generators if g.model == CLASSICAL]
    typ2 = [g._i for g in generators if g.model == FOURTH_ORDER]
    # --- Classical-model initialisation ---------------------------------
    x_tr = array([g.x_tr for g in generators])
    omega0 = ones(len(typ1)) * 2 * pi * self.freq
    # Complex power injections; divided by base_mva below to get p.u.
    Sg = array([g.p + j * g.q for g in generators])
    Ia0 = conj(Sg[typ1]) / conj(U0) / self.base_mva
    # Voltage behind transient reactance gives angle and magnitude.
    Eq_tr0 = U0[typ1] + j * x_tr * Ia0
    delta0 = angle(Eq_tr0)
    Eq_tr0 = abs(Eq_tr0)
    # NOTE(review): this assigns 3 columns into a 4-column slice --
    # looks like a latent shape mismatch; confirm against upstream.
    Xgen0[typ1, :] = c_[delta0, omega0, Eq_tr0]
    # --- Fourth-order-model initialisation ------------------------------
    xd = array([g.xd for g in generators])
    xq = array([g.xq for g in generators])
    xd_tr = array([g.xd_tr for g in generators])
    xq_tr = array([g.xq_tr for g in generators])
    omega0 = ones(len(typ2)) * 2 * pi * self.freq
    Ia0 = conj(Sg[typ2]) / conj(U0[typ2]) / self.base_mva
    phi0 = angle(Ia0)
    # Voltage behind Xq locates the rotor angle delta0.
    Eq0 = U0[typ2] + j * xq * Ia0
    delta0 = angle(Eq0)
    # d/q-axis current components in the rotor reference frame.
    Id0 = -abs(Ia0) * sin(delta0 - phi0)
    Iq0 = abs(Ia0) * cos(delta0 - phi0)
    Efd0[typ2] = abs(Eq0) - (xd - xq) * Id0
    Eq_tr0 = Efd0[typ2] + (xd - xd_tr) * Id0
    Ed_tr0 = -(xq - xq_tr) * Iq0
    Xgen0[typ2, :] = c_[delta0, omega0, Eq_tr0, Ed_tr0]
    return Efd0, Xgen0
|
#vtb
def _aloadstr(ins):
    """Loads a string value from a memory address.

    NOTE(review): the string-literal arguments of the two ``append``
    calls and of ``REQUIRES.add`` were lost in an extraction step -- as
    written, ``output.append()`` raises TypeError at runtime.  Recover
    the original asm opcode / runtime-library names from upstream.
    """
    # Emit code that computes the source address from the quad operand.
    output = _addr(ins.quad[2])
    output.append()
    output.append()
    REQUIRES.add()
    return output
|
Loads a string value from a memory address.
|
### Input:
Loads a string value from a memory address.
### Response:
#vtb
def _aloadstr(ins):
    """Loads a string value from a memory address.

    NOTE(review): the string-literal arguments of the two ``append``
    calls and of ``REQUIRES.add`` were lost in an extraction step -- as
    written, ``output.append()`` raises TypeError at runtime.  Recover
    the original asm opcode / runtime-library names from upstream.
    """
    # Emit code that computes the source address from the quad operand.
    output = _addr(ins.quad[2])
    output.append()
    output.append()
    REQUIRES.add()
    return output
|
#vtb
def extract(self):
    """Extract a common dependency.

    Returns:
        A (_PackageScope, Requirement) tuple, containing the new scope
        copy with the extraction, and the extracted package range.  If
        no package was extracted, then (self, None) is returned.
    """
    # Conflict scopes never yield an extraction.
    if self.package_request.conflict:
        return (self, None)
    reduced_slice, extracted = self.variant_slice.extract()
    if not extracted:
        return (self, None)
    assert reduced_slice is not self.variant_slice
    # Shallow-copy the scope and swap in the reduced slice.
    result = copy.copy(self)
    result.variant_slice = reduced_slice
    if self.pr:
        self.pr("extracted %s from %s", extracted, self)
    return (result, extracted)
|
Extract a common dependency.
Returns:
A (_PackageScope, Requirement) tuple, containing the new scope copy
with the extraction, and the extracted package range. If no package
was extracted, then (self,None) is returned.
|
### Input:
Extract a common dependency.
Returns:
A (_PackageScope, Requirement) tuple, containing the new scope copy
with the extraction, and the extracted package range. If no package
was extracted, then (self,None) is returned.
### Response:
#vtb
def extract(self):
    """Extract a common dependency.

    Returns:
        A (_PackageScope, Requirement) tuple, containing the new scope
        copy with the extraction, and the extracted package range.  If
        no package was extracted, then (self, None) is returned.
    """
    # Conflict scopes never yield an extraction.
    if self.package_request.conflict:
        return (self, None)
    reduced_slice, extracted = self.variant_slice.extract()
    if not extracted:
        return (self, None)
    assert reduced_slice is not self.variant_slice
    # Shallow-copy the scope and swap in the reduced slice.
    result = copy.copy(self)
    result.variant_slice = reduced_slice
    if self.pr:
        self.pr("extracted %s from %s", extracted, self)
    return (result, extracted)
|
#vtb
def dotted(self):
    """Return just the tract number, excluding the state and county, in
    the dotted format.

    The six-digit census tract code is split as NNNN.NN, e.g. tract
    990100 -> '9901.00'.
    """
    # Zero-pad to the canonical 6-digit tract code before splitting.
    v = str(self.geoid.tract).zfill(6)
    # Fix: the '.' separator literal was lost in extraction, leaving the
    # runtime-broken expression ``v[0:4] + + v[4:]``.
    return v[0:4] + "." + v[4:]
|
Return just the tract number, excluding the state and county, in the dotted format
|
### Input:
Return just the tract number, excluding the state and county, in the dotted format
### Response:
#vtb
def dotted(self):
    """Return just the tract number, excluding the state and county, in
    the dotted format.

    The six-digit census tract code is split as NNNN.NN, e.g. tract
    990100 -> '9901.00'.
    """
    # Zero-pad to the canonical 6-digit tract code before splitting.
    v = str(self.geoid.tract).zfill(6)
    # Fix: the '.' separator literal was lost in extraction, leaving the
    # runtime-broken expression ``v[0:4] + + v[4:]``.
    return v[0:4] + "." + v[4:]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.