| Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars) |
|---|---|---|
374,600
|
def visualize_explanation(explanation, label=None):
if not sys.version_info[:2] >= (3, 5):
raise IndicoError("Python >= 3.5+ is required for explanation visualization")
try:
from colr import Colr as C
except ImportError:
raise IndicoError("Package colr >= 0.8.1 is required for explanation visualization.")
cursor = 0
    text = explanation["text"]  # assumed key: the raw document text
    for token in explanation.get("token_predictions"):  # assumed key
try:
            class_confidence = token.get("prediction")[label]  # assumed key
except KeyError:
raise IndicoError("Invalid label: {}".format(label))
if class_confidence > 0.5:
fg_color = (255, 255, 255)
else:
fg_color = (0, 0, 0)
rg_value = 255 - int(class_confidence * 255)
        token_end = token.get("token").get("end")  # assumed keys: end offset of the token span
token_text = text[cursor:token_end]
cursor = token_end
sys.stdout.write(
str(C().b_rgb(
rg_value, rg_value, 255
).rgb(
fg_color[0], fg_color[1], fg_color[2], token_text
))
)
sys.stdout.write("\n")
sys.stdout.flush()
|
Given the output of the explain() endpoint, produces a terminal visual that plots response strength over a sequence
|
374,601
|
def _writetypesdoc(doc, thing, forceload=0):
try:
object, name = pydoc.resolve(thing, forceload)
        name = os.path.join(doc, name + '.html')
except (ImportError, pydoc.ErrorDuringImport), value:
log.debug(str(value))
return
cdict = {}
fdict = {}
elements_dict = {}
types_dict = {}
for kname,klass in inspect.getmembers(thing, inspect.isclass):
if thing is not inspect.getmodule(klass):
continue
cdict[kname] = inspect.getmembers(klass, inspect.isclass)
for iname,iklass in cdict[kname]:
key = (kname,iname)
fdict[key] = _writedoc(doc, iklass)
if issubclass(iklass, ElementDeclaration):
try:
typecode = iklass()
except (AttributeError,RuntimeError), ex:
elements_dict[iname] = _writebrokedoc(doc, ex, iname)
continue
elements_dict[iname] = None
if typecode.pyclass is not None:
elements_dict[iname] = _writedoc(doc, typecode.pyclass)
continue
if issubclass(iklass, TypeDefinition):
try:
typecode = iklass(None)
except (AttributeError,RuntimeError), ex:
types_dict[iname] = _writebrokedoc(doc, ex, iname)
continue
types_dict[iname] = None
if typecode.pyclass is not None:
types_dict[iname] = _writedoc(doc, typecode.pyclass)
continue
def strongarm(self, object, name=None, mod=None, funcs={}, classes={}, *ignored):
realname = object.__name__
name = name or realname
bases = object.__bases__
object, name = pydoc.resolve(object, forceload)
contents = []
push = contents.append
        if name == realname:
            # HTML template strings in this function were lost in extraction;
            # the markup below is assumed, modeled on pydoc.HTMLDoc.docclass
            title = '<a name="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
                name, name, realname)
        mdict = {}
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % pydoc.join(parents, ', ')
        doc = self.markup(pydoc.getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
        for iname,iclass in cdict[name]:
            fname = fdict[(name,iname)]
            if elements_dict.has_key(iname):
                push('<a href="%s">%s</a>: element declaration<br/>'\
                     %(fname,iname))
                pyclass = elements_dict[iname]
                if pyclass is not None:
                    push('<ul>')
                    push('<li><a href="%s">pyclass</a></li>'\
                         %elements_dict[iname])
                    push('</ul>')
            elif types_dict.has_key(iname):
                push('<a href="%s">%s</a>: type definition<br/>' %(fname,iname))
                pyclass = types_dict[iname]
                if pyclass is not None:
                    push('<ul>')
                    push('<li><a href="%s">pyclass</a></li>'\
                         %types_dict[iname])
                    push('</ul>')
            else:
                push('<a href="%s">%s</a><br/>' %(fname,iname))
        contents = ''.join(contents)
        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
doclass = pydoc.HTMLDoc.docclass
pydoc.HTMLDoc.docclass = strongarm
try:
page = pydoc.html.page(pydoc.describe(object), pydoc.html.document(object, name))
        file = open(name, 'w')
file.write(page)
file.close()
except (ImportError, pydoc.ErrorDuringImport), value:
log.debug(str(value))
pydoc.HTMLDoc.docclass = doclass
|
Write HTML documentation to a file in the current directory.
|
374,602
|
def PublishEvent(cls, event_name, msg, token=None):
cls.PublishMultipleEvents({event_name: [msg]}, token=token)
|
Publish the message into all listeners of the event.
We send the message to all event handlers which contain this
string in their EVENT static member. This allows the event to be
sent to multiple interested listeners.
Args:
event_name: An event name.
msg: The message to send to the event handler.
token: ACL token.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage.
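A minimal usage sketch (the event name, message value, and surrounding names are illustrative assumptions, not taken from this snippet):
    # any handler whose EVENT member contains "FileFound" receives the message
    Events.PublishEvent("FileFound",
                        rdfvalue.RDFString("/tmp/evidence.txt"),
                        token=token)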
|
374,603
|
def openflow_controller_controller_name(self, **kwargs):
config = ET.Element("config")
openflow_controller = ET.SubElement(config, "openflow-controller", xmlns="urn:brocade.com:mgmt:brocade-openflow")
controller_name = ET.SubElement(openflow_controller, "controller-name")
    controller_name.text = kwargs.pop('controller_name')
    callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
374,604
|
def _set_default_vrf(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
        # argument literals below were lost in extraction; values assumed from
        # the usual pyangbind/Brocade pattern for this YANG path
        t = YANGDynClass(v, base=default_vrf.default_vrf, is_container='container', presence=False, yang_name="default-vrf", rest_name="default-vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Default VRF'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
except (TypeError, ValueError):
        raise ValueError({
            'error-string': """default_vrf must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=default_vrf.default_vrf, is_container='container', yang_name="default-vrf", is_config=True)""",  # abbreviated; full literal lost
        })
self.__default_vrf = t
    if hasattr(self, '_set'):
        self._set()
|
Setter method for default_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv6/ipv6_unicast/default_vrf (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_vrf() directly.
|
374,605
|
def _empty_queue(self):
while True:
try:
self.queue.pop()
self.unused += 1
self.nqueue -= 1
        except IndexError:
self.nqueue = 0
break
|
Dump all live point proposals currently on the queue.
|
374,606
|
def _set_ocsp_callback(self, helper, data):
self._ocsp_helper = helper
self._ocsp_callback = helper.callback
if data is None:
self._ocsp_data = _ffi.NULL
else:
self._ocsp_data = _ffi.new_handle(data)
rc = _lib.SSL_CTX_set_tlsext_status_cb(
self._context, self._ocsp_callback
)
_openssl_assert(rc == 1)
rc = _lib.SSL_CTX_set_tlsext_status_arg(self._context, self._ocsp_data)
_openssl_assert(rc == 1)
|
This internal helper does the common work for
``set_ocsp_server_callback`` and ``set_ocsp_client_callback``, which is
almost all of it.
|
374,607
|
def modified(self, base: pathlib.PurePath = pathlib.PurePath()) \
-> Iterator[str]:
if self.is_modified:
yield str(base / self.right.name)
|
Find the paths of modified files. There is no option to include
intermediate directories, as all files and directories exist in both
the left and right trees.
:param base: The base directory to recursively append to the right
entity.
:return: An iterable of paths of modified files.
|
374,608
|
def inverse(self):
if self.scalar == 0.0:
        raise ZeroDivisionError(
            'scaling operator with scalar==0 is not invertible')  # assumed message
return ScalingOperator(self.domain, 1.0 / self.scalar)
|
Return the inverse operator.
Examples
--------
>>> r3 = odl.rn(3)
>>> vec = r3.element([1, 2, 3])
>>> op = ScalingOperator(r3, 2.0)
>>> inv = op.inverse
>>> inv(op(vec)) == vec
True
>>> op(inv(vec)) == vec
True
|
374,609
|
def as_tree(self, visitor=None, children=None):
_parameters = {"node": self}
if visitor is not None:
_parameters["visitor"] = visitor
if children is not None:
_parameters["children"] = children
return self.__class__.objects.node_as_tree(**_parameters)
|
Recursively traverses each tree (starting from each root) in order
to generate a dictionary-based tree structure of the entire forest.
Each level of the forest/tree is a list of nodes, and each node
consists of a dictionary representation, where the entry
``children`` (by default) consists of a list of dictionary
representations of its children.
See :meth:`CTENodeManager.as_tree` and
:meth:`CTENodeManager.node_as_tree` for details on how this method
works, as well as its expected arguments.
:param visitor: optional function responsible for generating the
dictionary representation of a node.
:param children: optional function responsible for generating a
children key and list for a node.
:return: a dictionary representation of the structure of the forest.
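A short usage sketch, assuming a concrete CTENode subclass named ``Category``:
    root = Category.objects.get(name="electronics")
    tree = root.as_tree()
    subtrees = tree["children"]  # list of dict representations of child nodes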
|
374,610
|
def default(self, vid):
    command = 'default vlan %s' % vid
return self.configure(command) if isvlan(vid) else False
|
Defaults the VLAN configuration
.. code-block:: none
default vlan <vlanid>
Args:
vid (str): The VLAN ID to default
Returns:
True if the operation was successful otherwise False
|
374,611
|
def put(self, transfer_id, amount, created_timestamp, receipt):
    return self.connection.put('transfer',  # assumed endpoint path
data=dict(transfer_id=transfer_id,
amount=amount,
created_timestamp=created_timestamp,
receipt=receipt))
|
:param transfer_id: int of the account_id to deposit the money to
:param amount: float of the amount to transfer
:param created_timestamp: str of the timestamp at which the transfer was created
:param receipt: str of the receipt
:return: Transfer dict
|
374,612
|
def _sort_results(self, results):
parents = []
groups = []
for result in results:
if not self._in_list(parents, result.parent):
parents.append(result.parent)
groups.append([])
groups[len(groups) - 1].append(result)
else:
groups[parents.index(result.parent)].append(result)
array = []
for group in groups:
array += sorted(
group,
key=lambda element: element.parent.contents.index(element)
)
return array
|
Order the results.
:param results: The unordered results.
:type results: list of bs4.element.Tag
:return: The ordered results.
:rtype: list of bs4.element.Tag
|
374,613
|
def attach_stream(self, stream):
curr_stream, count, prev = self._allocated_streams[stream]
    if count == (self.model.get(u'max_node_outputs') - 1):  # assumed model parameter name
new_stream = self.allocate_stream(curr_stream.stream_type, previous=curr_stream)
copy_desc = u"({} always) => {} using copy_all_a".format(curr_stream, new_stream)
self.sensor_graph.add_node(copy_desc)
self._allocated_streams[stream] = (new_stream, 1, curr_stream)
if curr_stream.stream_type == DataStream.ConstantType and curr_stream in self.sensor_graph.constant_database:
self.sensor_graph.add_constant(new_stream, self.sensor_graph.constant_database[curr_stream])
return new_stream
self._allocated_streams[stream] = (curr_stream, count + 1, prev)
return curr_stream
|
Notify that we would like to attach a node input to this stream.
The return value from this function is the DataStream that should be attached
to, since this function may internally allocate a new SGNode that copies the
stream if there is no space in the output list to hold another input.
This function should be called once for every node input before allocating a new
sensor graph node that attaches to a stream that is managed by the StreamAllocator.
Args:
    stream (DataStream): The stream (originally returned from allocate_stream)
        that we want to attach to.
Returns:
    DataStream: A data stream, possibly the same as stream, that should be attached
        to a node input.
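A hedged usage sketch, assuming an allocator instance and a constant-type stream:
    stream = allocator.allocate_stream(DataStream.ConstantType)
    for _ in range(3):
        attach_to = allocator.attach_stream(stream)
        # wire the new node input to attach_to, which may be a freshly
        # allocated copy of stream if the original output list was full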
|
374,614
|
def wait_for_simulation_stop(self, timeout=None):
    start = datetime.now()
    ret = None
    while self.get_is_sim_running():
        sleep(0.5)
        if timeout is not None:
            if (datetime.now() - start).seconds >= timeout:
                # timed out: fall through with ret = None
                break
    else:
        # loop exited because the simulation stopped running
        ret = self.simulation_info()
    return ret
|
Block until the simulation is done or timeout seconds exceeded.
If the simulation stops before timeout, siminfo is returned.
|
374,615
|
def read_config_files(self, files):
errors = {}
for _file in files:
config, valid = self.read_config_file(_file)
self.update(config)
if valid is not True:
errors[_file] = valid
return errors or True
|
Read a list of config files.
:param iterable files: An iterable (e.g. list) of files to read.
|
374,616
|
def get_active_lines(lines, comment_char="#"):
return list(filter(None, (line.split(comment_char, 1)[0].strip() for line in lines)))
|
Returns lines, or parts of lines, from content that are not commented out
or completely empty. The resulting lines are all individually stripped.
This is useful for parsing many config files such as ifcfg.
Parameters:
lines (list): List of strings to parse.
comment_char (str): String indicating that all chars following
are part of a comment and will be removed from the output.
Returns:
list: List of valid lines remaining in the input.
Examples:
>>> lines = [
... 'First line',
... ' ',
... '# Comment line',
... 'Inline comment # comment',
... ' Whitespace ',
... 'Last line']
>>> get_active_lines(lines)
['First line', 'Inline comment', 'Whitespace', 'Last line']
|
374,617
|
def _convert_coordinatelist(input_obj):
cdl = pgmagick.CoordinateList()
for obj in input_obj:
cdl.append(pgmagick.Coordinate(obj[0], obj[1]))
return cdl
|
convert from 'list' or 'tuple' object to pgmagick.CoordinateList.
:type input_obj: list or tuple
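A hedged usage sketch (``DrawablePolyline`` as the consumer of the list is an assumption, not part of this snippet):
    cdl = _convert_coordinatelist([(0, 0), (40, 60), (120, 80)])
    img.draw(pgmagick.DrawablePolyline(cdl))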
|
374,618
|
def load_module(self, module):
m_ref = self._modules_map.get(module)
if m_ref is None:
        raise LoaderError('Module "{}" was not found'.format(module))  # assumed message
    mod = importlib.import_module('ansible.modules.{}'.format(  # assumed package prefix
        '.'.join([elm.split('.')[0] for elm in m_ref.split(os.path.sep)])))
return mod
|
Introspect Ansible module.
:param module:
:return:
|
374,619
|
def name2rgb(name):
try:
import colour
except ImportError:
        raise ImportError("the 'colour' package is required for name2rgb")  # assumed message
c = colour.Color(name)
color = int(c.red * 255), int(c.green * 255), int(c.blue * 255)
return color
|
Convert the name of a color into its RGB value
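For example:
>>> name2rgb('red')
(255, 0, 0)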
|
374,620
|
def _mk_connectivity_flats(self, i12, j1, j2, mat_data, flats, elev, mag):
nn, mm = flats.shape
NN = np.prod(flats.shape)
assigned, n_flats = spndi.label(flats, FLATS_KERNEL3)
flat_ids, flat_coords, flat_labelsf = _get_flat_ids(assigned)
flat_j = [None] * n_flats
flat_prop = [None] * n_flats
flat_i = [None] * n_flats
edges = np.zeros_like(flats)
warn_flats = []
for ii in xrange(n_flats):
ids_flats = flat_ids[flat_coords[ii]:flat_coords[ii+1]]
edges[:] = 0
j = ids_flats % mm
i = ids_flats // mm
for iii in [-1, 0, 1]:
for jjj in [-1, 0, 1]:
i_2 = i + iii
j_2 = j + jjj
ids_tmp = (i_2 >= 0) & (j_2 >= 0) & (i_2 < nn) & (j_2 < mm)
edges[i_2[ids_tmp], j_2[ids_tmp]] += \
FLATS_KERNEL3[iii+1, jjj+1]
edges.ravel()[ids_flats] = 0
ids_edge = np.argwhere(edges.ravel()).squeeze()
flat_elev_loc = elev.ravel()[ids_flats]
flat_elev = flat_elev_loc.min()
loc_elev = elev.ravel()[ids_edge]
I_filt = loc_elev < flat_elev
try:
loc_elev = loc_elev[I_filt]
loc_slope = mag.ravel()[ids_edge][I_filt]
        except Exception:
loc_elev = np.array([])
loc_slope = np.array([])
loc_dx = self.dX.mean()
n = len(loc_slope)
if n == 0:
ids_flat_on_edge = ((ids_flats % mag.shape[1]) == 0) | \
((ids_flats % mag.shape[1]) == (mag.shape[1] - 1)) | \
(ids_flats <= mag.shape[1]) | \
(ids_flats >= (mag.shape[1] * (mag.shape[0] - 1)))
if ids_flat_on_edge.sum() == 0:
warn_flats.append(ii)
continue
drain_ids = ids_flats[ids_flat_on_edge]
loc_proportions = mag.ravel()[ids_flats[ids_flat_on_edge]]
loc_proportions /= loc_proportions.sum()
ids_flats = ids_flats[~ids_flat_on_edge]
if len(ids_flats) == 0:
continue
flat_elev_loc = flat_elev_loc[~ids_flat_on_edge]
else:
min_edges = np.zeros(loc_slope.shape, bool)
min_edges[np.argmin(loc_slope)] = True
min_edges = (loc_slope + loc_slope * loc_dx / 2) \
>= loc_slope[min_edges]
drain_ids = ids_edge[I_filt][min_edges]
loc_proportions = loc_slope[min_edges]
loc_proportions /= loc_proportions.sum()
one_id = np.zeros(ids_flats.size, bool)
one_id[np.argmin(flat_elev_loc)] = True
j1.ravel()[ids_flats[~one_id]] = ids_flats[one_id]
mat_data.ravel()[ids_flats[~one_id]] = 1
j2.ravel()[ids_flats[~one_id]] = -1
mat_data.ravel()[ids_flats[~one_id] + NN] = 0
j1.ravel()[ids_flats[one_id]] = drain_ids[0]
mat_data.ravel()[ids_flats[one_id]] = loc_proportions[0]
if len(drain_ids) > 1:
j2.ravel()[ids_flats[one_id]] = drain_ids[1]
mat_data.ravel()[ids_flats[one_id] + NN] = loc_proportions[1]
        if len(loc_proportions) > 2:
flat_j[ii] = drain_ids[2:]
flat_prop[ii] = loc_proportions[2:]
            flat_i[ii] = np.ones(drain_ids[2:].size, 'int64') * ids_flats[one_id]
try:
flat_j = np.concatenate([fj for fj in flat_j if fj is not None])
flat_prop = \
np.concatenate([fp for fp in flat_prop if fp is not None])
flat_i = np.concatenate([fi for fi in flat_i if fi is not None])
    except Exception:
        # dtype literals assumed: integer ids, float proportions
        flat_j = np.array([], 'int64')
        flat_prop = np.array([], 'float64')
        flat_i = np.array([], 'int64')
if len(warn_flats) > 0:
warnings.warn("Warning %d flats had no place" % len(warn_flats) +
" to drain to --> these are pits (check pit-remove"
"algorithm).")
return j1, j2, mat_data, flat_i, flat_j, flat_prop
|
Helper function for _mk_adjacency_matrix. This calculates the
connectivity for flat regions. Every pixel in the flat will drain
to a random pixel in the flat. This accumulates all the area in the
flat region to a single pixel. All that area is then drained from
that pixel to the surroundings on the flat. If the border of the
flat has a single pixel with a much lower elevation, all the area will
go towards that pixel. If the border has pixels with similar elevation,
then the area will be distributed amongst all the border pixels
proportional to their elevation.
|
374,621
|
def find_credentials():
    return DETAILS['username'], password  # assumed key; password comes from enclosing scope
|
Cycle through all the possible credentials and return the first one that
works.
|
374,622
|
def get_config_id(kwargs=None, call=None):
    if call == 'action':
        raise SaltCloudException(
            'The get_config_id function must be called with -f or --function.'  # assumed message
        )
if kwargs is None:
kwargs = {}
    name = kwargs.get('name', None)
    linode_id = kwargs.get('linode_id', None)
if name is None and linode_id is None:
        raise SaltCloudSystemExit(
            'The get_config_id function requires either a \'name\' or a \'linode_id\'.'
        )
if linode_id is None:
linode_id = get_linode_id_from_name(name)
    # API method and response-key literals assumed from the Linode v1 API
    response = _query('linode', 'config.list', args={'LinodeID': linode_id})['DATA']
    config_id = {'config_id': response[0]['ConfigID']}
return config_id
|
Returns a config_id for a given linode.
.. versionadded:: 2015.8.0
name
The name of the Linode for which to get the config_id. Can be used instead
of ``linode_id``.
linode_id
The ID of the Linode for which to get the config_id. Can be used instead
of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f get_config_id my-linode-config name=my-linode
salt-cloud -f get_config_id my-linode-config linode_id=1234567
|
374,623
|
def get_link(self, task_id):
links = [x for x in self.links if x.task_id == task_id]
if len(links) != 1:
raise CoTError("No single Link matches task_id {}!\n{}".format(task_id, self.dependent_task_ids()))
return links[0]
|
Get a ``LinkOfTrust`` by task id.
Args:
task_id (str): the task id to find.
Returns:
LinkOfTrust: the link matching the task id.
Raises:
CoTError: if no ``LinkOfTrust`` matches.
|
374,624
|
def catch_config_error(method, app, *args, **kwargs):
try:
return method(app, *args, **kwargs)
except (TraitError, ArgumentError) as e:
app.print_description()
app.print_help()
app.print_examples()
app.log.fatal("Bad config encountered during initialization:")
app.log.fatal(str(e))
app.log.debug("Config at the time: %s", app.config)
app.exit(1)
|
Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
On a TraitError (generally caused by bad config), this will print the trait's
message, and exit the app.
For use on init methods, to prevent invoking excepthook on invalid input.
|
374,625
|
def remove_all_observers(self):
for weak_observer in self._weak_observers:
observer = weak_observer()
if observer:
self.remove_observer(observer)
|
Removes all registered observers.
|
374,626
|
def compose(self, other, qargs=None, front=False):
if qargs is not None:
return Stinespring(
SuperOp(self).compose(other, qargs=qargs, front=front))
if not isinstance(other, Kraus):
other = Kraus(other)
    if front and self._input_dim != other._output_dim:
        raise QiskitError(
            'input_dim of self must match output_dim of other')  # assumed message
    if not front and self._output_dim != other._input_dim:
        raise QiskitError(
            'output_dim of self must match input_dim of other')  # assumed message
return Stinespring(Kraus(self).compose(other, front=front))
|
Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel subclass.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Stinespring: The composition channel as a Stinespring object.
Raises:
QiskitError: if other cannot be converted to a channel or
has incompatible dimensions.
|
374,627
|
def generate_nodeinfo2_document(**kwargs):
    # kwargs key names were lost in extraction; snake_case names assumed to
    # mirror the NodeInfo2 schema fields
    return {
        "version": "1.0",
        "server": {
            "baseUrl": kwargs['server']['base_url'],
            "name": kwargs['server']['name'],
            "software": kwargs['server']['software'],
            "version": kwargs['server']['version'],
        },
        "organization": {
            "name": kwargs.get('organization', {}).get('name', None),
            "contact": kwargs.get('organization', {}).get('contact', None),
            "account": kwargs.get('organization', {}).get('account', None),
        },
        "protocols": kwargs.get('protocols', ["diaspora"]),
        "relay": kwargs.get('relay', ""),
        "services": {
            "inbound": kwargs.get('services', {}).get('inbound', []),
            "outbound": kwargs.get('services', {}).get('outbound', []),
        },
        "openRegistrations": kwargs['open_registrations'],
        "usage": {
            "users": {
                "total": kwargs.get('usage', {}).get('users', {}).get('total'),
                "activeHalfyear": kwargs.get('usage', {}).get('users', {}).get('active_halfyear'),
                "activeMonth": kwargs.get('usage', {}).get('users', {}).get('active_month'),
                "activeWeek": kwargs.get('usage', {}).get('users', {}).get('active_week'),
            },
            "localPosts": kwargs.get('usage', {}).get('local_posts'),
            "localComments": kwargs.get('usage', {}).get('local_comments'),
        }
    }
|
Generate a NodeInfo2 document.
Pass in a dictionary as per NodeInfo2 1.0 schema:
https://github.com/jaywink/nodeinfo2/blob/master/schemas/1.0/schema.json
Minimum required schema:
{server:
baseUrl
name
software
version
}
openRegistrations
Protocols default will match what this library supports, ie "diaspora" currently.
:return: dict
:raises: KeyError on missing required items
|
374,628
|
def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'):  # default literals assumed
level = level or logging.WARNING
if isinstance(level, str):
level = getattr(logging, level, None)
if level is None:
        raise ValueError('invalid log level: ' + level)  # assumed message
root_logger = logging.getLogger()
for handler in root_logger.handlers:
root_logger.removeHandler(handler)
logging.getLogger(logger).setLevel(logging.DEBUG)
console_log_handler = logging.StreamHandler()
console_log_handler.setLevel(level)
if isinstance(formatter, str):
formatter = logging.Formatter(formatter)
elif not isinstance(formatter, logging.Formatter):
        raise TypeError('formatter must be a str or logging.Formatter instance')  # assumed message
console_log_handler.setFormatter(formatter)
logging.getLogger(logger).addHandler(console_log_handler)
logging.captureWarnings(True)
return console_log_handler
|
Configure the default stream handler for logging messages to the console,
remove other logging handlers, and enable capturing warnings.
.. versionadded:: 1.3.0
:param str logger: The logger to add the stream handler for.
:param level: The level to set the logger to, will default to WARNING if no level is specified.
:type level: None, int, str
:param formatter: The format to use for logging messages to the console.
:type formatter: str, :py:class:`logging.Formatter`
:return: The new configured stream handler.
:rtype: :py:class:`logging.StreamHandler`
|
374,629
|
def parse_request() -> Dict[str, str]:
in_lines = sys.stdin.readlines()
    LOGGER.debug('Read request lines: %s', in_lines)  # assumed message
request = {}
for line in in_lines:
if not line.strip():
continue
        parts = line.split('=', 1)
assert len(parts) == 2
request[parts[0].strip()] = parts[1].strip()
return request
|
Parse the request of the git credential API from stdin.
Returns:
A dictionary with all key-value pairs of the request
|
374,630
|
def verify_leaf_inclusion(self, leaf: bytes, leaf_index: int,
proof: List[bytes], sth: STH):
leaf_hash = self.hasher.hash_leaf(leaf)
return self.verify_leaf_hash_inclusion(leaf_hash, leaf_index, proof,
sth)
|
Verify a Merkle Audit Path.
See section 2.1.1 of RFC6962 for the exact path description.
Args:
leaf: The leaf for which the proof was provided.
leaf_index: Index of the leaf in the tree.
proof: A list of SHA-256 hashes representing the Merkle audit
path.
sth: STH with the same tree size as the one used to fetch the
proof.
The sha256_root_hash from this STH will be compared against the
root hash produced from the proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ProofError: the proof is invalid.
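A minimal sketch, assuming the enclosing class is instantiated as ``verifier`` and that ``proof_hashes`` and ``sth`` were fetched from the log beforehand:
    verifier.verify_leaf_inclusion(b"entry", leaf_index=3,
                                   proof=proof_hashes, sth=sth)
    # returns True, or raises ProofError if the proof does not verify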
|
374,631
|
def dumps(self):
ret_str = []
for item in self._arg_list:
if isinstance(item, TikZUserPath):
ret_str.append(item.dumps())
elif isinstance(item, TikZCoordinate):
ret_str.append(item.dumps())
elif isinstance(item, str):
ret_str.append(item)
    return ' '.join(ret_str)
|
Return representation of the path command.
|
374,632
|
def transformer_tall_finetune_uniencdec():
hparams = transformer_tall()
hparams.max_input_seq_length = 750
hparams.max_target_seq_length = 100
hparams.optimizer = "true_adam"
hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay")
hparams.learning_rate_decay_steps = 80000
hparams.learning_rate_constant = 5e-5
hparams.learning_rate_warmup_steps = 100
hparams.unidirectional_encoder = True
return hparams
|
Fine-tune CNN/DM with a unidirectional encoder and decoder.
|
374,633
|
def has_sources(self, extension=None):
source_paths = self._sources_field.source_paths
if not source_paths:
return False
if not extension:
return True
return any(source.endswith(extension) for source in source_paths)
|
Return `True` if this target owns sources; optionally of the given `extension`.
:API: public
:param string extension: Optional suffix of filenames to test for.
:return: `True` if the target contains sources that match the optional extension suffix.
:rtype: bool
|
374,634
|
def get_probmodel_data(model):
if not isinstance(model, BayesianModel):
        raise TypeError("Model must be an instance of BayesianModel.")
    # dict key literals below were lost in extraction; names assumed from the
    # pgmpy ProbModelXML layout
    model_data = {'probnet': {'type': 'BayesianNetwork', 'Variables': {}}}
variables = model.nodes()
for var in variables:
        model_data['probnet']['Variables'][var] = model.node[var]
    model_data['probnet']['Edges'] = {}
edges = model.edges()
for edge in edges:
        model_data['probnet']['Edges'][str(edge)] = model.adj[edge[0]][edge[1]]
    model_data['probnet']['Potentials'] = []
cpds = model.get_cpds()
for cpd in cpds:
potential_dict = {}
        potential_dict['Variables'] = {}
evidence = cpd.variables[:0:-1]
if evidence:
            potential_dict['Variables'][cpd.variable] = evidence
else:
            potential_dict['Variables'][cpd.variable] = []
        potential_dict['type'] = "Table"
        potential_dict['role'] = "conditionalProbability"
        potential_dict['Values'] = " ".join([str(val) for val in cpd.values.ravel().astype(float)]) + " "
        model_data['probnet']['Potentials'].append(potential_dict)
return model_data
|
Returns the model_data based on the given model.
Parameters
----------
model: BayesianModel instance
Model to write
Return
------
model_data: dict
dictionary containing model data of the given model.
Examples
--------
>>> model_data = get_probmodel_data(model)
|
374,635
|
def _merge_tops_merge_all(self, tops):
def _read_tgt(tgt):
match_type = None
states = []
for item in tgt:
if isinstance(item, dict):
match_type = item
if isinstance(item, six.string_types):
states.append(item)
return match_type, states
top = DefaultOrderedDict(OrderedDict)
for ctops in six.itervalues(tops):
for ctop in ctops:
for saltenv, targets in six.iteritems(ctop):
                if saltenv == 'include':
continue
try:
for tgt in targets:
if tgt not in top[saltenv]:
top[saltenv][tgt] = ctop[saltenv][tgt]
continue
m_type1, m_states1 = _read_tgt(top[saltenv][tgt])
m_type2, m_states2 = _read_tgt(ctop[saltenv][tgt])
merged = []
match_type = m_type2 or m_type1
if match_type is not None:
merged.append(match_type)
merged.extend(m_states1)
merged.extend([x for x in m_states2 if x not in merged])
top[saltenv][tgt] = merged
except TypeError:
                    raise SaltRenderError('Unable to render top file. No targets found.')  # assumed message
return top
|
Merge the top files into a single dictionary
|
374,636
|
def auto_discover_board(self, verbose):
start_time = time.time()
while len(self.analog_mapping_query_results) == 0:
if time.time() - start_time > 30:
return False
self.send_sysex(self.ANALOG_MAPPING_QUERY)
time.sleep(.1)
if verbose:
print("Board initialized in %d seconds" % (time.time() - start_time))
for pin in self.analog_mapping_query_results:
self.total_pins_discovered += 1
if pin != self.pymata.IGNORE:
self.number_of_analog_pins_discovered += 1
if verbose:
        print('Total pins discovered: %d' % self.total_pins_discovered)  # assumed text
        print('Number of analog pins discovered: %d' % self.number_of_analog_pins_discovered)  # assumed text
for pin in range(0, self.total_pins_discovered):
response_entry = [self.pymata.INPUT, 0, None]
self.digital_response_table.append(response_entry)
for pin in range(0, self.number_of_analog_pins_discovered):
response_entry = [self.pymata.INPUT, 0, None]
self.analog_response_table.append(response_entry)
for pin in range(0, self.total_pins_discovered):
digital_latch_table_entry = [0, 0, 0, 0, None]
self.digital_latch_table.append(digital_latch_table_entry)
for pin in range(0, self.number_of_analog_pins_discovered):
analog_latch_table_entry = [0, 0, 0, 0, 0, None]
self.analog_latch_table.append(analog_latch_table_entry)
return True
|
This method will allow up to 30 seconds for discovery of (i.e., communicating with) an Arduino board
and then will determine a pin configuration table for the board.
:return: True if board is successfully discovered or False upon timeout
|
374,637
|
def _cleanup_temp_dir(self, base_dir):
if self._should_cleanup_temp_dir:
        logging.debug('Cleaning up temporary directory %s.', base_dir)  # assumed message
if self._user is None:
util.rmtree(base_dir, onerror=util.log_rmtree_error)
else:
            rm = subprocess.Popen(self._build_cmdline(['rm', '-rf', '--', base_dir]),  # assumed rm invocation
stderr=subprocess.PIPE)
rm_output = rm.stderr.read().decode()
rm.stderr.close()
if rm.wait() != 0 or rm_output:
logging.warning("Failed to clean up temp directory %s: %s.",
base_dir, rm_output)
else:
logging.info("Skipping cleanup of temporary directory %s.", base_dir)
|
Delete given temporary directory and all its contents.
|
374,638
|
def available_phone_numbers(self):
if self._available_phone_numbers is None:
self._available_phone_numbers = AvailablePhoneNumberCountryList(
self._version,
                account_sid=self._solution['account_sid'],
)
return self._available_phone_numbers
|
Access the available_phone_numbers
:returns: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryList
:rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryList
|
374,639
|
def load_stock_quantity(self, symbol: str) -> Decimal:
book = self.get_gc_book()
collection = SecuritiesAggregate(book)
sec = collection.get_aggregate_for_symbol(symbol)
quantity = sec.get_quantity()
return quantity
|
retrieves stock quantity
|
374,640
|
def _update_alpha(self, event=None):
a = self.alpha.get()
hexa = self.hexa.get()
hexa = hexa[:7] + ("%2.2x" % a).upper()
    self.hexa.delete(0, 'end')
self.hexa.insert(0, hexa)
self.alphabar.set(a)
self._update_preview()
|
Update display after a change in the alpha spinbox.
|
374,641
|
def _enable_read_access(self):
if not self.algo_inited_for_read:
self.flash.init(self.flash.Operation.VERIFY)
self.algo_inited_for_read = True
|
! @brief Ensure flash is accessible by initing the algo for verify.
Not all flash memories are always accessible. For instance, external QSPI. Initing the
flash algo for the VERIFY operation is the canonical way to ensure that the flash is
memory mapped and accessible.
|
374,642
|
async def process_check_ins(self):
    # parameter-key literals assumed from the Challonge v1 API
    params = {
        'include_participants': 1,
        'include_matches': 1 if AUTO_GET_MATCHES else 0
    }
    res = await self.connection('POST', 'tournaments/{}/process_check_ins'.format(self._id), **params)
self._refresh_from_json(res)
|
finalize the check in phase
|methcoro|
Warning:
|unstable|
Note:
|from_api| This should be invoked after a tournament's check-in window closes before the tournament is started.
1. Marks participants who have not checked in as inactive.
2. Moves inactive participants to bottom seeds (ordered by original seed).
3. Transitions the tournament state from 'checking_in' to 'checked_in'
NOTE: Checked in participants on the waiting list will be promoted if slots become available.
Raises:
APIException
|
374,643
|
def generate_row_keys(self):
keys = self.key
columns = self.values
if not self._data:
self._data = self.get_data()
for column in columns:
        # separator and column-key literals were lost in extraction; the
        # '#'-separated layout is assumed from the docstring example
        key_prefix = self.cache_key_prefix() + "#" + column
        self._data['keys'] = self._data[keys].apply(lambda xdf: key_prefix + "=" + "#".join(xdf.astype(str).values),
                                                    axis=1)
    return list(self._data['keys'].values)
|
Method for generating key features at serving time or prediction time
:param data: Pass in the data that is necessary for generating the keys
Example :
Feature : User warehouse searches and conversions
Keys will be of the form 'user_id#warehouse_id#searches=23811676#3'
Keys will be of the form 'user_id#warehouse_id#conversions=23811676#3'
data Frame should have values for all the columns as feature_key in this case ['user_id','warehouse_id']
:return:
|
374,644
|
def launch_external_file(filename: str, raise_if_fails: bool = False) -> None:
log.info("Launching external file: {!r}", filename)
try:
        if sys.platform.startswith('linux'):
cmdargs = ["xdg-open", filename]
subprocess.call(cmdargs)
else:
os.startfile(filename)
except Exception as e:
log.critical("Error launching {!r}: error was {}.\n\n{}",
filename, str(e), traceback.format_exc())
if raise_if_fails:
raise
|
Launches a file using the operating system's standard launcher.
Args:
filename: file to launch
raise_if_fails: raise any exceptions from
``subprocess.call(["xdg-open", filename])`` (Linux)
or ``os.startfile(filename)`` (otherwise)? If not, exceptions
are suppressed.
|
374,645
|
def add_postprocessor(postproc):
def decorator(func):
func = ScriptAdaptor._wrap(func)
func._add_postprocessor(postproc)
return func
return decorator
|
Define a postprocessor to run after the function is executed, when
running in console script mode.
:param postproc: The callable, which will be passed the Namespace
object generated by argparse and the return
result of the function. The return result of the
callable will be used as the final return result
(or as the result fed into the next
postprocessor).
|
374,646
|
def matchiter(r, s, flags=0):
if isinstance(r, basestring):
r = re.compile(r, flags)
i = 0
while s:
m = r.match(s)
g = m and m.group(0)
if not m or not g:
raise ValueError("{}: {!r}".format(i, s[:50]))
i += len(g)
s = s[len(g):]
yield m
|
Yields contiguous MatchObjects of r in s.
Raises ValueError if r eventually doesn't match contiguously.
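For example:
>>> [m.group(0) for m in matchiter(r'\d+|,', '12,34')]
['12', ',', '34']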
|
374,647
|
def _parse_connection_string(connstr):
res = {}
    for item in connstr.split(';'):
item = item.strip()
if not item:
continue
        key, value = item.split('=', 1)
        key = key.strip().lower().replace(' ', '_')
value = value.strip()
res[key] = value
return res
|
MSSQL style connection string parser
Returns normalized dictionary of connection string parameters
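For example (assuming the '='/';' separators and the space-to-underscore key normalization reconstructed above):
>>> params = _parse_connection_string('Server=myhost;User Id=sa;Password=x')
>>> params['server'], params['user_id']
('myhost', 'sa')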
|
374,648
|
def _check_dhcp_server(self, vboxnet):
properties = yield from self._execute("list", ["dhcpservers"])
flag_dhcp_server_found = False
for prop in properties.splitlines():
try:
            name, value = prop.split(':', 1)
except ValueError:
continue
if name.strip() == "NetworkName" and value.strip().endswith(vboxnet):
flag_dhcp_server_found = True
if flag_dhcp_server_found and name.strip() == "Enabled":
if value.strip() == "Yes":
return True
return False
|
Check if the DHCP server associated with a vboxnet is enabled.
:param vboxnet: vboxnet name
:returns: boolean
|
374,649
|
def _posix_split_name(self, name):
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
|
Split a name longer than 100 chars into a prefix
and a name part.
|
374,650
|
def _track_stack_pointers(self):
regs = {self.project.arch.sp_offset}
    if hasattr(self.project.arch, 'bp_offset') and self.project.arch.bp_offset is not None:
regs.add(self.project.arch.bp_offset)
spt = self.project.analyses.StackPointerTracker(self.function, regs, track_memory=self._sp_tracker_track_memory)
if spt.inconsistent_for(self.project.arch.sp_offset):
l.warning("Inconsistency found during stack pointer tracking. Decompilation results might be incorrect.")
return spt
|
For each instruction, track its stack pointer offset and stack base pointer offset.
:return: The StackPointerTracker analysis result.
|
374,651
|
def results(self):
if self.deriv == 0:
return self.v,
if self.deriv == 1:
return self.v, self.d
if self.deriv == 2:
return self.v, self.d, self.dd
|
Return the value and optionally derivative and second order derivative
|
374,652
|
def _compile_pattern(pat, ignore_case=True):
if isinstance(pat, bytes):
pat_str = pat.decode()
res_str = _translate_glob(pat_str)
res = res_str.encode()
else:
res = _translate_glob(pat)
flags = re.IGNORECASE if ignore_case else 0
return re.compile(res, flags=flags).match
|
Translate and compile a glob pattern to a regular expression matcher.
|
374,653
|
def environ_setting(name, default=None, required=True):
if name not in os.environ and default is None:
message = "The {0} ENVVAR is not set.".format(name)
if required:
raise ImproperlyConfigured(message)
else:
warnings.warn(ConfigurationMissing(message))
return os.environ.get(name, default)
|
Fetch setting from the environment. The behavior of the setting if it
is not in the environment is as follows:
1. If it is required and the default is None, raise an exception
2. If it is required and a default exists, return the default
3. If it is not required and the default is None, return None
4. If it is not required and a default exists, return the default
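A short usage sketch:
    SECRET_KEY = environ_setting("SECRET_KEY")  # raises if unset
    DEBUG = environ_setting("DEBUG", "false", required=False)  # -> "false" if unset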
|
374,654
|
def from_class(cls, target_class):
module_name = target_class.__module__
class_name = target_class.__name__
return cls(module_name, "__init__", class_name)
|
Create a FunctionDescriptor from a class.
Args:
cls: Current class which is required argument for classmethod.
target_class: the python class used to create the function
descriptor.
Returns:
The FunctionDescriptor instance created according to the class.
|
374,655
|
def get_edges_with_citations(self, citations: Iterable[Citation]) -> List[Edge]:
return self.session.query(Edge).join(Evidence).filter(Evidence.citation.in_(citations)).all()
|
Get edges with one of the given citations.
|
374,656
|
def Times(self, val):
return Point(self.x * val, self.y * val, self.z * val)
|
Returns a new point which is pointwise multiplied by val.
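For example (assuming the natural ``Point(x, y, z)`` constructor and attributes):
>>> p = Point(1.0, 2.0, 3.0).Times(2)
>>> (p.x, p.y, p.z)
(2.0, 4.0, 6.0)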
|
374,657
|
def stack_push(self, key, value):
task = Task.current_task()
try:
context = task._context_stack
except AttributeError:
task._context_stack = context = {}
if key not in context:
context[key] = []
context[key].append(value)
|
Set a value in a task context stack
|
374,658
|
def wsgi(self, environ, start_response):
request = Request(environ)
ctx = Context(request)
try:
try:
response = self(request, ctx)
            ctx._run_callbacks('response', (request, response))  # assumed callback phase name
response = response.conditional_to(request)
except HTTPException as e:
response = e.response
except Exception:
self.handle_error(request, ctx)
response = InternalServerError().response
        response.add_callback(lambda: ctx._run_callbacks('close'))  # assumed callback phase name
return response(environ, start_response)
finally:
        ctx._run_callbacks('teardown', log_errors=True)  # assumed callback phase name
|
Implements the mapper's WSGI interface.
|
374,659
|
def contributors(self, sr, limit=None):
    userlist = self._limit_get('r', sr, 'about', 'contributors', limit=limit)  # assumed path segments, per the URL in the docstring
return _process_userlist(userlist)
|
Login required. GETs list of contributors to subreddit ``sr``. Returns :class:`things.ListBlob` object.
**NOTE**: The :class:`things.Account` objects in the returned ListBlob *only* have ``id`` and ``name`` set. This is because that's all reddit returns. If you need full info on each contributor, you must individually GET them using :meth:`user` or :meth:`things.Account.about`.
URL: ``http://www.reddit.com/r/<sr>/about/contributors/``
:param sr: name of subreddit
|
374,660
|
def STRH(self, params):
Ra, Rb, Rc = self.get_three_parameters(self.THREE_PARAMETER_WITH_BRACKETS, params)
if self.is_immediate(Rc):
self.check_arguments(low_registers=(Ra, Rb), imm5=(Rc,))
def STRH_func():
for i in range(2):
self.memory[self.register[Rb] + self.convert_to_integer(Rc[1:]) + i] = ((self.register[Ra] >> (8 * i)) & 0xFF)
else:
self.check_arguments(low_registers=(Ra, Rb, Rc))
def STRH_func():
for i in range(2):
self.memory[self.register[Rb] + self.register[Rc] + i] = ((self.register[Ra] >> (8 * i)) & 0xFF)
return STRH_func
|
STRH Ra, [Rb, Rc]
STRH Ra, [Rb, #imm6_2]
Store Ra into memory as a half word
Ra, Rb, and Rc must be low registers
|
374,661
|
def set_values(self,x):
x = numpy.atleast_2d(x)
x = x.real
C_inv = self.__C_inv__
theta = numpy.dot( x, C_inv )
self.theta = theta
return theta
|
Updates the self.theta parameter and returns the new theta.
|
374,662
|
def ADC(cpu, dest, src):
cpu._ADD(dest, src, carry=True)
|
Adds with carry.
Adds the destination operand (first operand), the source operand (second operand),
and the carry (CF) flag and stores the result in the destination operand. The state
of the CF flag represents a carry from a previous addition. When an immediate value
is used as an operand, it is sign-extended to the length of the destination operand
format. The ADC instruction does not distinguish between signed or unsigned operands.
Instead, the processor evaluates the result for both data types and sets the OF and CF
flags to indicate a carry in the signed or unsigned result, respectively. The SF flag
indicates the sign of the signed result. The ADC instruction is usually executed as
part of a multibyte or multiword addition in which an ADD instruction is followed by an
ADC instruction::
DEST = DEST + SRC + CF;
The OF, SF, ZF, AF, CF, and PF flags are set according to the result.
:param cpu: current CPU.
:param dest: destination operand.
:param src: source operand.
|
374,663
|
def show(self):
bytecode._Print("MAP_LIST SIZE", self.size)
for i in self.map_item:
if i.item != self:
i.show()
|
Print with a pretty display the MapList object
|
374,664
|
def msg_curse(self, args=None, max_width=None):
    # stat-key and format literals in this method were lost in extraction;
    # values assumed from the Glances "mem" plugin, whose row layout matches
    ret = []
    if not self.stats or self.is_disable():
        return ret
    msg = '{}'.format('MEM')
    ret.append(self.curse_add_line(msg, "TITLE"))
    msg = ' {}'.format(self.trend_msg(self.get_trend('percent')))
    ret.append(self.curse_add_line(msg))
    msg = '{:>7.1%}'.format(self.stats['percent'] / 100)
    ret.append(self.curse_add_line(msg))
    if 'active' in self.stats:
        msg = '  {:9}'.format('active:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='active', option='optional')))
        msg = '{:>7}'.format(self.auto_unit(self.stats['active']))
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='active', option='optional')))
    ret.append(self.curse_new_line())
    msg = '{:6}'.format('total:')
    ret.append(self.curse_add_line(msg))
    msg = '{:>7}'.format(self.auto_unit(self.stats['total']))
    ret.append(self.curse_add_line(msg))
    if 'inactive' in self.stats:
        msg = '  {:9}'.format('inactive:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='inactive', option='optional')))
        msg = '{:>7}'.format(self.auto_unit(self.stats['inactive']))
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='inactive', option='optional')))
    ret.append(self.curse_new_line())
    msg = '{:6}'.format('used:')
    ret.append(self.curse_add_line(msg))
    msg = '{:>7}'.format(self.auto_unit(self.stats['used']))
    ret.append(self.curse_add_line(
        msg, self.get_views(key='used', option='decoration')))
    if 'buffers' in self.stats:
        msg = '  {:9}'.format('buffers:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='buffers', option='optional')))
        msg = '{:>7}'.format(self.auto_unit(self.stats['buffers']))
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='buffers', option='optional')))
    ret.append(self.curse_new_line())
    msg = '{:6}'.format('free:')
    ret.append(self.curse_add_line(msg))
    msg = '{:>7}'.format(self.auto_unit(self.stats['free']))
    ret.append(self.curse_add_line(msg))
    if 'cached' in self.stats:
        msg = '  {:9}'.format('cached:')
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='cached', option='optional')))
        msg = '{:>7}'.format(self.auto_unit(self.stats['cached']))
        ret.append(self.curse_add_line(msg, optional=self.get_views(key='cached', option='optional')))
    return ret
|
Return the dict to display in the curse interface.
|
374,665
|
def set_current_operation_progress(self, percent):
if not isinstance(percent, baseinteger):
raise TypeError("percent can only be an instance of type baseinteger")
self._call("setCurrentOperationProgress",
in_p=[percent])
|
Internal method, not to be called externally.
in percent of type int
|
374,666
|
def delete_connection(self, **kwargs):
conn = self.find_connection(**kwargs)
if not conn:
return False
self.delete(conn)
return True
|
Remove a single connection to a provider for the specified user.
|
374,667
|
def p_genvarlist(self, p):
p[0] = p[1] + (p[3],)
p.set_lineno(0, p.lineno(1))
|
genvarlist : genvarlist COMMA genvar
|
374,668
|
def _compute_total_chunks(self, chunk_size):
try:
return int(math.ceil(self._ase.size / chunk_size))
except ZeroDivisionError:
return 0
|
Compute total number of chunks for entity
:param Descriptor self: this
:param int chunk_size: chunk size
:rtype: int
:return: num chunks
|
374,669
|
def call_moses_detokenizer(workspace_dir: str, input_fname: str, output_fname: str, lang_code: Optional[str] = None):
detokenizer_fname = os.path.join(workspace_dir,
DIR_THIRD_PARTY,
MOSES_DEST,
"scripts",
"tokenizer",
"detokenizer.perl")
with bin_open(input_fname) as inp, open(output_fname, "wb") as out, open(os.devnull, "wb") as devnull:
command = ["perl", detokenizer_fname]
if lang_code:
command.append("-l")
command.append(lang_code)
detokenizer = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=devnull)
detokenizer_thread = threading.Thread(target=copy_out, args=(detokenizer.stdout, out))
detokenizer_thread.start()
for line in inp:
detokenizer.stdin.write(line)
detokenizer.stdin.close()
detokenizer_thread.join()
detokenizer.wait()
|
Call Moses detokenizer.
:param workspace_dir: Workspace third-party directory where Moses
tokenizer is checked out.
:param input_fname: Path of tokenized input file, plain text or gzipped.
:param output_fname: Path of tokenized output file, plain text.
:param lang_code: Language code for rules and non-breaking prefixes. Can be
None if unknown (using pre-tokenized data), which will
cause the tokenizer to default to English.
|
374,670
|
def convert_relational(relational):
rel = relational.rel_op
    if rel in ['==', '>=', '>']:
        return relational.lhs - relational.rhs
    elif rel in ['<=', '<']:
        return relational.rhs - relational.lhs
else:
raise Exception("The relational operation is not "
"implemented!")
|
Convert all inequalities to >=0 form.
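For example, with SymPy relationals:
>>> from sympy.abc import x
>>> from sympy import Rel
>>> convert_relational(Rel(x, 2, '>='))
x - 2
>>> convert_relational(Rel(x, 2, '<'))
2 - x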
|
374,671
|
def window_iterator(data, width):
start = 0
while start < len(data):
yield data[start:start+width]
start += width
|
Instead of iterating element by element, get a number of elements at each iteration step.
:param data: data to iterate on
:param width: maximum number of elements to get in each iteration step
:return:
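For example:
>>> list(window_iterator([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]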
|
374,672
|
def _paged_api_call(self, func, kwargs, item_type='photo'):  # assumed default
page = 1
while True:
LOG.info("Fetching page %s" % page)
        kwargs['page'] = page
rsp = self._load_rsp(func(**kwargs))
if rsp["stat"] == "ok":
            plural = item_type + 's'
if plural in rsp:
items = rsp[plural]
if int(items["page"]) < page:
LOG.info("End of Flickr pages (%s pages with %s per page)" % (items["pages"], items["perpage"]))
break
for i in items[item_type]:
yield self._prep(i)
else:
yield rsp
page += 1
else:
yield [rsp]
break
|
Takes a Flickr API function object and dict of keyword args and calls the
API call repeatedly with an incrementing page value until all contents are exhausted.
Flickr seems to limit to about 500 items.
|
374,673
|
def get_agile_board(self, board_id):
    url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.get(url)
|
Get agile board info by id
:param board_id:
:return:
|
374,674
|
def add_boundary_regions(regions=None, faces=['right', 'left', 'front',
                                              'back', 'bottom', 'top']):
    # face-name, pad-mode and boundary-mode literals in this function were
    # lost in extraction; values assumed from the docstring
    if faces is not None:
        regions = sp.pad(regions, 1, 'edge')
if regions.ndim == 3:
regions[:, :, 0] = regions[:, :, 0] + regions.max()
regions[:, :, -1] = regions[:, :, -1] + regions.max()
regions[0, :, :] = regions[0, :, :] + regions.max()
regions[-1, :, :] = regions[-1, :, :] + regions.max()
regions[:, 0, :] = regions[:, 0, :] + regions.max()
regions[:, -1, :] = regions[:, -1, :] + regions.max()
            regions[:, :, 0] = (~find_boundaries(regions[:, :, 0],
                                                 mode='outer')) * regions[:, :, 0]
            regions[:, :, -1] = (~find_boundaries(regions[:, :, -1],
                                                  mode='outer')) * regions[:, :, -1]
            regions[0, :, :] = (~find_boundaries(regions[0, :, :],
                                                 mode='outer')) * regions[0, :, :]
            regions[-1, :, :] = (~find_boundaries(regions[-1, :, :],
                                                  mode='outer')) * regions[-1, :, :]
            regions[:, 0, :] = (~find_boundaries(regions[:, 0, :],
                                                 mode='outer')) * regions[:, 0, :]
            regions[:, -1, :] = (~find_boundaries(regions[:, -1, :],
                                                  mode='outer')) * regions[:, -1, :]
            regions = sp.pad(regions, 2, 'edge')
            if 'front' not in faces:
                regions = regions[:, 3:, :]
            if 'back' not in faces:
                regions = regions[:, :-3, :]
            if 'right' not in faces:
                regions = regions[3:, :, :]
            if 'left' not in faces:
                regions = regions[:-3, :, :]
            if 'bottom' not in faces:
                regions = regions[:, :, 3:]
            if 'top' not in faces:
                regions = regions[:, :, :-3]
elif regions.ndim == 2:
regions[0, :] = regions[0, :] + regions.max()
regions[-1, :] = regions[-1, :] + regions.max()
regions[:, 0] = regions[:, 0] + regions.max()
regions[:, -1] = regions[:, -1] + regions.max()
            regions[0, :] = (~find_boundaries(regions[0, :],
                                              mode='outer')) * regions[0, :]
            regions[-1, :] = (~find_boundaries(regions[-1, :],
                                               mode='outer')) * regions[-1, :]
            regions[:, 0] = (~find_boundaries(regions[:, 0],
                                              mode='outer')) * regions[:, 0]
            regions[:, -1] = (~find_boundaries(regions[:, -1],
                                               mode='outer')) * regions[:, -1]
            regions = sp.pad(regions, 2, 'edge')
            if 'right' not in faces:
                regions = regions[3:, :]
            if 'left' not in faces:
                regions = regions[:-3, :]
            if 'front' not in faces and 'bottom' not in faces:
                regions = regions[:, 3:]
            if 'back' not in faces and 'top' not in faces:
                regions = regions[:, :-3]
else:
            print('add_boundary_regions only supports 2D and 3D images')  # assumed message
regions = make_contiguous(regions)
else:
regions = regions
return regions
|
Given an image partitioned into regions, pads specified faces with new
regions
Parameters
----------
regions : ND-array
An image of the pore space partitioned into regions and labeled
faces : list of strings
The faces of ``regions`` which should have boundaries added. Options
are:
*'right'* - Adds boundaries to the ``im[0, :, :]`` face
*'left'* - Adds boundaries to the ``im[-1, :, :]`` face
*'front'* - Adds boundaries to the ``im[:, 0, :]`` face
*'back'* - Adds boundaries to the ``im[:, -1, :]`` face
*'bottom'* - Adds boundaries to the ``im[:, :, 0]`` face
*'top'* - Adds boundaries to the ``im[:, :, -1]`` face
The default is all faces.
Returns
-------
image : ND-array
A copy of ``regions`` with the specified boundaries added, so will be
slightly larger in each direction where boundaries were added.
|
374,675
|
def clone(self, instance):
metaclass = get_metaclass(instance)
metaclass = self.find_metaclass(metaclass.kind)
return metaclass.clone(instance)
|
Create a shallow clone of an *instance*.
**Note:** the clone and the original instance **do not** have to be
part of the same metaclass.
|
374,676
|
def channels(self):
def comparator(channel):
return (not isinstance(channel, TextChannel), channel.position)
ret = [c for c in self.guild.channels if c.category_id == self.id]
ret.sort(key=comparator)
return ret
|
List[:class:`abc.GuildChannel`]: Returns the channels that are under this category.
These are sorted by the official Discord UI, which places voice channels below the text channels.
|
374,677
|
def parse_dossier_data(data, ep):
changed = False
doc_changed = False
    # key names and log/message literals in this function were lost in
    # extraction; values assumed from the parltrack dossier export layout
    ref = data['procedure']['reference']
    logger.debug('Processing dossier %s', ref)
with transaction.atomic():
try:
dossier = Dossier.objects.get(reference=ref)
except Dossier.DoesNotExist:
dossier = Dossier(reference=ref)
            logger.debug('Creating new dossier')
changed = True
        if dossier.title != data['procedure']['title']:
            logger.debug('Title changed from "%s" to "%s"', dossier.title,
                         data['procedure']['title'])
            dossier.title = data['procedure']['title']
            changed = True
        if changed:
            logger.info('Saving dossier %s', ref)
dossier.save()
        source = data['meta']['source'].replace('&l=en', '')
        try:
            doc = Document.objects.get(dossier=dossier, kind='procedure')
        except Document.DoesNotExist:
            doc = Document(dossier=dossier, kind='procedure', chamber=ep)
            logger.debug('Creating new document for dossier %s', ref)
            doc_changed = True
        if doc.link != source:
            logger.debug('Link changed from %s to %s', doc.link, source)
            doc.link = source
            doc_changed = True
        if doc_changed:
            logger.info('Saving document %s for dossier %s', doc.link, ref)
doc.save()
        if 'votes' in data.keys() and 'epref' in data['votes']:  # assumed keys
command = Command()
command.init_cache()
            command.parse_vote_data(data['votes'])
|
Parse data from a parltrack dossier export (one dossier). Updates the dossier
if it existed before; the goal of this function is to import and update a
single dossier, not to import all parltrack data.
|
374,678
|
def _compute_a22_factor(self, imt):
    if imt.name == 'PGV':  # assumed: PGV carries no period attribute
return 0.0
period = imt.period
if period < 2.0:
return 0.0
else:
return 0.0625 * (period - 2.0)
|
Compute and return the a22 factor, equation 20, page 80.
|
374,679
|
def precompile_python_code(context: Context):
from compileall import compile_dir
kwargs = {}
if context.verbosity < 2:
        kwargs['quiet'] = True
compile_dir(context.app.django_app_name, **kwargs)
|
Pre-compiles python modules
|
374,680
|
def try_greyscale(pixels, alpha=False, dirty_alpha=True):
planes = 3 + bool(alpha)
res = list()
apix = list()
for row in pixels:
green = row[1::planes]
if alpha:
            apix.append(row[3::planes])  # every 4th sample starting at the alpha channel
if (green != row[0::planes] or green != row[2::planes]):
return False
else:
res.append(green)
if alpha:
return MergedPlanes(res, 1, apix, 1)
else:
return res
|
Check if flatboxed RGB `pixels` could be converted to greyscale.
If so, return an iterator of greyscale pixels;
otherwise return the constant `False`.
|
374,681
|
def get_gan_loss(self, true_frames, gen_frames, name):
with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE):
gan_d_loss, _, fake_logits_stop = self.d_step(
true_frames, gen_frames)
with tf.variable_scope("%s_discriminator" % name, reuse=True):
gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step(
gen_frames, fake_logits_stop)
gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d
tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss)
if self.hparams.gan_optimization == "joint":
gan_loss = gan_g_loss + gan_d_loss
else:
curr_step = self.get_iteration_num()
gan_loss = tf.cond(
tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss,
lambda: gan_d_loss)
return gan_loss
|
Get the discriminator + generator loss at every step.
This performs a 1:1 update of the discriminator and generator at every
step.
Args:
true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be ground truth.
gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C)
Assumed to be fake.
name: discriminator scope.
Returns:
loss: 0-D Tensor, with d_loss + g_loss
|
374,682
|
def read_lock(self):
me = self._current_thread()
if me in self._pending_writers:
raise RuntimeError("Writer %s can not acquire a read lock"
" while waiting for the write lock"
% me)
with self._cond:
while True:
if self._writer is None or self._writer == me:
try:
self._readers[me] = self._readers[me] + 1
except KeyError:
self._readers[me] = 1
break
self._cond.wait()
try:
yield self
finally:
with self._cond:
try:
me_instances = self._readers[me]
if me_instances > 1:
self._readers[me] = me_instances - 1
else:
self._readers.pop(me)
except KeyError:
pass
self._cond.notify_all()
|
Context manager that grants a read lock.
Will wait until no active or pending writers.
Raises a ``RuntimeError`` if a pending writer tries to acquire
a read lock.
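A hedged usage sketch (assumes the enclosing reader-writer lock is exposed as ``rw_lock`` and that ``read_lock`` is wrapped as a context manager, as the ``yield`` suggests):
    with rw_lock.read_lock():
        # several readers may hold the lock at once; writers wait
        snapshot = dict(shared_state)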
|
374,683
|
def locate_point(nodes, x_val, y_val):
zero1 = _curve_helpers.full_reduce(nodes[[0], :]) - x_val
zero2 = _curve_helpers.full_reduce(nodes[[1], :]) - y_val
if zero1.shape[1] > zero2.shape[1]:
zero1, zero2 = zero2, zero1
power_basis1 = poly_to_power_basis(zero1[0, :])
all_roots = roots_in_unit_interval(power_basis1)
if all_roots.size == 0:
return None
power_basis2 = normalize_polynomial(poly_to_power_basis(zero2[0, :]))
near_zero = np.abs(polynomial.polyval(all_roots, power_basis2))
index = np.argmin(near_zero)
if near_zero[index] < _ZERO_THRESHOLD:
return all_roots[index]
return None
|
r"""Find the parameter corresponding to a point on a curve.
.. note::
This assumes that the curve :math:`B(s, t)` defined by ``nodes``
lives in :math:`\mathbf{R}^2`.
Args:
nodes (numpy.ndarray): The nodes defining a B |eacute| zier curve.
x_val (float): The :math:`x`-coordinate of the point.
y_val (float): The :math:`y`-coordinate of the point.
Returns:
Optional[float]: The parameter on the curve (if it exists).
|
374,684
|
def _get_version_for_class_from_state(state, klass):
names = [_importable_name(klass)]
from .util import class_rename_registry
names.extend(class_rename_registry.old_handled_by(klass))
for n in names:
try:
            return state['versions'][n]  # assumed key
except KeyError:
continue
    if _debug:
        logger.debug('No version found for %s', klass)  # assumed message
    return float('inf')  # assumed sentinel for "unknown version"
|
retrieves the version of the current klass from the state mapping from old locations to new ones.
|
374,685
|
def get_device_status(host, services=None, zconf=None):
try:
status = _get_status(
host, services, zconf, "/setup/eureka_info?options=detail")
        friendly_name = status.get('name', "Unknown Chromecast")
model_name = "Unknown model name"
manufacturer = "Unknown manufacturer"
        if 'detail' in status:
            model_name = status['detail'].get('model_name', model_name)
            manufacturer = status['detail'].get('manufacturer', manufacturer)
        udn = status.get('ssdp_udn', None)
cast_type = CAST_TYPES.get(model_name.lower(),
CAST_TYPE_CHROMECAST)
uuid = None
if udn:
            uuid = UUID(udn.replace('-', ''))
return DeviceStatus(friendly_name, model_name, manufacturer,
uuid, cast_type)
except (requests.exceptions.RequestException, OSError, ValueError):
return None
|
:param host: Hostname or ip to fetch status from
:type host: str
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
|
374,686
|
def _create_app(self):
template_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), self.TEMPLATE_FOLDER, self.TEMPLATE_FILENAME
)
    # docopt argument keys and messages assumed from the USAGE line in the docstring
    new_dir = self._arguments['<dir_name>']
    override_destination = self._arguments.get('--destination', None)
    if override_destination is not None:
        if override_destination == '':
            raise ValueError('Destination cannot be empty')
if os.path.isabs(override_destination) and os.path.isdir(override_destination):
new_dir = os.path.join(override_destination, new_dir)
else:
override_path = os.path.join(os.getcwd(), override_destination)
if not os.path.isabs(override_path) or not os.path.isdir(override_path):
                raise ValueError('%s is not a valid destination directory' % override_destination)  # assumed message
new_dir = os.path.join(override_path, new_dir)
else:
if os.path.isabs(new_dir) or os.path.sep in new_dir:
raise ValueError("Directory name is invalid")
new_dir = os.path.join(os.getcwd(), new_dir)
os.makedirs(new_dir)
new_file_path = os.path.join(new_dir, self.DEFAULT_NEW_APP_FILENAME)
shutil.copyfile(template_path, new_file_path)
    printer("Created new application template: %s" % new_file_path)  # message assumed; original stripped
|
Method for creating a new Application Template.
USAGE: cloud-harness create <dir_name> [--destination=<path>]
|
374,687
|
def getValue(self):
dropdown_value = self.widget.GetValue()
if not str(dropdown_value).isdigit():
return
    arg = str(self.option_string).replace('-', '')
    repeated_args = arg * int(dropdown_value)
    return '-' + repeated_args
|
Returns
str(option_string * DropDown Value)
e.g.
-vvvvv
|
374,688
|
def repr_imgs(imgs):
if isinstance(imgs, string_types):
return imgs
if isinstance(imgs, collections.Iterable):
        return '[{}]'.format(', '.join(repr_imgs(img) for img in imgs))
try:
filename = imgs.get_filename()
if filename is not None:
img_str = "{}()".format(imgs.__class__.__name__, filename)
else:
img_str = "{}(shape={}, affine={})".format(imgs.__class__.__name__,
repr(get_shape(imgs)),
repr(imgs.get_affine()))
except Exception as exc:
        log.error("Error reading attributes from the image; falling back to repr.")  # message assumed; original stripped
return repr(imgs)
else:
return img_str
|
Printing of img or imgs
|
374,689
|
def _add_games_to_schedule(self, schedule):
for item in schedule:
        # marker strings assumed; the original literals were stripped in extraction
        if 'BYE' in str(item) or \
                'Playoffs' in str(item):
continue
game = Game(item)
self._games.append(game)
|
Add game information to list of games.
Create a Game instance for the given game in the schedule and add it to
the list of games the team has or will play during the season.
Parameters
----------
schedule : PyQuery object
A PyQuery object pertaining to a team's schedule table.
year : string
The requested year to pull stats from.
|
374,690
|
def main():
args = parse_arguments()
if args.askpass:
password = getpass.getpass("Password: ")
else:
password = None
if args.asksudopass:
sudo = True
sudo_pass = getpass.getpass("Sudo password[default ssh password]: ")
if len(sudo_pass) == 0:
sudo_pass = password
        sudo_user = 'root'  # default assumed; the original literal was stripped
else:
sudo = False
sudo_pass = None
sudo_user = None
if not args.username:
username = getpass.getuser()
else:
username = args.username
host_list = args.hosts
os.environ["ANSIBLE_HOST_KEY_CHECKING"] = "False"
execute_ping(host_list, username, password,
sudo=sudo, sudo_user=sudo_user, sudo_pass=sudo_pass)
|
Simple examples
|
374,691
|
def where(self, fieldname, value, negate=False):
if negate:
return self.mask([elem != value
for elem in self[fieldname]])
else:
return self.mask([elem == value
for elem in self[fieldname]])
|
Returns a new DataTable with only the rows where the value at
`fieldname` equals `value`; with negate=True, keeps the rows where it does not.
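Usage sketch (the table and column names are hypothetical):
    ca_rows = table.where('state', 'CA')                   # rows with state == 'CA'
    other_rows = table.where('state', 'CA', negate=True)   # rows with state != 'CA'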
|
374,692
|
def modify_replication_group(ReplicationGroupId=None, ReplicationGroupDescription=None, PrimaryClusterId=None, SnapshottingClusterId=None, AutomaticFailoverEnabled=None, CacheSecurityGroupNames=None, SecurityGroupIds=None, PreferredMaintenanceWindow=None, NotificationTopicArn=None, CacheParameterGroupName=None, NotificationTopicStatus=None, ApplyImmediately=None, EngineVersion=None, AutoMinorVersionUpgrade=None, SnapshotRetentionLimit=None, SnapshotWindow=None, CacheNodeType=None, NodeGroupId=None):
pass
|
Modifies the settings for a replication group.
See also: AWS API Documentation
:example: response = client.modify_replication_group(
ReplicationGroupId='string',
ReplicationGroupDescription='string',
PrimaryClusterId='string',
SnapshottingClusterId='string',
AutomaticFailoverEnabled=True|False,
CacheSecurityGroupNames=[
'string',
],
SecurityGroupIds=[
'string',
],
PreferredMaintenanceWindow='string',
NotificationTopicArn='string',
CacheParameterGroupName='string',
NotificationTopicStatus='string',
ApplyImmediately=True|False,
EngineVersion='string',
AutoMinorVersionUpgrade=True|False,
SnapshotRetentionLimit=123,
SnapshotWindow='string',
CacheNodeType='string',
NodeGroupId='string'
)
:type ReplicationGroupId: string
:param ReplicationGroupId: [REQUIRED]
The identifier of the replication group to modify.
:type ReplicationGroupDescription: string
:param ReplicationGroupDescription: A description for the replication group. Maximum length is 255 characters.
:type PrimaryClusterId: string
:param PrimaryClusterId: For replication groups with a single primary, if this parameter is specified, ElastiCache promotes the specified cluster in the specified replication group to the primary role. The nodes of all other clusters in the replication group are read replicas.
:type SnapshottingClusterId: string
:param SnapshottingClusterId: The cache cluster ID that is used as the daily snapshot source for the replication group. This parameter cannot be set for Redis (cluster mode enabled) replication groups.
:type AutomaticFailoverEnabled: boolean
:param AutomaticFailoverEnabled: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure.
Valid values: true | false
Note
ElastiCache Multi-AZ replication groups are not supported on:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled): T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
:type CacheSecurityGroupNames: list
:param CacheSecurityGroupNames: A list of cache security group names to authorize for the clusters in this replication group. This change is asynchronously applied as soon as possible.
This parameter can be used only with replication group containing cache clusters running outside of an Amazon Virtual Private Cloud (Amazon VPC).
Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default .
(string) --
:type SecurityGroupIds: list
:param SecurityGroupIds: Specifies the VPC Security Groups associated with the cache clusters in the replication group.
This parameter can be used only with replication group containing cache clusters running in an Amazon Virtual Private Cloud (Amazon VPC).
(string) --
:type PreferredMaintenanceWindow: string
:param PreferredMaintenanceWindow: Specifies the weekly time range during which maintenance on the cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H Clock UTC). The minimum maintenance window is a 60 minute period.
Valid values for ddd are:
sun
mon
tue
wed
thu
fri
sat
Example: sun:23:00-mon:01:30
:type NotificationTopicArn: string
:param NotificationTopicArn: The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications are sent.
Note
The Amazon SNS topic owner must be same as the replication group owner.
:type CacheParameterGroupName: string
:param CacheParameterGroupName: The name of the cache parameter group to apply to all of the clusters in this replication group. This change is asynchronously applied as soon as possible for parameters when the ApplyImmediately parameter is specified as true for this request.
:type NotificationTopicStatus: string
:param NotificationTopicStatus: The status of the Amazon SNS notification topic for the replication group. Notifications are sent only if the status is active .
Valid values: active | inactive
:type ApplyImmediately: boolean
:param ApplyImmediately: If true , this parameter causes the modifications in this request and any pending modifications to be applied, asynchronously and as soon as possible, regardless of the PreferredMaintenanceWindow setting for the replication group.
If false , changes to the nodes in the replication group are applied on the next maintenance reboot, or the next failure reboot, whichever occurs first.
Valid values: true | false
Default: false
:type EngineVersion: string
:param EngineVersion: The upgraded version of the cache engine to be run on the cache clusters in the replication group.
Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version ), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version.
:type AutoMinorVersionUpgrade: boolean
:param AutoMinorVersionUpgrade: This parameter is currently disabled.
:type SnapshotRetentionLimit: integer
:param SnapshotRetentionLimit: The number of days for which ElastiCache retains automatic node group (shard) snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a snapshot that was taken today is retained for 5 days before being deleted.
Important If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
:type SnapshotWindow: string
:param SnapshotWindow: The daily time range (in UTC) during which ElastiCache begins taking a daily snapshot of the node group (shard) specified by SnapshottingClusterId .
Example: 05:00-09:00
If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range.
:type CacheNodeType: string
:param CacheNodeType: A valid cache node type that you want to scale this replication group to.
:type NodeGroupId: string
:param NodeGroupId: The name of the Node Group (called shard in the console).
:rtype: dict
:return: {
'ReplicationGroup': {
'ReplicationGroupId': 'string',
'Description': 'string',
'Status': 'string',
'PendingModifiedValues': {
'PrimaryClusterId': 'string',
'AutomaticFailoverStatus': 'enabled'|'disabled'
},
'MemberClusters': [
'string',
],
'NodeGroups': [
{
'NodeGroupId': 'string',
'Status': 'string',
'PrimaryEndpoint': {
'Address': 'string',
'Port': 123
},
'Slots': 'string',
'NodeGroupMembers': [
{
'CacheClusterId': 'string',
'CacheNodeId': 'string',
'ReadEndpoint': {
'Address': 'string',
'Port': 123
},
'PreferredAvailabilityZone': 'string',
'CurrentRole': 'string'
},
]
},
],
'SnapshottingClusterId': 'string',
'AutomaticFailover': 'enabled'|'disabled'|'enabling'|'disabling',
'ConfigurationEndpoint': {
'Address': 'string',
'Port': 123
},
'SnapshotRetentionLimit': 123,
'SnapshotWindow': 'string',
'ClusterEnabled': True|False,
'CacheNodeType': 'string'
}
}
:returns:
Redis versions earlier than 2.8.6.
Redis (cluster mode disabled): T1 and T2 cache node types. Redis (cluster mode enabled): T1 node types.
|
374,693
|
def parse(self, gff_file, strict=False):
    valid_strand = set(('+', '-', '.', '?'))
    valid_phase = set((0, 1, 2))
    multi_value_attributes = set(('Parent', 'Alias', 'Note', 'Dbxref', 'Ontology_term'))
    valid_attribute_target_strand = set(('+', '-', ''))
    reserved_attributes = set(('ID', 'Name', 'Alias', 'Parent', 'Target', 'Gap', 'Derives_from', 'Note', 'Dbxref', 'Ontology_term', 'Is_circular'))
    # NOTE: string literals in this entry were stripped during extraction; values in this
    # function are restored from the docstring and the GFF3 spec where clear, and
    # approximated elsewhere (notably the two regex patterns and the error messages).
    unescaped_seqid = re.compile(r'[^a-zA-Z0-9.:^*$@!+_?|%-]').search
    unescaped_field = re.compile(r'[\x00-\x08\x0b-\x0c\x0e-\x1f\x7f]|%(?![0-9a-fA-F]{2})').search
    gff_fp = gff_file
    if isinstance(gff_file, str):
        gff_fp = open(gff_file, 'r')
lines = []
current_line_num = 1
features = defaultdict(list)
unresolved_parents = defaultdict(list)
for line_raw in gff_fp:
        line_data = {
            'line_index': current_line_num - 1,
            'line_raw': line_raw,
            'line_status': 'normal',
            'parents': [],
            'children': [],
            'line_type': '',
            'directive': '',
            'line_errors': [],
            'type': '',
        }
        line_strip = line_raw.strip()
        if line_strip != line_raw[:len(line_strip)]:
            self.add_line_error(line_data, {'message': 'White chars not allowed at the start of a line', 'error_type': 'FORMAT', 'location': ''})
        if current_line_num == 1 and not line_strip.startswith('##gff-version'):
            self.add_line_error(line_data, {'message': '"##gff-version" missing from the first line', 'error_type': 'FORMAT', 'location': ''})
        if len(line_strip) == 0:
            line_data['line_type'] = 'blank'
            continue
        if line_strip.startswith('##'):
            line_data['line_type'] = 'directive'
            if line_strip.startswith('##sequence-region'):
                line_data['directive'] = '##sequence-region'
                tokens = list(line_strip.split()[1:])
                if len(tokens) != 3:
                    self.add_line_error(line_data, {'message': 'Expecting 3 fields, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
                if len(tokens) > 0:
                    line_data['seqid'] = tokens[0]
                    if [True for d in lines if ('directive' in d and d['directive'] == '##sequence-region' and 'seqid' in d and d['seqid'] == line_data['seqid'])]:
                        self.add_line_error(line_data, {'message': '##sequence-region seqid "%s" may only appear once' % line_data['seqid'], 'error_type': 'FORMAT', 'location': ''})
                    try:
                        all_good = True
                        try:
                            line_data['start'] = int(tokens[1])
                            if line_data['start'] < 1:
                                self.add_line_error(line_data, {'message': 'Start is not a valid 1-based integer coordinate: "%s"' % tokens[1], 'error_type': 'FORMAT', 'location': ''})
                        except ValueError:
                            all_good = False
                            self.add_line_error(line_data, {'message': 'Start is not a valid integer: "%s"' % tokens[1], 'error_type': 'FORMAT', 'location': ''})
                            line_data['start'] = tokens[1]
                        try:
                            line_data['end'] = int(tokens[2])
                            if line_data['end'] < 1:
                                self.add_line_error(line_data, {'message': 'End is not a valid 1-based integer coordinate: "%s"' % tokens[2], 'error_type': 'FORMAT', 'location': ''})
                        except ValueError:
                            all_good = False
                            self.add_line_error(line_data, {'message': 'End is not a valid integer: "%s"' % tokens[2], 'error_type': 'FORMAT', 'location': ''})
                            line_data['end'] = tokens[2]
                        if all_good and line_data['start'] > line_data['end']:
                            self.add_line_error(line_data, {'message': 'Start is not less than or equal to end', 'error_type': 'FORMAT', 'location': ''})
                    except IndexError:
                        pass
            elif line_strip.startswith('##gff-version'):
                line_data['directive'] = '##gff-version'
                if [True for d in lines if ('directive' in d and d['directive'] == '##gff-version')]:
                    self.add_line_error(line_data, {'message': 'Duplicate ##gff-version directive; it may only appear once', 'error_type': 'FORMAT', 'location': ''})
                tokens = list(line_strip.split()[1:])
                if len(tokens) != 1:
                    self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
                if len(tokens) > 0:
                    try:
                        line_data['version'] = int(tokens[0])
                        if line_data['version'] != 3:
                            self.add_line_error(line_data, {'message': 'Version is not "3": "%s"' % tokens[0], 'error_type': 'FORMAT', 'location': ''})
                    except ValueError:
                        self.add_line_error(line_data, {'message': 'Version is not a valid integer: "%s"' % tokens[0], 'error_type': 'FORMAT', 'location': ''})
                        line_data['version'] = tokens[0]
            elif line_strip.startswith('###'):
                line_data['directive'] = '###'
            elif line_strip.startswith('##FASTA'):
                line_data['directive'] = '##FASTA'
                self.logger.info('Reading embedded ##FASTA sequence')
                self.fasta_embedded, count = fasta_file_to_dict(gff_fp)
                self.logger.info('%d sequences read' % len(self.fasta_embedded))
            elif line_strip.startswith('##feature-ontology'):
                line_data['directive'] = '##feature-ontology'
                tokens = list(line_strip.split()[1:])
                if len(tokens) != 1:
                    self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
                if len(tokens) > 0:
                    line_data['URI'] = tokens[0]
            elif line_strip.startswith('##attribute-ontology'):
                line_data['directive'] = '##attribute-ontology'
                tokens = list(line_strip.split()[1:])
                if len(tokens) != 1:
                    self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
                if len(tokens) > 0:
                    line_data['URI'] = tokens[0]
            elif line_strip.startswith('##source-ontology'):
                line_data['directive'] = '##source-ontology'
                tokens = list(line_strip.split()[1:])
                if len(tokens) != 1:
                    self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
                if len(tokens) > 0:
                    line_data['URI'] = tokens[0]
            elif line_strip.startswith('##species'):
                line_data['directive'] = '##species'
                tokens = list(line_strip.split()[1:])
                if len(tokens) != 1:
                    self.add_line_error(line_data, {'message': 'Expecting 1 field, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
                if len(tokens) > 0:
                    line_data['NCBI_Taxonomy_URI'] = tokens[0]
            elif line_strip.startswith('##genome-build'):
                line_data['directive'] = '##genome-build'
                tokens = list(line_strip.split()[1:])
                if len(tokens) != 2:
                    self.add_line_error(line_data, {'message': 'Expecting 2 fields, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
                if len(tokens) > 0:
                    line_data['source'] = tokens[0]
                    try:
                        line_data['buildName'] = tokens[1]
                    except IndexError:
                        pass
            else:
                self.add_line_error(line_data, {'message': 'Unknown directive', 'error_type': 'FORMAT', 'location': ''})
                tokens = list(line_strip.split())
                line_data['directive'] = tokens[0]
        elif line_strip.startswith('#'):
            line_data['line_type'] = 'comment'
        else:
            line_data['line_type'] = 'feature'
            tokens = list(map(str.strip, line_raw.split('\t')))
            if len(tokens) != 9:
                self.add_line_error(line_data, {'message': 'Features should contain 9 fields, got %d: %s' % (len(tokens) - 1, repr(tokens[1:])), 'error_type': 'FORMAT', 'location': ''})
            for i, t in enumerate(tokens):
                if not t:
                    self.add_line_error(line_data, {'message': 'Empty field: %d' % (i + 1), 'error_type': 'FORMAT', 'location': ''})
            try:
                line_data['seqid'] = tokens[0]
                if unescaped_seqid(tokens[0]):
                    self.add_line_error(line_data, {'message': 'Seqid must escape any characters not in the set [a-zA-Z0-9.:^*$@!+_?-|]: "%s"' % tokens[0], 'error_type': 'FORMAT', 'location': ''})
                line_data['source'] = tokens[1]
                if unescaped_field(tokens[1]):
                    self.add_line_error(line_data, {'message': 'Source must escape the percent (%%) sign and any control characters: "%s"' % tokens[1], 'error_type': 'FORMAT', 'location': ''})
                line_data['type'] = tokens[2]
                if unescaped_field(tokens[2]):
                    self.add_line_error(line_data, {'message': 'Type must escape the percent (%%) sign and any control characters: "%s"' % tokens[2], 'error_type': 'FORMAT', 'location': ''})
                all_good = True
                try:
                    line_data['start'] = int(tokens[3])
                    if line_data['start'] < 1:
                        self.add_line_error(line_data, {'message': 'Start is not a valid 1-based integer coordinate: "%s"' % tokens[3], 'error_type': 'FORMAT', 'location': ''})
                except ValueError:
                    all_good = False
                    line_data['start'] = tokens[3]
                    if line_data['start'] != '.':
                        self.add_line_error(line_data, {'message': 'Start is not a valid integer: "%s"' % line_data['start'], 'error_type': 'FORMAT', 'location': ''})
                try:
                    line_data['end'] = int(tokens[4])
                    if line_data['end'] < 1:
                        self.add_line_error(line_data, {'message': 'End is not a valid 1-based integer coordinate: "%s"' % tokens[4], 'error_type': 'FORMAT', 'location': ''})
                except ValueError:
                    all_good = False
                    line_data['end'] = tokens[4]
                    if line_data['end'] != '.':
                        self.add_line_error(line_data, {'message': 'End is not a valid integer: "%s"' % line_data['end'], 'error_type': 'FORMAT', 'location': ''})
                if all_good and line_data['start'] > line_data['end']:
                    self.add_line_error(line_data, {'message': 'Start is not less than or equal to end', 'error_type': 'FORMAT', 'location': ''})
                try:
                    line_data['score'] = float(tokens[5])
                except ValueError:
                    line_data['score'] = tokens[5]
                    if line_data['score'] != '.':
                        self.add_line_error(line_data, {'message': 'Score is not a valid floating point number: "%s"' % line_data['score'], 'error_type': 'FORMAT', 'location': ''})
                line_data['strand'] = tokens[6]
                if line_data['strand'] not in valid_strand:
                    self.add_line_error(line_data, {'message': 'Strand has illegal characters: "%s"' % tokens[6], 'error_type': 'FORMAT', 'location': ''})
                try:
                    line_data['phase'] = int(tokens[7])
                    if line_data['phase'] not in valid_phase:
                        self.add_line_error(line_data, {'message': 'Phase is not 0, 1, or 2: "%s"' % tokens[7], 'error_type': 'FORMAT', 'location': ''})
                except ValueError:
                    line_data['phase'] = tokens[7]
                    if line_data['phase'] != '.':
                        self.add_line_error(line_data, {'message': 'Phase is not a valid integer: "%s"' % line_data['phase'], 'error_type': 'FORMAT', 'location': ''})
                    elif line_data['type'] == 'CDS':
                        self.add_line_error(line_data, {'message': 'Phase is required for all CDS features', 'error_type': 'FORMAT', 'location': ''})
                if unescaped_field(tokens[8]):
                    self.add_line_error(line_data, {'message': 'Attributes must escape the percent (%) sign and any control characters', 'error_type': 'FORMAT', 'location': ''})
                attribute_tokens = tuple(tuple(t for t in a.split('=')) for a in tokens[8].split(';') if a)
                line_data['attributes'] = {}
                if len(attribute_tokens) == 1 and len(attribute_tokens[0]) == 1 and attribute_tokens[0][0] == '.':
                    pass  # a lone '.' means no attributes
                else:
                    for a in attribute_tokens:
                        if len(a) != 2:
                            self.add_line_error(line_data, {'message': 'Attributes must contain one and only one equal (=) sign: "%s"' % ('='.join(a)), 'error_type': 'FORMAT', 'location': ''})
                        try:
                            tag, value = a
                        except ValueError:
                            tag, value = a[0], ''
                        if not tag:
                            self.add_line_error(line_data, {'message': 'Empty attribute tag: "%s"' % '='.join(a), 'error_type': 'FORMAT', 'location': ''})
                        if not value.strip():
                            self.add_line_error(line_data, {'message': 'Empty attribute value: "%s"' % '='.join(a), 'error_type': 'FORMAT', 'location': ''}, log_level=logging.WARNING)
                        if tag in line_data['attributes']:
                            self.add_line_error(line_data, {'message': 'Duplicate attribute tag: "%s"' % tag, 'error_type': 'FORMAT', 'location': ''})
                        if tag in multi_value_attributes:
                            if value.find(',') >= 0:
                                self.add_line_error(line_data, {'message': 'Found "," in %s attribute: "%s"' % (tag, value), 'error_type': 'FORMAT', 'location': ''}, log_level=logging.WARNING)
                            if tag in line_data['attributes']:
                                if tag == 'Note':  # tag literal assumed
                                    pass  # the remainder of the per-attribute handling is truncated in this dump
            except IndexError:  # restored to close the try block; the dump is truncated here
                pass
        current_line_num += 1      # restored; required for line_index bookkeeping
        lines.append(line_data)    # restored; required so self.lines is populated
    self.lines = lines
    self.features = features
    return 1
|
Parse the gff file into the following data structures:
* lines(list of line_data(dict))
- line_index(int): the index in lines
- line_raw(str)
- line_type(str in ['feature', 'directive', 'comment', 'blank', 'unknown'])
- line_errors(list of str): a list of error messages
- line_status(str in ['normal', 'modified', 'removed'])
- parents(list of feature(list of line_data(dict))): may have multiple parents
- children(list of line_data(dict))
- extra fields depending on line_type
* directive
- directive(str in ['##gff-version', '##sequence-region', '##feature-ontology', '##attribute-ontology', '##source-ontology', '##species', '##genome-build', '###', '##FASTA'])
- extra fields depending on directive
* feature
- seqid(str): must escape any characters not in the set [a-zA-Z0-9.:^*$@!+_?-|] using RFC 3986 Percent-Encoding
- source(str)
- type(str in so_types)
- start(int)
- end(int)
- score(float)
- strand(str in ['+', '-', '.', '?'])
- phase(int in [0, 1, 2])
- attributes(dict of tag(str) to value)
- ID(str)
- Name(str)
- Alias(list of str): multi value
- Parent(list of str): multi value
- Target(dict)
- target_id(str)
- start(int)
- end(int)
- strand(str in ['+', '-', ''])
- Gap(str): CIGAR format
- Derives_from(str)
- Note(list of str): multi value
- Dbxref(list of str): multi value
- Ontology_term(list of str): multi value
- Is_circular(str in ['true'])
* fasta_dict(dict of id(str) to sequence_item(dict))
- id(str)
- header(str)
- seq(str)
- line_length(int)
* features(dict of feature_id(str in line_data['attributes']['ID']) to feature(list of line_data(dict)))
A feature is a list of line_data(dict), since all lines that share an ID collectively represent a single feature.
During serialization, line_data(dict) references should be converted into line_index(int)
:param gff_file: a string path or file object
:param strict: when true, throw exception on syntax and format errors. when false, use best effort to finish parsing while logging errors
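Usage sketch (assuming the enclosing class is the Gff3 parser from the gff3 package; the file name is hypothetical):
    gff = Gff3()
    gff.parse('annotations.gff3')
    genes = [ld for ld in gff.lines
             if ld['line_type'] == 'feature' and ld['type'] == 'gene']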
|
374,694
|
def _cleanup_and_die(data):
tmpfiles = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R*.fastq"))
tmpfiles += glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.p"))
for tmpf in tmpfiles:
os.remove(tmpf)
|
Cleanup function for step 1: removes leftover temporary fastq and pickle files from the fastqs directory.
|
374,695
|
def delete(self, object_id):
obj = self.session.query(self.cls).filter_by(id=object_id).one()
self.session.delete(obj)
return obj
|
Delete an object by its id.
:param object_id: the object's id.
:return: the deleted object
:raises NoResultFound: when the object could not be found
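Usage sketch (assuming a repository-style wrapper around a SQLAlchemy session; the names are hypothetical):
    repo = UserRepository(session)  # subclass that sets self.cls = User
    deleted = repo.delete(42)
    session.commit()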
|
374,696
|
def calculate_bins(array, _=None, *args, **kwargs) -> BinningBase:
if array is not None:
if kwargs.pop("check_nan", True):
if np.any(np.isnan(array)):
raise RuntimeError("Cannot calculate bins in presence of NaN's.")
if kwargs.get("range", None):
array = array[(array >= kwargs["range"][0]) & (array <= kwargs["range"][1])]
if _ is None:
bin_count = 10
binning = numpy_binning(array, bin_count, *args, **kwargs)
elif isinstance(_, BinningBase):
binning = _
elif isinstance(_, int):
binning = numpy_binning(array, _, *args, **kwargs)
elif isinstance(_, str):
if _ in bincount_methods:
bin_count = ideal_bin_count(array, method=_)
binning = numpy_binning(array, bin_count, *args, **kwargs)
elif _ in binning_methods:
method = binning_methods[_]
binning = method(array, *args, **kwargs)
else:
raise RuntimeError("No binning method {0} available.".format(_))
elif callable(_):
binning = _(array, *args, **kwargs)
elif np.iterable(_):
binning = static_binning(array, _, *args, **kwargs)
else:
raise RuntimeError("Binning {0} not understood.".format(_))
return binning
|
Find optimal binning from arguments.
Parameters
----------
array: arraylike
Data from which the bins should be decided (sometimes used, sometimes not)
_: int or str or Callable or arraylike or Iterable or BinningBase
To-be-guessed parameter that specifies what kind of binning should be done
check_nan: bool
Check for the presence of nan's in array? Default: True
range: tuple
Limit values to a range. Some of the binning methods also (subsequently)
use this parameter for the bin shape.
Returns
-------
BinningBase
The resulting binning; its numpy representation is a two-dimensional array of bin-edge pairs (not necessarily consecutive).
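Usage sketch (assuming the physt-style helpers referenced in the body, e.g. numpy_binning, are importable):
    import numpy as np
    data = np.random.normal(size=1000)
    b1 = calculate_bins(data)                     # default: 10 numpy bins
    b2 = calculate_bins(data, 20)                 # explicit bin count
    b3 = calculate_bins(data, "sturges")          # ideal bin count method
    b4 = calculate_bins(data, 20, range=(-3, 3))  # restrict data to a range first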
|
374,697
|
def mav_to_gpx(infilename, outfilename):
mlog = mavutil.mavlink_connection(infilename)
    outf = open(outfilename, mode='w')
    # GPX output strings reconstructed from standard GPX 1.0; the exact originals were stripped
    def process_packet(timestamp, lat, lon, alt, hdg, v):
        t = time.localtime(timestamp)
        outf.write('''<trkpt lat="%s" lon="%s">
  <ele>%s</ele>
  <time>%s</time>
  <course>%s</course>
  <speed>%s</speed>
</trkpt>
''' % (lat, lon, alt,
       time.strftime("%Y-%m-%dT%H:%M:%SZ", t),
       hdg, v))
    def add_header():
        outf.write('''<?xml version="1.0" encoding="UTF-8"?>
<gpx version="1.0" creator="pymavlink"
     xmlns="http://www.topografix.com/GPX/1/0">
<trk>
<trkseg>
''')
    def add_footer():
        outf.write('''</trkseg>
</trk>
</gpx>
''')
add_header()
count=0
lat=0
lon=0
fix=0
while True:
        m = mlog.recv_match(type=['GPS_RAW', 'GPS_RAW_INT', 'GPS', 'GPS2'], condition=args.condition)
        if m is None:
            break
        if m.get_type() == 'GPS_RAW_INT':
            lat = m.lat/1.0e7
            lon = m.lon/1.0e7
            alt = m.alt/1.0e3
            v = m.vel/100.0
            hdg = m.cog/100.0
            timestamp = m._timestamp
            fix = m.fix_type
        elif m.get_type() == 'GPS_RAW':
            lat = m.lat
            lon = m.lon
            alt = m.alt
            v = m.v
            hdg = m.hdg
            timestamp = m._timestamp
            fix = m.fix_type
        elif m.get_type() == 'GPS' or m.get_type() == 'GPS2':
lat = m.Lat
lon = m.Lng
alt = m.Alt
v = m.Spd
hdg = m.GCrs
timestamp = m._timestamp
fix = m.Status
else:
pass
if fix < 2 and not args.nofixcheck:
continue
if lat == 0.0 or lon == 0.0:
continue
process_packet(timestamp, lat, lon, alt, hdg, v)
count += 1
add_footer()
print("Created %s with %u points" % (outfilename, count))
|
convert a mavlink log file to a GPX file
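Usage sketch (note the function also reads the module-level args for condition and nofixcheck, so it is normally driven by the mavtogpx command line):
    mav_to_gpx('flight.tlog', 'flight.gpx')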
|
374,698
|
def _run_config_cmds(self, commands, server):
    command_start = ['enable', 'configure']
    command_end = ['exit']
full_command = command_start + commands + command_end
self._run_eos_cmds(full_command, server)
|
Execute/sends a CAPI (Command API) command to EOS.
In this method, the list of commands is wrapped with prefix and
postfix commands to make it understandable by EOS.
:param commands: List of commands to be executed on EOS.
:param server: Server endpoint on the Arista switch to be configured
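Example (hypothetical VLAN configuration commands):
    self._run_config_cmds(['vlan 100', 'name tenant-net'], server)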
|
374,699
|
def plugin(module, *args, **kwargs):
def wrap(f):
m = module(f, *args, **kwargs)
if inspect.isclass(m):
for k, v in m.__dict__.items():
if not k.startswith("__"):
setattr(f, k, v)
elif inspect.isfunction(m):
            setattr(f, module.__name__, m)  # was 'kls', an undefined name; 'module' is the decorator argument
return f
return wrap
|
Decorator to extend a view with a plugin module.
The module can be a class or a function. It copies all of the module's methods
onto the decorated class, i.e.:
# Your module.py
def my_ext(view, **kwargs):
class MyExtension(object):
def my_view(self):
return {}
return MyExtension
# Your view.py
@plugin(my_ext)
class Index(View):
pass
:param module: object
:param args:
:param kwargs:
:return:
|