text stringlengths 78 104k | score float64 0 0.18 |
|---|---|
def is_parent_of(self, parent, child):
    """Return whether ``child`` is a branch descended from ``parent`` at
    any remove.
    """
    # 'trunk' is the root of the branch tree, so it is everyone's ancestor
    # and has no ancestors of its own.
    if parent == 'trunk':
        return True
    current = child
    # Walk the parent chain upward until we reach trunk or find `parent`.
    while current != 'trunk':
        if current not in self._branches:
            raise ValueError(
                "The branch {} seems not to have ever been created".format(
                    current
                )
            )
        ancestor = self._branches[current][0]
        if ancestor == parent:
            return True
        current = ancestor
    return False
def multizone_member_added(self, member_uuid):
    """Handle added audio group member."""
    casts = self._casts
    # Lazily create the bookkeeping entry for a cast we have not seen yet.
    entry = casts.setdefault(member_uuid, {'listeners': [], 'groups': set()})
    entry['groups'].add(self._group_uuid)
    # Iterate over a snapshot so listeners may (un)register during callback.
    for listener in list(entry['listeners']):
        listener.added_to_multizone(self._group_uuid)
def is_packet_type(cls):
    """Check if class is one the packet types."""
    # Imported lazily to avoid a circular import at module load time.
    from .packet_types import EddystoneUIDFrame, EddystoneURLFrame, \
        EddystoneEncryptedTLMFrame, EddystoneTLMFrame, \
        EddystoneEIDFrame, IBeaconAdvertisement, \
        EstimoteTelemetryFrameA, EstimoteTelemetryFrameB
    known_types = (
        EddystoneURLFrame,
        EddystoneUIDFrame,
        EddystoneEncryptedTLMFrame,
        EddystoneTLMFrame,
        EddystoneEIDFrame,
        IBeaconAdvertisement,
        EstimoteTelemetryFrameA,
        EstimoteTelemetryFrameB,
    )
    return cls in known_types
def get_nn_info(self, structure, n):
    """
    Get all near-neighbor sites as well as the associated image locations
    and weights of the site with index n using the closest relative
    neighbor distance-based method with O'Keeffe parameters.
    Args:
        structure (Structure): input structure.
        n (integer): index of site for which to determine near
            neighbors.
    Returns:
        siw (list of tuples (Site, array, float)): tuples, each one
            of which represents a neighbor site, its image location,
            and its weight.
    """
    site = structure[n]
    neighs_dists = structure.get_neighbors(site, self.cutoff)
    # Disordered sites have no single .specie/.element; pymatgen raises
    # AttributeError there, so fall back to the species string. The
    # original bare `except:` also swallowed unrelated errors.
    try:
        eln = site.specie.element
    except AttributeError:
        eln = site.species_string
    # Express each neighbor distance relative to the O'Keeffe
    # bond-length prediction for the element pair.
    reldists_neighs = []
    for neigh, dist in neighs_dists:
        try:
            el2 = neigh.specie.element
        except AttributeError:
            el2 = neigh.species_string
        reldists_neighs.append(
            [dist / get_okeeffe_distance_prediction(eln, el2), neigh])
    siw = []
    min_reldist = min(reldist for reldist, neigh in reldists_neighs)
    # Keep every neighbor within (1 + tol) of the closest relative
    # distance, weighting by proximity to the minimum.
    for reldist, s in reldists_neighs:
        if reldist < (1.0 + self.tol) * min_reldist:
            w = min_reldist / reldist
            siw.append({'site': s,
                        'image': self._get_image(structure, s),
                        'weight': w,
                        'site_index': self._get_original_site(structure, s)})
    return siw
def find(self, path, all=False):
    """
    Looks for files in the app directories.
    """
    found = []
    for app in self.apps:
        # Record every location we searched (used for debug reporting).
        location = self.storages[app].location
        if location not in searched_locations:
            searched_locations.append(location)
        result = self.find_in_app(app, path)
        if not result:
            continue
        if not all:
            # First hit wins unless the caller asked for every match.
            return result
        found.append(result)
    return found
def client(self):
    """Get an elasticsearch client
    """
    # EAFP: reuse the cached client; create it on first access only.
    try:
        return self._client
    except AttributeError:
        self._client = connections.get_connection("default")
        return self._client
def delete(self, path, data=None, headers=None, params=None):
    """
    Deletes resources at given paths.
    :rtype: dict
    :return: Empty dictionary to have consistent interface.
        Some of Atlassian REST resources don't return any content.
    """
    # Propagate the underlying response so callers actually receive the
    # value promised above (the original dropped the return value and
    # always returned None).
    return self.request('DELETE', path=path, data=data,
                        headers=headers, params=params)
def vcsNodeState_nodeRbridgeid(self, **kwargs):
    """Auto Generated Code
    """
    # Build <config><vcsNodeState xmlns=...><nodeRbridgeid>VALUE</...></...>
    config = ET.Element("config")
    vcs_node_state = ET.SubElement(
        config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
    rbridge_elem = ET.SubElement(vcs_node_state, "nodeRbridgeid")
    rbridge_elem.text = kwargs.pop('nodeRbridgeid')
    # A caller-supplied callback overrides the instance default.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def newarray(self, length, value=0):
    """Initialise empty row"""
    row = [value] * length
    # Samples wider than 8 bits need 16-bit storage ('H' = unsigned short);
    # 8-bit-or-less rows fit in a plain bytearray.
    if self.bitdepth > 8:
        return array('H', row)
    return bytearray(row)
def create_vault(self, *args, **kwargs):
    """Pass through to provider VaultAdminSession.create_vault"""
    # Implemented from kitosid template for -
    # osid.resource.BinAdminSession.create_bin
    session = self._get_provider_session('vault_admin_session')
    provider_vault = session.create_vault(*args, **kwargs)
    # Wrap the provider object so callers always receive the kitosid type.
    return Vault(self._provider_manager, provider_vault,
                 self._runtime, self._proxy)
def set_entry(key, value):
    """
    Set a configuration entry
    :param key: key name
    :param value: value for this key
    :raises KeyError: if key is not str
    """
    # isinstance is the idiomatic type check (also accepts str subclasses,
    # which `type(key) != str` wrongly rejected). KeyError is kept for
    # backward compatibility with existing callers.
    if not isinstance(key, str):
        raise KeyError('key must be str')
    _config[key] = value
def get_tables(self, db_name, pattern):
    """
    Parameters:
     - db_name
     - pattern
    """
    # Fire the RPC request, then block on the matching response.
    request = (db_name, pattern)
    self.send_get_tables(*request)
    return self.recv_get_tables()
def toBCD(n):
    """Converts the number n into Binary Coded Decimal."""
    bcd = 0
    bits = 0
    while True:
        # Peel off the least-significant decimal digit and place it in
        # the next 4-bit nibble of the result.
        n, r = divmod(n, 10)
        bcd |= (r << bits)
        # `n is 0` relied on CPython's small-int interning (and is a
        # SyntaxWarning on modern Python); use value equality.
        if n == 0:
            break
        bits += 4
    return bcd
def notify(self, data):
    """Notify this channel of inbound data.

    :param data: inbound notification dict; must contain a 'channel' key
        and, for de-registration/expiry channels, a 'value' key.
    :return: whatever the parent class's ``notify`` returns for the
        mapped message.
    """
    # Channels whose payload is a bare device-id string rather than a
    # structured endpoint record.
    string_channels = {
        ChannelIdentifiers.de_registrations,
        ChannelIdentifiers.registrations_expired
    }
    if data['channel'] in string_channels:
        # Wrap the plain value in a minimal message dict.
        message = {'device_id': data["value"], 'channel': data["channel"]}
    else:
        # Structured endpoint data needs field mapping first.
        message = DeviceStateChanges._map_endpoint_data(data)
    return super(DeviceStateChanges, self).notify(message)
def process_composite(self, response):
    """
    Process a composite response.
    composites do not have inter item separators as they appear joined.
    We need to respect the universal options too.

    Mutates ``response`` in place: normalizes ``response["composite"]`` to
    a plain list of i3bar-style item dicts, applies module options
    (markup, colors, borders, urgency) to each item, and pads the first/
    last item when a ``min_length`` option is configured.

    :param response: module output dict containing a "composite" key.
    :raises Exception: if "composite" is not list-like, or if both
        "full_text" and "composite" are present.
    :raises KeyError: if any composite item lacks "full_text".
    """
    composite = response["composite"]
    # if the composite is of not Composite make it one so we can simplify
    # it.
    if not isinstance(composite, Composite):
        composite = Composite(composite)
    # simplify and get underlying list.
    composite = composite.simplify().get_content()
    response["composite"] = composite
    if not isinstance(composite, list):
        raise Exception('expecting "composite" key in response')
    # if list is empty nothing to do
    if not len(composite):
        return
    if "full_text" in response:
        err = 'conflicting "full_text" and "composite" in response'
        raise Exception(err)
    # set markup
    if "markup" in self.py3status_module_options:
        markup = self.py3status_module_options["markup"]
        for item in composite:
            item["markup"] = markup
    # set universal options on last component
    composite[-1].update(self.i3bar_module_options)
    # update all components
    color = response.get("color")
    urgent = response.get("urgent")
    composite_length = len(response["composite"]) - 1
    for index, item in enumerate(response["composite"]):
        # validate the response
        if "full_text" not in item:
            raise KeyError('missing "full_text" key in response')
        # make sure all components have a name
        if "name" not in item:
            instance_index = item.get("index", index)
            item["instance"] = "{} {}".format(self.module_inst, instance_index)
            item["name"] = self.module_name
        # hide separator for all inner components unless existing
        if index != composite_length:
            if "separator" not in item:
                item["separator"] = False
                item["separator_block_width"] = 0
        # If a color was supplied for the composite and a composite
        # part does not supply a color, use the composite color.
        if color and "color" not in item:
            item["color"] = color
        # Remove any none color from our output
        if hasattr(item.get("color"), "none_setting"):
            del item["color"]
        # set background and border colors. set left/right border widths
        # only on first/last composites and no border width for inner
        # composites or we will see border lines between composites.
        for key, value in self.i3bar_gaps_module_options.items():
            if (key == "border_left" and index != 0) or (
                key == "border_right" and index != composite_length
            ):
                item[key] = 0
            else:
                item[key] = value
        # set urgent based on available user-defined settings
        if not self.allow_urgent:
            if "urgent" in item:
                del item["urgent"]
        elif urgent:
            if self.i3bar_gaps_urgent_options:
                # set background and border colors. set left/right border widths
                # only on first/last composites and no border width for inner
                # composites or we will see border lines between composites.
                for key, value in self.i3bar_gaps_urgent_options.items():
                    if (key == "border_left" and index != 0) or (
                        key == "border_right" and index != composite_length
                    ):
                        item[key] = 0
                    elif key == "foreground":
                        item["color"] = value
                    else:
                        item[key] = value
                # urgency is expressed via the gap colors, so drop the flag
                if "urgent" in item:
                    del item["urgent"]
            else:
                item["urgent"] = urgent
    # set min_length
    if "min_length" in self.py3status_module_options:
        min_length = self.py3status_module_options["min_length"]
        # get length, skip if length exceeds min_length
        length = sum([len(x["full_text"]) for x in response["composite"]])
        if length >= min_length:
            return
        # sometimes we go under min_length to pad both side evenly,
        # we will add extra space on either side to honor min_length
        padding = int((min_length / 2.0) - (length / 2.0))
        offset = min_length - ((padding * 2) + length)
        # set position
        position = self.py3status_module_options.get("position", "left")
        if position == "center":
            left = right = " " * padding
            # NOTE(review): self.random_int appears to be a random bit that
            # spreads the one-space rounding remainder between the two
            # sides -- confirm where it is set.
            if self.random_int:
                left += " " * offset
            else:
                right += " " * offset
        elif position == "left":
            left, right = "", " " * (padding * 2 + offset)
        elif position == "right":
            right, left = "", " " * (padding * 2 + offset)
        # padding
        if left:
            response["composite"][0]["full_text"] = (
                left + response["composite"][0]["full_text"]
            )
        if right:
            response["composite"][-1]["full_text"] += right
def set_welcome_message(self):
    """Create and insert welcome message."""
    # Assemble the full HTML document: header + welcome body + footer.
    parts = [html_header(), welcome_message().to_html(), html_footer()]
    self.welcome_message.setHtml("".join(parts))
def send(message, **kwargs):
    """Send a simple SocketIO message to one or more connected clients.

    The message can be a string or a JSON blob. This is a simpler variant
    of ``emit()``, which should be preferred, and can only be called from
    a SocketIO event handler.

    :param message: The message to send, either a string or a JSON blob.
    :param json: ``True`` if ``message`` is a JSON blob, ``False`` otherwise.
    :param namespace: Namespace to send under; defaults to the namespace of
        the originating event. An empty string selects the global namespace.
    :param callback: Callback invoked with the client's acknowledgement.
    :param broadcast: ``True`` to send to all connected clients, ``False``
        to reply only to the sender of the originating event.
    :param room: Send the message to all the users in the given room.
    :param include_self: Whether to include the sender when broadcasting
        or addressing a room.
    :param ignore_queue: Only used when a message queue is configured.
        If ``True``, emit directly to clients without going through the
        queue. More efficient, but only correct with a single server
        process or a single addressee; leave at the default ``False``
        unless you know you need it.
    """
    json = kwargs.get('json', False)
    # Only touch the request context when the caller did not name a
    # namespace explicitly.
    try:
        namespace = kwargs['namespace']
    except KeyError:
        namespace = flask.request.namespace
    callback = kwargs.get('callback')
    broadcast = kwargs.get('broadcast')
    room = kwargs.get('room')
    if room is None and not broadcast:
        # Not broadcasting and no room given: address the sender directly.
        room = flask.request.sid
    socketio = flask.current_app.extensions['socketio']
    return socketio.send(message,
                         json=json,
                         namespace=namespace,
                         room=room,
                         include_self=kwargs.get('include_self', True),
                         callback=callback,
                         ignore_queue=kwargs.get('ignore_queue', False))
def validate_stream(stream):
    """
    Check that the stream name is well-formed.
    """
    # A valid name is short enough and matches the allowed pattern.
    well_formed = (len(stream) <= MAX_STREAM_LENGTH
                   and STREAM_REGEX.match(stream))
    if not well_formed:
        raise InvalidStreamName(stream)
def __vDecodeDIGICAMConfigure(self, mCommand_Long):
    """Decode a DIGICAM_CONFIGURE command and apply each camera setting.

    Each MAVLink param is applied only when nonzero, so a zero field
    means "leave that setting unchanged".

    :param mCommand_Long: COMMAND_LONG message whose params are
        param1=exposure mode, param2=shutter speed, param3=aperture,
        param4=ISO, param5=exposure type.
    """
    # param1: exposure mode, matched against the instance's mode constants.
    if mCommand_Long.param1 != 0:
        print ("Exposure Mode = %d" % mCommand_Long.param1)
        if mCommand_Long.param1 == self.ProgramAuto:
            self.__vCmdSetCamExposureMode(["Program Auto"])
        elif mCommand_Long.param1 == self.Aperture:
            self.__vCmdSetCamExposureMode(["Aperture"])
        elif mCommand_Long.param1 == self.Shutter:
            self.__vCmdSetCamExposureMode(["Shutter"])
    '''Shutter Speed'''
    if mCommand_Long.param2 != 0:
        print ("Shutter Speed= %d" % mCommand_Long.param2)
        self.__vCmdSetCamShutterSpeed([mCommand_Long.param2])
    '''Aperture'''
    if mCommand_Long.param3 != 0:
        print ("Aperture = %d" % mCommand_Long.param3)
        self.__vCmdSetCamAperture([mCommand_Long.param3])
    '''ISO'''
    if mCommand_Long.param4 != 0:
        print ("ISO = %d" % mCommand_Long.param4)
        self.__vCmdSetCamISO([mCommand_Long.param4])
    '''Exposure Type'''
    # param5 is only reported, not applied -- no setter is called here.
    if mCommand_Long.param5 != 0:
        print ("Exposure type= %d" % mCommand_Long.param5)
def inputConnections(self, cls=None):
    """
    Returns a list of input connections from the scene that match the
    inputed class for this node.
    :param cls | <subclass of XNodeConnection> || None
    :return [<XNodeConnection>, ..]
    """
    scene = self.scene()
    if not scene:
        return []
    # Default to the base connection class when no filter is given.
    conn_type = cls or XNodeConnection
    # Keep every scene item of the requested type that feeds into us.
    return [item for item in scene.items()
            if isinstance(item, conn_type) and item.inputNode() == self]
def file_input(parser, body):
    """file_input: (NEWLINE | stmt)* ENDMARKER

    :param parser: the parser instance (unused here; required by the
        grammar-action calling convention).
    :param body: list of statement lists produced by the sub-rules.
    :return: an ``ast.Module`` spanning the flattened statements.
    """
    # Flatten the list-of-statement-lists in linear time; the previous
    # reduce(list.__add__, ...) was quadratic in the statement count.
    body = [stmt for stmts in body for stmt in stmts]
    # An empty module has no source location.
    loc = body[0].loc if body else None
    return ast.Module(body=body, loc=loc)
def to_red(self, on: bool=False):
    """
    Change the LED to red (on or off)
    :param on: True or False
    :return: None
    """
    self._on = on
    # Show the lit or unlit red image as requested.
    image = led_red_on if on else led_red
    self._load_new(image)
    if self._toggle_on_click:
        # Clicking flips the LED to the opposite state.
        self._canvas.bind('<Button-1>', lambda event: self.to_red(not on))
def reactions_add(self, *, name: str, **kwargs) -> SlackResponse:
    """Adds a reaction to an item.
    Args:
        name (str): Reaction (emoji) name. e.g. 'thumbsup'
        channel (str): Channel where the message to add reaction to was posted.
            e.g. 'C1234567890'
        timestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'
    """
    # Fold the mandatory reaction name into the JSON payload.
    payload = dict(kwargs, name=name)
    return self.api_call("reactions.add", json=payload)
def action_webimport(hrlinetop=False):
    """ select from the available online directories for import """
    DIR_OPTIONS = {1: "http://lov.okfn.org", 2: "http://prefix.cc/popular/"}
    selection = None
    while True:
        if hrlinetop:
            printDebug("----------")
        text = "Please select which online directory to scan: (enter=quit)\n"
        for x in DIR_OPTIONS:
            text += "%d) %s\n" % (x, DIR_OPTIONS[x])
        var = input(text + "> ")
        if var == "q" or var == "":
            return None
        # int() raises ValueError on junk input; the dict lookup raises
        # KeyError for an out-of-range number -- both mean "ask again".
        # (The original bare `except:` also swallowed KeyboardInterrupt.)
        try:
            selection = int(var)
            DIR_OPTIONS[selection]  # throw exception if number wrong
            break
        except (ValueError, KeyError):
            printDebug("Invalid selection. Please try again.", "important")
            continue
    printDebug("----------")
    text = "Search for a specific keyword? (enter=show all)\n"
    var = input(text + "> ")
    keyword = var
    # Network/parse failures from the remote catalogs should not crash the
    # interactive session; report and fall through.
    try:
        if selection == 1:
            _import_LOV(keyword=keyword)
        elif selection == 2:
            _import_PREFIXCC(keyword=keyword)
    except Exception:
        printDebug("Sorry, the online repository seems to be unreachable.")
    return True
def fixed_padding(inputs, kernel_size, data_format):
    """Pads the input along the spatial dimensions independently of input size.
    Args:
        inputs: A tensor of size [batch, channels, height_in, width_in] or
            [batch, height_in, width_in, channels] depending on data_format.
        kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
            Should be a positive integer.
        data_format: The input format ('channels_last' or 'channels_first').
    Returns:
        A tensor with the same format as the input with the data either intact
        (if kernel_size == 1) or padded (if kernel_size > 1).
    """
    # Split the total padding as evenly as possible, putting the extra
    # element (for even kernels) at the end.
    pad_total = kernel_size - 1
    pad_beg = pad_total // 2
    pad_end = pad_total - pad_beg
    spatial_pad = [pad_beg, pad_end]
    if data_format == 'channels_first':
        # NCHW: pad the last two (spatial) axes.
        paddings = [[0, 0], [0, 0], spatial_pad, spatial_pad]
    else:
        # NHWC: pad the middle two (spatial) axes.
        paddings = [[0, 0], spatial_pad, spatial_pad, [0, 0]]
    return tf.pad(inputs, paddings)
def output(self, _filename):
    """
    _filename is not used
    Args:
        _filename(string)
    """
    # Build the report in a list and join once instead of repeated
    # string concatenation.
    lines = []
    for contract in self.contracts:
        lines.append("\nContract %s\n" % contract.name)
        state_table = PrettyTable(['Variable', 'Dependencies'])
        for var in contract.state_variables:
            state_table.add_row([var.name, _get(var, contract)])
        lines.append(str(state_table))
        lines.append("\n")
        for func in contract.functions_and_modifiers_not_inherited:
            lines.append("\nFunction %s\n" % func.full_name)
            func_table = PrettyTable(['Variable', 'Dependencies'])
            for var in func.variables:
                func_table.add_row([var.name, _get(var, func)])
            for var in contract.state_variables:
                func_table.add_row([var.canonical_name, _get(var, func)])
            lines.append(str(func_table))
    self.info("".join(lines))
def draw_qubit_graph(G, layout, linear_biases={}, quadratic_biases={},
                     nodelist=None, edgelist=None, cmap=None, edge_cmap=None, vmin=None, vmax=None,
                     edge_vmin=None, edge_vmax=None,
                     **kwargs):
    """Draws graph G according to layout.
    If `linear_biases` and/or `quadratic_biases` are provided, these
    are visualized on the plot.
    Parameters
    ----------
    G : NetworkX graph
        The graph to be drawn
    layout : dict
        A dict of coordinates associated with each node in G. Should
        be of the form {node: coordinate, ...}. Coordinates will be
        treated as vectors, and should all have the same length.
    linear_biases : dict (optional, default {})
        A dict of biases associated with each node in G. Should be of
        form {node: bias, ...}. Each bias should be numeric.
    quadratic_biases : dict (optional, default {})
        A dict of biases associated with each edge in G. Should be of
        form {edge: bias, ...}. Each bias should be numeric. Self-loop
        edges (i.e., :math:`i=j`) are treated as linear biases.
    kwargs : optional keywords
        See networkx.draw_networkx() for a description of optional keywords,
        with the exception of the `pos` parameter which is not used by this
        function. If `linear_biases` or `quadratic_biases` are provided,
        any provided `node_color` or `edge_color` arguments are ignored.
    """
    # NOTE(review): the mutable default arguments ({}) are read-only here,
    # so they are harmless, but they would be a hazard if this function
    # ever mutated them.
    if linear_biases or quadratic_biases:
        # if linear biases and/or quadratic biases are provided, then color accordingly.
        try:
            import matplotlib.pyplot as plt
            import matplotlib as mpl
        except ImportError:
            raise ImportError("Matplotlib and numpy required for draw_qubit_graph()")
        if nodelist is None:
            nodelist = G.nodes()
        if edgelist is None:
            edgelist = G.edges()
        if cmap is None:
            cmap = plt.get_cmap('coolwarm')
        if edge_cmap is None:
            edge_cmap = plt.get_cmap('coolwarm')
        # any edges or nodes with an unspecified bias default to 0
        # An edge's color sums the bias stored under either orientation of
        # the endpoint pair.
        def edge_color(u, v):
            c = 0.
            if (u, v) in quadratic_biases:
                c += quadratic_biases[(u, v)]
            if (v, u) in quadratic_biases:
                c += quadratic_biases[(v, u)]
            return c
        # A node's color sums its linear bias and any self-loop bias.
        def node_color(v):
            c = 0.
            if v in linear_biases:
                c += linear_biases[v]
            if (v, v) in quadratic_biases:
                c += quadratic_biases[(v, v)]
            return c
        node_color = [node_color(v) for v in nodelist]
        edge_color = [edge_color(u, v) for u, v in edgelist]
        kwargs['edge_color'] = edge_color
        kwargs['node_color'] = node_color
        # the range of the color map is shared for nodes/edges and is symmetric
        # around 0.
        # NOTE(review): max() raises ValueError if nodelist or edgelist is
        # empty while biases are provided -- confirm callers never do that.
        vmag = max(max(abs(c) for c in node_color), max(abs(c) for c in edge_color))
        if vmin is None:
            vmin = -1 * vmag
        if vmax is None:
            vmax = vmag
        if edge_vmin is None:
            edge_vmin = -1 * vmag
        if edge_vmax is None:
            edge_vmax = vmag
    draw(G, layout, nodelist=nodelist, edgelist=edgelist,
         cmap=cmap, edge_cmap=edge_cmap, vmin=vmin, vmax=vmax, edge_vmin=edge_vmin,
         edge_vmax=edge_vmax,
         **kwargs)
    # if the biases are provided, then add a legend explaining the color map
    if linear_biases or quadratic_biases:
        fig = plt.figure(1)
        # cax = fig.add_axes([])
        cax = fig.add_axes([.9, 0.2, 0.04, 0.6])  # left, bottom, width, height
        mpl.colorbar.ColorbarBase(cax, cmap=cmap,
                                  norm=mpl.colors.Normalize(vmin=-1 * vmag, vmax=vmag, clip=False),
                                  orientation='vertical')
def p_expression_unot(self, p):
    'expression : NOT expression %prec UNOT'
    # NOTE: the docstring above is the PLY grammar rule -- do not edit it.
    # Build a logical-negation AST node from the operand expression.
    p[0] = Unot(p[2], lineno=p.lineno(1))
    # Propagate the NOT token's line number to the result symbol.
    p.set_lineno(0, p.lineno(1))
def update_readme(self, content):
    """Update the readme descriptive metadata."""
    logger.debug("Updating readme")
    key = self.get_readme_key()
    # Preserve the previous README under a timestamped backup key before
    # overwriting it.
    backup_content = self.get_readme_content()
    backup_key = "{}-{}".format(key, timestamp(datetime.datetime.now()))
    logger.debug("README.yml backup key: {}".format(backup_key))
    self.put_text(backup_key, backup_content)
    self.put_text(key, content)
def enable_autocenter(self, option):
    """Set ``autocenter`` behavior.
    Parameters
    ----------
    option : {'on', 'override', 'once', 'off'}
        Option for auto-center behavior. A list of acceptable options can
        also be obtained by :meth:`get_autocenter_options`.
    Raises
    ------
    ginga.ImageView.ImageViewError
        Invalid option.
    """
    option = option.lower()
    # The original message had two %s placeholders but only one argument,
    # so a bad option raised TypeError instead of the intended assertion.
    assert option in self.autocenter_options, \
        ImageViewError("Bad autocenter option '%s': must be one of %s" % (
            option, str(self.autocenter_options)))
    self.t_.set(autocenter=option)
def positions_to_contigs(positions):
    """Flattens and converts a positions array to a contigs array, if applicable.

    Positions restart from zero at each new contig; the returned array
    labels each entry with a 1-based contig index. If the input already
    looks like a contig labelling (identical consecutive nonzero values
    and no zeros anywhere), it is returned unchanged with a warning.
    """
    # Accept nested lists/arrays and flatten them first.
    if isinstance(positions, np.ndarray):
        flattened_positions = positions.flatten()
    else:
        try:
            flattened_positions = np.array(
                [pos for contig in positions for pos in contig])
        except TypeError:
            flattened_positions = np.array(positions)
    # Fix: inspect the flattened data, not the possibly-nested original
    # (set()/indexing on a 2D input previously misbehaved or crashed).
    if ((np.diff(flattened_positions) == 0).any()
            and not (0 in set(flattened_positions))):
        warnings.warn("I detected identical consecutive nonzero values.")
        return positions
    n = len(flattened_positions)
    contigs = np.ones(n)
    counter = 0
    for i in range(1, n):
        if flattened_positions[i] == 0:
            # A zero marks the start of a new contig.
            counter += 1
            contigs[i] += counter
        else:
            contigs[i] = contigs[i - 1]
    return contigs
def cancelled(self):
    """Return whether this future was successfully cancelled."""
    # A cancelled future is stored as an exception result whose payload
    # is a Cancelled instance.
    if self._state != self.S_EXCEPTION:
        return False
    return isinstance(self._result, Cancelled)
async def get_all_leases(self):
    """
    Return the lease info for all partitions.
    A typical implementation could just call get_lease_async() on all partitions.
    :return: A list of lease info.
    :rtype: list[~azure.eventprocessorhost.lease.Lease]
    """
    partition_ids = await self.host.partition_manager.get_partition_ids_async()
    # NOTE(review): the per-partition results are returned unawaited
    # (futures/coroutines); the caller appears expected to await each one.
    return [self.get_lease_async(pid) for pid in partition_ids]
def verify_unsigned_tx(unsigned_tx, outputs, inputs=None, sweep_funds=False,
                       change_address=None, coin_symbol='btc'):
    '''
    Takes an unsigned transaction and what was used to build it (in
    create_unsigned_tx) and verifies that tosign_tx matches what is being
    signed and what was requestsed to be signed.
    Returns if valid:
        (True, '')
    Returns if invalid:
        (False, 'err_msg')
    Specifically, this checks that the outputs match what we're expecting
    (bad inputs would fail signature anyway).
    Note: it was a mistake to include `inputs` in verify_unsigned_tx as it by definition is not used.
    It would be removed but that would break compatibility.
    '''
    # Without a change address (or a sweep) we cannot reconstruct the
    # expected output set, so verification is impossible.
    if not (change_address or sweep_funds):
        err_msg = 'Cannot Verify Without Developer Supplying Change Address (or Sweeping)'
        return False, err_msg
    if 'tosign_tx' not in unsigned_tx:
        err_msg = 'tosign_tx not in API response:\n%s' % unsigned_tx
        return False, err_msg
    output_addr_list = [x['address'] for x in outputs if x.get('address') != None]
    if change_address:
        output_addr_list.append(change_address)
    # There must be exactly one tosign hash per tosign_tx entry.
    assert len(unsigned_tx['tosign_tx']) == len(unsigned_tx['tosign']), unsigned_tx
    for cnt, tosign_tx_toverify in enumerate(unsigned_tx['tosign_tx']):
        # Confirm tosign is the dsha256 of tosign_tx
        if double_sha256(tosign_tx_toverify) != unsigned_tx['tosign'][cnt]:
            err_msg = 'double_sha256(%s) =! %s' % (tosign_tx_toverify, unsigned_tx['tosign'][cnt])
            print(unsigned_tx)
            return False, err_msg
        # Parse the raw tx and extract its outputs keyed by address.
        try:
            txn_outputs_response_dict = get_txn_outputs_dict(raw_tx_hex=tosign_tx_toverify,
                                                             output_addr_list=output_addr_list,
                                                             coin_symbol=coin_symbol)
        except Exception as inst:
            # Could be wrong output addresses, keep print statement for debug
            print(unsigned_tx)
            print(coin_symbol)
            return False, str(inst)
        if sweep_funds:
            # output adresses are already confirmed in `get_txn_outputs`,
            # which was called by `get_txn_outputs_dict`
            # no point in confirming values for a sweep
            continue
        else:
            # get rid of change address as tx fee (which affects value)
            # is determined by blockcypher and can't be known up front
            try:
                txn_outputs_response_dict.pop(change_address)
            except KeyError:
                # This is possible in the case of change address not needed
                pass
            # Remaining outputs must exactly match what the caller asked for.
            user_outputs = compress_txn_outputs(outputs)
            if txn_outputs_response_dict != user_outputs:
                # TODO: more helpful error message
                err_msg = 'API Response Ouputs != Supplied Outputs\n\n%s\n\n%s' % (
                    txn_outputs_response_dict, user_outputs)
                return False, err_msg
    return True, ''
def read(self, n):
    """ return at most n array items, move the cursor.

    Refills the internal pool from successive files until at least n
    items are buffered, then returns the first n and advances past them.
    May raise StopIteration if the file iterator is exhausted first.
    """
    while len(self.pool) < n:
        # `self.files.next()` was Python-2-only; the next() builtin works
        # on both Python 2.6+ and Python 3.
        self.cur = next(self.files)
        self.pool = numpy.append(self.pool,
                                 self.fetch(self.cur), axis=0)
    rt = self.pool[:n]
    if n == len(self.pool):
        # Pool consumed exactly: start a fresh buffer.
        self.pool = self.fetch(None)
    else:
        self.pool = self.pool[n:]
    return rt
def _EccZmaxRperiRap(self,*args,**kwargs):
    """
    NAME:
       _EccZmaxRperiRap
    PURPOSE:
       evaluate the eccentricity, maximum height above the plane, peri- and apocenter for an isochrone potential
    INPUT:
       Either:
          a) R,vR,vT,z,vz[,phi]:
             1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
             2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
          b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument
    OUTPUT:
       (e,zmax,rperi,rap)
    HISTORY:
       2017-12-22 - Written - Bovy (UofT)
    """
    # Unpack positional phase-space coordinates, or parse an Orbit
    # instance via the shared helper.
    if len(args) == 5: #R,vR.vT, z, vz pragma: no cover
        R,vR,vT, z, vz= args
    elif len(args) == 6: #R,vR.vT, z, vz, phi
        R,vR,vT, z, vz, phi= args
    else:
        self._parse_eval_args(*args)
        R= self._eval_R
        vR= self._eval_vR
        vT= self._eval_vT
        z= self._eval_z
        vz= self._eval_vz
    # Promote scalar inputs to length-1 arrays so the math below is uniform.
    if isinstance(R,float):
        R= nu.array([R])
        vR= nu.array([vR])
        vT= nu.array([vT])
        z= nu.array([z])
        vz= nu.array([vz])
    # NOTE(review): the C branch is a stub here; if self._c is true the
    # return below would reference undefined names -- confirm the C
    # implementation path elsewhere.
    if self._c: #pragma: no cover
        pass
    else:
        # Angular-momentum components and total L^2, plus specific energy.
        Lz= R*vT
        Lx= -z*vT
        Ly= z*vR-R*vz
        L2= Lx*Lx+Ly*Ly+Lz*Lz
        E= self._ip(R,z)+vR**2./2.+vT**2./2.+vz**2./2.
        if self.b == 0:
            # Point-mass (Kepler) limit: closed-form ellipse elements.
            warnings.warn("zmax for point-mass (b=0) isochrone potential is only approximate, because it assumes that zmax is attained at rap, which is not necessarily the case",galpyWarning)
            a= -self.amp/2./E
            me2= L2/self.amp/a
            e= nu.sqrt(1.-me2)
            rperi= a*(1.-e)
            rap= a*(1.+e)
        else:
            # General isochrone: solve the quadratic for the auxiliary
            # variable s at the turning points.
            smin= 0.5*((2.*E-self.amp/self.b)\
                           +nu.sqrt((2.*E-self.amp/self.b)**2.
                                    +2.*E*(4.*self.amp/self.b+L2/self.b**2.)))/E
            smax= 2.-self.amp/E/self.b-smin
            rperi= smin*nu.sqrt(1.-2./smin)*self.b
            rap= smax*nu.sqrt(1.-2./smax)*self.b
    # zmax estimate assumes the orbit reaches its maximum height at rap.
    return ((rap-rperi)/(rap+rperi),rap*nu.sqrt(1.-Lz**2./L2),
            rperi,rap)
def runPermutations(args):
  """
  The main function of the RunPermutations utility.
  This utility will automatically generate and run multiple prediction framework
  experiments that are permutations of a base experiment via the Grok engine.
  For example, if you have an experiment that you want to test with 3 possible
  values of variable A and 2 possible values of variable B, this utility will
  automatically generate the experiment directories and description files for
  each of the 6 different experiments.
  Here is an example permutations file which is read by this script below. The
  permutations file must be in the same directory as the description.py for the
  base experiment that you want to permute. It contains a permutations dict, an
  optional list of the result items to report on for each experiment, and an
  optional result item to optimize for.
  When an 'optimize' entry is provided, this tool will attempt to prioritize the
  order in which the various permutations are run in order to improve the odds
  of running the best permutations sooner. It does this by watching the results
  for various parameter values and putting parameter values that give generally
  better results at the head of the queue.
  In addition, when the optimize key is provided, we periodically update the UI
  with the best results obtained so far on that metric.
  ---------------------------------------------------------------------------
  permutations = dict(
                    iterationCount = [1000, 5000],
                    coincCount = [50, 100],
                    trainTP = [False],
                    )
  report = ['.*reconstructErrAvg',
            '.*inputPredScore.*',
            ]
  optimize = 'postProc_gym1_baseline:inputPredScore'
  Parameters:
  ----------------------------------------------------------------------
  args:           Command-line args; the equivalent of sys.argv[1:]
  retval:         for the actions 'run', 'pickup', and 'dryRun', returns the
                  Hypersearch job ID (in ClinetJobs table); otherwise returns
                  None
  """
  # ---- Command-line parsing: usage string and all supported options. ----
  helpString = (
      "\n\n%prog [options] permutationsScript\n"
      "%prog [options] expDescription.json\n\n"
      "This script runs permutations of an experiment via Grok engine, as "
      "defined in a\npermutations.py script or an expGenerator experiment "
      "description json file.\nIn the expDescription.json form, the json file "
      "MUST have the file extension\n'.json' and MUST conform to "
      "expGenerator/experimentDescriptionSchema.json.")
  parser = optparse.OptionParser(usage=helpString)
  parser.add_option(
      "--replaceReport", dest="replaceReport", action="store_true",
      default=DEFAULT_OPTIONS["replaceReport"],
      help="Replace existing csv report file if it exists. Default is to "
           "append to the existing file. [default: %default].")
  parser.add_option(
      "--action", dest="action", default=DEFAULT_OPTIONS["action"],
      choices=["run", "pickup", "report", "dryRun"],
      help="Which action to perform. Possible actions are run, pickup, choices, "
           "report, list. "
           "run: run a new HyperSearch via Grok. "
           "pickup: pick up the latest run of a HyperSearch job. "
           "dryRun: run a single HypersearchWorker inline within the application "
           "process without the Grok infrastructure to flush out bugs in "
           "description and permutations scripts; defaults to "
           "maxPermutations=1: use --maxPermutations to change this; "
           "report: just print results from the last or current run. "
           "[default: %default].")
  parser.add_option(
      "--maxPermutations", dest="maxPermutations",
      default=DEFAULT_OPTIONS["maxPermutations"], type="int",
      help="Maximum number of models to search. Applies only to the 'run' and "
           "'dryRun' actions. [default: %default].")
  parser.add_option(
      "--exports", dest="exports", default=DEFAULT_OPTIONS["exports"],
      type="string",
      help="json dump of environment variable settings that should be applied"
           "for the job before running. [default: %default].")
  parser.add_option(
      "--useTerminators", dest="useTerminators", action="store_true",
      default=DEFAULT_OPTIONS["useTerminators"], help="Use early model terminators in HyperSearch"
           "[default: %default].")
  parser.add_option(
      "--maxWorkers", dest="maxWorkers", default=DEFAULT_OPTIONS["maxWorkers"],
      type="int",
      help="Maximum number of concurrent workers to launch. Applies only to "
           "the 'run' action. [default: %default].")
  parser.add_option(
      "-v", dest="verbosityCount", action="count", default=0,
      help="Increase verbosity of the output.  Specify multiple times for "
           "increased verbosity. e.g., -vv is more verbose than -v.")
  parser.add_option(
      "--timeout", dest="timeout", default=DEFAULT_OPTIONS["timeout"], type="int",
      help="Time out for this search in minutes"
           "[default: %default].")
  parser.add_option(
      "--overwrite", default=DEFAULT_OPTIONS["overwrite"], action="store_true",
      help="If 'yes', overwrite existing description.py and permutations.py"
           " (in the same directory as the <expDescription.json> file) if they"
           " already exist. [default: %default].")
  parser.add_option(
      "--genTopNDescriptions", dest="genTopNDescriptions",
      default=DEFAULT_OPTIONS["genTopNDescriptions"], type="int",
      help="Generate description files for the top N models. Each one will be"
           " placed into it's own subdirectory under the base description file."
           "[default: %default].")
  (options, positionalArgs) = parser.parse_args(args)
  # Get the permutations script's filepath
  if len(positionalArgs) != 1:
    parser.error("You must supply the name of exactly one permutations script "
                 "or JSON description file.")
  # ---- Normalize the script/JSON path and derive working dir + label. ----
  fileArgPath = os.path.expanduser(positionalArgs[0])
  fileArgPath = os.path.expandvars(fileArgPath)
  fileArgPath = os.path.abspath(fileArgPath)
  permWorkDir = os.path.dirname(fileArgPath)
  outputLabel = os.path.splitext(os.path.basename(fileArgPath))[0]
  basename = os.path.basename(fileArgPath)
  fileExtension = os.path.splitext(basename)[1]
  optionsDict = vars(options)
  # ---- Dispatch on file type: JSON description vs permutations script. ----
  if fileExtension == ".json":
    returnValue = permutations_runner.runWithJsonFile(
        fileArgPath, optionsDict, outputLabel, permWorkDir)
  else:
    returnValue = permutations_runner.runWithPermutationsScript(
        fileArgPath, optionsDict, outputLabel, permWorkDir)
  return returnValue
def get_raw(self):
        """
        Get the reconstructed code as bytearray

        :rtype: bytearray
        """
        code_raw = self.code.get_raw()
        # insns_size counts 16-bit code units; round up when the raw byte
        # length is odd.
        self.insns_size = (len(code_raw) // 2) + (len(code_raw) % 2)
        buff = bytearray()
        # Fixed header layout: four little-endian u16 fields, two u32 fields,
        # then the raw instruction bytes.
        buff += pack("<H", self.registers_size) + \
            pack("<H", self.ins_size) + \
            pack("<H", self.outs_size) + \
            pack("<H", self.tries_size) + \
            pack("<I", self.debug_info_off) + \
            pack("<I", self.insns_size) + \
            code_raw
        if self.tries_size > 0:
            # The try items must be aligned after the instruction array; when
            # the instruction count is odd, two padding bytes are inserted.
            if (self.insns_size % 2 == 1):
                buff += pack("<H", self.padding)
            for i in self.tries:
                buff += i.get_raw()
            buff += self.handlers.get_raw()
        return buff
def Start(self):
    """Issue the configured plist query to the client."""
    # The reply is delivered asynchronously to the "Receive" state method
    # of this flow.
    self.CallClient(
        server_stubs.PlistQuery,
        request=self.args.request,
        next_state="Receive")
def _split(rule):
    """Splits a rule whose len(rhs) > 2 into shorter rules.

    Binarizes the rule by chaining freshly named nonterminals: the first
    emitted rule keeps the original lhs, weight and alias; every generated
    intermediate rule has weight 0 and alias 'Split'.
    """
    # Unique, human-readable base name for the generated nonterminals.
    rule_str = str(rule.lhs) + '__' + '_'.join(str(x) for x in rule.rhs)
    rule_name = '__SP_%s' % (rule_str) + '_%d'
    yield Rule(rule.lhs, [rule.rhs[0], NT(rule_name % 1)], weight=rule.weight, alias=rule.alias)
    # NOTE: xrange implies this module targets Python 2.
    for i in xrange(1, len(rule.rhs) - 2):
        yield Rule(NT(rule_name % i), [rule.rhs[i], NT(rule_name % (i + 1))], weight=0, alias='Split')
    # Final rule consumes the last two rhs symbols directly.
    yield Rule(NT(rule_name % (len(rule.rhs) - 2)), rule.rhs[-2:], weight=0, alias='Split')
def set_boot_script(self, filename):
        """
        ::
            POST /:login/machines/:id/metadata
        :param filename: file path to the script to be uploaded and executed
            at boot on the machine
        :type filename: :py:class:`basestring`
        Replace the existing boot script for the machine with the data in the
        named file.
        .. Note:: The SMF service that runs the boot script will kill processes
            that exceed 60 seconds execution time, so this is not necessarily
            the best vehicle for long ``pkgin`` installations, for example.
        """
        # The boot script is stored as machine metadata under "user-script".
        data = {}
        with open(filename) as f:
            data['user-script'] = f.read()
        j, r = self.datacenter.request('POST', self.path + '/metadata',
                                       data=data)
        r.raise_for_status()
        # Keep the local attribute in sync with what was just uploaded.
        self.boot_script = data['user-script']
def dump_type(self, obj):
        """Dump the text name of the relation.

        Returns ``obj.relation_type.name`` directly when it already is a
        :class:`RelationType`; otherwise the raw value is resolved to its
        configured relation type first.
        """
        relation_type = obj.relation_type
        if isinstance(relation_type, RelationType):
            return relation_type.name
        return resolve_relation_type_config(relation_type).name
def astensor(array: TensorLike) -> BKTensor:
    """Convert to product tensor"""
    tensor = tf.convert_to_tensor(array, dtype=CTYPE)
    if DEVICE == 'gpu':
        tensor = tensor.gpu() # pragma: no cover
    # size = np.prod(np.array(tensor.get_shape().as_list()))
    # Reshape the flat amplitude vector into an N-factor product tensor of
    # shape [2] * N, where N = log2(number of elements).
    N = int(math.log2(size(tensor)))
    tensor = tf.reshape(tensor, ([2]*N))
    return tensor
def remove_links(text):
    """
    Helper function to remove the links from the input text

    Args:
        text (str): A string

    Returns:
        str: the same text, but with any substring that matches the regex
             for a link removed and replaced with a space

    Example:
        >>> from tweet_parser.getter_methods.tweet_text import remove_links
        >>> text = "lorem ipsum dolor https://twitter.com/RobotPrincessFi"
        >>> remove_links(text)
        'lorem ipsum dolor  '
    """
    # BUGFIX: the original t.co pattern used an unescaped dot (matching e.g.
    # "txco") and the character class "[A-z0-9]", which accidentally includes
    # every ASCII code point between "Z" and "a" ("[", "\", "]", "^", "_",
    # "`").  Raw strings also avoid invalid-escape warnings.
    tco_link_regex = re.compile(r"https?://t\.co/[A-Za-z0-9].*")
    generic_link_regex = re.compile(r"(https?://)?(\w*[.]\w+)+([/?=&]+\w+)*")
    remove_tco = re.sub(tco_link_regex, " ", text)
    remove_generic = re.sub(generic_link_regex, " ", remove_tco)
    return remove_generic
def _set(self, key, value, identity='image'):
        """
        Serializing, prefix wrapper for _set_raw
        """
        # Choose the serializer according to the identity, then store the
        # serialized payload under the prefixed key.
        serializer = serialize_image_file if identity == 'image' else serialize
        self._set_raw(add_prefix(key, identity), serializer(value))
def fcoe_get_login_input_fcoe_login_vfid(self, **kwargs):
        """Auto Generated Code

        Build the ``fcoe_get_login`` RPC payload carrying the
        ``fcoe-login-vfid`` input element and hand it to the callback.

        Keyword Args:
            fcoe_login_vfid: text content for the <fcoe-login-vfid> element
                (popped from kwargs).
            callback: optional callable applied to the XML element; defaults
                to ``self._callback``.
        """
        # BUGFIX: the original created a throwaway ET.Element("config") that
        # was immediately rebound; the RPC root is the element actually sent.
        # "input" is also renamed locally so it no longer shadows the builtin.
        fcoe_get_login = ET.Element("fcoe_get_login")
        config = fcoe_get_login
        input_elem = ET.SubElement(fcoe_get_login, "input")
        fcoe_login_vfid = ET.SubElement(input_elem, "fcoe-login-vfid")
        fcoe_login_vfid.text = kwargs.pop('fcoe_login_vfid')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
def applied_scroll_offsets(self):
        """
        Return a :class:`.ScrollOffsets` instance that indicates the actual
        offset. This can be less than or equal to what's configured. E.g, when
        the cursor is completely at the top, the top offset will be zero rather
        than what's configured.
        """
        if self.displayed_lines[0] == 0:
            # Scrolled fully to the top: there is no room for a top offset.
            top = 0
        else:
            # Get row where the cursor is displayed.
            y = self.input_line_to_visible_line[self.ui_content.cursor_position.y]
            top = min(y, self.configured_scroll_offsets.top)
        return ScrollOffsets(
            top=top,
            # Clamp the bottom offset by the number of content lines that
            # actually remain below the last displayed line.
            bottom=min(self.ui_content.line_count - self.displayed_lines[-1] - 1,
                       self.configured_scroll_offsets.bottom),
            # For left/right, it probably doesn't make sense to return something.
            # (We would have to calculate the widths of all the lines and keep
            # double width characters in mind.)
            left=0, right=0)
def get(self, key, default=None):
        """
        Look up *key* among the unprefixed configuration options.

        These are the options specified without prefix in the paste.ini
        file, or listed in the '[turnstile]' section of the configuration
        file.  When the option is missing, *default* (None unless given)
        is returned instead.
        """
        unprefixed = self._config.get(None, {})
        return unprefixed.get(key, default)
def template_response(self, template_name, headers=None, **values):
        """
        Constructs a response, allowing custom template name and content_type.

        :param template_name: name of the template to render
        :param headers: optional mapping of header fields to set on the
            response; a fresh dict is used when omitted
        :param values: keyword context forwarded to the template renderer
        """
        # BUGFIX: the original signature used a mutable default argument
        # (headers={}); a None sentinel avoids sharing one dict across calls
        # and is backward compatible for all callers.
        if headers is None:
            headers = {}
        response = make_response(
            self.render_template(template_name, **values))
        for field, value in headers.items():
            response.headers.set(field, value)
        return response
def print_dependencies(_run):
    """Print the detected source-files and dependencies."""
    print('Dependencies:')
    for dep in _run.experiment_info['dependencies']:
        # Dependencies are stored as "package==version" strings.
        pack, _, version = dep.partition('==')
        print('  {:<20} == {}'.format(pack, version))
    print('\nSources:')
    for source, digest in _run.experiment_info['sources']:
        print('  {:<43}  {}'.format(source, digest))
    if _run.experiment_info['repositories']:
        repos = _run.experiment_info['repositories']
        print('\nVersion Control:')
        for repo in repos:
            # Prefix dirty working trees with a colored 'M' marker.
            mod = COLOR_DIRTY + 'M' if repo['dirty'] else ' '
            print('{} {:<43}  {}'.format(mod, repo['url'], repo['commit']) +
                  ENDC)
    print('')
def _prepare_conn(self, conn):
        """
        Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
        and establish the tunnel if proxy is used.
        """
        if isinstance(conn, VerifiedHTTPSConnection):
            # Propagate this pool's TLS configuration onto the connection.
            conn.set_cert(key_file=self.key_file,
                          cert_file=self.cert_file,
                          cert_reqs=self.cert_reqs,
                          ca_certs=self.ca_certs,
                          assert_hostname=self.assert_hostname,
                          assert_fingerprint=self.assert_fingerprint)
            conn.ssl_version = self.ssl_version
        if self.proxy is not None:
            # Python 2.7+
            try:
                set_tunnel = conn.set_tunnel
            except AttributeError:  # Platform-specific: Python 2.6
                set_tunnel = conn._set_tunnel
            # Python 2.6.4 and older do not accept the headers argument.
            if sys.version_info <= (2, 6, 4) and not self.proxy_headers:  # Python 2.6.4 and older
                set_tunnel(self.host, self.port)
            else:
                set_tunnel(self.host, self.port, self.proxy_headers)
            # Establish tunnel connection early, because otherwise httplib
            # would improperly set Host: header to proxy's IP:port.
            conn.connect()
        return conn
def _validate_value_types(self, **kwargs):
        """Raises a TypeError for non-string values.

        The only legal non-string value if we allow valueless
        options is None, so we need to check if the value is a
        string if:
        - we do not allow valueless options, or
        - we allow valueless options but the value is not None
        For compatibility reasons this method is not used in classic set()
        for RawConfigParsers. It is invoked in every case for mapping protocol
        access and in ConfigParser.set().

        :returns: the (section, option, value) triple, decoded to unicode
            on Python 2 when bytestrings were passed in
        :raises TypeError: when any of the three values is not a string
        """
        # keyword-only arguments
        section = kwargs.get('section', "")
        option = kwargs.get('option', "")
        value = kwargs.get('value', "")
        if PY2 and bytes in (type(section), type(option), type(value)):
            # we allow for a little unholy magic for Python 2 so that
            # people not using unicode_literals can still use the library
            # conveniently
            warnings.warn(
                "You passed a bytestring. Implicitly decoding as UTF-8 string."
                " This will not work on Python 3. Please switch to using"
                " Unicode strings across the board.",
                DeprecationWarning,
                stacklevel=2,
            )
            if isinstance(section, bytes):
                section = section.decode('utf8')
            if isinstance(option, bytes):
                option = option.decode('utf8')
            if isinstance(value, bytes):
                value = value.decode('utf8')
        if not isinstance(section, str):
            raise TypeError("section names must be strings")
        if not isinstance(option, str):
            raise TypeError("option keys must be strings")
        # Valueless options may legally carry a falsy value (e.g. None).
        if not self._allow_no_value or value:
            if not isinstance(value, str):
                raise TypeError("option values must be strings")
        return section, option, value
def _make_intersection(edge_info, all_edge_nodes):
    """Convert a description of edges into a curved polygon.

    .. note::
        This is a helper used only by :meth:`.Surface.intersect`.

    Args:
        edge_info (Tuple[Tuple[int, float, float], ...]): Information
            describing each edge in the curved polygon by indicating which
            surface / edge on the surface and then start and end parameters
            along that edge. (See :func:`.ends_to_curve`.)
        all_edge_nodes (Tuple[numpy.ndarray, ...]): The nodes of three edges
            of the first surface being intersected followed by the nodes of
            the three edges of the second.

    Returns:
        .CurvedPolygon: The intersection corresponding to ``edge_info``.
    """

    def _build_edge(index, start, end):
        # Restrict the full edge to the [start, end] parameter interval and
        # wrap the specialized nodes in a Curve of the matching degree.
        specialized = _curve_helpers.specialize_curve(
            all_edge_nodes[index], start, end)
        return _curve_mod.Curve(
            specialized, specialized.shape[1] - 1, _copy=False)

    edges = [_build_edge(index, start, end) for index, start, end in edge_info]
    return curved_polygon.CurvedPolygon(
        *edges, metadata=edge_info, _verify=False
    )
def list_versions(self, project):
        """
        Lists all deployed versions of a specific project. First class, maps
        to Scrapyd's list versions endpoint.
        """
        endpoint = self._build_url(constants.LIST_VERSIONS_ENDPOINT)
        payload = self.client.get(endpoint, params={'project': project},
                                  timeout=self.timeout)
        return payload['versions']
def xmlChromatogramFromCi(index, ci, compression='zlib'):
    """Generate an xml ``chromatogram`` element from a chromatogram item.

    :param index: zero-based index of the chromatogram in the file
    :param ci: chromatogram item providing ``id``, ``attrib``, ``params``,
        ``arrayInfo``, ``arrays``, ``precursor`` and ``product``
    :param compression: compression applied to the binary data arrays
    :returns: an ``ETREE.Element`` representing the chromatogram
    :raises Exception: if the arrays in ``ci.arrays`` have unequal sizes
    :raises NotImplementedError: if ``ci.precursor`` or ``ci.product`` is set
    """
    arrayLength = [array.size for array in viewvalues(ci.arrays)]
    if len(set(arrayLength)) != 1:
        raise Exception('Unequal size for different array in sai.arrays')
    arrayLength = arrayLength[0]
    chromatogramAttrib = {'index': str(index), 'id': ci.id,
                          'defaultArrayLength': str(arrayLength)}
    if 'dataProcessingRef' in ci.attrib:
        # BUGFIX: the original referenced an undefined bare name
        # "dataProcessingRef" (NameError); the value lives in ci.attrib.
        chromatogramAttrib['dataProcessingRef'] = ci.attrib['dataProcessingRef']
    xmlChromatogram = ETREE.Element('chromatogram', **chromatogramAttrib)
    maspy.xml.xmlAddParams(xmlChromatogram, ci.params)
    #TODO: add appropriate functions for precursor and product
    if ci.product is not None:
        raise NotImplementedError()
    if ci.precursor is not None:
        raise NotImplementedError()
    #Sort the array keys, that 'rt' is always the first, necessary for example
    # for the software "SeeMS" to properly display chromatograms.
    arrayTypes = set(ci.arrayInfo)
    if 'rt' in arrayTypes:
        arrayTypes.remove('rt')
        arrayTypes = ['rt'] + list(arrayTypes)
    else:
        arrayTypes = list(arrayTypes)
    xmlChromatogram.append(xmlGenBinaryDataArrayList(ci.arrayInfo,
                                                     ci.arrays,
                                                     compression=compression,
                                                     arrayTypes=arrayTypes
                                                     )
                           )
    return xmlChromatogram
def isPointInsidePolygon(x, y, vertices_x, vertices_y):
    """Check if a given point is inside a polygon.

    Parameters vertices_x[] and vertices_y[] define the polygon.
    The number of array elements is equal to number of vertices of the polygon.
    This function works for convex and concave polygons.

    Parameters
    ----------
    vertices_x, vertices_y : lists or arrays of floats
        Vertices that define the polygon.
    x, y : float
        Coordinates of the point to check.

    Returns
    -------
    inside : bool
        `True` if the point is inside the polygon.
    """
    n = len(vertices_x)
    inside = False
    j = n - 1
    for i in range(n):
        xi, yi = vertices_x[i], vertices_y[i]
        xj, yj = vertices_x[j], vertices_y[j]
        # Ray casting: count edges that straddle the vertical line through x
        # and whose intersection with that line lies above the query point.
        # An odd number of such crossings puts the point inside.
        if (xi > x) != (xj > x):
            y_at_x = (x - xi) * (yi - yj) / (xi - xj) + yi
            if y < y_at_x:
                inside = not inside
        j = i
    return inside
async def async_execute(self, command: Command, password: str = '',
                            timeout: int = EXECUTE_TIMEOUT_SECS) -> Response:
        """
        Execute a command and return response.

        command: the command instance to be executed
        password: if specified, will be used to execute this command (overriding any
            global password that may have been assigned to the property)
        timeout: maximum number of seconds to wait for a response
        """
        if not self._is_connected:
            raise ConnectionError("Client is not connected to the server")
        # Track the in-flight command so the receive path can attach the
        # response and signal the event.
        state = {
            'command': command,
            'event': asyncio.Event(loop=self._loop)
        } # type: Dict[str, Any]
        self._executing[command.name] = state
        try:
            self._send(command, password)
            # Block until the response handler sets the event, or time out.
            await asyncio.wait_for(state['event'].wait(), timeout)
            return state['response']
        finally:
            # NOTE(review): the slot is reset to None rather than deleted;
            # presumably downstream code treats None as "not executing".
            self._executing[command.name] = None
def mark_deactivated(self, request, queryset):
        """An admin action for marking several cages as inactive.

        Sets the selected cages as Active=False and End=today, then reports
        the number of deactivated cages via the admin message framework."""
        count = queryset.update(Active=False, End=datetime.date.today())
        noun = "1 cage was" if count == 1 else "%s cages were" % count
        self.message_user(request, "%s successfully marked as deactivated." % noun)
def copy_topology_image(source, target):
    """
    Copy any images of the topology to the converted topology

    :param str source: Source topology directory
    :param str target: Target Directory
    """
    # Only PNG files are considered topology images.
    pattern = os.path.join(source, '*.png')
    for image_path in glob.glob(pattern):
        shutil.copy(image_path, target)
def show_error_dialog(self, message, details=None):
        """
        Convenience method for showing an error dialog.

        :param message: primary text of the dialog
        :param details: optional secondary text shown below the message
        """
        dlg = Gtk.MessageDialog(type=Gtk.MessageType.ERROR, buttons=Gtk.ButtonsType.OK,
                                message_format=message)
        if details is not None:
            dlg.format_secondary_text(details)
        # Block until the user dismisses the dialog, then tear it down.
        dlg.run()
        dlg.destroy()
def _apply_common_rules(self, part, maxlength):
        """Validation rules shared by the local part and the domain of an
        e-mail address.

        Returns a ``(cleaned_part, error_message)`` tuple; the message is an
        empty string when the part is valid.
        """
        cleaned = part.strip()
        if self.fix:
            # In "fix" mode, leading/trailing dots are silently removed.
            cleaned = cleaned.strip('.')
        if not cleaned:
            return cleaned, 'It cannot be empty.'
        if len(cleaned) > maxlength:
            return cleaned, 'It cannot be longer than %i chars.' % maxlength
        if cleaned.startswith('.'):
            return cleaned, 'It cannot start with a dot.'
        if cleaned.endswith('.'):
            return cleaned, 'It cannot end with a dot.'
        if '..' in cleaned:
            return cleaned, 'It cannot contain consecutive dots.'
        return cleaned, ''
def predict(self, peptides, allele_encoding=None, batch_size=4096):
        """
        Predict affinities.

        If peptides are specified as EncodableSequences, then the predictions
        will be cached for this predictor as long as the EncodableSequences object
        remains in memory. The cache is keyed in the object identity of the
        EncodableSequences, not the sequences themselves.

        Parameters
        ----------
        peptides : EncodableSequences or list of string
        allele_encoding : AlleleEncoding, optional
            Only required when this model is a pan-allele model
        batch_size : int
            batch_size passed to Keras

        Returns
        -------
        numpy.array of nM affinity predictions
        """
        assert self.prediction_cache is not None
        # Caching is only safe when the prediction depends on the peptides
        # alone (no allele encoding) and the input object's identity can be
        # used as the cache key.
        use_cache = (
            allele_encoding is None and
            isinstance(peptides, EncodableSequences))
        if use_cache and peptides in self.prediction_cache:
            # Return a copy so callers cannot corrupt the cached array.
            return self.prediction_cache[peptides].copy()
        x_dict = {
            'peptide': self.peptides_to_network_input(peptides)
        }
        if allele_encoding is not None:
            allele_input = self.allele_encoding_to_network_input(allele_encoding)
            x_dict['allele'] = allele_input
        network = self.network(borrow=True)
        raw_predictions = network.predict(x_dict, batch_size=batch_size)
        # First output column holds the affinity prediction; convert from
        # network output units to nM IC50 values.
        predictions = numpy.array(raw_predictions, dtype = "float64")[:,0]
        result = to_ic50(predictions)
        if use_cache:
            self.prediction_cache[peptides] = result
        return result
def get_help(command):
    """
    Get the Cmd help function from the click command

    :param command: The click Command object
    :return: the help_* method for Cmd
    :rtype: function
    """
    assert isinstance(command, click.Command)
    def help_(self):  # pylint: disable=unused-argument
        # Forward the command's context_settings as Context kwargs.
        extra = {}
        for key, value in command.context_settings.items():
            if key not in extra:
                extra[key] = value
        # Print click's help message
        with click.Context(command, info_name=command.name, parent=self.ctx, **extra) as ctx:
            click.echo(ctx.get_help(), color=ctx.color)
    # Name the closure help_<command> so cmd-style shells expose it as a
    # help topic for that command.
    help_.__name__ = 'help_%s' % command.name
    return help_
def response_change(self, request, obj):
        """
        Overrides the default to be able to forward to the directory listing
        instead of the default change_list_view
        """
        r = super(FolderAdmin, self).response_change(request, obj)
        # Code borrowed from django ModelAdmin to determine changelist on the
        # fly
        if r['Location']:
            # it was a successful save
            if (r['Location'] in ['../'] or
                    r['Location'] == self._get_post_url(obj)):
                # Redirect into the folder's parent listing (or the root
                # listing when the folder has no parent).
                if obj.parent:
                    url = reverse('admin:filer-directory_listing',
                                  kwargs={'folder_id': obj.parent.id})
                else:
                    url = reverse('admin:filer-directory_listing-root')
                # Preserve popup/select-folder state in the query string.
                url = "%s%s%s" % (url, popup_param(request),
                                  selectfolder_param(request, "&"))
                return HttpResponseRedirect(url)
            else:
                # this means it probably was a save_and_continue_editing
                pass
        return r
def register_layer(self, layer):
        """
        Register one connected layer.

        :type layer: NeuralLayer
        :raises Exception: when the block is already fixed and can no longer
            accept new layers.
        """
        if self.fixed:
            message = "After a block is fixed, no more layers can be registered."
            raise Exception(message)
        self.layers.append(layer)
def shutdown_host(kwargs=None, call=None):
    '''
    Shut down the specified host system in this VMware environment

    .. note::
        If the host system is not in maintenance mode, it will not be shut down. If you
        want to shut down the host system regardless of whether it is in maintenance mode,
        set ``force=True``. Default is ``force=False``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f shutdown_host my-vmware-config host="myHostSystemName" [force=True]
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The shutdown_host function must be called with '
            '-f or --function.'
        )
    host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
    force = _str_to_bool(kwargs.get('force')) if kwargs and 'force' in kwargs else False
    if not host_name:
        raise SaltCloudSystemExit(
            'You must specify name of the host system.'
        )
    # Get the service instance
    si = _get_si()
    host_ref = salt.utils.vmware.get_mor_by_property(si, vim.HostSystem, host_name)
    if not host_ref:
        raise SaltCloudSystemExit(
            'Specified host system does not exist.'
        )
    if host_ref.runtime.connectionState == 'notResponding':
        raise SaltCloudSystemExit(
            'Specified host system cannot be shut down in it\'s current state (not responding).'
        )
    if not host_ref.capability.rebootSupported:
        raise SaltCloudSystemExit(
            'Specified host system does not support shutdown.'
        )
    # Refuse to shut down a live host unless the caller explicitly forces it.
    if not host_ref.runtime.inMaintenanceMode and not force:
        raise SaltCloudSystemExit(
            'Specified host system is not in maintenance mode. Specify force=True to '
            'force reboot even if there are virtual machines running or other operations '
            'in progress.'
        )
    try:
        host_ref.ShutdownHost_Task(force)
    except Exception as exc:
        log.error(
            'Error while shutting down host %s: %s',
            host_name, exc,
            # Show the traceback if the debug logging level is enabled
            exc_info_on_loglevel=logging.DEBUG
        )
        return {host_name: 'failed to shut down host'}
    return {host_name: 'shut down host'}
def mean_values(self):
        """ the mean value vector while respecting log transform

        Returns
        -------
        mean_values : pandas.Series
        """
        values = self.pst.parameter_data.parval1.copy()
        if self.istransformed:
            # Log-transformed parameters are reported in log10 space.
            values[self.log_indexer] = np.log10(values[self.log_indexer])
        return values
def parseExtensionArgs(self, args, strict=False):
        """Parse the unqualified simple registration request
        parameters and add them to this object.

        This method is essentially the inverse of
        C{L{getExtensionArgs}}. This method restores the serialized simple
        registration request fields.

        If you are extracting arguments from a standard OpenID
        checkid_* request, you probably want to use C{L{fromOpenIDRequest}},
        which will extract the sreg namespace and arguments from the
        OpenID request. This method is intended for cases where the
        OpenID server needs more control over how the arguments are
        parsed than that method provides.

        >>> args = message.getArgs(ns_uri)
        >>> request.parseExtensionArgs(args)

        @param args: The unqualified simple registration arguments
        @type args: {str:str}

        @param strict: Whether requests with fields that are not
            defined in the simple registration specification should be
            tolerated (and ignored)
        @type strict: bool

        @returns: None; updates this object
        """
        # The serialized form carries two comma-separated field lists.
        for list_name in ['required', 'optional']:
            required = (list_name == 'required')
            items = args.get(list_name)
            if items:
                for field_name in items.split(','):
                    try:
                        self.requestField(field_name, required, strict)
                    except ValueError:
                        # Unknown fields are ignored unless strict parsing
                        # was requested.
                        if strict:
                            raise
        self.policy_url = args.get('policy_url')
def updateSchema(self, schemaId, schemaDefinition):
        """
        Update a schema. Throws APIException on failure.
        """
        url = ApiClient.oneSchemaUrl % (self.host, "/draft", schemaId)
        payload = json.dumps({"schemaDefinition": schemaDefinition})
        resp = requests.put(url, auth=self.credentials,
                            headers={"Content-Type": "application/json"},
                            data=payload, verify=self.verify)
        if resp.status_code != 200:
            raise ibmiotf.APIException(resp.status_code, "HTTP error updating schema", resp)
        self.logger.debug("Schema updated")
        return resp.json()
def get_dataset_samples(self, dataset_name, owner=None):
        """ Get the list of samples of a specific remote dataset.

        :param dataset_name: the dataset name
        :param owner: (optional) who owns the dataset. If it is not specified, the current user
            is used. For public dataset use 'public'.
        :return: a pandas Dataframe
        """
        if isinstance(owner, str):
            # Remote dataset names are namespaced as "<owner>.<name>".
            owner = owner.lower()
            dataset_name = owner + "." + dataset_name
        header = self.__check_authentication()
        url = self.address + "/datasets/" + dataset_name
        response = requests.get(url, headers=header)
        if response.status_code != 200:
            raise ValueError("Code {}: {}".format(response.status_code, response.json().get("error")))
        response = response.json()
        samples = response.get("samples")
        if len(samples) == 0:
            # NOTE(review): an empty dataset yields None instead of an empty
            # DataFrame -- confirm callers expect this.
            return None
        res = pd.DataFrame.from_dict(samples)
        return self.process_info_list(res, "info")
def unbind(self, binding):
        """ Unbind the instance

        Args:
            binding (AtlasServiceBinding.Binding): Existing or New binding
        """
        username = self.backend.config.generate_binding_username(binding)
        try:
            self.backend.atlas.DatabaseUsers.delete_a_database_user(username)
        except ErrAtlasNotFound:
            # The user does not exist. This is not an issue because this is possible that we
            # removed it in a previous call that failed later on the broker.
            # This cover a manually deleted user case too.
            pass
        # Always drop the stored binding, even when the Atlas user was gone.
        self.backend.storage.remove(binding)
def parse(self):
        """
        Returns a cleaned lxml ElementTree

        :returns: Whether the cleaned HTML has matches or not
        :rtype: bool
        """
        # Create the element tree
        self.tree = self._build_tree(self.html_contents)
        # Get explicits elements to keep and discard
        self.elts_to_keep = self._get_elements_to_keep()
        self.elts_to_discard = self._get_elements_to_discard()
        # Init an empty list of Elements to remove
        self.elts_to_remove = []
        # Check if the root is a match or if there is any matches
        is_root = self._is_keep(self.tree)
        has_descendant = self._has_keep_elt_in_descendants(self.tree)
        if not(is_root or has_descendant):
            # Nothing matches anywhere in the tree: bail out early.
            return False
        # Parse and clean the ElementTree
        self._parse_element(self.tree, parent_is_keep=is_root)
        self._remove_elements(self.elts_to_remove)
        return True
def time_pad(x, filter_size, dilations):
  """Pad left across time and pad valid across the spatial components.

  Also concats a binary feature that indicates if a feature is padded or not.

  Args:
    x: 5-D Tensor, (NTHWC)
    filter_size: list of ints
    dilations: list of ints, dilations - 1 specifies the number of holes
      between two filter elements.

  Returns:
    x_pad: 5-D Tensor.
  """
  x_shape = common_layers.shape_list(x)
  if filter_size == [1, 1, 1]:
    return x
  _, h, w = filter_size
  # Effective spatial kernel extents once dilation holes are accounted for.
  eff_h = h + (h - 1)*(dilations[2] - 1)
  eff_w = w + (w - 1)*(dilations[3] - 1)
  a = (eff_h - 1) // 2 # vertical padding size
  b = (eff_w - 1) // 2 # horizontal padding size
  # The time axis is padded causally: only on the left, by the full
  # temporal extent of the filter.
  c = filter_size[0] - 1
  # pad across edges.
  padding = [[0, 0], [c, 0], [a, a], [b, b], [0, 0]]
  # concat a binary feature across channels to indicate a padding.
  # 1 indicates that the feature is a padding.
  x_bias = tf.zeros(x_shape[:-1] + [1])
  x_bias = tf.pad(x_bias, padding, constant_values=1)
  x_pad = tf.pad(x, padding)
  x_pad = tf.concat((x_bias, x_pad), axis=-1)
  return x_pad
def publish_proto_metadata_update(self):
        """ Publish protobuf model in ipfs and update existing metadata file """
        metadata = load_mpe_service_metadata(self.args.metadata_file)
        # Upload the proto directory to IPFS, record the resulting hash in
        # the metadata, and write it back to the same file.
        ipfs_hash = utils_ipfs.publish_proto_in_ipfs(
            self._get_ipfs_client(), self.args.protodir)
        metadata.set_simple_field("model_ipfs_hash", ipfs_hash)
        metadata.save_pretty(self.args.metadata_file)
def read_chd_header(chd_file):
    """
    read the .chd header file created when Vision Research software saves the images in a file format other than .cine
    """
    header = {
        "cinefileheader": cine.CINEFILEHEADER(),
        "bitmapinfoheader": cine.BITMAPINFOHEADER(),
        "setup": cine.SETUP(),
    }
    with open(chd_file, "rb") as f:
        # The three fixed-layout structures sit back to back at the start
        # of the file; fill each one in order.
        for key in ("cinefileheader", "bitmapinfoheader", "setup"):
            f.readinto(header[key])
    return header
def decompressBWTPoolProcess(tup):
    '''
    Individual process for decompression

    :param tup: (inputDir, outputDir, startIndex, endIndex) work item; the
        index pair delimits the BWT slice this worker decompresses
    :returns: True on completion
    '''
    (inputDir, outputDir, startIndex, endIndex) = tup
    if startIndex == endIndex:
        # Empty slice: nothing to extract for this worker.
        return True
    #load the thing we'll be extracting from
    msbwt = MultiStringBWT.CompressedMSBWT()
    msbwt.loadMsbwt(inputDir, None)
    #open our output
    outputBwt = np.load(outputDir+'/msbwt.npy', 'r+')
    # Write the decompressed slice directly into the memory-mapped output.
    outputBwt[startIndex:endIndex] = msbwt.getBWTRange(startIndex, endIndex)
    return True
def main():
    """
    Command line entry point.

    Dispatches to one of the sub-commands (lint, unit-test, test-suite) and
    forwards any unparsed extra arguments to the selected handler.
    """
    parser = argparse.ArgumentParser()
    sub_parser = parser.add_subparsers()
    lint_parser = sub_parser.add_parser('lint')
    lint_parser.set_defaults(func=lint)
    unit_test_parser = sub_parser.add_parser('unit-test')
    unit_test_parser.set_defaults(func=unit_test)
    test_suite_parser = sub_parser.add_parser('test-suite')
    test_suite_parser.set_defaults(func=test_suite)
    # parse_known_args lets unknown flags pass through to the handler.
    args, extra_args = parser.parse_known_args()
    if extra_args:
        args.func(extra_args)
    else:
        args.func()
def get_protection(self):
        """
        :calls: `GET /repos/:owner/:repo/branches/:branch/protection <https://developer.github.com/v3/repos/branches>`_
        """
        # The multiple-approving-reviews media type is an API preview header.
        preview_headers = {'Accept': Consts.mediaTypeRequireMultipleApprovingReviews}
        headers, data = self._requester.requestJsonAndCheck(
            "GET",
            self.protection_url,
            headers=preview_headers
        )
        return github.BranchProtection.BranchProtection(
            self._requester, headers, data, completed=True)
def transcode(self, source, destinations, **kwargs):
        """
        Changes the compression characteristics of an audio and/or video
        stream: resolution, bitrate, codec (e.g. VP8/MPEG2 to H.264),
        overlays and cropping.

        :param source: Can be a URI or a local stream name from EMS.
        :type source: str
        :param destinations: The target URI(s) or stream name(s) of the
            transcoded stream. If only a name is given, it will be pushed
            back to the EMS.
        :type destinations: str

        Optional keyword arguments (see the EMS API reference for details):
        ``targetStreamNames``, ``groupName``, ``videoBitrates``,
        ``videoSizes``, ``videoAdvancedParamsProfiles``, ``audioBitrates``,
        ``audioChannelsCounts``, ``audioFrequencies``,
        ``audioAdvancedParamsProfiles``, ``overlays``, ``croppings``,
        ``keepAlive``, ``commandFlags``.

        :link: http://docs.evostream.com/ems_api_definition/transcode
        """
        # Thin wrapper: the protocol layer performs the actual API call.
        kwargs.update(source=source, destinations=destinations)
        return self.protocol.execute('transcode', **kwargs)
def clear(self):
        '''
        Clear plugin manager state.

        Registered mimetype functions will be disposed after calling this
        method.
        '''
        # Restore the built-in detectors; anything registered on top of the
        # defaults is discarded.
        self._mimetype_functions = list(self._default_mimetype_functions)
        super(MimetypePluginManager, self).clear()
def upload_from_string(
        self, data, content_type="text/plain", client=None, predefined_acl=None
    ):
        """Upload contents of this blob from the provided string.

        Text input is encoded as UTF-8 before upload.

        .. note::
            The effect of uploading to an existing blob depends on the
            "versioning" and "lifecycle" policies defined on the blob's
            bucket; in their absence the upload overwrites any existing
            contents. See the `object versioning
            <https://cloud.google.com/storage/docs/object-versioning>`_ and
            `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_
            API documents for details.

        If :attr:`user_project` is set on the bucket, the API request is
        billed to that project.

        :type data: bytes or str
        :param data: The data to store in this blob.

        :type content_type: str
        :param content_type: Optional type of content being uploaded,
            defaulting to ``'text/plain'``.

        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: Optional client; falls back to the ``client`` stored
            on the blob's bucket when omitted.

        :type predefined_acl: str
        :param predefined_acl: (Optional) predefined access control list
        """
        payload = _to_bytes(data, encoding="utf-8")
        buffer = BytesIO(payload)
        self.upload_from_file(
            file_obj=buffer,
            size=len(payload),
            content_type=content_type,
            client=client,
            predefined_acl=predefined_acl,
        )
def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None):
  """Import a laid_out_tensor.

  For expert users.
  The input must be laid out appropriately given the eventual MeshImpl,
  and layout.

  Args:
    mesh: a Mesh
    laid_out_tensor: a LaidOutTensor
    shape: a mtf.Shape
    name: an optional string

  Returns:
    a mtf.Tensor
  """
  # The op wraps the already-laid-out data; [0] selects its single output.
  return ImportLaidOutTensorOperation(
      mesh, laid_out_tensor, convert_to_shape(shape), name=name).outputs[0]
def _CopyField(field, number=None):
  """Copies a (potentially) owned ProtoRPC field instance into a new copy.
  Args:
    field: A ProtoRPC message field to be copied.
    number: An integer for the field to override the number of the field.
        Defaults to None.
  Raises:
    TypeError: If the field is not an instance of messages.Field.
  Returns:
    A copy of the ProtoRPC message field.
  """
  # _GetFieldAttributes splits the field's constructor arguments into
  # positional args and keyword args (and raises TypeError for non-Fields).
  positional_args, kwargs = _GetFieldAttributes(field)
  # Keep the original field number unless an override was given.
  number = number or field.number
  # The field number is the last positional argument of Field constructors.
  positional_args.append(number)
  return field.__class__(*positional_args, **kwargs) | 0.010327 |
def metadata_lint(old, new, locations):
    """Run the linter over the new metadata, comparing to the old.

    Yields lint messages (e.g. LintError) for removed API groups and for
    per-API issues reported by lint_api, annotating each message with the
    API name and a source location when one is missing.
    """
    # ensure we don't modify the metadata (shallow copies; $version is
    # popped from the copies, not the caller's dicts)
    old = old.copy()
    new = new.copy()
    # remove version info
    old.pop('$version', None)
    new.pop('$version', None)
    # Removing a whole API group is always an error.
    for old_group_name in old:
        if old_group_name not in new:
            yield LintError('', 'api group removed', api_name=old_group_name)
    for group_name, new_group in new.items():
        # A brand-new group compares against an empty set of APIs.
        old_group = old.get(group_name, {'apis': {}})
        for name, api in new_group['apis'].items():
            old_api = old_group['apis'].get(name, {})
            api_locations = locations[name]
            for message in lint_api(name, old_api, api, api_locations):
                message.api_name = name
                # Fall back to the API's own location when the check
                # didn't pin a more specific one.
                if message.location is None:
                    message.location = api_locations['api']
                yield message | 0.00108 |
    def register_postcmd_hook(self, func: Callable[[plugin.PostcommandData], plugin.PostcommandData]) -> None:
        """Register a hook to be called after the command function.

        :param func: hook taking and returning a plugin.PostcommandData;
            validated before registration (an invalid signature raises
            inside _validate_prepostcmd_hook, so nothing is appended).
        """
        self._validate_prepostcmd_hook(func, plugin.PostcommandData)
        self._postcmd_hooks.append(func) | 0.010453 |
    def temporary_eject_device(self, name, controller_port, device, temporary_eject):
        """Sets the behavior for guest-triggered medium eject. In some situations
        it is desirable that such ejects update the VM configuration, and in
        others the eject should keep the VM configuration. The device must
        already exist; see :py:func:`IMachine.attach_device` for how to
        attach a new device.
        The @a controllerPort and @a device parameters specify the device slot and
        have the same meaning as with :py:func:`IMachine.attach_device` .
        in name of type str
            Name of the storage controller.
        in controller_port of type int
            Storage controller port.
        in device of type int
            Device slot in the given port.
        in temporary_eject of type bool
            New value for the eject behavior.
        raises :class:`OleErrorInvalidarg`
            SATA device, SATA port, IDE port or IDE slot out of range.
        raises :class:`VBoxErrorInvalidObjectState`
            Attempt to modify an unregistered virtual machine.
        raises :class:`VBoxErrorInvalidVmState`
            Invalid machine state.
        """
        # Validate argument types up front so the COM call below only ever
        # receives well-typed values.
        if not isinstance(name, basestring):
            raise TypeError("name can only be an instance of type basestring")
        if not isinstance(controller_port, baseinteger):
            raise TypeError("controller_port can only be an instance of type baseinteger")
        if not isinstance(device, baseinteger):
            raise TypeError("device can only be an instance of type baseinteger")
        if not isinstance(temporary_eject, bool):
            raise TypeError("temporary_eject can only be an instance of type bool")
        # Forward to the underlying VirtualBox API method.
        self._call("temporaryEjectDevice",
                     in_p=[name, controller_port, device, temporary_eject]) | 0.006821 |
    def start(self):
        """Start the app for the start subcommand.

        Refuses to start if a PID file shows a live cluster, optionally
        daemonizes (POSIX only), schedules controller and engine startup on
        the event loop, and blocks in the loop until interrupted.
        """
        # First see if the cluster is already running
        try:
            pid = self.get_pid_from_file()
        except PIDFileError:
            pass
        else:
            if self.check_pid(pid):
                self.log.critical(
                    'Cluster is already running with [pid=%s]. '
                    'use "ipcluster stop" to stop the cluster.' % pid
                )
                # Here I exit with an unusual exit status that other processes
                # can watch for to learn how I exited.
                self.exit(ALREADY_STARTED)
            else:
                # Stale PID file: the recorded process is gone, clean it up.
                self.remove_pid_file()
        # Now log and daemonize
        self.log.info(
            'Starting ipcluster with [daemon=%r]' % self.daemonize
        )
        # TODO: Get daemonize working on Windows or as a Windows Server.
        if self.daemonize:
            if os.name=='posix':
                daemonize()
        # Schedule controller startup immediately; delay engines so the
        # controller is up before they try to connect.
        dc = ioloop.DelayedCallback(self.start_controller, 0, self.loop)
        dc.start()
        dc = ioloop.DelayedCallback(self.start_engines, 1000*self.delay, self.loop)
        dc.start()
        # Now write the new pid file AFTER our new forked pid is active.
        self.write_pid_file()
        try:
            self.loop.start()
        except KeyboardInterrupt:
            pass
        except zmq.ZMQError as e:
            # EINTR (interrupted syscall) is benign; anything else is real.
            if e.errno == errno.EINTR:
                pass
            else:
                raise
        finally:
            self.remove_pid_file()
  def ReadClientCrashInfoHistory(self, client_id):
    """Reads the full crash history for a particular client.

    Returns a list of rdf_client.ClientCrash objects, newest first; an
    empty list if the client has no recorded crashes.
    """
    history = self.crash_history.get(client_id)
    if not history:
      return []
    res = []
    # Iterate timestamps newest-first; each stored value is a serialized
    # ClientCrash keyed by its timestamp.
    for ts in sorted(history, reverse=True):
      client_data = rdf_client.ClientCrash.FromSerializedString(history[ts])
      client_data.timestamp = ts
      res.append(client_data)
    return res | 0.012225 |
def wnsumd(window):
    """
    Summarize the contents of a double precision window.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnsumd_c.html
    :param window: Window to be summarized.
    :type window: spiceypy.utils.support_types.SpiceCell
    :return:
            Total measure of intervals in window,
            Average measure, Standard deviation,
            Location of shortest interval,
            Location of longest interval.
    :rtype: tuple
    """
    # dtype 1 marks a double-precision SpiceCell, the only kind wnsumd_c accepts.
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    # Output parameters: wnsumd_c writes its results through these pointers.
    meas = ctypes.c_double()
    avg = ctypes.c_double()
    stddev = ctypes.c_double()
    shortest = ctypes.c_int()
    longest = ctypes.c_int()
    libspice.wnsumd_c(ctypes.byref(window), ctypes.byref(meas),
                      ctypes.byref(avg), ctypes.byref(stddev),
                      ctypes.byref(shortest), ctypes.byref(longest))
    # Unwrap the ctypes objects into plain Python numbers.
    return meas.value, avg.value, stddev.value, shortest.value, longest.value | 0.002035 |
    def _raw_predict(self, Xnew, full_cov=False, kern=None):
        """
        For making predictions, does not account for normalization or likelihood
        full_cov is a boolean which defines whether the full covariance matrix
        of the prediction is computed. If full_cov is False (default), only the
        diagonal of the covariance is returned.
        .. math::
            p(f*|X*, X, Y) = \int^{\inf}_{\inf} p(f*|f,X*)p(f|X,Y) df
                        = MVN\left(\nu + N,f*| K_{x*x}(K_{xx})^{-1}Y,
                        \frac{\nu + \beta - 2}{\nu + N - 2}K_{x*x*} - K_{xx*}(K_{xx})^{-1}K_{xx*}\right)
            \nu := \texttt{Degrees of freedom}
        """
        # Delegate to the posterior object; kern=None falls back to the
        # model's own kernel.
        mu, var = self.posterior._raw_predict(kern=self.kern if kern is None else kern, Xnew=Xnew,
                                                 pred_var=self._predictive_variable, full_cov=full_cov)
        # Add the (deterministic) mean function back onto the latent mean.
        if self.mean_function is not None:
            mu += self.mean_function.f(Xnew)
        return mu, var | 0.010091 |
    def to_s(self):
        """
        this method is used to print the output of the executable in a readable/ tokenized format.
        sample usage:
        >>> from boa.compiler import Compiler
        >>> module = Compiler.load('./boa/tests/src/LambdaTest.py').default
        >>> module.write()
        >>> print(module.to_s())
        12 3 LOAD_CONST 9 [data]
        4 STORE_FAST j [data]
        22 11 LOAD_FAST j [data]
        17 CALL_FUNCTION Main.<locals>.q_1 \
        [<boa.code.pytoken.PyToken object at 0x10cb53c50>] [data] 22
        20 STORE_FAST m [data]
        24 27 243 b'\x03\x00' [data] 3
        30 LOAD_FAST m [data]
        35 NOP [data]
        36 241 [data]
        37 242 [data]
        38 RETURN_VALUE [data]
        20 49 243 b'\x03\x00' [data] 3
        52 LOAD_FAST x [data]
        57 LOAD_CONST 1 [data]
        58 BINARY_ADD [data]
        59 NOP [data]
        60 241 [data]
        61 242 [data]
        62 RETURN_VALUE [data]
        """
        # Initialize if needed
        if self.all_vm_tokens is None:
            self.link_methods()
        lineno = 0
        output = []
        pstart = True
        # all_vm_tokens maps VM address -> VMToken; render one row per token.
        for i, (key, value) in enumerate(self.all_vm_tokens.items()):
            if value.pytoken:
                pt = value.pytoken
                do_print_line_no = False
                to_label = None
                from_label = '    '
                # Emit a blank separator line whenever the source line changes.
                if pt.lineno != lineno:
                    output.append("\n")
                    lineno = pt.lineno
                    do_print_line_no = True
                ds = ''
                # Try to render the token's raw data as a little-endian signed
                # int; fall back to a UTF-8 string, else leave it blank.
                if value.data is not None:
                    try:
                        ds = int.from_bytes(value.data, 'little', signed=True)
                    except Exception as e:
                        pass
                    if type(ds) is not int and len(ds) < 1:
                        try:
                            ds = value.data.decode('utf-8')
                        except Exception as e:
                            pass
                # Fixed-width columns: line number, address, opcode, arg, data.
                lno = "{:<10}".format(
                    pt.lineno if do_print_line_no or pstart else '')
                addr = "{:<5}".format(key)
                op = "{:<20}".format(pt.instruction.name)
                # If this is a number, it is likely a custom python opcode, get the name
                if str(pt.pyop).isnumeric():
                    opname = pyop.to_name(int(str(pt.pyop))).replace('HAVE_ARGUMENT', 'STORE_NAME').replace('YIELD_VALUE', 'REVERSE')
                    if opname is not None:
                        op = "{:<20}".format(opname)
                arg = "{:<50}".format(pt.arg_str)
                data = "[data] {:<20}".format(ds)
                output.append("%s%s%s%s%s%s" % (lno, from_label, addr, op, arg, data))
            # Only the very first row forces a line number; afterwards it is
            # printed only when the source line changes.
            pstart = False
        return "\n".join(output) | 0.001919 |
    def _parse_raw_data(self):
        """
        Parses the incoming data and determines if it is valid. Valid
        data gets placed into self._messages
        :return: None
        """
        # Only attempt a parse when a complete frame (SOF..EOF) is buffered.
        if self._START_OF_FRAME in self._raw and self._END_OF_FRAME in self._raw:
            # Discard any noise bytes before the start-of-frame marker.
            # NOTE(review): the condition tests self._raw[0] before checking
            # len(self._raw) > 0, so an empty buffer would raise IndexError
            # here; the trailing try/except below guards the same pattern.
            while self._raw[0] != self._START_OF_FRAME and len(self._raw) > 0:
                self._raw.pop(0)
            if self._raw[0] == self._START_OF_FRAME:
                self._raw.pop(0)
                # Slice out one frame; leave the EOF byte (and anything after
                # it) in the buffer for the next pass.
                eof_index = self._raw.index(self._END_OF_FRAME)
                raw_message = self._raw[:eof_index]
                self._raw = self._raw[eof_index:]
                logger.debug('raw message: {}'.format(raw_message))
                message = self._remove_esc_chars(raw_message)
                logger.debug('message with checksum: {}'.format(message))
                # Checksum is the last two bytes, little-endian.
                expected_checksum = (message[-1] << 8) | message[-2]
                logger.debug('checksum: {}'.format(expected_checksum))
                message = message[:-2]  # checksum bytes
                logger.debug('message: {}'.format(message))
                # Fletcher-16 over everything except the checksum itself.
                sum1, sum2 = self._fletcher16_checksum(message)
                calculated_checksum = (sum2 << 8) | sum1
                if expected_checksum == calculated_checksum:
                    message = message[2:]  # remove length
                    logger.debug('valid message received: {}'.format(message))
                    self._messages.append(message)
                else:
                    logger.warning('invalid message received: {}, discarding'.format(message))
                    logger.debug('expected checksum: {}, calculated checksum: {}'.format(expected_checksum, calculated_checksum))
        # remove any extra bytes at the beginning
        try:
            while self._raw[0] != self._START_OF_FRAME and len(self._raw) > 0:
                self._raw.pop(0)
        except IndexError:
            pass | 0.00268 |
    def deploy_local(self, dotfiles, target_root=None):
        """Deploy dotfiles to a local path.

        :param dotfiles: mapping of source path (relative to self.source)
            to target path (relative to target_root)
        :param target_root: destination root; defaults to self.args.path
        """
        if target_root is None:
            target_root = self.args.path
        for source_path, target_path in dotfiles.items():
            source_path = path.join(self.source, source_path)
            target_path = path.join(target_root, target_path)
            # Clear whatever currently occupies the target location so the
            # copy/symlink below never collides with an existing entry.
            if path.isfile(target_path) or path.islink(target_path):
                self.log.debug('Removing existing file at %s', target_path)
                os.unlink(target_path)
            elif path.isdir(target_path):
                self.log.debug('Removing existing dir at %s', target_path)
                shutil.rmtree(target_path)
            parent_dir = path.dirname(target_path)
            if not path.isdir(parent_dir):
                self.log.debug('Creating parent dir %s', parent_dir)
                os.makedirs(parent_dir)
            if self.args.copy:
                # NOTE(review): the 'file'/'dir' wording in these two debug
                # messages is swapped relative to the operations performed
                # (copytree copies a directory, copy copies a file).
                if path.isdir(source_path):
                    self.log.debug('Copying file %s to %s',
                                   source_path, target_path)
                    shutil.copytree(source_path, target_path)
                else:
                    self.log.debug('Copying dir %s to %s',
                                   source_path, target_path)
                    shutil.copy(source_path, target_path)
            else:
                self.log.debug('Symlinking %s -> %s', target_path, source_path)
                os.symlink(source_path, target_path) | 0.00132 |
def _variant_checkpoints(samples):
    """Check sample configuration to identify required steps in analysis.

    Returns a dict of flags; each flag is True when at least one sample in
    the batch requires that step (align_split requires it for all samples
    that have an aligner configured).
    """
    checkpoints = {}
    checkpoints["vc"] = any([dd.get_variantcaller(d) or d.get("vrn_file") for d in samples])
    checkpoints["sv"] = any([dd.get_svcaller(d) for d in samples])
    checkpoints["jointvc"] = any([(dd.get_jointcaller(d) or "gvcf" in dd.get_tools_on(d))
                                  for d in samples])
    checkpoints["hla"] = any([dd.get_hlacaller(d) for d in samples])
    checkpoints["align"] = any([(dd.get_aligner(d) or dd.get_bam_clean(d)) for d in samples])
    # Split alignment unless every sample either disables splitting or has
    # no aligner at all.
    checkpoints["align_split"] = not all([(dd.get_align_split_size(d) is False or
                                           not dd.get_aligner(d))
                                          for d in samples])
    checkpoints["archive"] = any([dd.get_archive(d) for d in samples])
    checkpoints["umi"] = any([dd.get_umi_consensus(d) for d in samples])
    checkpoints["ensemble"] = any([dd.get_ensemble(d) for d in samples])
    checkpoints["cancer"] = any(dd.get_phenotype(d) in ["tumor"] for d in samples)
    return checkpoints | 0.005286 |
    def potential_from_grid(self, grid):
        """
        Calculate the potential at a given set of arc-second gridded coordinates.
        Parameters
        ----------
        grid : grids.RegularGrid
            The grid of (y,x) arc-second coordinates the deflection angles are computed on.
        """
        # Numerically integrate potential_func from 0 to 1 over the grid;
        # [0] presumably keeps the integral values and drops the error
        # estimate, as with scipy.integrate.quad — TODO confirm quad_grid's
        # return contract.
        potential_grid = quad_grid(self.potential_func, 0.0, 1.0, grid,
                                   args=(self.axis_ratio, self.slope, self.core_radius))[0]
        return self.einstein_radius_rescaled * self.axis_ratio * potential_grid | 0.009058 |
    def as_bool(self, key):
        """
        Accepts a key as input. The corresponding value must be a string or
        the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
        retain compatibility with Python 2.2.
        If the string is one of  ``True``, ``On``, ``Yes``, or ``1`` it returns
        ``True``.
        If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
        ``False``.
        ``as_bool`` is not case sensitive.
        Any other input will raise a ``ValueError``.
        >>> a = ConfigObj()
        >>> a['a'] = 'fish'
        >>> a.as_bool('a')
        Traceback (most recent call last):
        ValueError: Value "fish" is neither True nor False
        >>> a['b'] = 'True'
        >>> a.as_bool('b')
        1
        >>> a['b'] = 'off'
        >>> a.as_bool('b')
        0
        """
        val = self[key]
        # Deliberately uses == (not is): in Python, 1 == True and 0 == False,
        # so the integer forms promised in the docstring pass these branches.
        if val == True:
            return True
        elif val == False:
            return False
        else:
            try:
                if not isinstance(val, string_types):
                    # TODO: Why do we raise a KeyError here?
                    raise KeyError()
                else:
                    # Case-insensitive lookup against the known bool strings
                    # (True/On/Yes/1 and False/Off/No/0).
                    return self.main._bools[val.lower()]
            except KeyError:
                raise ValueError('Value "%s" is neither True nor False' % val) | 0.003695 |
def sample(dataset, target, tolerance=None, pass_cell_arrays=True,
           pass_point_arrays=True):
    """Resample scalar data from a passed mesh onto this mesh using
    :class:`vtk.vtkResampleWithDataSet`.
    Parameters
    ----------
    dataset: vtki.Common
        The source vtk data object as the mesh to sample values on to
    target: vtki.Common
        The vtk data object to sample from - point and cell arrays from
        this object are sampled onto the nodes of the ``dataset`` mesh
    tolerance: float, optional
        tolerance used to compute whether a point in the source is in a
        cell of the input. If not given, tolerance automatically generated.
    pass_cell_arrays: bool, optional
        Preserve source mesh's original cell data arrays
    pass_point_arrays: bool, optional
        Preserve source mesh's original point data arrays
    """
    alg = vtk.vtkResampleWithDataSet() # Construct the ResampleWithDataSet object
    alg.SetInputData(dataset) # Set the Input data (actually the source i.e. where to sample from)
    alg.SetSourceData(target) # Set the Source data (actually the target, i.e. where to sample to)
    alg.SetPassCellArrays(pass_cell_arrays)
    alg.SetPassPointArrays(pass_point_arrays)
    if tolerance is not None:
        # An explicit tolerance overrides VTK's automatic computation.
        alg.SetComputeTolerance(False)
        alg.SetTolerance(tolerance)
    alg.Update() # Perform the resampling
    return _get_output(alg) | 0.006456 |
def create_context_plot(ra, dec, name="Your object"):
    """Creates a K2FootprintPlot showing a given position in context
    with respect to the campaigns.

    :param ra: right ascension of the object (degrees)
    :param dec: declination of the object (degrees)
    :param name: label drawn beneath the marker
    :return: the populated K2FootprintPlot
    """
    plot = K2FootprintPlot()
    plot.plot_galactic()
    plot.plot_ecliptic()
    # Draw every campaign outline (0-19) in a uniform grey.
    for c in range(0, 20):
        plot.plot_campaign_outline(c, facecolor="#666666")
    # for c in [11, 12, 13, 14, 15, 16]:
    #     plot.plot_campaign_outline(c, facecolor="green")
    # High zorder keeps the marker and label on top of the outlines.
    plot.ax.scatter(ra, dec, marker='x', s=250, lw=3, color="red", zorder=500)
    # Offset the label 2 degrees below the marker.
    plot.ax.text(ra, dec - 2, name,
                 ha="center", va="top", color="red",
                 fontsize=20, fontweight='bold', zorder=501)
    return plot | 0.001495 |
    def can_create_log_entry_with_record_types(self, log_entry_record_types):
        """Tests if this user can create a single ``LogEntry`` using the desired record types.
        While ``LoggingManager.getLogEntryRecordTypes()`` can be used to
        examine which records are supported, this method tests which
        record(s) are required for creating a specific ``LogEntry``.
        Providing an empty array tests if a ``LogEntry`` can be created
        with no records.
        arg:    log_entry_record_types (osid.type.Type[]): array of log
                entry record types
        return: (boolean) - ``true`` if ``LogEntry`` creation using the
                specified record ``Types`` is supported, ``false``
                otherwise
        raise:  NullArgument - ``log_entry_record_types`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        # Implemented from template for
        # osid.resource.BinAdminSession.can_create_bin_with_record_types
        # NOTE: It is expected that real authentication hints will be
        # handled in a service adapter above the pay grade of this impl.
        # Delegate to the catalog session when one is configured; otherwise
        # optimistically report that creation is allowed.
        if self._catalog_session is not None:
            return self._catalog_session.can_create_catalog_with_record_types(catalog_record_types=log_entry_record_types)
        return True | 0.002972 |
def template_substitute(text, **kwargs):
    """
    Replace placeholders in text by using the keyword-argument mapping.
    Placeholders not represented in the mapping are left untouched.
    :param text: Text to search and replace placeholders in.
    :param kwargs: Mapping of placeholder names to replacement values.
    :return: Potentially modified text with replaced placeholders.
    """
    for name, value in kwargs.items():
        # Placeholders use the simple "{name}" form.
        placeholder_pattern = "{%s}" % name
        if placeholder_pattern in text:
            text = text.replace(placeholder_pattern, value)
    return text | 0.001706 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.