code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
# Extract a value from a requests.Response using an extractor expression.
# `field` may be a regex-style extractor or a delimiter path (e.g. "body.id").
def extract_field(self, field):
# Only text extractors are supported; anything else is a ParamsError.
# NOTE(review): `basestring`/`unicode` imply Python 2 compatibility shims
# imported elsewhere in this module.
if not isinstance(field, basestring):
err_msg = u"Invalid extractor! => {}\n".format(field)
logger.log_error(err_msg)
raise exceptions.ParamsError(err_msg)
msg = "extract: {}".format(field)
# Regex extractors are tried first; otherwise fall back to delimiter syntax.
if text_extractor_regexp_compile.match(field):
value = self._extract_field_with_regex(field)
else:
value = self._extract_field_with_delimiter(field)
# On Python 2, normalize unicode results to UTF-8 byte strings.
if is_py2 and isinstance(value, unicode):
value = value.encode("utf-8")
msg += "\t=> {}".format(value)
logger.log_debug(msg)
return value | extract value from requests.Response. |
def invoke(self):
# Deserialize the captured function and arguments (pickled with dill)
# and call the function with them.
logger.debug('Running deferred function %s.', self)
# Make sure the owning module can be re-imported before unpickling.
self.module.makeLoadable()
function, args, kwargs = list(map(dill.loads, (self.function, self.args, self.kwargs)))
return function(*args, **kwargs) | Invoke the captured function with the captured arguments. |
def identify_denonavr_receivers():
    """Identify Denon AVR devices via SSDP discovery plus SCPD queries."""
    found = []
    for entry in send_ssdp_broadcast():
        try:
            candidate = evaluate_scpd_xml(entry["URL"])
        except ConnectionError:
            # Unreachable device: skip it and keep scanning.
            continue
        if candidate:
            found.append(candidate)
    return found
# | Identify DenonAVR using SSDP and SCPD queries.
Returns a list of dictionaries which includes all discovered Denon AVR
devices with keys "host", "modelName", "friendlyName", "presentationURL". |
def n2s(self, offset, length):
    """Convert *offset* into a ``length``-character string of its bytes.

    ``self.endian == 'I'`` emits low byte first; otherwise low byte last.
    """
    chars = []
    for _ in range(length):
        byte = chr(offset & 0xFF)
        if self.endian == 'I':
            chars.append(byte)       # little-endian ordering
        else:
            chars.insert(0, byte)    # big-endian ordering
        offset = offset >> 8
    return ''.join(chars)
# | Convert offset to string. |
def unregister(self, name):
    """Remove and return the entry registered under *name* (``None`` if absent).

    *name* may also be an object carrying a ``.name`` attribute.
    """
    key = getattr(name, 'name', name)
    return self.pop(key, None)
# | Unregister function by name. |
def get_parent_objective_bank_ids(self, objective_bank_id):
# Prefer the catalog session when one is configured; otherwise fall
# back to the hierarchy session for the parent lookup.
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalog_ids(catalog_id=objective_bank_id)
return self._hierarchy_session.get_parents(id_=objective_bank_id) | Gets the parent ``Ids`` of the given objective bank.
arg: objective_bank_id (osid.id.Id): the ``Id`` of an
objective bank
return: (osid.id.IdList) - the parent ``Ids`` of the objective
bank
raise: NotFound - ``objective_bank_id`` is not found
raise: NullArgument - ``objective_bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
def remove_product_version_from_build_configuration(id=None, name=None, product_version_id=None):
    """Remove a ProductVersion from association with a BuildConfiguration.

    Returns the formatted JSON list on success, or None when the raw call
    produced nothing.
    """
    raw = remove_product_version_from_build_configuration_raw(id, name, product_version_id)
    return utils.format_json_list(raw) if raw else None
# | Remove a ProductVersion from association with a BuildConfiguration |
def append(self, state, symbol, action, destinationstate, production=None):
    """Append a parsing-table rule for (state, symbol).

    Args:
        state: Source state of the transition.
        symbol: Grammar symbol triggering the action.
        action: One of None, "Accept", "Shift", "Reduce".
        destinationstate: State reached after the action.
        production: Required when action == "Reduce".

    Raises:
        TypeError: on an unknown action, a missing production for a
            Reduce action, or a non-Symbol symbol.
    """
    if action not in (None, "Accept", "Shift", "Reduce"):
        raise TypeError
    rule = {"action": action, "dest": destinationstate}
    if action == "Reduce":
        # BUG FIX: the original tested `rule is None`, which can never be
        # true (rule was just assigned above); the intent is to require a
        # production argument for Reduce actions.
        if production is None:
            raise TypeError("Expected production parameter")
        rule["rule"] = production
    # Unwrap terminal symbols that wrap a single inner Grammar.
    while isinstance(symbol, TerminalSymbol) and isinstance(symbol.gd, Iterable) and len(symbol.gd) == 1 and isinstance(list(symbol.gd)[0], Grammar):
        symbol = TerminalSymbol(list(symbol.gd)[0])
    if not isinstance(symbol, Symbol):
        raise TypeError("Expected symbol, got %s" % symbol)
    self[state][symbol] = rule
# | Appends a new rule |
def get_status(video_id, _connection=None):
    """Return the upload status of the video identified by ``video_id``."""
    conn = _connection or connection.APIConnection()
    return conn.post('get_upload_status', video_id=video_id)
# | Get the status of a video given the ``video_id`` parameter. |
def compress(data,
mode=DEFAULT_MODE,
quality=lib.BROTLI_DEFAULT_QUALITY,
lgwin=lib.BROTLI_DEFAULT_WINDOW,
lgblock=0,
dictionary=b''):
# Build a one-shot Compressor configured with the caller's parameters.
compressor = Compressor(
mode=mode,
quality=quality,
lgwin=lgwin,
lgblock=lgblock,
dictionary=dictionary
)
# FINISH flushes and finalizes the whole stream in a single call.
compressed_data = compressor._compress(data, lib.BROTLI_OPERATION_FINISH)
# Sanity checks: the encoder must be fully drained after FINISH.
assert lib.BrotliEncoderIsFinished(compressor._encoder) == lib.BROTLI_TRUE
assert (
lib.BrotliEncoderHasMoreOutput(compressor._encoder) == lib.BROTLI_FALSE
)
return compressed_data | Compress a string using Brotli.
.. versionchanged:: 0.5.0
Added ``mode``, ``quality``, ``lgwin``, ``lgblock``, and ``dictionary``
parameters.
:param data: A bytestring containing the data to compress.
:type data: ``bytes``
:param mode: The encoder mode.
:type mode: :class:`BrotliEncoderMode` or ``int``
:param quality: Controls the compression-speed vs compression-density
tradeoffs. The higher the quality, the slower the compression. The
range of this value is 0 to 11.
:type quality: ``int``
:param lgwin: The base-2 logarithm of the sliding window size. The range of
this value is 10 to 24.
:type lgwin: ``int``
:param lgblock: The base-2 logarithm of the maximum input block size. The
range of this value is 16 to 24. If set to 0, the value will be set
based on ``quality``.
:type lgblock: ``int``
:param dictionary: A pre-set dictionary for LZ77. Please use this with
caution: if a dictionary is used for compression, the same dictionary
**must** be used for decompression!
:type dictionary: ``bytes``
:returns: The compressed bytestring.
:rtype: ``bytes`` |
def host_context(func):
"Sets the context of the setting to the current host"
@wraps(func)
def decorator(*args, **kwargs):
# Look up the per-host settings and run the wrapped task inside a
# `settings` context for the current `env.host`.
hosts = get_hosts_settings()
with settings(**hosts[env.host]):
return func(*args, **kwargs)
return decorator | Sets the context of the setting to the current host |
def usages(self):
# Ask the backend for all usages of the symbol under the cursor.
row, col = self.editor.cursor()
self.log.debug('usages: in')
# Remember context for the asynchronous response handler, keyed by call id.
self.call_options[self.call_id] = {
"word_under_cursor": self.editor.current_word(),
"false_resp_msg": "Not a valid symbol under the cursor"}
self.send_at_point("UsesOfSymbol", row, col) | Request usages of whatever at cursor. |
def get(cls, bucket, key):
# Look up a single tag by (bucket, key); returns None when absent.
return cls.query.filter_by(
bucket_id=as_bucket_id(bucket),
key=key,
).one_or_none() | Get tag object. |
def code(self, text, lang=None):
    """Add a code block, emitting one styled line per source line.

    Args:
        text: The code to render; may contain newlines.
        lang: Unused; kept for interface compatibility.
    """
    with self.paragraph(stylename='code'):
        lines = text.splitlines()
        # BUG FIX: guard against empty input; the original indexed
        # `lines[-1]` unconditionally and raised IndexError for "".
        if not lines:
            return
        for line in lines[:-1]:
            self._code_line(line)
            self.linebreak()
        self._code_line(lines[-1])
# | Add a code block. |
def set_setting(key, val, env=None):
# Thin wrapper delegating to the settings store; `env` selects an
# alternate environment when given.
return settings.set(key, val, env=env) | Changes the value of the specified key in the current environment, or in
another environment if specified. |
def apply_depth_first(nodes, func, depth=0, as_dict=False, parents=None):
# Depth-first map over a nested (node, children) structure; see the
# docstring text below for worked examples.
if as_dict:
items = OrderedDict()
else:
items = []
if parents is None:
parents = []
node_count = len(nodes)
for i, node in enumerate(nodes):
first = (i == 0)
last = (i == (node_count - 1))
# A tuple entry is (node, children); otherwise it is a leaf.
# NOTE(review): `nodes` is deliberately rebound here to the child list.
if isinstance(node, tuple):
node, nodes = node
else:
nodes = []
item = func(node, parents, nodes, first, last, depth)
item_parents = parents + [node]
if nodes:
# Recurse into the children with an extended parent chain.
children = apply_depth_first(nodes, func,
depth=depth + 1,
as_dict=as_dict,
parents=item_parents)
else:
children = None
if as_dict:
items[node] = Node(item, children)
elif nodes:
items.append((item, children))
else:
items.append(item)
return items | Given a structure such as the application menu layout described above, we
may want to apply an operation to each entry to create a transformed
version of the structure.
For example, let's convert all entries in the application menu layout from
above to upper-case:
>>> pprint(apply_depth_first(menu_actions, lambda node, parents, nodes: node.upper()))
[('FILE',
['LOAD', 'SAVE', ('QUIT', ['QUIT WITHOUT SAVING', 'SAVE AND QUIT'])]),
('EDIT', ['COPY', 'PASTE', ('FILL', ['DOWN', 'SERIES'])])]
Here we used the `apply_depth_first` function to apply a `lambda` function
to each entry to compute the upper-case value corresponding to each node/key.
`as_dict`
---------
To make traversing the structure easier, the output may be expressed as a
nested `OrderedDict` structure. For instance, let's apply the upper-case
transformation from above, but this time with `as_dict=True`:
>>> result = apply_depth_first(menu_actions, as_dict=True, \
... func=lambda node, parents, nodes: node.upper())
>>> type(result)
<class 'collections.OrderedDict'>
Here we see that the result is an ordered dictionary. Moreover, we can
look up the transformed `"File"` entry based on the original key/node
value. Since an entry may contain children, each entry is wrapped as a
`namedtuple` with `item` and `children` attributes.
>>> type(result['File'])
<class 'nested_structures.Node'>
>>> result['File'].item
'FILE'
>>> type(result['File'].children)
<class 'collections.OrderedDict'>
If an entry has children, the `children` attribute is an `OrderedDict`.
Otherwise, the `children` is set to `None`.
Given the information from above, we can look up the `"Load"` child entry
of the `"File"` entry.
>>> result['File'].children['Load']
Node(item='LOAD', children=None)
Similarly, we can look up the `"Save and quit"` child entry of the `"Quit"`
entry.
>>> result['File'].children['Quit'].children['Save and quit']
Node(item='SAVE AND QUIT', children=None)
Note that this function *(i.e., `apply_depth_first`)* could be used to,
e.g., create a menu GUI item for each entry in the structure. This would
decouple the description of the layout from the GUI framework used. |
def chunk_size(self, value):
    """Set the blob's default chunk size.

    Raises:
        ValueError: when ``value`` is positive but not a multiple of the
            required chunk-size multiple.
    """
    multiple = self._CHUNK_SIZE_MULTIPLE
    if value is not None and value > 0 and value % multiple != 0:
        raise ValueError(
            "Chunk size must be a multiple of %d." % (multiple,)
        )
    self._chunk_size = value
# | Set the blob's default chunk size.
:type value: int
:param value: (Optional) The current blob's chunk size, if it is set.
:raises: :class:`ValueError` if ``value`` is not ``None`` and is not a
multiple of 256 KB. |
def main():
    """Send some test strings through SendKeys/parse_keys (manual smoke test)."""
    # BUG FIX: the original assignment was truncated ("actions =" with no
    # right-hand side), which is a syntax error.  Use a minimal smoke-test
    # string; adjust to taste when running interactively.
    actions = "{ENTER}"
    SendKeys(actions, pause=.1)
    keys = parse_keys(actions)
    for k in keys:
        print(k)
        k.Run()
        time.sleep(.1)
    test_strings = [
        "\n"
        "(aa)some text\n",
        "(a)some{ }text\n",
        "(b)some{{}text\n",
        "(c)some{+}text\n",
        "(d)so%me{ab 4}text",
        "(e)so%me{LEFT 4}text",
        "(f)so%me{ENTER 4}text",
        "(g)so%me{^aa 4}text",
        "(h)some +(asdf)text",
        "(i)some %^+(asdf)text",
        "(j)some %^+a text+",
        "(k)some %^+a tex+{&}",
        "(l)some %^+a tex+(dsf)",
        "",
    ]
    for s in test_strings:
        print(repr(s))
        keys = parse_keys(s, with_newlines=True)
        print(keys)
        for k in keys:
            k.Run()
            time.sleep(.1)
        print()
# | Send some test strings |
def prepend(self, key, val, time=0, min_compress_len=0):
# Delegate to the shared _set helper with the "prepend" memcache command.
return self._set("prepend", key, val, time, min_compress_len) | Prepend the value to the beginning of the existing key's value.
Only stores in memcache if key already exists.
Also see L{append}.
@return: Nonzero on success.
@rtype: int |
def addsshkey(self, title, key):
    """Register a new SSH key for the current user.

    Returns True when the server answers 201 (created), False otherwise.
    """
    payload = {'title': title, 'key': key}
    response = requests.post(
        self.keys_url, headers=self.headers, data=payload,
        verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
    return response.status_code == 201
# | Add a new ssh key for the current user
:param title: title of the new key
:param key: the key itself
:return: true if added, false if it didn't add it (it could be because the name or key already exists) |
def deploy(self, id_networkv4):
# POST with an empty payload; the server deploys the network on its
# equipments and marks it active.
data = dict()
uri = 'api/networkv4/%s/equipments/' % id_networkv4
return super(ApiNetworkIPv4, self).post(uri, data=data) | Deploy network in equipments and set column 'active = 1' in tables redeipv4
:param id_networkv4: ID for NetworkIPv4
:return: Equipments configuration output |
def read_local_manifest(output_path):
    """Return the contents of the local manifest as a dict ({} when absent)."""
    manifest_path = get_local_manifest_path(output_path)
    try:
        with open(manifest_path, 'r') as handle:
            entries = dict(get_files_from_textfile(handle))
            logging.debug('Retrieving %s elements from manifest', len(entries))
            return entries
    except IOError:
        # Missing manifest is a normal first-run condition.
        logging.debug('No local manifest at %s', manifest_path)
        return {}
# | Return the contents of the local manifest, as a dictionary. |
def delete_shell(self, pid):
# Politely ask the shell to exit; ignore errors if it is already gone.
# NOTE(review): the docstring trailer mentions SIGTERM, but the code
# actually sends SIGHUP first — confirm which is intended.
try:
os.kill(pid, signal.SIGHUP)
except OSError:
pass
# Poll for up to ~3 seconds (30 * 0.1s) waiting for the child to exit.
num_tries = 30
while num_tries > 0:
try:
# waitpid(..., WNOHANG) returns (0, 0) while the child is still alive.
if os.waitpid(pid, os.WNOHANG)[0] != 0:
break
except OSError:
break
sleep(0.1)
num_tries -= 1
# Still alive after the timeout: force-kill and reap the zombie.
if num_tries == 0:
try:
os.kill(pid, signal.SIGKILL)
os.waitpid(pid, 0)
except OSError:
pass | This function will kill the shell on a tab, trying to send
a sigterm and if it doesn't work, a sigkill. Between these two
signals, we have a timeout of 3 seconds, so is recommended to
call this in another thread. This doesn't change any thing in
UI, so you can use python's start_new_thread. |
def avail_images(conn=None, call=None):
    """List available images for Azure, keyed by image name."""
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_images function must be called with '
            '-f or --function, or with the --list-images option'
        )
    conn = conn or get_conn()
    images = {}
    # OS images first, then VM images, merged into a single mapping.
    for image in list(conn.list_os_images()) + list(conn.list_vm_images()):
        images[image.name] = object_to_dict(image)
    return images
# | List available images for Azure |
def register_functions(lib, ignore_errors):
    """Register function prototypes with a libclang library instance.

    Must be called during library instantiation so Python knows how to
    call out to the shared library.
    """
    for prototype in functionList:
        register_function(lib, prototype, ignore_errors)
# | Register function prototypes with a libclang library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library. |
def collection_choices():
# Imported lazily to avoid a circular import at module load time.
from invenio_collections.models import Collection
# First choice is a "none" sentinel with id 0.
return [(0, _('-None-'))] + [
(c.id, c.name) for c in Collection.query.all()
] | Return collection choices. |
def iterable_source(iterable, target):
# Push items into the target coroutine until it signals completion.
it = iter(iterable)
for item in it:
try:
target.send(item)
except StopIteration:
# Target closed early: hand back the unsent item plus the rest.
return prepend(item, it)
# Everything was consumed; return an empty iterator for a uniform API.
return empty_iter() | Convert an iterable into a stream of events.
Args:
iterable: A series of items which will be sent to the target one by one.
target: The target coroutine or sink.
Returns:
An iterator over any remaining items. |
def nodes_ali(c_obj):
# Fetch raw node objects from AliCloud, aborting on HTTP errors, then
# normalize them via adj_nodes_ali.
ali_nodes = []
try:
ali_nodes = c_obj.list_nodes()
except BaseHTTPError as e:
abort_err("\r HTTP Error with AliCloud: {}".format(e))
ali_nodes = adj_nodes_ali(ali_nodes)
return ali_nodes | Get node objects from AliCloud. |
def get_client(self, client_id):
    """Return details for the client identified by *client_id*, or None."""
    self.assert_has_permission('clients.read')
    uri = self.uri + '/oauth/clients/' + client_id
    headers = self.get_authorization_headers()
    response = requests.get(uri, headers=headers)
    # Anything other than 200 yields None rather than raising.
    if response.status_code != 200:
        return None
    return response.json()
# | Returns details about a specific client by the client_id. |
def destroy(self):
# Run base teardown, then drop the local client and stop event
# forwarding if they were ever created.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop() | Tear down the syndic minion |
def C(w, Xs):
# Estimate the cylinder center from direction w and data points Xs.
# NOTE(review): relies on helpers (projection_matrix, calc_A, calc_A_hat,
# skew_matrix) defined elsewhere; formula not verified here.
# NOTE(review): `n` is computed but never used.
n = len(Xs)
P = projection_matrix(w)
# Project every point onto the plane orthogonal to w.
Ys = [np.dot(P, X) for X in Xs]
A = calc_A(Ys)
A_hat = calc_A_hat(A, skew_matrix(w))
return np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A)) | Calculate the cylinder center given the cylinder direction and
a list of data points. |
def define_mask_borders(image2d, sought_value, nadditional=0):
# Flag border pixels whose value equals `sought_value` row by row,
# padding each flagged border by `nadditional` extra pixels.
naxis2, naxis1 = image2d.shape
mask2d = np.zeros((naxis2, naxis1), dtype=bool)
borders = []
for i in range(naxis2):
# find_pix_borders returns (-1, naxis1) when the whole row is valid data.
jborder_min, jborder_max = find_pix_borders(
image2d[i, :],
sought_value=sought_value
)
borders.append((jborder_min, jborder_max))
if (jborder_min, jborder_max) != (-1, naxis1):
# Mask the left border (plus padding) when present.
if jborder_min != -1:
j1 = 0
j2 = jborder_min + nadditional + 1
mask2d[i, j1:j2] = True
# Mask the right border (plus padding) when present.
if jborder_max != naxis1:
j1 = jborder_max - nadditional
j2 = naxis1
mask2d[i, j1:j2] = True
return mask2d, borders | Generate mask avoiding undesired values at the borders.
Set to True image borders with values equal to 'sought_value'
Parameters
----------
image2d : numpy array
Initial 2D image.
sought_value : int, float, bool
Pixel value that indicates missing data in the spectrum.
nadditional : int
Number of additional pixels to be masked at each border.
Returns
-------
mask2d : numpy array
2D mask.
borders : list of tuples
List of tuples (jmin, jmax) with the border limits (in array
coordinates) found by find_pix_borders. |
def hz2cents(freq_hz, base_frequency=10.0):
    """Convert an array of Hz values to cents relative to ``base_frequency``.

    Zero entries are left at 0.
    """
    cents = np.zeros(len(freq_hz))
    idx = np.flatnonzero(freq_hz)
    cents[idx] = 1200.0 * np.log2(np.abs(freq_hz[idx]) / base_frequency)
    return cents
# | Convert an array of frequency values in Hz to cents.
0 values are left in place.
Parameters
----------
freq_hz : np.ndarray
Array of frequencies in Hz.
base_frequency : float
Base frequency for conversion.
(Default value = 10.0)
Returns
-------
cent : np.ndarray
Array of frequencies in cents, relative to base_frequency |
def decompose(self):
    """Recursively destroy the contents of this tree."""
    self.extract()
    if len(self.contents) == 0:
        return
    node = self.contents[0]
    while node is not None:
        successor = node.next
        if isinstance(node, Tag):
            # Drop child references so the subtree can be collected.
            del node.contents[:]
        # Sever every navigation link on this node.
        node.parent = None
        node.previous = None
        node.previousSibling = None
        node.next = None
        node.nextSibling = None
        node = successor
# | Recursively destroys the contents of this tree. |
def requires(*params):
    """Decorator factory: raise ValueError when any of ``params`` is
    missing (or None) in the decorated function's kwargs.
    """
    def requires(f, self, *args, **kwargs):
        # BUG FIX: on Python 3 `filter()` returns a lazy iterator, which
        # is always truthy, so the original raised even when nothing was
        # missing.  Materialize the missing names as a list instead.
        missing = [x for x in params if kwargs.get(x) is None]
        if missing:
            msgs = ", ".join([PARAMETERS[x]['msg'] for x in missing])
            raise ValueError("Missing the following parameters: %s" % msgs)
        return f(self, *args, **kwargs)
    return decorator(requires)
# | Raise ValueError if any ``params`` are omitted from the decorated kwargs.
None values are considered omissions.
Example usage on an AWS() method:
@requires('zone', 'security_groups')
def my_aws_method(self, custom_args, **kwargs):
# We'll only get here if 'kwargs' contained non-None values for
# both 'zone' and 'security_groups'. |
def set_scope(self, value):
    """Narrow the scope of the commands by appending *value*."""
    if self.default_command:
        # Existing command: join with a single space.
        self.default_command = '{} {}'.format(self.default_command, value)
    else:
        self.default_command += value
    return value
# | narrows the scopes the commands |
def _parse(self, text):
# Split YAML frontmatter (between DELIMITER boundaries) from body text.
text = str(text).strip()
# No leading delimiter: the whole text is content, no metadata.
if not text.startswith(DELIMITER):
return {}, text
try:
_, fm, content = BOUNDARY.split(text, 2)
except ValueError:
# Malformed frontmatter: treat everything as content.
return {}, text
# NOTE(review): safety depends on self.loader_class; prefer a safe YAML
# loader when the text can come from untrusted input.
metadata = yaml.load(fm, Loader=self.loader_class)
metadata = metadata if (isinstance(metadata, dict)) else {}
return metadata, content | Parse text with frontmatter, return metadata and content.
If frontmatter is not found, returns an empty metadata dictionary and original text content. |
def from_translation_key(
cls,
translation_key,
translations,
overlapping_reads,
ref_reads,
alt_reads,
alt_reads_supporting_protein_sequence,
transcripts_overlapping_variant,
transcripts_supporting_protein_sequence,
gene):
# Alternate constructor: copy the TranslationKey fields and attach the
# extra supporting-evidence fields a ProteinSequence requires.
return cls(
amino_acids=translation_key.amino_acids,
variant_aa_interval_start=translation_key.variant_aa_interval_start,
variant_aa_interval_end=translation_key.variant_aa_interval_end,
ends_with_stop_codon=translation_key.ends_with_stop_codon,
frameshift=translation_key.frameshift,
translations=translations,
overlapping_reads=overlapping_reads,
ref_reads=ref_reads,
alt_reads=alt_reads,
alt_reads_supporting_protein_sequence=(
alt_reads_supporting_protein_sequence),
transcripts_overlapping_variant=transcripts_overlapping_variant,
transcripts_supporting_protein_sequence=(
transcripts_supporting_protein_sequence),
gene=gene) | Create a ProteinSequence object from a TranslationKey, along with
all the extra fields a ProteinSequence requires. |
def create_entity_type(self,
parent,
entity_type,
language_code=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
# Lazily wrap the transport method once, attaching the default
# retry/timeout policy from the client's method configs.
if 'create_entity_type' not in self._inner_api_calls:
self._inner_api_calls[
'create_entity_type'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_entity_type,
default_retry=self._method_configs[
'CreateEntityType'].retry,
default_timeout=self._method_configs['CreateEntityType']
.timeout,
client_info=self._client_info,
)
# Build the request proto and dispatch through the wrapped call.
request = entity_type_pb2.CreateEntityTypeRequest(
parent=parent,
entity_type=entity_type,
language_code=language_code,
)
return self._inner_api_calls['create_entity_type'](
request, retry=retry, timeout=timeout, metadata=metadata) | Creates an entity type in the specified agent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.EntityTypesClient()
>>>
>>> parent = client.project_agent_path('[PROJECT]')
>>>
>>> # TODO: Initialize ``entity_type``:
>>> entity_type = {}
>>>
>>> response = client.create_entity_type(parent, entity_type)
Args:
parent (str): Required. The agent to create a entity type for.
Format: ``projects/<Project ID>/agent``.
entity_type (Union[dict, ~google.cloud.dialogflow_v2.types.EntityType]): Required. The entity type to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2.types.EntityType`
language_code (str): Optional. The language of entity synonyms defined in ``entity_type``. If not
specified, the agent's default language is used.
[More than a dozen
languages](https://dialogflow.com/docs/reference/language) are supported.
Note: languages must be enabled in the agent, before they can be used.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.EntityType` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
def parse_filename(filename):
# Try each compiled filename pattern in order; the first match wins.
_patterns = patterns.get_expressions()
result = {}
for cmatcher in _patterns:
match = cmatcher.match(filename)
if match:
namedgroups = match.groupdict().keys()
result['pattern'] = cmatcher.pattern
result['series_name'] = match.group('seriesname')
result['season_number'] = _get_season_no(match, namedgroups)
result['episode_numbers'] = _get_episodes(match, namedgroups)
break
else:
# for/else: no pattern matched at all.
result = None
return result | Parse media filename for metadata.
:param str filename: the name of media file
:returns: dict of metadata attributes found in filename
or None if no matching expression.
:rtype: dict |
def puts(s='', newline=True, stream=STDOUT):
# Apply any active max-width constraint from the context stack.
max_width_ctx = _get_max_width_context()
if max_width_ctx:
cols, separator = max_width_ctx[-1]
s = max_width(s, cols, separator)
if newline:
# Split on newlines and re-join with the active indent prefix.
s = tsplit(s, NEWLINES)
s = map(str, s)
indent = ''.join(INDENT_STRINGS)
s = (str('\n' + indent)).join(s)
_str = ''.join((
''.join(INDENT_STRINGS),
str(s),
'\n' if newline else ''
))
stream(_str) | Prints given string to stdout. |
def Find(self, node_type, item_type):
# Search down a chain of first-children for a notation item whose class
# name matches item_type; returns True when found.
# NOTE(review): the while-condition stops as soon as isinstance() matches,
# in which case nothing is returned (implicitly None) — confirm whether
# that branch should also return True.
if node_type == OtherNodes.DirectionNode:
# Direction notation hangs off the last child.
child = self.GetChild(len(self.children) - 1)
while child is not None and not isinstance(
child.GetItem(),
item_type):
if child.GetItem().__class__.__name__ == item_type.__name__:
return True
child = child.GetChild(0)
if node_type == OtherNodes.ExpressionNode:
# Expression notation hangs off the second-to-last child.
child = self.GetChild(len(self.children) - 2)
while child is not None and not isinstance(
child.GetItem(),
item_type):
if child.GetItem().__class__.__name__ == item_type.__name__:
return True
child = child.GetChild(0) | method for finding specific types of notation from nodes.
will currently return the first one it encounters because this method's only really intended
for some types of notation for which the exact value doesn't really
matter.
:param node_type: the type of node to look under
:param item_type: the type of item (notation) being searched for
:return: first item_type object encountered |
def get_name(self):
    """Return service_description, falling back to name, then a sentinel."""
    for attr in ('service_description', 'name'):
        if hasattr(self, attr):
            return getattr(self, attr)
    return 'SERVICE-DESCRIPTION-MISSING'
# | Accessor to service_description attribute or name if first not defined
:return: service name
:rtype: str |
def read_time_range(cls, *args, **kwargs):
# Build filter criteria; time_order is stored negated, so the
# comparisons are inverted relative to the timestamps (see trailer).
criteria = list(args)
start = kwargs.get('start_timestamp')
end = kwargs.get('end_timestamp')
if start is not None:
criteria.append(cls.time_order <= -start)
if end is not None:
criteria.append(cls.time_order >= -end)
return cls.read(*criteria) | Get all timezones set within a given time. Uses time_dsc_index
SELECT *
FROM <table>
WHERE time_order <= -<start_timestamp>
AND time_order >= -<end_timestamp>
:param args: SQLAlchemy filter criteria, (e.g., uid == uid, type == 1)
:param kwargs: start_timestamp and end_timestamp are the only kwargs, they specify the range (inclusive)
:return: model generator |
def add_channel(channel: EFBChannel):
    """Register the channel with the coordinator.

    Slave channels are indexed by channel id; anything else becomes the
    master channel.
    """
    global master, slaves
    if not isinstance(channel, EFBChannel):
        raise TypeError("Channel instance is expected")
    if channel.channel_type == ChannelType.Slave:
        slaves[channel.channel_id] = channel
    else:
        master = channel
# | Register the channel with the coordinator.
Args:
channel (EFBChannel): Channel to register |
def visit_Stmt(self, node):
# Collect definitions produced while visiting children into a fresh
# list, then emit them before the statement itself.
save_defs, self.defs = self.defs or list(), list()
self.generic_visit(node)
new_defs, self.defs = self.defs, save_defs
return new_defs + [node] | Add new variable definition before the Statement. |
def was_run_code(self, get_all=True):
    """Return the code that was run ("" when nothing is stored).

    With ``get_all`` the stored chunks are first collapsed into a single
    newline-joined entry (note: this mutates ``self.stored``).
    """
    if self.stored is None:
        return ""
    if get_all:
        self.stored = ["\n".join(self.stored)]
    return self.stored[-1]
# | Get all the code that was run. |
def analyze(self, text):
# Query the LUIS endpoint with the raw text and wrap the JSON reply.
logger.debug('Sending %r to LUIS app %s', text, self._url)
r = requests.get(self._url, {'q': text})
logger.debug('Request sent to LUIS URL: %s', r.url)
logger.debug(
'LUIS returned status %s with text: %s', r.status_code, r.text)
# Raise on HTTP-level failures before parsing the body.
r.raise_for_status()
json_response = r.json()
result = LuisResult._from_json(json_response)
logger.debug('Returning %s', result)
return result | Sends text to LUIS for analysis.
Returns a LuisResult. |
def find_by_id(self, tag, params=None, **options):
    """Return the complete tag record for a single tag.

    Args:
        tag: Id of the tag to fetch.
        params: Optional request parameters (dict).

    BUG FIX: the original used a mutable default argument (``params={}``)
    which is shared across calls; default to None and substitute a fresh
    dict per call instead.
    """
    path = "/tags/%s" % (tag)
    return self.client.get(path, params if params is not None else {}, **options)
# | Returns the complete tag record for a single tag.
Parameters
----------
tag : {Id} The tag to get.
[params] : {Object} Parameters for the request |
def create_bucket(self, bucket_name, headers=None,
location=Location.DEFAULT, policy=None):
# Bucket names must be lowercase; validate before hitting the API.
check_lowercase_bucketname(bucket_name)
# A canned ACL policy is passed via the provider's ACL header.
if policy:
if headers:
headers[self.provider.acl_header] = policy
else:
headers = {self.provider.acl_header : policy}
# Non-default locations require a CreateBucketConstraint XML payload.
if location == Location.DEFAULT:
data = ''
else:
data = '<CreateBucketConstraint><LocationConstraint>' + \
location + '</LocationConstraint></CreateBucketConstraint>'
response = self.make_request('PUT', bucket_name, headers=headers,
data=data)
body = response.read()
# 409 means the bucket already exists / conflicts: dedicated error type.
if response.status == 409:
raise self.provider.storage_create_error(
response.status, response.reason, body)
if response.status == 200:
return self.bucket_class(self, bucket_name)
else:
raise self.provider.storage_response_error(
response.status, response.reason, body) | Creates a new located bucket. By default it's in the USA. You can pass
Location.EU to create an European bucket.
:type bucket_name: string
:param bucket_name: The name of the new bucket
:type headers: dict
:param headers: Additional headers to pass along with the request to AWS.
:type location: :class:`boto.s3.connection.Location`
:param location: The location of the new bucket
:type policy: :class:`boto.s3.acl.CannedACLStrings`
:param policy: A canned ACL policy that will be applied to the
new key in S3. |
def get_snapshot(name, config_path=_DEFAULT_CONFIG_PATH, with_packages=False):
    """Get detailed information about a snapshot.

    Args:
        name: Snapshot name given at creation time.
        config_path: Path to the aptly configuration file.
        with_packages: Include the snapshot's package list.

    Returns:
        Parsed `aptly snapshot show` output (falsy when not found).
    """
    _validate_config(config_path)
    # (Removed an unused `sources` local present in the original.)
    cmd = ['snapshot', 'show', '-config={}'.format(config_path),
           '-with-packages={}'.format(str(with_packages).lower()),
           name]
    cmd_ret = _cmd_run(cmd)
    ret = _parse_show_output(cmd_ret=cmd_ret)
    if ret:
        # Typo fix in log message: was "Found shapshot".
        log.debug('Found snapshot: %s', name)
    else:
        log.debug('Unable to find snapshot: %s', name)
    return ret
# | Get detailed information about a snapshot.
:param str name: The name of the snapshot given during snapshot creation.
:param str config_path: The path to the configuration file for the aptly instance.
:param bool with_packages: Return a list of packages in the snapshot.
:return: A dictionary containing information about the snapshot.
:rtype: dict
CLI Example:
.. code-block:: bash
salt '*' aptly.get_snapshot name="test-repo" |
def line(
    xo: int, yo: int, xd: int, yd: int, py_callback: Callable[[int, int], bool]
) -> bool:
    """Iterate over a line from (xo, yo) to (xd, yd), calling *py_callback*
    for each point (both endpoints included).

    Returns False if the callback cancels the iteration, True otherwise.

    .. deprecated:: 2.0
        Use `line_iter` instead.
    """
    for px, py in line_iter(xo, yo, xd, yd):
        if not py_callback(px, py):
            return False
    return True
# | Iterate over a line using a callback function.
Your callback function will take x and y parameters and return True to
continue iteration or False to stop iteration and return.
This function includes both the start and end points.
Args:
xo (int): X starting point.
yo (int): Y starting point.
xd (int): X destination point.
yd (int): Y destination point.
py_callback (Callable[[int, int], bool]):
A callback which takes x and y parameters and returns bool.
Returns:
bool: False if the callback cancels the line interation by
returning False or None, otherwise True.
.. deprecated:: 2.0
Use `line_iter` instead. |
def cross_validation(scheme_class, num_examples, num_folds, strict=True,
**kwargs):
# Yield (train, valid[, valid_size]) scheme tuples for k-fold CV.
# NOTE(review): uses `xrange`/`chain`, which must be imported elsewhere
# in this module (six / itertools compatibility shims).
if strict and num_examples % num_folds != 0:
raise ValueError(("{} examples are not divisible in {} evenly-sized " +
"folds. To allow this, have a look at the " +
"`strict` argument.").format(num_examples,
num_folds))
for i in xrange(num_folds):
# Integer fold boundaries; folds differ in size by at most one example.
begin = num_examples * i // num_folds
end = num_examples * (i+1) // num_folds
# The training scheme covers everything outside [begin, end).
train = scheme_class(list(chain(xrange(0, begin),
xrange(end, num_examples))),
**kwargs)
valid = scheme_class(xrange(begin, end), **kwargs)
if strict:
yield (train, valid)
else:
yield (train, valid, end - begin) | Return pairs of schemes to be used for cross-validation.
Parameters
----------
scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
The type of the returned schemes. The constructor is called with an
iterator and `**kwargs` as arguments.
num_examples : int
The number of examples in the datastream.
num_folds : int
The number of folds to return.
strict : bool, optional
If `True`, enforce that `num_examples` is divisible by `num_folds`
and so, that all validation sets have the same size. If `False`,
the size of the validation set is returned along the iteration
schemes. Defaults to `True`.
Yields
------
fold : tuple
The generator returns `num_folds` tuples. The first two elements of
the tuple are the training and validation iteration schemes. If
`strict` is set to `False`, the tuple has a third element
corresponding to the size of the validation set. |
def GetRootKey(self):
# Build a virtual root key and graft each mapped key path onto it,
# creating intermediate virtual keys where real subkeys are absent.
root_registry_key = virtual.VirtualWinRegistryKey('')
for mapped_key in self._MAPPED_KEYS:
key_path_segments = key_paths.SplitKeyPath(mapped_key)
if not key_path_segments:
continue
registry_key = root_registry_key
# Walk (or create) all intermediate path segments.
for name in key_path_segments[:-1]:
sub_registry_key = registry_key.GetSubkeyByName(name)
if not sub_registry_key:
sub_registry_key = virtual.VirtualWinRegistryKey(name)
registry_key.AddSubkey(sub_registry_key)
registry_key = sub_registry_key
# Bind the leaf segment to this registry object for later resolution.
sub_registry_key = registry_key.GetSubkeyByName(key_path_segments[-1])
if (not sub_registry_key and
isinstance(registry_key, virtual.VirtualWinRegistryKey)):
sub_registry_key = virtual.VirtualWinRegistryKey(
key_path_segments[-1], registry=self)
registry_key.AddSubkey(sub_registry_key)
return root_registry_key | Retrieves the Windows Registry root key.
Returns:
WinRegistryKey: Windows Registry root key.
Raises:
RuntimeError: if there are multiple matching mappings and
the correct mapping cannot be resolved. |
def lookupEncoding(encoding):
# Normalize bytes to ASCII text first; undecodable names are invalid.
if isinstance(encoding, binary_type):
try:
encoding = encoding.decode("ascii")
except UnicodeDecodeError:
return None
if encoding is not None:
try:
return webencodings.lookup(encoding)
except AttributeError:
# NOTE(review): AttributeError is treated as "not a valid encoding"
# here — confirm this matches webencodings' failure mode.
return None
else:
return None | Return the python codec name corresponding to an encoding or None if the
def GetNewEventId(self, event_time=None):
    """Return a unique event id string: "<usec-timestamp>:<host>:<pid>"."""
    if event_time is None:
        # Default to the current time in microseconds.
        event_time = int(time.time() * 1e6)
    return "{}:{}:{}".format(event_time, socket.gethostname(), os.getpid())
# | Return a unique Event ID string. |
def create_server(self, server_name, *args, **kwargs):
    """Create and register an IRC server connection slot.

    The connection is not opened here; any extra args are stored as
    connect info for a later connect() call.

    Args:
        server_name (str): Name used to store and look up the server.

    Returns:
        ServerConnection: the not-yet-connected server object.
    """
    server = ServerConnection(name=server_name, reactor=self)
    if args or kwargs:
        server.set_connect_info(*args, **kwargs)
    # Replay every handler already registered on the reactor onto the new
    # server, so it behaves as if present at registration time.
    for verb, infos in self._event_handlers.items():
        for info in infos:
            server.register_event(info['direction'], verb, info['handler'],
                                  priority=info['priority'])
    self.servers[server_name] = server
    return server | Create an IRC server connection slot.
The server will actually be connected to when
:meth:`girc.client.ServerConnection.connect` is called later.
Args:
server_name (str): Name of the server, to be used for functions and accessing the
server later through the reactor.
Returns:
server (girc.client.ServerConnection): A not-yet-connected server. |
def round(arg, digits=None):
op = ops.Round(arg, digits)
return op.to_expr() | Round values either to integer or indicated number of decimal places.
Returns
-------
rounded : type depending on digits argument
digits None or 0
decimal types: decimal
other numeric types: bigint
digits nonzero
decimal types: decimal
other numeric types: double |
def publish_command_start(self, command, database_name,
                          request_id, connection_id, op_id=None):
    """Publish a CommandStartedEvent to all registered command listeners.

    :Parameters:
      - `command`: The command document.
      - `database_name`: Name of the database the command runs against.
      - `request_id`: The request id for this operation.
      - `connection_id`: The (host, port) of the target server.
      - `op_id`: Optional operation id; defaults to `request_id`.
    """
    if op_id is None:
        op_id = request_id
    event = CommandStartedEvent(
        command, database_name, request_id, connection_id, op_id)
    for subscriber in self.__command_listeners:
        try:
            subscriber.started(event)
        except Exception:
            # A faulty listener must never break the operation path.
            _handle_exception() | Publish a CommandStartedEvent to all command listeners.
:Parameters:
- `command`: The command document.
- `database_name`: The name of the database this command was run
against.
- `request_id`: The request id for this operation.
- `connection_id`: The address (host, port) of the server this
command was sent to.
- `op_id`: The (optional) operation id for this operation. |
def _has_x(self, kwargs):
return (('x' in kwargs) or (self._element_x in kwargs) or
(self._type == 3 and self._element_1mx in kwargs)) | Returns True if x is explicitly defined in kwargs |
def add_section(self, section):
    """Create a new section in the configuration.

    Extends RawConfigParser.add_section by first validating that the
    section name is a string.
    """
    # _validate_value_types raises if `section` is not a string type.
    section, _, _ = self._validate_value_types(section=section)
    super(ConfigParser, self).add_section(section) | Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string. |
def get_by(self, name):
item = self.controlled_list.get_by(name)
if item:
return TodoElementUX(parent=self, controlled_element=item) | find a todo list element by name |
def save_json(obj, filename, **kwargs):
with open(filename, 'w', encoding='utf-8') as f:
json.dump(obj, f, **kwargs) | Save an object as a JSON file.
Args:
obj: The object to save. Must be JSON-serializable.
filename: Path to the output file.
**kwargs: Additional arguments to `json.dump`. |
def as_text(str_or_bytes, encoding='utf-8', errors='strict'):
if isinstance(str_or_bytes, text):
return str_or_bytes
return str_or_bytes.decode(encoding, errors) | Return input string as a text string.
Should work for input string that's unicode or bytes,
given proper encoding.
>>> print(as_text(b'foo'))
foo
>>> b'foo'.decode('utf-8') == u'foo'
True |
def create_basic_op_node(op_name, node, kwargs):
    """Build a single ONNX node for an operator with no op-specific attrs.

    Returns a one-element list so callers can treat all converter
    helpers uniformly (some emit multiple nodes).
    """
    name, input_nodes, _ = get_inputs(node, kwargs)
    # The single output tensor is named after the node itself.
    node = onnx.helper.make_node(
        op_name,
        input_nodes,
        [name],
        name=name
    )
    return [node] | Helper function to create a basic operator
node that doesn't contain op specific attrs |
def _build_credentials(self, nexus_switches):
    """Build the per-switch credential table for the REST API client.

    :param nexus_switches: mapping of switch IP -> config attributes
    :returns: dict of switch IP -> (username, password, https_verify,
        https_cert, None)
    """
    credentials = {}
    for switch_ip, attrs in nexus_switches.items():
        credentials[switch_ip] = (
            attrs[const.USERNAME], attrs[const.PASSWORD],
            attrs[const.HTTPS_VERIFY], attrs[const.HTTPS_CERT],
            None)
        if not attrs[const.HTTPS_VERIFY]:
            # Surface the security impact of skipping TLS verification.
            LOG.warning("HTTPS Certificate verification is "
                        "disabled. Your connection to Nexus "
                        "Switch %(ip)s is insecure.",
                        {'ip': switch_ip})
    return credentials | Build credential table for Rest API Client.
:param nexus_switches: switch config
:returns credentials: switch credentials list |
def global_include(self, pattern):
    """Include every file anywhere in the tree matching `pattern`.

    Very inefficient on large file trees: it scans the full file list.
    Returns True when at least one file matched.
    """
    # Lazily populate the complete file listing on first use.
    if self.allfiles is None:
        self.findall()
    # Prefixing '**' makes the pattern match at any directory depth.
    match = translate_pattern(os.path.join('**', pattern))
    found = [f for f in self.allfiles if match.match(f)]
    self.extend(found)
    return bool(found) | Include all files anywhere in the current directory that match the
pattern. This is very inefficient on large file trees. |
def _aux_type(self, i):
    """Return the numpy dtype of this sparse array's i-th aux data.

    Queries the MXNet C API and maps the C type code back to numpy.
    """
    aux_type = ctypes.c_int()
    check_call(_LIB.MXNDArrayGetAuxType(self.handle, i, ctypes.byref(aux_type)))
    return _DTYPE_MX_TO_NP[aux_type.value] | Data-type of the array's ith aux data.
Returns
-------
numpy.dtype
This BaseSparseNDArray's aux data type. |
def angle2xyz(azi, zen):
azi = xu.deg2rad(azi)
zen = xu.deg2rad(zen)
x = xu.sin(zen) * xu.sin(azi)
y = xu.sin(zen) * xu.cos(azi)
z = xu.cos(zen)
return x, y, z | Convert azimuth and zenith to cartesian. |
def calc_mass_from_fit_and_conv_factor(A, Damping, ConvFactor):
T0 = 300
mFromA = 2*Boltzmann*T0/(pi*A) * ConvFactor**2 * Damping
return mFromA | Calculates mass from the A parameter from fitting, the damping from
fitting in angular units and the Conversion factor calculated from
comparing the ratio of the z signal and first harmonic of z.
Parameters
----------
A : float
A factor calculated from fitting
Damping : float
damping in radians/second calcualted from fitting
ConvFactor : float
conversion factor between volts and nms
Returns
-------
mass : float
mass in kgs |
def progress_bar(name, maxval, prefix='Converting'):
    """Yield a progress bar for a conversion and finalize it on exit.

    Generator body for a context manager (presumably wrapped with
    @contextmanager at the definition site -- confirm against callers).

    Parameters
    ----------
    name : str
        Name of the file being converted.
    maxval : int
        Total number of steps for the conversion.
    """
    widgets = ['{} {}: '.format(prefix, name), Percentage(), ' ',
               Bar(marker='=', left='[', right=']'), ' ', ETA()]
    bar = ProgressBar(widgets=widgets, max_value=maxval, fd=sys.stdout).start()
    try:
        yield bar
    finally:
        # Always complete and close the bar, even if the caller raised.
        bar.update(maxval)
        bar.finish() | Manages a progress bar for a conversion.
Parameters
----------
name : str
Name of the file being converted.
maxval : int
Total number of steps for the conversion. |
def set_flowcontrol_receive(self, name, value=None, default=False,
disable=False):
return self.set_flowcontrol(name, 'receive', value, default, disable) | Configures the interface flowcontrol receive value
Args:
name (string): The interface identifier. It must be a full
interface name (ie Ethernet, not Et)
value (boolean): True if the interface should enable receiving
flow control packets, otherwise False
default (boolean): Specifies to default the interface flow
control receive value
disable (boolean): Specifies to disable the interface flow
control receive value
Returns:
True if the operation succeeds otherwise False is returned |
def connect(self, agent='Python'):
    """Open a GET request to self.url and yield it, closing on exit.

    Generator body for a context manager; exceptions from the caller
    propagate through the yield, and the socket is closed regardless.

    :yield request: the open HTTP response object
    """
    headers = {'User-Agent': agent}
    request = urlopen(Request(self.url, headers=headers))
    try:
        yield request
    finally:
        # Ensure the network socket is always released.
        request.close() | Context manager for HTTP Connection state and ensures proper handling
request.close() | Context manager for HTTP Connection state and ensures proper handling
of network sockets, sends a GET request.
Exception is raised at the yield statement.
:yield request: FileIO<Socket> |
def _JsonDecodeDict(self, data):
    """JSON object decode hook converting unicode values (Python 2 code).

    Recursively normalizes unicode keys/values via _TryStr and lists via
    _JsonDecodeList; dicts tagged with '__pyringe_type_name__' are
    wrapped in a ProxyObject.
    """
    rv = {}
    for key, value in data.iteritems():
        if isinstance(key, unicode):
            key = self._TryStr(key)
        if isinstance(value, unicode):
            value = self._TryStr(value)
        elif isinstance(value, list):
            value = self._JsonDecodeList(value)
        rv[key] = value
    # Tagged dicts represent remote typed objects; wrap for access.
    if '__pyringe_type_name__' in data:
        rv = ProxyObject(rv)
    return rv | Json object decode hook that automatically converts unicode objects. |
def material_advantage(self, input_color, val_scheme):
if self.get_king(input_color).in_check(self) and self.no_moves(input_color):
return -100
if self.get_king(-input_color).in_check(self) and self.no_moves(-input_color):
return 100
return sum([val_scheme.val(piece, input_color) for piece in self]) | Finds the advantage a particular side possesses given a value scheme.
:type: input_color: Color
:type: val_scheme: PieceValues
:rtype: double |
def _convert_to_array(array_like, dtype):
if isinstance(array_like, bytes):
return np.frombuffer(array_like, dtype=dtype)
return np.asarray(array_like, dtype=dtype) | Convert Matrix attributes which are array-like or buffer to array. |
def on_add_vrf_conf(self, evt):
    """Event handler for a newly added VrfConf.

    Creates and links a VrfTable for the new VRF, subscribes to the
    conf's stats/config change events, imports related VPN paths and
    refreshes local RT NLRIs before signalling the addition.
    """
    vrf_conf = evt.value
    route_family = vrf_conf.route_family
    assert route_family in vrfs.SUPPORTED_VRF_RF
    # Create a new VRF table to store this VRF's routing information.
    vrf_table = self._table_manager.create_and_link_vrf_table(vrf_conf)
    vrf_conf.add_listener(ConfWithStats.UPDATE_STATS_LOG_ENABLED_EVT,
                          self.on_stats_config_change)
    vrf_conf.add_listener(ConfWithStats.UPDATE_STATS_TIME_EVT,
                          self.on_stats_config_change)
    vrf_conf.add_listener(VrfConf.VRF_CHG_EVT, self.on_chg_vrf_conf)
    # Pull in any existing VPN paths that belong to this VRF's imports.
    self._table_manager.import_all_vpn_paths_to_vrf(vrf_table)
    self._rt_manager.update_local_rt_nlris()
    self._signal_bus.vrf_added(vrf_conf) | Event handler for new VrfConf.
Creates a VrfTable to store routing information related to new Vrf.
Also arranges for related paths to be imported to this VrfTable. |
async def info(self) -> Optional[JobDef]:
    """Return all stored information about this job without waiting.

    Prefers the finished-result record; otherwise falls back to the
    queued job definition, then attaches the job's queue score if any.
    """
    info = await self.result_info()
    if not info:
        # No result yet: try the raw queued job payload.
        v = await self._redis.get(job_key_prefix + self.job_id, encoding=None)
        if v:
            info = unpickle_job(v)
    if info:
        info.score = await self._redis.zscore(queue_name, self.job_id)
    return info | All information on a job, including its result if it's available, does not wait for the result. |
async def add_user(self, username, password=None, display_name=None):
    """Add a user to this controller.

    :param str username: Username
    :param str password: Password
    :param str display_name: Display name; defaults to the username
    :returns: A juju User instance for the newly created user
    """
    if not display_name:
        display_name = username
    user_facade = client.UserManagerFacade.from_connection(
        self.connection())
    users = [client.AddUser(display_name=display_name,
                            username=username,
                            password=password)]
    results = await user_facade.AddUser(users)
    # The returned secret key is required to register/fetch the user.
    secret_key = results.results[0].secret_key
    return await self.get_user(username, secret_key=secret_key) | Add a user to this controller.
:param str username: Username
:param str password: Password
:param str display_name: Display name
:returns: A :class:`~juju.user.User` instance |
def pattern_to_regex(pattern: str) -> str:
if pattern and pattern[-1] == "*":
pattern = pattern[:-1]
end = ""
else:
end = "$"
for metac in META_CHARS:
pattern = pattern.replace(metac, "\\" + metac)
return "^" + VARS_PT.sub(regex_replacer, pattern) + end | convert url patten to regex |
def draw_status(self, writer, idx):
    """Draw the bottom status bar when the output terminal is a tty.

    :param writer: callable writes to output stream, receiving unicode.
    :param idx: current page position index.
    :type idx: int
    """
    if self.term.is_a_tty:
        writer(self.term.hide_cursor())
    style = self.screen.style
    # Move to the last terminal row before writing the status line.
    writer(self.term.move(self.term.height - 1))
    if idx == self.last_page:
        last_end = u'(END)'
    else:
        last_end = u'/{0}'.format(self.last_page)
    txt = (u'Page {idx}{last_end} - '
           u'{q} to quit, [keys: {keyset}]'
           .format(idx=style.attr_minor(u'{0}'.format(idx)),
                   last_end=style.attr_major(last_end),
                   keyset=style.attr_major('kjfb12-='),
                   q=style.attr_minor(u'q')))
    # Center the line and strip trailing padding before writing.
    writer(self.term.center(txt).rstrip()) | Conditionally draw status bar when output terminal is a tty.
:param writer: callable writes to output stream, receiving unicode.
:param idx: current page position index.
:type idx: int |
def _uniform_phi(M):
return np.random.uniform(-np.pi, np.pi, M) | Generate M random numbers in [-pi, pi). |
def pagination_links(paginator_page, show_pages, url_params=None,
first_page_label=None, last_page_label=None,
page_url=''):
return {
'items': paginator_page,
'show_pages': show_pages,
'url_params': url_params,
'first_page_label': first_page_label,
'last_page_label': last_page_label,
'page_url': page_url,
} | Django template tag to display pagination links for a paginated
list of items.
Expects the following variables:
* the current :class:`~django.core.paginator.Page` of a
:class:`~django.core.paginator.Paginator` object
* a dictionary of the pages to be displayed, in the format
generated by :meth:`eulcommon.searchutil.pages_to_show`
* optional url params to include in pagination link (e.g., search
terms when paginating search results)
* optional first page label (only used when first page is not in
list of pages to be shown)
* optional last page label (only used when last page is not in
list of pages to be shown)
* optional url to use for page links (only needed when the url is
different from the current one)
Example use::
{% load search_utils %}
{% pagination_links paged_items show_pages %} |
def Matches(self, registry_key, search_depth):
if self._key_path_segments is None:
key_path_match = None
else:
key_path_match = self._CheckKeyPath(registry_key, search_depth)
if not key_path_match:
return False, key_path_match
if search_depth != self._number_of_key_path_segments:
return False, key_path_match
return True, key_path_match | Determines if the Windows Registry key matches the find specification.
Args:
registry_key (WinRegistryKey): Windows Registry key.
search_depth (int): number of key path segments to compare.
Returns:
tuple: contains:
bool: True if the Windows Registry key matches the find specification,
False otherwise.
bool: True if the key path matches, False if not or None if no key path
specified. |
def check_folders(name):
if os.getcwd().endswith('analyses'):
correct = input('You are in an analyses folder. This will create '
'another analyses folder inside this one. Do '
'you want to continue? (y/N)')
if correct != 'y':
return False
if not os.path.exists(os.path.join(os.getcwd(), 'analyses')):
correct = input('This is the first analysis here. Do '
'you want to continue? (y/N)')
if correct != 'y':
return False
if os.path.exists(os.path.join(os.getcwd(), 'analyses', name)):
correct = input('An analysis with this name exists already. Do '
'you want to continue? (y/N)')
if correct != 'y':
return False
return True | Only checks and asks questions. Nothing is written to disk. |
def render_chart_to_file(self, template_name: str, chart: Any, path: str):
    """Render a chart or page to a local HTML file.

    :param template_name: The name of template file.
    :param chart: A Chart or Page object
    :param path: The destination file which the html code write to
    """
    tpl = self.env.get_template(template_name)
    html = tpl.render(chart=self.generate_js_link(chart))
    # Apply registered regex replacements before writing UTF-8 output.
    write_utf8_html_file(path, self._reg_replace(html)) | Render a chart or page to local html files.
write_utf8_html_file(path, self._reg_replace(html)) | Render a chart or page to local html files.
:param chart: A Chart or Page object
:param path: The destination file which the html code write to
:param template_name: The name of template file. |
def _get_sample_generator(samples):
if isinstance(samples, Mapping):
def samples_generator():
for ind in range(samples[list(samples.keys())[0]].shape[0]):
yield np.array([samples[s][ind, :] for s in sorted(samples)])
elif isinstance(samples, np.ndarray):
def samples_generator():
for ind in range(samples.shape[0]):
yield samples[ind]
else:
samples_generator = samples
return samples_generator | Get a sample generator from the given polymorphic input.
Args:
samples (ndarray, dict or generator): either an matrix of shape (d, p, n) with d problems, p parameters and
n samples, or a dictionary with for every parameter a matrix with shape (d, n) or, finally,
a generator function that yields sample arrays of shape (p, n).
Returns:
generator: a generator that yields a matrix of size (p, n) for every problem in the input. |
def linear_set_layer(layer_size,
                     inputs,
                     context=None,
                     activation_fn=tf.nn.relu,
                     dropout=0.0,
                     name=None):
    """Apply a per-element linear transform to a set, with optional context.

    Args:
      layer_size: Dimension to transform the input vectors to.
      inputs: Tensor [batch_size, sequence_length, input_dims].
      context: Optional tensor [batch_size, context_dims] (or with an
        extra middle axis) projected and added to every element.
      activation_fn: Activation applied after the summed projections.
      dropout: Dropout probability (TF1 keep_prob = 1 - dropout).
      name: Variable scope name.

    Returns:
      Tensor [batch_size, sequence_length, layer_size].
    """
    with tf.variable_scope(
        name, default_name="linear_set_layer", values=[inputs]):
        # A kernel-size-1 convolution acts as a per-element dense layer.
        outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")
        if context is not None:
            # Broadcast a rank-2 context across the sequence dimension.
            if len(context.get_shape().as_list()) == 2:
                context = tf.expand_dims(context, axis=1)
            cont_tfm = conv1d(
                context, layer_size, 1, activation=None, name="cont_conv")
            outputs += cont_tfm
        if activation_fn is not None:
            outputs = activation_fn(outputs)
        if dropout != 0.0:
            outputs = tf.nn.dropout(outputs, 1.0 - dropout)
        return outputs | Basic layer type for doing funky things with sets.
return outputs | Basic layer type for doing funky things with sets.
Applies a linear transformation to each element in the input set.
If a context is supplied, it is concatenated with the inputs.
e.g. One can use global_pool_1d to get a representation of the set which
can then be used as the context for the next layer.
TODO: Add bias add (or control the biases used).
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
context: A tensor of shape [batch_size, context_dims] containing a global
statistic about the set.
activation_fn: The activation function to use.
dropout: Dropout probability.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, output_dims] containing the
sequences of transformed vectors. |
def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):
    """In-house nanmin/nanmax for object-dtype arrays.

    NaNs are replaced with `fill_value` before applying np.<func>;
    positions with no valid values fall back to the dtype's fill value.
    """
    valid_count = count(value, axis=axis)
    filled_value = fillna(value, fill_value)
    data = getattr(np, func)(filled_value, axis=axis, **kwargs)
    if not hasattr(data, 'dtype'):
        # Scalar result: mark all-NaN input with the dtype fill value.
        data = dtypes.fill_value(value.dtype) if valid_count == 0 else data
        return np.array(data, dtype=value.dtype)
    # Array result: mask out positions where nothing was valid.
    return where_method(data, valid_count != 0) | In house nanmin and nanmax for object array |
def encode_eternal_jwt_token(self, user, **custom_claims):
return self.encode_jwt_token(
user,
override_access_lifespan=VITAM_AETERNUM,
override_refresh_lifespan=VITAM_AETERNUM,
**custom_claims
) | This utility function encodes a jwt token that never expires
.. note:: This should be used sparingly since the token could become
a security concern if it is ever lost. If you use this
method, you should be sure that your application also
implements a blacklist so that a given token can be blocked
should it be lost or become a security concern |
def combine_relevance_tables(relevance_tables):
def _combine(a, b):
a.relevant |= b.relevant
a.p_value = a.p_value.combine(b.p_value, min, 1)
return a
return reduce(_combine, relevance_tables) | Create a combined relevance table out of a list of relevance tables,
aggregating the p-values and the relevances.
:param relevance_tables: A list of relevance tables
:type relevance_tables: List[pd.DataFrame]
:return: The combined relevance table
:rtype: pandas.DataFrame |
def build_plans(self):
    """Return the Build Plans API client, creating it lazily on first use.

    Returns:
        BuildPlans: the cached API client.
    """
    # Lazy initialization: construct the client once and reuse it.
    if not self.__build_plans:
        self.__build_plans = BuildPlans(self.__connection)
    return self.__build_plans | Gets the Build Plans API client.
Returns:
BuildPlans: |
def _gst_available():
    """Check whether GStreamer 1.0 and the PyGObject bindings are installed.

    Each import stage catches only the exception types that stage is
    expected to raise; any failure means GStreamer is unusable.
    """
    try:
        import gi
    except ImportError:
        return False
    try:
        # require_version raises ValueError when Gst 1.0 is not available.
        gi.require_version('Gst', '1.0')
    except (ValueError, AttributeError):
        return False
    try:
        from gi.repository import Gst
    except ImportError:
        return False
    return True | Determine whether Gstreamer and the Python GObject bindings are
installed. |
def query_param(self, key, value=None, default=None, as_list=False):
    """Get or set the query parameter `key`.

    When `value` is given, returns a NEW URL with the parameter set
    (URLs are immutable; _mutate builds the copy). Otherwise returns
    the current value, `default` when missing, and unwraps a
    single-element list unless `as_list` is True.

    :param string key: key to look for
    :param string default: value to return if ``key`` isn't found
    :param boolean as_list: whether to return the values as a list
    :param string value: the new query parameter to use
    """
    parse_result = self.query_params()
    if value is not None:
        # Setter branch: normalize to unicode and rebuild the query string.
        if isinstance(value, (list, tuple)):
            value = list(map(to_unicode, value))
        else:
            value = to_unicode(value)
        parse_result[to_unicode(key)] = value
        return URL._mutate(
            self, query=unicode_urlencode(parse_result, doseq=True))
    try:
        result = parse_result[key]
    except KeyError:
        return default
    if as_list:
        return result
    return result[0] if len(result) == 1 else result | Return or set a query parameter for the given key
The value can be a list.
:param string key: key to look for
:param string default: value to return if ``key`` isn't found
:param boolean as_list: whether to return the values as a list
:param string value: the new query parameter to use |
def istring(self, in_string=''):
    """Return an IString that follows this server's IRC casemapping.

    Equality, lower() and upper() on the result honour the server's
    casemapping -- important for nicks and channel names.
    """
    new_string = IString(in_string)
    new_string.set_std(self.features.get('casemapping'))
    # Until the casemapping is finalized, remember created strings so
    # they can be updated when the server announces its mapping.
    if not self._casemap_set:
        self._imaps.append(new_string)
    return new_string | Return a string that uses this server's IRC casemapping.
return new_string | Return a string that uses this server's IRC casemapping.
This string's equality with other strings, ``lower()``, and ``upper()`` takes this
server's casemapping into account. This should be used for things such as nicks and
channel names, where comparing strings using the correct casemapping can be very
important. |
def copy_file(self, file_id, dest_folder_id):
    """Copy a file into another folder via the Box API (Python 2 code).

    Args:
        file_id (int): ID of the file to copy.
        dest_folder_id (int): ID of the destination parent folder.

    Returns:
        dict: Response from Box.

    Raises:
        BoxError: error response from Box (status_code >= 400); 409 when
            an item with the same name already exists.
        BoxHttpResponseError: response from Box is malformed.
        requests.exceptions.*: any connection related problem.
    """
    return self.__request("POST", "/files/" + unicode(file_id) + "/copy",
                          data={ "parent": {"id": unicode(dest_folder_id)} }) | Copy file to new destination
Args:
file_id (int): ID of the folder.
dest_folder_id (int): ID of parent folder you are copying to.
Returns:
dict. Response from Box.
Raises:
BoxError: An error response is returned from Box (status_code >= 400).
BoxError: 409 - Item with the same name already exists.
In this case you will need download the file and upload a new version to your destination.
(Box currently doesn't have a method to copy a new verison.)
BoxHttpResponseError: Response from Box is malformed.
requests.exceptions.*: Any connection related problem. |
def getBWTRange(self, start, end):
startBlockIndex = start >> self.bitPower
endBlockIndex = int(math.floor(float(end)/self.binSize))
trueStart = startBlockIndex*self.binSize
return self.decompressBlocks(startBlockIndex, endBlockIndex)[start-trueStart:end-trueStart] | This function masks the complexity of retrieving a chunk of the BWT from the compressed format
@param start - the beginning of the range to retrieve
@param end - the end of the range in normal python notation (bwt[end] is not part of the return)
@return - a range of integers representing the characters in the bwt from start to end |
def _centroids(self, verts):
r
value = sp.zeros([len(verts), 3])
for i, i_verts in enumerate(verts):
value[i] = np.mean(i_verts, axis=0)
return value | r'''
Function to calculate the centroid as the mean of a set of vertices.
Used for pore and throat. |
def clean(self):
    """Validate that the permission targets exactly one of a user or
    the anonymous user.
    """
    super().clean()
    # Exactly one of `user` / `anonymous_user` must be set (XOR).
    if (
        (self.user is None and not self.anonymous_user) or
        (self.user and self.anonymous_user)
    ):
        raise ValidationError(
            _('A permission should target either a user or an anonymous user'),
        ) | Validates the current instance.
def debug_variable_node_render(self, context):
    """Render like DebugVariableNode.render but let UnicodeDecodeError
    propagate instead of being swallowed.
    """
    try:
        output = self.filter_expression.resolve(context)
        output = template_localtime(output, use_tz=context.use_tz)
        output = localize(output, use_l10n=context.use_l10n)
        output = force_text(output)
    except Exception as e:
        # Attach the template source so debug pages can show the origin.
        if not hasattr(e, 'django_template_source'):
            e.django_template_source = self.source
        raise
    if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
        return escape(output)
    else:
        return output | Like DebugVariableNode.render, but doesn't catch UnicodeDecodeError. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.