text stringlengths 89 104k | code_tokens list | avg_line_len float64 7.91 980 | score float64 0 630 |
|---|---|---|---|
def add(self, data):
"""
>>> ob = OutBuffer().add(OutBuffer.sizelimit * b"x")
>>> ob.add(b"y") # doctest: +ELLIPSIS
Traceback (most recent call last):
...
OmapiSizeLimitError: ...
@type data: bytes
@returns: self
@raises OmapiSizeLimitError:
"""
if len(self) + len(data) > self.sizelimit:
raise OmapiSizeLimitError()
self.buff.write(data)
return self | [
"def",
"add",
"(",
"self",
",",
"data",
")",
":",
"if",
"len",
"(",
"self",
")",
"+",
"len",
"(",
"data",
")",
">",
"self",
".",
"sizelimit",
":",
"raise",
"OmapiSizeLimitError",
"(",
")",
"self",
".",
"buff",
".",
"write",
"(",
"data",
")",
"ret... | 22.8125 | 15.0625 |
def validNormalizeAttributeValue(self, doc, name, value):
"""Does the validation related extra step of the normalization
of attribute values: If the declared value is not CDATA,
then the XML processor must further process the normalized
attribute value by discarding any leading and trailing
space (#x20) characters, and by replacing sequences of
space (#x20) characters by single space (#x20) character. """
if doc is None: doc__o = None
else: doc__o = doc._o
ret = libxml2mod.xmlValidNormalizeAttributeValue(doc__o, self._o, name, value)
return ret | [
"def",
"validNormalizeAttributeValue",
"(",
"self",
",",
"doc",
",",
"name",
",",
"value",
")",
":",
"if",
"doc",
"is",
"None",
":",
"doc__o",
"=",
"None",
"else",
":",
"doc__o",
"=",
"doc",
".",
"_o",
"ret",
"=",
"libxml2mod",
".",
"xmlValidNormalizeAtt... | 57.454545 | 18.363636 |
def new(self, item_lists, processor:PreProcessor=None, **kwargs)->'ItemList':
"Create a new `ItemList` from `items`, keeping the same attributes."
processor = ifnone(processor, self.processor)
copy_d = {o:getattr(self,o) for o in self.copy_new}
kwargs = {**copy_d, **kwargs}
return self.__class__(item_lists, processor=processor, **kwargs) | [
"def",
"new",
"(",
"self",
",",
"item_lists",
",",
"processor",
":",
"PreProcessor",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
"->",
"'ItemList'",
":",
"processor",
"=",
"ifnone",
"(",
"processor",
",",
"self",
".",
"processor",
")",
"copy_d",
"=",
"{... | 62.333333 | 23.333333 |
def get_impl_ver(env):
"""Return implementation version."""
impl_ver = env.config_var("py_version_nodot")
if not impl_ver or get_abbr_impl(env) == "pp":
impl_ver = "".join(map(str, get_impl_version_info(env)))
return impl_ver | [
"def",
"get_impl_ver",
"(",
"env",
")",
":",
"impl_ver",
"=",
"env",
".",
"config_var",
"(",
"\"py_version_nodot\"",
")",
"if",
"not",
"impl_ver",
"or",
"get_abbr_impl",
"(",
"env",
")",
"==",
"\"pp\"",
":",
"impl_ver",
"=",
"\"\"",
".",
"join",
"(",
"ma... | 34.857143 | 17.428571 |
def get_enabled_regions(exec_type, json_spec, from_command_line, executable_builder_exeception):
"""
Return a list of regions in which the global executable (app or global workflow)
will be enabled, based on the "regionalOption" in their JSON specification
and/or --region CLI argument used with "dx build".
:param exec_type: 'app' or 'globalworkflow'
:type json_spec: str.
:param json_spec: The contents of dxapp.json or dxworkflow.json
:type json_spec: dict or None.
:param from_command_line: The regional options specified on the command-line via --region.
:type from_command_line: list or None.
:param builder_exception: Exception that will be thrown.
:type builder_exception: AppBuilderException or WorkflowBuilderException.
"""
from_spec = json_spec.get('regionalOptions')
if from_spec is not None:
assert_consistent_reg_options(exec_type, json_spec, executable_builder_exeception)
assert_consistent_regions(from_spec, from_command_line, executable_builder_exeception)
enabled_regions = None
if from_spec is not None:
enabled_regions = from_spec.keys()
elif from_command_line is not None:
enabled_regions = from_command_line
return enabled_regions | [
"def",
"get_enabled_regions",
"(",
"exec_type",
",",
"json_spec",
",",
"from_command_line",
",",
"executable_builder_exeception",
")",
":",
"from_spec",
"=",
"json_spec",
".",
"get",
"(",
"'regionalOptions'",
")",
"if",
"from_spec",
"is",
"not",
"None",
":",
"asse... | 41.133333 | 24.133333 |
def distance(self, other):
"""Distance to another point on the sphere"""
return math.acos(self._pos3d.dot(other.vector)) | [
"def",
"distance",
"(",
"self",
",",
"other",
")",
":",
"return",
"math",
".",
"acos",
"(",
"self",
".",
"_pos3d",
".",
"dot",
"(",
"other",
".",
"vector",
")",
")"
] | 44.666667 | 9.666667 |
def _transform(self, *transforms):
"""
Copies the given Sequence and appends new transformation
:param transform: transform to apply or list of transforms to apply
:return: transformed sequence
"""
sequence = None
for transform in transforms:
if sequence:
sequence = Sequence(sequence, transform=transform)
else:
sequence = Sequence(self, transform=transform)
return sequence | [
"def",
"_transform",
"(",
"self",
",",
"*",
"transforms",
")",
":",
"sequence",
"=",
"None",
"for",
"transform",
"in",
"transforms",
":",
"if",
"sequence",
":",
"sequence",
"=",
"Sequence",
"(",
"sequence",
",",
"transform",
"=",
"transform",
")",
"else",
... | 37.153846 | 14.846154 |
def _retry_from_retry_config(retry_params, retry_codes):
"""Creates a Retry object given a gapic retry configuration.
Args:
retry_params (dict): The retry parameter values, for example::
{
"initial_retry_delay_millis": 1000,
"retry_delay_multiplier": 2.5,
"max_retry_delay_millis": 120000,
"initial_rpc_timeout_millis": 120000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 120000,
"total_timeout_millis": 600000
}
retry_codes (sequence[str]): The list of retryable gRPC error code
names.
Returns:
google.api_core.retry.Retry: The default retry object for the method.
"""
exception_classes = [
_exception_class_for_grpc_status_name(code) for code in retry_codes
]
return retry.Retry(
retry.if_exception_type(*exception_classes),
initial=(retry_params["initial_retry_delay_millis"] / _MILLIS_PER_SECOND),
maximum=(retry_params["max_retry_delay_millis"] / _MILLIS_PER_SECOND),
multiplier=retry_params["retry_delay_multiplier"],
deadline=retry_params["total_timeout_millis"] / _MILLIS_PER_SECOND,
) | [
"def",
"_retry_from_retry_config",
"(",
"retry_params",
",",
"retry_codes",
")",
":",
"exception_classes",
"=",
"[",
"_exception_class_for_grpc_status_name",
"(",
"code",
")",
"for",
"code",
"in",
"retry_codes",
"]",
"return",
"retry",
".",
"Retry",
"(",
"retry",
... | 38.46875 | 23.5625 |
def parse(self, response):
"""
Checks any given response on being an article and if positiv,
passes the response to the pipeline.
:param obj response: The scrapy response
"""
if not self.helper.parse_crawler.content_type(response):
return
for request in self.helper.parse_crawler \
.recursive_requests(response, self, self.ignore_regex,
self.ignore_file_extensions):
yield request
yield self.helper.parse_crawler.pass_to_pipeline_if_article(
response, self.allowed_domains[0], self.original_url) | [
"def",
"parse",
"(",
"self",
",",
"response",
")",
":",
"if",
"not",
"self",
".",
"helper",
".",
"parse_crawler",
".",
"content_type",
"(",
"response",
")",
":",
"return",
"for",
"request",
"in",
"self",
".",
"helper",
".",
"parse_crawler",
".",
"recursi... | 37.294118 | 20.823529 |
def _create_header(self):
"""
Function to create the GroupHeader (GrpHdr) in the
CstmrCdtTrfInitn Node
"""
# Retrieve the node to which we will append the group header.
CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn')
# Create the header nodes.
GrpHdr_node = ET.Element("GrpHdr")
MsgId_node = ET.Element("MsgId")
CreDtTm_node = ET.Element("CreDtTm")
NbOfTxs_node = ET.Element("NbOfTxs")
CtrlSum_node = ET.Element("CtrlSum")
InitgPty_node = ET.Element("InitgPty")
Nm_node = ET.Element("Nm")
# Add data to some header nodes.
MsgId_node.text = self.msg_id
CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
Nm_node.text = self._config['name']
# Append the nodes
InitgPty_node.append(Nm_node)
GrpHdr_node.append(MsgId_node)
GrpHdr_node.append(CreDtTm_node)
GrpHdr_node.append(NbOfTxs_node)
GrpHdr_node.append(CtrlSum_node)
GrpHdr_node.append(InitgPty_node)
# Append the header to its parent
CstmrCdtTrfInitn_node.append(GrpHdr_node) | [
"def",
"_create_header",
"(",
"self",
")",
":",
"# Retrieve the node to which we will append the group header.",
"CstmrCdtTrfInitn_node",
"=",
"self",
".",
"_xml",
".",
"find",
"(",
"'CstmrCdtTrfInitn'",
")",
"# Create the header nodes.",
"GrpHdr_node",
"=",
"ET",
".",
"E... | 35.9375 | 11.5 |
def _use_memcache(self, key, options=None):
"""Return whether to use memcache for this key.
Args:
key: Key instance.
options: ContextOptions instance, or None.
Returns:
True if the key should be cached in memcache, False otherwise.
"""
flag = ContextOptions.use_memcache(options)
if flag is None:
flag = self._memcache_policy(key)
if flag is None:
flag = ContextOptions.use_memcache(self._conn.config)
if flag is None:
flag = True
return flag | [
"def",
"_use_memcache",
"(",
"self",
",",
"key",
",",
"options",
"=",
"None",
")",
":",
"flag",
"=",
"ContextOptions",
".",
"use_memcache",
"(",
"options",
")",
"if",
"flag",
"is",
"None",
":",
"flag",
"=",
"self",
".",
"_memcache_policy",
"(",
"key",
... | 27.722222 | 18.277778 |
def wallet_key_valid(self, wallet):
"""
Returns if a **wallet** key is valid
:param wallet: Wallet to check key is valid
:type wallet: str
>>> rpc.wallet_key_valid(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
True
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_key_valid', payload)
return resp['valid'] == '1' | [
"def",
"wallet_key_valid",
"(",
"self",
",",
"wallet",
")",
":",
"wallet",
"=",
"self",
".",
"_process_value",
"(",
"wallet",
",",
"'wallet'",
")",
"payload",
"=",
"{",
"\"wallet\"",
":",
"wallet",
"}",
"resp",
"=",
"self",
".",
"call",
"(",
"'wallet_key... | 25.1 | 21.1 |
def set_image(self, image, filename=None, resize=False):
"""
Set the poster or thumbnail of a this Vidoe.
"""
if self.id:
data = self.connection.post('add_image', filename,
video_id=self.id, image=image.to_dict(), resize=resize)
if data:
self.image = Image(data=data) | [
"def",
"set_image",
"(",
"self",
",",
"image",
",",
"filename",
"=",
"None",
",",
"resize",
"=",
"False",
")",
":",
"if",
"self",
".",
"id",
":",
"data",
"=",
"self",
".",
"connection",
".",
"post",
"(",
"'add_image'",
",",
"filename",
",",
"video_id... | 38.555556 | 14.111111 |
def hide_routemap_holder_route_map_content_match_metric_metric_rmm(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
match = ET.SubElement(content, "match")
metric = ET.SubElement(match, "metric")
metric_rmm = ET.SubElement(metric, "metric-rmm")
metric_rmm.text = kwargs.pop('metric_rmm')
callback = kwargs.pop('callback', self._callback)
return callback(config) | [
"def",
"hide_routemap_holder_route_map_content_match_metric_metric_rmm",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"config",
"=",
"ET",
".",
"Element",
"(",
"\"config\"",
")",
"hide_routemap_holder",
"=",
"ET",
".",
"SubElement",
"(",
"config",
",",
"\"hide-... | 50.45 | 17.6 |
def route(self, path, routinemethod, container = None, host = None, vhost = None, method = [b'GET', b'HEAD']):
'''
Route specified path to a WSGI-styled routine factory
:param path: path to match, can be a regular expression
:param routinemethod: factory function routinemethod(env), env is an Environment object
see also utils.http.Environment
:param container: routine container
:param host: if specified, only response to request to specified host
:param vhost: if specified, only response to request to specified vhost.
If not specified, response to dispatcher default vhost.
:param method: if specified, response to specified methods
'''
self.routeevent(path, statichttp(container)(routinemethod), container, host, vhost, method) | [
"def",
"route",
"(",
"self",
",",
"path",
",",
"routinemethod",
",",
"container",
"=",
"None",
",",
"host",
"=",
"None",
",",
"vhost",
"=",
"None",
",",
"method",
"=",
"[",
"b'GET'",
",",
"b'HEAD'",
"]",
")",
":",
"self",
".",
"routeevent",
"(",
"p... | 46.789474 | 33 |
def _remove_stashed_checkpoints(self, till_3pc_key=None):
"""
Remove stashed received checkpoints up to `till_3pc_key` if provided,
otherwise remove all stashed received checkpoints
"""
if till_3pc_key is None:
self.stashedRecvdCheckpoints.clear()
self.logger.info('{} removing all stashed checkpoints'.format(self))
return
for view_no in list(self.stashedRecvdCheckpoints.keys()):
if view_no < till_3pc_key[0]:
self.logger.info('{} removing stashed checkpoints for view {}'.format(self, view_no))
del self.stashedRecvdCheckpoints[view_no]
elif view_no == till_3pc_key[0]:
for (s, e) in list(self.stashedRecvdCheckpoints[view_no].keys()):
if e <= till_3pc_key[1]:
self.logger.info('{} removing stashed checkpoints: '
'viewNo={}, seqNoStart={}, seqNoEnd={}'.
format(self, view_no, s, e))
del self.stashedRecvdCheckpoints[view_no][(s, e)]
if len(self.stashedRecvdCheckpoints[view_no]) == 0:
del self.stashedRecvdCheckpoints[view_no] | [
"def",
"_remove_stashed_checkpoints",
"(",
"self",
",",
"till_3pc_key",
"=",
"None",
")",
":",
"if",
"till_3pc_key",
"is",
"None",
":",
"self",
".",
"stashedRecvdCheckpoints",
".",
"clear",
"(",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'{} removing all s... | 50.04 | 24.36 |
def with_known_args(self, **kwargs):
"""Send only known keyword-arguments to the phase when called."""
argspec = inspect.getargspec(self.func)
stored = {}
for key, arg in six.iteritems(kwargs):
if key in argspec.args or argspec.keywords:
stored[key] = arg
if stored:
return self.with_args(**stored)
return self | [
"def",
"with_known_args",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"argspec",
"=",
"inspect",
".",
"getargspec",
"(",
"self",
".",
"func",
")",
"stored",
"=",
"{",
"}",
"for",
"key",
",",
"arg",
"in",
"six",
".",
"iteritems",
"(",
"kwargs",
... | 34.5 | 11.2 |
def init_datamembers(self, rec):
"""Initialize current GOTerm with data members for storing optional attributes."""
# pylint: disable=multiple-statements
if 'synonym' in self.optional_attrs: rec.synonym = []
if 'xref' in self.optional_attrs: rec.xref = set()
if 'subset' in self.optional_attrs: rec.subset = set()
if 'comment' in self.optional_attrs: rec.comment = ""
if 'relationship' in self.optional_attrs:
rec.relationship = {}
rec.relationship_rev = {} | [
"def",
"init_datamembers",
"(",
"self",
",",
"rec",
")",
":",
"# pylint: disable=multiple-statements",
"if",
"'synonym'",
"in",
"self",
".",
"optional_attrs",
":",
"rec",
".",
"synonym",
"=",
"[",
"]",
"if",
"'xref'",
"in",
"self",
".",
"optional_attrs",
":",
... | 55.2 | 13.8 |
def date_matches(self, timestamp):
"""Determine whether the timestamp date is equal to the argument date."""
if self.date is None:
return False
timestamp = datetime.fromtimestamp(float(timestamp), self.timezone)
if self.date.date() == timestamp.date():
return True
return False | [
"def",
"date_matches",
"(",
"self",
",",
"timestamp",
")",
":",
"if",
"self",
".",
"date",
"is",
"None",
":",
"return",
"False",
"timestamp",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"float",
"(",
"timestamp",
")",
",",
"self",
".",
"timezone",
")",
... | 33.4 | 19.3 |
def _get_bounds(self):
"""
Subclasses may override this method.
"""
from fontTools.pens.boundsPen import BoundsPen
pen = BoundsPen(self.layer)
self.draw(pen)
return pen.bounds | [
"def",
"_get_bounds",
"(",
"self",
")",
":",
"from",
"fontTools",
".",
"pens",
".",
"boundsPen",
"import",
"BoundsPen",
"pen",
"=",
"BoundsPen",
"(",
"self",
".",
"layer",
")",
"self",
".",
"draw",
"(",
"pen",
")",
"return",
"pen",
".",
"bounds"
] | 28 | 9.25 |
def beta_code(self, text):
"""Replace method. Note: regex.subn() returns a tuple (new_string,
number_of_subs_made).
"""
text = text.upper().replace('-', '')
for (pattern, repl) in self.pattern1:
text = pattern.subn(repl, text)[0]
for (pattern, repl) in self.pattern2:
text = pattern.subn(repl, text)[0]
# remove third run, if punct list not used
for (pattern, repl) in self.pattern3:
text = pattern.subn(repl, text)[0]
return text | [
"def",
"beta_code",
"(",
"self",
",",
"text",
")",
":",
"text",
"=",
"text",
".",
"upper",
"(",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
"for",
"(",
"pattern",
",",
"repl",
")",
"in",
"self",
".",
"pattern1",
":",
"text",
"=",
"pattern",
... | 40.461538 | 7.153846 |
def transceive(self, data, timeout=0.1, retries=2):
"""Send a Type 2 Tag command and receive the response.
:meth:`transceive` is a type 2 tag specific wrapper around the
:meth:`nfc.ContactlessFrontend.exchange` method. It can be
used to send custom commands as a sequence of *data* bytes to
the tag and receive the response data bytes. If *timeout*
seconds pass without a response, the operation is aborted and
:exc:`~nfc.tag.TagCommandError` raised with the TIMEOUT_ERROR
error code.
Command execution errors raise :exc:`Type2TagCommandError`.
"""
log.debug(">> {0} ({1:f}s)".format(hexlify(data), timeout))
if not self.target:
# Sometimes we have to (re)sense the target during
# communication. If that failed (tag gone) then any
# further attempt to transceive() is the same as
# "unrecoverable timeout error".
raise Type2TagCommandError(nfc.tag.TIMEOUT_ERROR)
started = time.time()
for retry in range(1 + retries):
try:
data = self.clf.exchange(data, timeout)
break
except nfc.clf.CommunicationError as error:
reason = error.__class__.__name__
log.debug("%s after %d retries" % (reason, retry))
else:
if type(error) is nfc.clf.TimeoutError:
raise Type2TagCommandError(nfc.tag.TIMEOUT_ERROR)
if type(error) is nfc.clf.TransmissionError:
raise Type2TagCommandError(nfc.tag.RECEIVE_ERROR)
if type(error) is nfc.clf.ProtocolError:
raise Type2TagCommandError(nfc.tag.PROTOCOL_ERROR)
raise RuntimeError("unexpected " + repr(error))
elapsed = time.time() - started
log.debug("<< {0} ({1:f}s)".format(hexlify(data), elapsed))
return data | [
"def",
"transceive",
"(",
"self",
",",
"data",
",",
"timeout",
"=",
"0.1",
",",
"retries",
"=",
"2",
")",
":",
"log",
".",
"debug",
"(",
"\">> {0} ({1:f}s)\"",
".",
"format",
"(",
"hexlify",
"(",
"data",
")",
",",
"timeout",
")",
")",
"if",
"not",
... | 43.837209 | 21.534884 |
def uniform_partition(min_pt=None, max_pt=None, shape=None, cell_sides=None,
nodes_on_bdry=False):
"""Return a partition with equally sized cells.
Parameters
----------
min_pt, max_pt : float or sequence of float, optional
Vectors defining the lower/upper limits of the intervals in an
`IntervalProd` (a rectangular box). ``None`` entries mean
"compute the value".
shape : int or sequence of ints, optional
Number of nodes per axis. ``None`` entries mean
"compute the value".
cell_sides : float or sequence of floats, optional
Side length of the partition cells per axis. ``None`` entries mean
"compute the value".
nodes_on_bdry : bool or sequence, optional
If a sequence is provided, it determines per axis whether to
place the last grid point on the boundary (``True``) or shift it
by half a cell size into the interior (``False``). In each axis,
an entry may consist in a single bool or a 2-tuple of
bool. In the latter case, the first tuple entry decides for
the left, the second for the right boundary. The length of the
sequence must be ``array.ndim``.
A single boolean is interpreted as a global choice for all
boundaries.
Notes
-----
In each axis, 3 of the 4 possible parameters ``min_pt``, ``max_pt``,
``shape`` and ``cell_sides`` must be given. If all four are
provided, they are checked for consistency.
See Also
--------
uniform_partition_fromintv : partition an existing set
uniform_partition_fromgrid : use an existing grid as basis
Examples
--------
Any combination of three of the four parameters can be used for
creation of a partition:
>>> part = odl.uniform_partition(min_pt=0, max_pt=2, shape=4)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(min_pt=0, shape=4, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(max_pt=2, shape=4, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
>>> part = odl.uniform_partition(min_pt=0, max_pt=2, cell_sides=0.5)
>>> part.cell_boundary_vecs
(array([ 0. , 0.5, 1. , 1.5, 2. ]),)
In higher dimensions, the parameters can be given differently in
each axis. Where ``None`` is given, the value will be computed:
>>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
... shape=[4, 2])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
>>> part = odl.uniform_partition(min_pt=[0, 0], max_pt=[1, 2],
... shape=[None, 2], cell_sides=[0.25, None])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
>>> part = odl.uniform_partition(min_pt=[0, None], max_pt=[None, 2],
... shape=[4, 2], cell_sides=[0.25, 1])
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]), array([ 0., 1., 2.]))
By default, no grid points are placed on the boundary:
>>> part = odl.uniform_partition(0, 1, 4)
>>> part.nodes_on_bdry
False
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.5 , 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0.125, 0.375, 0.625, 0.875]),)
This can be changed with the nodes_on_bdry parameter:
>>> part = odl.uniform_partition(0, 1, 3, nodes_on_bdry=True)
>>> part.nodes_on_bdry
True
>>> part.cell_boundary_vecs
(array([ 0. , 0.25, 0.75, 1. ]),)
>>> part.grid.coord_vectors
(array([ 0. , 0.5, 1. ]),)
We can specify this per axis, too. In this case we choose both
in the first axis and only the rightmost in the second:
>>> part = odl.uniform_partition([0, 0], [1, 1], (3, 3),
... nodes_on_bdry=(True, (False, True)))
...
>>> part.cell_boundary_vecs[0] # first axis, as above
array([ 0. , 0.25, 0.75, 1. ])
>>> part.grid.coord_vectors[0]
array([ 0. , 0.5, 1. ])
>>> part.cell_boundary_vecs[1] # second, asymmetric axis
array([ 0. , 0.4, 0.8, 1. ])
>>> part.grid.coord_vectors[1]
array([ 0.2, 0.6, 1. ])
"""
# Normalize partition parameters
# np.size(None) == 1, so that would screw it for sizes 0 of the rest
sizes = [np.size(p) for p in (min_pt, max_pt, shape, cell_sides)
if p is not None]
ndim = int(np.max(sizes))
min_pt = normalized_scalar_param_list(min_pt, ndim, param_conv=float,
keep_none=True)
max_pt = normalized_scalar_param_list(max_pt, ndim, param_conv=float,
keep_none=True)
shape = normalized_scalar_param_list(shape, ndim, param_conv=safe_int_conv,
keep_none=True)
cell_sides = normalized_scalar_param_list(cell_sides, ndim,
param_conv=float, keep_none=True)
nodes_on_bdry = normalized_nodes_on_bdry(nodes_on_bdry, ndim)
# Calculate the missing parameters in min_pt, max_pt, shape
for i, (xmin, xmax, n, dx, on_bdry) in enumerate(
zip(min_pt, max_pt, shape, cell_sides, nodes_on_bdry)):
num_params = sum(p is not None for p in (xmin, xmax, n, dx))
if num_params < 3:
raise ValueError('in axis {}: expected at least 3 of the '
'parameters `min_pt`, `max_pt`, `shape`, '
'`cell_sides`, got {}'
''.format(i, num_params))
# Unpack the tuple if possible, else use bool globally for this axis
try:
bdry_l, bdry_r = on_bdry
except TypeError:
bdry_l = bdry_r = on_bdry
# For each node on the boundary, we subtract 1/2 from the number of
# full cells between min_pt and max_pt.
if xmin is None:
min_pt[i] = xmax - (n - sum([bdry_l, bdry_r]) / 2.0) * dx
elif xmax is None:
max_pt[i] = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
elif n is None:
# Here we add to n since (e-b)/s gives the reduced number of cells.
n_calc = (xmax - xmin) / dx + sum([bdry_l, bdry_r]) / 2.0
n_round = int(round(n_calc))
if abs(n_calc - n_round) > 1e-5:
raise ValueError('in axis {}: calculated number of nodes '
'{} = ({} - {}) / {} too far from integer'
''.format(i, n_calc, xmax, xmin, dx))
shape[i] = n_round
elif dx is None:
pass
else:
xmax_calc = xmin + (n - sum([bdry_l, bdry_r]) / 2.0) * dx
if not np.isclose(xmax, xmax_calc):
raise ValueError('in axis {}: calculated endpoint '
'{} = {} + {} * {} too far from given '
'endpoint {}.'
''.format(i, xmax_calc, xmin, n, dx, xmax))
return uniform_partition_fromintv(
IntervalProd(min_pt, max_pt), shape, nodes_on_bdry) | [
"def",
"uniform_partition",
"(",
"min_pt",
"=",
"None",
",",
"max_pt",
"=",
"None",
",",
"shape",
"=",
"None",
",",
"cell_sides",
"=",
"None",
",",
"nodes_on_bdry",
"=",
"False",
")",
":",
"# Normalize partition parameters",
"# np.size(None) == 1, so that would scre... | 42.83432 | 22.147929 |
def has_active_condition(self, condition, instances):
"""
Given a list of instances, and the condition active for
this switch, returns a boolean representing if the
conditional is met, including a non-instance default.
"""
return_value = None
for instance in instances + [None]:
if not self.can_execute(instance):
continue
result = self.is_active(instance, condition)
if result is False:
return False
elif result is True:
return_value = True
return return_value | [
"def",
"has_active_condition",
"(",
"self",
",",
"condition",
",",
"instances",
")",
":",
"return_value",
"=",
"None",
"for",
"instance",
"in",
"instances",
"+",
"[",
"None",
"]",
":",
"if",
"not",
"self",
".",
"can_execute",
"(",
"instance",
")",
":",
"... | 37.875 | 11 |
def _read_master_branch_resource(self, fn, is_json=False):
"""This will force the current branch to master! """
with self._master_branch_repo_lock:
ga = self._create_git_action_for_global_resource()
with ga.lock():
ga.checkout_master()
if os.path.exists(fn):
if is_json:
return read_as_json(fn)
with codecs.open(fn, 'rU', encoding='utf-8') as f:
ret = f.read()
return ret
return None | [
"def",
"_read_master_branch_resource",
"(",
"self",
",",
"fn",
",",
"is_json",
"=",
"False",
")",
":",
"with",
"self",
".",
"_master_branch_repo_lock",
":",
"ga",
"=",
"self",
".",
"_create_git_action_for_global_resource",
"(",
")",
"with",
"ga",
".",
"lock",
... | 43.615385 | 10.230769 |
def select_action(self, pos1, pos2, ctrl, shift):
"""Return a `sc_pb.Action` with the selection filled."""
assert pos1.surf.surf_type == pos2.surf.surf_type
assert pos1.surf.world_to_obs == pos2.surf.world_to_obs
action = sc_pb.Action()
action_spatial = pos1.action_spatial(action)
if pos1.world_pos == pos2.world_pos: # select a point
select = action_spatial.unit_selection_point
pos1.obs_pos.assign_to(select.selection_screen_coord)
mod = sc_spatial.ActionSpatialUnitSelectionPoint
if ctrl:
select.type = mod.AddAllType if shift else mod.AllType
else:
select.type = mod.Toggle if shift else mod.Select
else:
select = action_spatial.unit_selection_rect
rect = select.selection_screen_coord.add()
pos1.obs_pos.assign_to(rect.p0)
pos2.obs_pos.assign_to(rect.p1)
select.selection_add = shift
# Clear the queued action if something will be selected. An alternative
# implementation may check whether the selection changed next frame.
units = self._units_in_area(point.Rect(pos1.world_pos, pos2.world_pos))
if units:
self.clear_queued_action()
return action | [
"def",
"select_action",
"(",
"self",
",",
"pos1",
",",
"pos2",
",",
"ctrl",
",",
"shift",
")",
":",
"assert",
"pos1",
".",
"surf",
".",
"surf_type",
"==",
"pos2",
".",
"surf",
".",
"surf_type",
"assert",
"pos1",
".",
"surf",
".",
"world_to_obs",
"==",
... | 38.633333 | 19.9 |
def local_transform_runner(transform_py_name, value, fields, params, config, message_writer=message):
"""
Internal API: The local transform runner is responsible for executing the local transform.
Parameters:
transform - The name or module of the transform to execute (i.e sploitego.transforms.whatismyip).
value - The input entity value.
fields - A dict of the field names and their respective values.
params - The extra parameters passed into the transform via the command line.
config - The Canari configuration object.
message_writer - The message writing function used to write the MaltegoTransformResponseMessage to stdout. This is
can either be the console_message or message functions. Alternatively, the message_writer function
can be any callable object that accepts the MaltegoTransformResponseMessage as the first parameter
and writes the output to a destination of your choosing.
This helper function is only used by the run-transform, debug-transform, and dispatcher commands.
"""
transform = None
try:
transform = load_object(transform_py_name)()
if os.name == 'posix' and transform.superuser and os.geteuid():
rc = sudo(sys.argv)
if rc == 1:
message_writer(MaltegoTransformResponseMessage() + UIMessage('User cancelled transform.'))
elif rc == 2:
message_writer(MaltegoTransformResponseMessage() + UIMessage('Too many incorrect password attempts.'))
elif rc:
message_writer(MaltegoTransformResponseMessage() + UIMessage('Unknown error occurred.'))
sys.exit(rc)
on_terminate(transform.on_terminate)
request = MaltegoTransformRequestMessage(
parameters={'canari.local.arguments': Field(name='canari.local.arguments', value=params)}
)
request._entities = [to_entity(transform.input_type, value, fields)]
request.limits = Limits(soft=10000)
msg = transform.do_transform(
request,
MaltegoTransformResponseMessage(),
config
)
if isinstance(msg, MaltegoTransformResponseMessage):
message_writer(msg)
elif isinstance(msg, string_types):
raise MaltegoException(msg)
else:
raise MaltegoException('Could not resolve message type returned by transform.')
except MaltegoException as me:
croak(me, message_writer)
except KeyboardInterrupt:
# Ensure that the keyboard interrupt handler does not execute twice if a transform is sudo'd
if transform and (transform.superuser and not os.geteuid()) or (not transform.superuser and os.geteuid()):
transform.on_terminate()
except Exception:
croak(traceback.format_exc(), message_writer) | [
"def",
"local_transform_runner",
"(",
"transform_py_name",
",",
"value",
",",
"fields",
",",
"params",
",",
"config",
",",
"message_writer",
"=",
"message",
")",
":",
"transform",
"=",
"None",
"try",
":",
"transform",
"=",
"load_object",
"(",
"transform_py_name"... | 47.147541 | 31.344262 |
def _improve_class_docs(app, cls, lines):
"""Improve the documentation of a class."""
if issubclass(cls, models.Model):
_add_model_fields_as_params(app, cls, lines)
elif issubclass(cls, forms.Form):
_add_form_fields(cls, lines) | [
"def",
"_improve_class_docs",
"(",
"app",
",",
"cls",
",",
"lines",
")",
":",
"if",
"issubclass",
"(",
"cls",
",",
"models",
".",
"Model",
")",
":",
"_add_model_fields_as_params",
"(",
"app",
",",
"cls",
",",
"lines",
")",
"elif",
"issubclass",
"(",
"cls... | 41.666667 | 3.833333 |
def __generate_file(self, template_filename, context, generated_filename, force=False):
"""
Generate **one** (source code) file from a template.
The file is **only** generated if needed, i.e. if ``force`` is set to ``True`` or if generated file is older
than the template file. The generated file is written in the same directory as the template file.
Args:
template_filename (str): **Absolute** filename of a template file to translate.
context (dict): Dictionary with ``(key, val)`` replacements.
generated_filename (str): **Absolute** filename of the generated file filename.
force (bool): If set to ``True``, file is generated no matter what.
"""
# TODO: maybe avoid reading same template file again and again... i.e. parse it once and generate all needed files without reparsing the template.
# test if file is non existing or needs to be regenerated
if force or (not os.path.isfile(generated_filename) or os.stat(template_filename).st_mtime - os.stat(generated_filename).st_mtime > 1):
self.log_info(' Parsing file %s' % template_filename)
code_generated = self.__jinja2_environment.get_template(template_filename).render(context)
with open(generated_filename, 'w') as f:
self.log_info(' Generating file %s' % generated_filename)
f.write(code_generated.encode('utf8')) | [
"def",
"__generate_file",
"(",
"self",
",",
"template_filename",
",",
"context",
",",
"generated_filename",
",",
"force",
"=",
"False",
")",
":",
"# TODO: maybe avoid reading same template file again and again... i.e. parse it once and generate all needed files without reparsing the ... | 60.333333 | 41.666667 |
def hsvToRGB(h, s, v):
    """
    Convert an HSV (hue, saturation, value) color to RGB (red, green, blue).

    **Parameters**
        **h** : float
            Hue, a number in [0, 360].
        **s** : float
            Saturation, a number in [0, 1].
        **v** : float
            Value, a number in [0, 1].

    **Returns**
        **r**, **g**, **b** : float
            Red, green and blue components, each a number in [0, 1].
    """
    # Which 60-degree sector of the color wheel the hue falls in, and how far
    # into that sector it is.
    sector = int(math.floor(h / 60.0)) % 6
    frac = (h / 60.0) - math.floor(h / 60.0)
    low = v * (1.0 - s)                    # p: lowest component
    mid_down = v * (1.0 - frac * s)        # q: descending intermediate
    mid_up = v * (1.0 - (1.0 - frac) * s)  # t: ascending intermediate
    if sector == 0:
        return (v, mid_up, low)
    elif sector == 1:
        return (mid_down, v, low)
    elif sector == 2:
        return (low, v, mid_up)
    elif sector == 3:
        return (low, mid_down, v)
    elif sector == 4:
        return (mid_up, low, v)
    return (v, low, mid_down)
"def",
"hsvToRGB",
"(",
"h",
",",
"s",
",",
"v",
")",
":",
"hi",
"=",
"math",
".",
"floor",
"(",
"h",
"/",
"60.0",
")",
"%",
"6",
"f",
"=",
"(",
"h",
"/",
"60.0",
")",
"-",
"math",
".",
"floor",
"(",
"h",
"/",
"60.0",
")",
"p",
"=",
"v"... | 19.818182 | 24 |
def copy_file(stream, target, maxread=-1, buffer_size=2**16):
    """Read from :stream and write to :target until :maxread or EOF.

    :param stream: readable file-like source object.
    :param target: writable file-like destination object.
    :param maxread: maximum number of bytes to copy; negative means until EOF.
    :param buffer_size: chunk size for each read (default 64 KiB).
    :returns: the total number of bytes copied.
    """
    # BUG FIX: the default buffer size was ``2*16`` (32 bytes), an obvious typo
    # for ``2**16`` (65536); copying in 32-byte chunks was needlessly slow.
    size, read = 0, stream.read
    while True:
        to_read = buffer_size if maxread < 0 else min(buffer_size, maxread - size)
        part = read(to_read)
        if not part:
            return size
        target.write(part)
        size += len(part)
"def",
"copy_file",
"(",
"stream",
",",
"target",
",",
"maxread",
"=",
"-",
"1",
",",
"buffer_size",
"=",
"2",
"*",
"16",
")",
":",
"size",
",",
"read",
"=",
"0",
",",
"stream",
".",
"read",
"while",
"1",
":",
"to_read",
"=",
"buffer_size",
"if",
... | 37.8 | 20.8 |
def line_shortening_rank(candidate, indent_word, max_line_length,
                         experimental=False):
    """Return rank of candidate.

    This is for sorting candidates: a lower rank indicates a more
    desirable line-shortening reformatting.

    :param candidate: candidate source text (possibly multi-line).
    :param indent_word: one indentation unit (e.g. four spaces).
    :param max_line_length: maximum allowed line length.
    :param experimental: when True, long backslash continuations are
        penalized much harder (100 instead of 1).
    :returns: a non-negative integer penalty score.
    """
    if not candidate.strip():
        return 0
    rank = 0
    lines = candidate.rstrip().split('\n')
    offset = 0
    # Estimate the hanging-indent offset from the first unmatched opening
    # bracket on the first line (skipped for comments and lines that end
    # with an opening bracket).
    if (
        not lines[0].lstrip().startswith('#') and
        lines[0].rstrip()[-1] not in '([{'
    ):
        for (opening, closing) in ('()', '[]', '{}'):
            # Don't penalize empty containers that aren't split up. Things like
            # this "foo(\n    )" aren't particularly good.
            opening_loc = lines[0].find(opening)
            closing_loc = lines[0].find(closing)
            if opening_loc >= 0:
                if closing_loc < 0 or closing_loc != opening_loc + 1:
                    offset = max(offset, 1 + opening_loc)
    current_longest = max(offset + len(x.strip()) for x in lines)
    # Heavily penalize any line still exceeding the limit, and mildly
    # penalize using more lines.
    rank += 4 * max(0, current_longest - max_line_length)
    rank += len(lines)
    # Too much variation in line length is ugly.
    rank += 2 * standard_deviation(len(line) for line in lines)
    bad_staring_symbol = {
        '(': ')',
        '[': ']',
        '{': '}'}.get(lines[0][-1])
    if len(lines) > 1:
        # Penalize a closing bracket immediately after the opening one.
        if (
            bad_staring_symbol and
            lines[1].lstrip().startswith(bad_staring_symbol)
        ):
            rank += 20
    for lineno, current_line in enumerate(lines):
        current_line = current_line.strip()
        if current_line.startswith('#'):
            continue
        for bad_start in ['.', '%', '+', '-', '/']:
            if current_line.startswith(bad_start):
                rank += 100
            # Do not tolerate operators on their own line.
            if current_line == bad_start:
                rank += 1000
        if (
            current_line.endswith(('.', '%', '+', '-', '/')) and
            "': " in current_line
        ):
            rank += 1000
        if current_line.endswith(('(', '[', '{', '.')):
            # Avoid lonely opening. They result in longer lines.
            if len(current_line) <= len(indent_word):
                rank += 100
            # Avoid the ugliness of ", (\n".
            if (
                current_line.endswith('(') and
                current_line[:-1].rstrip().endswith(',')
            ):
                rank += 100
            # Avoid the ugliness of "something[\n" and something[index][\n.
            if (
                current_line.endswith('[') and
                len(current_line) > 1 and
                (current_line[-2].isalnum() or current_line[-2] in ']')
            ):
                rank += 300
            # Also avoid the ugliness of "foo.\nbar"
            if current_line.endswith('.'):
                rank += 100
            if has_arithmetic_operator(current_line):
                rank += 100
        # Avoid breaking at unary operators.
        if re.match(r'.*[(\[{]\s*[\-\+~]$', current_line.rstrip('\\ ')):
            rank += 1000
        if re.match(r'.*lambda\s*\*$', current_line.rstrip('\\ ')):
            rank += 1000
        if current_line.endswith(('%', '(', '[', '{')):
            rank -= 20
        # Try to break list comprehensions at the "for".
        if current_line.startswith('for '):
            rank -= 50
        if current_line.endswith('\\'):
            # If a line ends in \-newline, it may be part of a
            # multiline string. In that case, we would like to know
            # how long that line is without the \-newline. If it's
            # longer than the maximum, or has comments, then we assume
            # that the \-newline is an okay candidate and only
            # penalize it a bit.
            total_len = len(current_line)
            lineno += 1
            while lineno < len(lines):
                total_len += len(lines[lineno])
                if lines[lineno].lstrip().startswith('#'):
                    total_len = max_line_length
                    break
                if not lines[lineno].endswith('\\'):
                    break
                lineno += 1
            if total_len < max_line_length:
                rank += 10
            else:
                rank += 100 if experimental else 1
        # Prefer breaking at commas rather than colon.
        if ',' in current_line and current_line.endswith(':'):
            rank += 10
        # Avoid splitting dictionaries between key and value.
        if current_line.endswith(':'):
            rank += 100
        rank += 10 * count_unbalanced_brackets(current_line)
    return max(0, rank)
"def",
"line_shortening_rank",
"(",
"candidate",
",",
"indent_word",
",",
"max_line_length",
",",
"experimental",
"=",
"False",
")",
":",
"if",
"not",
"candidate",
".",
"strip",
"(",
")",
":",
"return",
"0",
"rank",
"=",
"0",
"lines",
"=",
"candidate",
"."... | 30.739726 | 21.047945 |
def retrieve(self, id) :
"""
Retrieve a single contact
Returns a single contact available to the user, according to the unique contact ID provided
If the specified contact does not exist, the request will return an error
:calls: ``get /contacts/{id}``
:param int id: Unique identifier of a Contact.
:return: Dictionary that support attriubte-style access and represent Contact resource.
:rtype: dict
"""
_, _, contact = self.http_client.get("/contacts/{id}".format(id=id))
return contact | [
"def",
"retrieve",
"(",
"self",
",",
"id",
")",
":",
"_",
",",
"_",
",",
"contact",
"=",
"self",
".",
"http_client",
".",
"get",
"(",
"\"/contacts/{id}\"",
".",
"format",
"(",
"id",
"=",
"id",
")",
")",
"return",
"contact"
] | 37.6 | 25.866667 |
def prefilter_lines(self, lines, continue_prompt=False):
"""Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
"""
llines = lines.rstrip('\n').split('\n')
# We can get multiple lines in one shot, where multiline input 'blends'
# into one line, in cases like recalling from the readline history
# buffer. We need to make sure that in such cases, we correctly
# communicate downstream which line is first and which are continuation
# ones.
if len(llines) > 1:
out = '\n'.join([self.prefilter_line(line, lnum>0)
for lnum, line in enumerate(llines) ])
else:
out = self.prefilter_line(llines[0], continue_prompt)
return out | [
"def",
"prefilter_lines",
"(",
"self",
",",
"lines",
",",
"continue_prompt",
"=",
"False",
")",
":",
"llines",
"=",
"lines",
".",
"rstrip",
"(",
"'\\n'",
")",
".",
"split",
"(",
"'\\n'",
")",
"# We can get multiple lines in one shot, where multiline input 'blends'",... | 44.666667 | 25.541667 |
def _proc_asym_top(self):
"""
Handles assymetric top molecules, which cannot contain rotational
symmetry larger than 2.
"""
self._check_R2_axes_asym()
if len(self.rot_sym) == 0:
logger.debug("No rotation symmetries detected.")
self._proc_no_rot_sym()
elif len(self.rot_sym) == 3:
logger.debug("Dihedral group detected.")
self._proc_dihedral()
else:
logger.debug("Cyclic group detected.")
self._proc_cyclic() | [
"def",
"_proc_asym_top",
"(",
"self",
")",
":",
"self",
".",
"_check_R2_axes_asym",
"(",
")",
"if",
"len",
"(",
"self",
".",
"rot_sym",
")",
"==",
"0",
":",
"logger",
".",
"debug",
"(",
"\"No rotation symmetries detected.\"",
")",
"self",
".",
"_proc_no_rot_... | 35.266667 | 10.866667 |
def article_views(
self, project, articles,
access='all-access', agent='all-agents', granularity='daily',
start=None, end=None):
"""
Get pageview counts for one or more articles
See `<https://wikimedia.org/api/rest_v1/metrics/pageviews/?doc\\
#!/Pageviews_data/get_metrics_pageviews_per_article_project\\
_access_agent_article_granularity_start_end>`_
:Parameters:
project : str
a wikimedia project such as en.wikipedia or commons.wikimedia
articles : list(str) or a simple str if asking for a single article
access : str
access method (desktop, mobile-web, mobile-app, or by default, all-access)
agent : str
user agent type (spider, user, bot, or by default, all-agents)
end : str|date
can be a datetime.date object or string in YYYYMMDD format
default: today
start : str|date
can be a datetime.date object or string in YYYYMMDD format
default: 30 days before end date
granularity : str
can be daily or monthly
default: daily
:Returns:
a nested dictionary that looks like: {
start_date: {
article_1: view_count,
article_2: view_count,
...
article_n: view_count,
},
...
end_date: {
article_1: view_count,
article_2: view_count,
...
article_n: view_count,
}
}
The view_count will be None where no data is available, to distinguish from 0
TODO: probably doesn't handle unicode perfectly, look into it
"""
endDate = end or date.today()
if type(endDate) is not date:
endDate = parse_date(end)
startDate = start or endDate - timedelta(30)
if type(startDate) is not date:
startDate = parse_date(start)
# If the user passes in a string as "articles", convert to a list
if type(articles) is str:
articles = [articles]
articles = [a.replace(' ', '_') for a in articles]
articlesSafe = [quote(a, safe='') for a in articles]
urls = [
'/'.join([
endpoints['article'], project, access, agent, a, granularity,
format_date(startDate), format_date(endDate),
])
for a in articlesSafe
]
outputDays = timestamps_between(startDate, endDate, timedelta(days=1))
if granularity == 'monthly':
outputDays = list(set([month_from_day(day) for day in outputDays]))
output = defaultdict(dict, {
day: {a: None for a in articles} for day in outputDays
})
try:
results = self.get_concurrent(urls)
some_data_returned = False
for result in results:
if 'items' in result:
some_data_returned = True
else:
continue
for item in result['items']:
output[parse_date(item['timestamp'])][item['article']] = item['views']
if not some_data_returned:
raise Exception(
'The pageview API returned nothing useful at: {}'.format(urls)
)
return output
except:
print('ERROR while fetching and parsing ' + str(urls))
traceback.print_exc()
raise | [
"def",
"article_views",
"(",
"self",
",",
"project",
",",
"articles",
",",
"access",
"=",
"'all-access'",
",",
"agent",
"=",
"'all-agents'",
",",
"granularity",
"=",
"'daily'",
",",
"start",
"=",
"None",
",",
"end",
"=",
"None",
")",
":",
"endDate",
"=",... | 37.183673 | 19.673469 |
def get_spn(unit):
    """Count the non-Chinese characters in a line of text.

    Keyword arguments:
    unit -- the text line

    Return:
    spn -- the number of special (non-Chinese) characters
    """
    # ``no_chinese`` is a module-level pattern matching non-Chinese characters.
    matches = re.findall(no_chinese, unit)
    if not matches:
        return 0
    return len(''.join(matches))
"def",
"get_spn",
"(",
"unit",
")",
":",
"spn",
"=",
"0",
"match_re",
"=",
"re",
".",
"findall",
"(",
"no_chinese",
",",
"unit",
")",
"if",
"match_re",
":",
"string",
"=",
"''",
".",
"join",
"(",
"match_re",
")",
"spn",
"=",
"len",
"(",
"string",
... | 21.285714 | 15.5 |
def parse_keyring(self, namespace=None):
"""Find settings from keyring."""
results = {}
if not keyring:
return results
if not namespace:
namespace = self.prog
for option in self._options:
secret = keyring.get_password(namespace, option.name)
if secret:
results[option.dest] = option.type(secret)
return results | [
"def",
"parse_keyring",
"(",
"self",
",",
"namespace",
"=",
"None",
")",
":",
"results",
"=",
"{",
"}",
"if",
"not",
"keyring",
":",
"return",
"results",
"if",
"not",
"namespace",
":",
"namespace",
"=",
"self",
".",
"prog",
"for",
"option",
"in",
"self... | 34.25 | 13 |
def schema(self):
"""
The generated budget data package schema for this resource.
If the resource has any fields that do not conform to the
provided specification this will raise a
NotABudgetDataPackageException.
"""
if self.headers is None:
raise exceptions.NoResourceLoadedException(
'Resource must be loaded to find schema')
try:
fields = self.specification.get('fields', {})
parsed = {
'primaryKey': 'id',
'fields': [{
'name': header,
'type': fields[header]['type'],
'description': fields[header]['description']
} for header in self.headers]
}
except KeyError:
raise exceptions.NotABudgetDataPackageException(
'Includes other fields than the Budget Data Package fields')
return parsed | [
"def",
"schema",
"(",
"self",
")",
":",
"if",
"self",
".",
"headers",
"is",
"None",
":",
"raise",
"exceptions",
".",
"NoResourceLoadedException",
"(",
"'Resource must be loaded to find schema'",
")",
"try",
":",
"fields",
"=",
"self",
".",
"specification",
".",
... | 38.76 | 16.6 |
def getImage(self):
"""Returns the project image when available."""
value = self.__getNone(self.__dataItem['image']['url'])
if value == None:
return None
else:
return Sitools2Abstract.getBaseUrl(self) + self.__dataItem['image']['url'] | [
"def",
"getImage",
"(",
"self",
")",
":",
"value",
"=",
"self",
".",
"__getNone",
"(",
"self",
".",
"__dataItem",
"[",
"'image'",
"]",
"[",
"'url'",
"]",
")",
"if",
"value",
"==",
"None",
":",
"return",
"None",
"else",
":",
"return",
"Sitools2Abstract"... | 40.571429 | 21.285714 |
def _get_rating(self, entry):
"""Get the rating and share for a specific row"""
r_info = ''
for string in entry[2].strings:
r_info += string
rating, share = r_info.split('/')
return (rating, share.strip('*')) | [
"def",
"_get_rating",
"(",
"self",
",",
"entry",
")",
":",
"r_info",
"=",
"''",
"for",
"string",
"in",
"entry",
"[",
"2",
"]",
".",
"strings",
":",
"r_info",
"+=",
"string",
"rating",
",",
"share",
"=",
"r_info",
".",
"split",
"(",
"'/'",
")",
"ret... | 36.285714 | 6.714286 |
def get_ctm(self):
"""Copies the scaled font’s font current transform matrix.
Note that the translation offsets ``(x0, y0)`` of the CTM
are ignored by :class:`ScaledFont`.
So, the matrix this method returns always has 0 as ``x0`` and ``y0``.
:returns: A new :class:`Matrix` object.
"""
matrix = Matrix()
cairo.cairo_scaled_font_get_ctm(self._pointer, matrix._pointer)
self._check_status()
return matrix | [
"def",
"get_ctm",
"(",
"self",
")",
":",
"matrix",
"=",
"Matrix",
"(",
")",
"cairo",
".",
"cairo_scaled_font_get_ctm",
"(",
"self",
".",
"_pointer",
",",
"matrix",
".",
"_pointer",
")",
"self",
".",
"_check_status",
"(",
")",
"return",
"matrix"
] | 33.714286 | 20.785714 |
def save_json(py_obj, json_path):
    """Serialize a native object to JSON and save it normalized, pretty printed to a
    file.

    The JSON string is normalized by sorting any dictionary keys.

    Args:
        py_obj: object
            Any object that can be represented in JSON. Some types, such as datetimes,
            are automatically converted to strings.
        json_path: str
            File path to which to write the JSON file. E.g.: The path must exist. The
            filename will normally end with ".json".

    See Also:
        ToJsonCompatibleTypes()
    """
    serialized = serialize_to_normalized_pretty_json(py_obj)
    with open(json_path, 'w', encoding='utf-8') as json_file:
        json_file.write(serialized)
"def",
"save_json",
"(",
"py_obj",
",",
"json_path",
")",
":",
"with",
"open",
"(",
"json_path",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"serialize_to_normalized_pretty_json",
"(",
"py_obj",
")",
")"
] | 31.380952 | 23.904762 |
def error_map(func):
    """Wrap exceptions raised by requests.

    .. py:decorator:: error_map
    """
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except exceptions.RequestException as err:
            # Re-raise as the library-specific exception, carrying along the
            # original response/request objects when present.
            raise TVDBRequestException(
                err,
                response=getattr(err, 'response', None),
                request=getattr(err, 'request', None))
        return result
    return wrapper
"def",
"error_map",
"(",
"func",
")",
":",
"@",
"six",
".",
"wraps",
"(",
"func",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"try",
":",
"return",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except"... | 29.466667 | 13.8 |
    def subarc(self, from_angle=None, to_angle=None):
        '''
        Creates a sub-arc from a given angle (or beginning of this arc) to a given angle (or end of this arc).
        Verifies that from_angle and to_angle are within the arc and properly ordered.
        If from_angle is None, start of this arc is used instead.
        If to_angle is None, end of this arc is used instead.
        Angles are given in degrees.
        >>> a = Arc((0, 0), 1, 0, 360, True)
        >>> a.subarc(None, None)
        Arc([0.000, 0.000], 1.000, 0.000, 360.000, True, degrees=360.000)
        >>> a.subarc(360, None)
        Arc([0.000, 0.000], 1.000, 360.000, 360.000, True, degrees=0.000)
        >>> a.subarc(0, None)
        Arc([0.000, 0.000], 1.000, 0.000, 360.000, True, degrees=360.000)
        >>> a.subarc(-10, None)
        Arc([0.000, 0.000], 1.000, 350.000, 360.000, True, degrees=10.000)
        >>> a.subarc(None, -10)
        Arc([0.000, 0.000], 1.000, 0.000, 350.000, True, degrees=350.000)
        >>> a.subarc(1, 359).subarc(2, 358).subarc()
        Arc([0.000, 0.000], 1.000, 2.000, 358.000, True, degrees=356.000)
        '''
        # Default endpoints to this arc's own endpoints.
        if from_angle is None:
            from_angle = self.from_angle
        if to_angle is None:
            to_angle = self.to_angle
        cur_length = self.length_degrees()
        # Convert both endpoints to direction-signed offsets from this arc's
        # start, wrapped into [0, 360) -- except that exactly 360 is kept as-is
        # so a full circle is representable.
        d_new_from = self.sign * (from_angle - self.from_angle)
        if (d_new_from != 360.0):
            d_new_from = d_new_from % 360.0
        d_new_to = self.sign * (to_angle - self.from_angle)
        if (d_new_to != 360.0):
            d_new_to = d_new_to % 360.0
        # Gracefully handle numeric precision issues for zero-length arcs
        if abs(d_new_from - d_new_to) < tol:
            d_new_from = d_new_to
        # Validate ordering and containment within the current arc (``tol`` is
        # a module-level tolerance).
        if d_new_to < d_new_from:
            raise ValueError("Subarc to-angle must be smaller than from-angle.")
        if d_new_to > cur_length + tol:
            raise ValueError("Subarc to-angle must lie within the current arc.")
        return Arc(self.center, self.radius, self.from_angle + self.sign*d_new_from, self.from_angle + self.sign*d_new_to, self.direction)
"def",
"subarc",
"(",
"self",
",",
"from_angle",
"=",
"None",
",",
"to_angle",
"=",
"None",
")",
":",
"if",
"from_angle",
"is",
"None",
":",
"from_angle",
"=",
"self",
".",
"from_angle",
"if",
"to_angle",
"is",
"None",
":",
"to_angle",
"=",
"self",
"."... | 50.809524 | 21.761905 |
def recordings(self):
"""
Access the recordings
:returns: twilio.rest.video.v1.room.recording.RoomRecordingList
:rtype: twilio.rest.video.v1.room.recording.RoomRecordingList
"""
if self._recordings is None:
self._recordings = RoomRecordingList(self._version, room_sid=self._solution['sid'], )
return self._recordings | [
"def",
"recordings",
"(",
"self",
")",
":",
"if",
"self",
".",
"_recordings",
"is",
"None",
":",
"self",
".",
"_recordings",
"=",
"RoomRecordingList",
"(",
"self",
".",
"_version",
",",
"room_sid",
"=",
"self",
".",
"_solution",
"[",
"'sid'",
"]",
",",
... | 37.6 | 20 |
    def _finalize_stats(self, ipyclient):
        """Write final tree files and print result locations.

        Prints the path of the inferred tree, computes and prints bootstrap
        consensus information when ``self.params.nboots`` is set, draws an
        ASCII rendering of the tree for small data sets (< 20 samples), and
        prints documentation links.

        :param ipyclient: ipyparallel client, forwarded to
            ``_compute_tree_stats`` for the bootstrap statistics step.
        """
        ## print stats file location:
        #print(STATSOUT.format(opr(self.files.stats)))
        ## print finished tree information ---------------------
        print(FINALTREES.format(opr(self.trees.tree)))
        ## print bootstrap information --------------------------
        if self.params.nboots:
            ## get consensus, map values to tree edges, record stats file
            self._compute_tree_stats(ipyclient)
            ## print bootstrap info
            print(BOOTTREES.format(opr(self.trees.cons), opr(self.trees.boots)))
        ## print the ASCII tree only if its small
        if len(self.samples) < 20:
            if self.params.nboots:
                # With bootstraps: show the consensus tree with support values.
                wctre = ete3.Tree(self.trees.cons, format=0)
                wctre.ladderize()
                print(wctre.get_ascii(show_internal=True,
                                      attributes=["dist", "name"]))
                print("")
            else:
                # Without bootstraps: show the plain inferred tree.
                qtre = ete3.Tree(self.trees.tree, format=0)
                qtre.ladderize()
                #qtre = toytree.tree(self.trees.tree, format=0)
                #qtre.tree.unroot()
                print(qtre.get_ascii())
                print("")
        ## print PDF filename & tips -----------------------------
        docslink = "https://toytree.readthedocs.io/"
        citelink = "https://ipyrad.readthedocs.io/tetrad.html"
        print(LINKS.format(docslink, citelink))
"def",
"_finalize_stats",
"(",
"self",
",",
"ipyclient",
")",
":",
"## print stats file location:",
"#print(STATSOUT.format(opr(self.files.stats)))",
"## print finished tree information ---------------------",
"print",
"(",
"FINALTREES",
".",
"format",
"(",
"opr",
"(",
"self",
... | 41 | 18.111111 |
def cli(env, columns, sortby, volume_id):
    """List suitable replication datacenters for the given volume."""
    file_storage_manager = SoftLayer.FileStorageManager(env.client)
    datacenters = file_storage_manager.get_replication_locations(volume_id)
    if not datacenters:
        click.echo("No data centers compatible for replication.")
        return
    table = formatting.KeyValueTable(columns.columns)
    table.sortby = sortby
    for datacenter in datacenters:
        # Render empty cells as formatting.blank() placeholders.
        row = [cell or formatting.blank() for cell in columns.row(datacenter)]
        table.add_row(row)
    env.fout(table)
"def",
"cli",
"(",
"env",
",",
"columns",
",",
"sortby",
",",
"volume_id",
")",
":",
"file_storage_manager",
"=",
"SoftLayer",
".",
"FileStorageManager",
"(",
"env",
".",
"client",
")",
"legal_centers",
"=",
"file_storage_manager",
".",
"get_replication_locations"... | 35.388889 | 21.777778 |
  def _set_sample_rate_cpu(self, v, load=False):
    """
    Setter method for sample_rate_cpu, mapped from YANG variable /resource_monitor/cpu/sample_rate_cpu (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_sample_rate_cpu is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_sample_rate_cpu() directly.

    :param v: value to set; coerced through its ``_utype`` when present.
    :param load: presumably True when populating from stored config -- TODO confirm
        against the pyangbind base class.
    :raises ValueError: if ``v`` cannot be coerced to the restricted uint32
        range ``1..20``.
    """
    # NOTE(review): this method looks auto-generated (pyangbind-style); keep
    # edits to comments only -- the long YANGDynClass call must match the
    # generated-type string in the error dict below.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20']}), is_leaf=True, yang_name="sample-rate-cpu", rest_name="sample-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sampling rate for CPU usage monitoring', u'hidden': u'debug', u'alt-name': u'sample-rate', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """sample_rate_cpu must be of a type compatible with uint32""",
          'defined-type': "uint32",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 20']}), is_leaf=True, yang_name="sample-rate-cpu", rest_name="sample-rate", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sampling rate for CPU usage monitoring', u'hidden': u'debug', u'alt-name': u'sample-rate', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-resource-monitor', defining_module='brocade-resource-monitor', yang_type='uint32', is_config=True)""",
        })
    self.__sample_rate_cpu = t
    if hasattr(self, '_set'):
      self._set()
"def",
"_set_sample_rate_cpu",
"(",
"self",
",",
"v",
",",
"load",
"=",
"False",
")",
":",
"if",
"hasattr",
"(",
"v",
",",
"\"_utype\"",
")",
":",
"v",
"=",
"v",
".",
"_utype",
"(",
"v",
")",
"try",
":",
"t",
"=",
"YANGDynClass",
"(",
"v",
",",
... | 95.136364 | 45.136364 |
def do_file(self, filename):
""" Read and execute a .dql file """
with open(filename, "r") as infile:
self._run_cmd(infile.read()) | [
"def",
"do_file",
"(",
"self",
",",
"filename",
")",
":",
"with",
"open",
"(",
"filename",
",",
"\"r\"",
")",
"as",
"infile",
":",
"self",
".",
"_run_cmd",
"(",
"infile",
".",
"read",
"(",
")",
")"
] | 38.75 | 3.75 |
def add_not_null(self, model, *names):
"""Add not null."""
for name in names:
field = model._meta.fields[name]
field.null = False
self.ops.append(self.migrator.add_not_null(model._meta.table_name, field.column_name))
return model | [
"def",
"add_not_null",
"(",
"self",
",",
"model",
",",
"*",
"names",
")",
":",
"for",
"name",
"in",
"names",
":",
"field",
"=",
"model",
".",
"_meta",
".",
"fields",
"[",
"name",
"]",
"field",
".",
"null",
"=",
"False",
"self",
".",
"ops",
".",
"... | 40.428571 | 15.428571 |
def create(self, quality_score, issue=values.unset):
"""
Create a new FeedbackInstance
:param unicode quality_score: The call quality expressed as an integer from 1 to 5
:param FeedbackInstance.Issues issue: Issues experienced during the call
:returns: Newly created FeedbackInstance
:rtype: twilio.rest.api.v2010.account.call.feedback.FeedbackInstance
"""
return self._proxy.create(quality_score, issue=issue, ) | [
"def",
"create",
"(",
"self",
",",
"quality_score",
",",
"issue",
"=",
"values",
".",
"unset",
")",
":",
"return",
"self",
".",
"_proxy",
".",
"create",
"(",
"quality_score",
",",
"issue",
"=",
"issue",
",",
")"
] | 42.545455 | 22.909091 |
    def read_i2c_block_data(self, addr, cmd, len=32):
        """read_i2c_block_data(addr, cmd, len=32) -> results

        Perform I2C Block Read transaction.

        :param addr: I2C device address.
        :param cmd: command/register byte to send before reading.
        :param len: number of bytes to read (default 32).
        :returns: list of the bytes read.
        :raises IOError: if the underlying SMBus access fails.
        """
        # NOTE(review): the parameter name ``len`` shadows the builtin but is
        # part of the public signature (callers may pass ``len=...``), so it
        # is left unchanged.
        self._set_addr(addr)
        data = ffi.new("union i2c_smbus_data *")
        # The first byte of the block buffer carries the requested length.
        data.block[0] = len
        if len == 32:
            arg = SMBUS.I2C_SMBUS_I2C_BLOCK_BROKEN
        else:
            arg = SMBUS.I2C_SMBUS_I2C_BLOCK_DATA
        if SMBUS.i2c_smbus_access(self._fd,
                                  int2byte(SMBUS.I2C_SMBUS_READ),
                                  ffi.cast("__u8", cmd),
                                  arg, data):
            raise IOError(ffi.errno)
        return smbus_data_to_list(data)
"def",
"read_i2c_block_data",
"(",
"self",
",",
"addr",
",",
"cmd",
",",
"len",
"=",
"32",
")",
":",
"self",
".",
"_set_addr",
"(",
"addr",
")",
"data",
"=",
"ffi",
".",
"new",
"(",
"\"union i2c_smbus_data *\"",
")",
"data",
".",
"block",
"[",
"0",
"... | 37.888889 | 11.277778 |
def triads(key):
    """Return all the triads in key.

    Implemented using a cache: results are memoized per key in the
    module-level ``_triads_cache``.

    :param key: the key to build triads in (as accepted by ``keys.get_notes``).
    :return: list with one triad per note of the key.
    """
    # Idiom fix: ``dict.has_key`` is Python-2-only; ``in`` works everywhere.
    # A list comprehension replaces ``map(lambda ...)``, which would return a
    # lazy iterator (not a list) on Python 3.
    if key in _triads_cache:
        return _triads_cache[key]
    res = [triad(note, key) for note in keys.get_notes(key)]
    _triads_cache[key] = res
    return res
"def",
"triads",
"(",
"key",
")",
":",
"if",
"_triads_cache",
".",
"has_key",
"(",
"key",
")",
":",
"return",
"_triads_cache",
"[",
"key",
"]",
"res",
"=",
"map",
"(",
"lambda",
"x",
":",
"triad",
"(",
"x",
",",
"key",
")",
",",
"keys",
".",
"get... | 25.7 | 14.4 |
    def read_stream(cls, stream, validate=True):
        """
        Read torrent metainfo from file-like object

        :param stream: Readable file-like object (e.g. :class:`io.BytesIO`)
        :param bool validate: Whether to run :meth:`validate` on the new Torrent
            object

        :raises ReadError: if reading from `stream` fails
        :raises ParseError: if `stream` does not produce a valid bencoded byte
            string
        :raises MetainfoError: if `validate` is `True` and the read metainfo is
            invalid

        :return: New Torrent object
        """
        try:
            # Bounded read guards against absurdly large inputs.
            content = stream.read(cls.MAX_TORRENT_FILE_SIZE)
        except OSError as e:
            raise error.ReadError(e.errno)
        else:
            try:
                metainfo_enc = bdecode(content)
            except BTFailure as e:
                # The bencode library's error detail is intentionally dropped;
                # callers only get a generic ParseError.
                raise error.ParseError()
            # Structural sanity checks on the still-bytes-keyed metainfo.
            if validate:
                if b'info' not in metainfo_enc:
                    raise error.MetainfoError("Missing 'info'")
                elif not isinstance(metainfo_enc[b'info'], abc.Mapping):
                    raise error.MetainfoError("'info' is not a dictionary")
                elif b'pieces' not in metainfo_enc[b'info']:
                    raise error.MetainfoError("Missing 'pieces' in ['info']")
            # Extract 'pieces' from metainfo because it's the only byte string
            # that isn't supposed to be decoded to unicode.
            if b'info' in metainfo_enc and b'pieces' in metainfo_enc[b'info']:
                pieces = metainfo_enc[b'info'].pop(b'pieces')
                metainfo = utils.decode_dict(metainfo_enc)
                metainfo['info']['pieces'] = pieces
            else:
                metainfo = utils.decode_dict(metainfo_enc)
            torrent = cls()
            torrent._metainfo = metainfo
            # Convert some values from official types to something nicer
            # (e.g. int -> datetime)
            for attr in ('creation_date', 'private'):
                setattr(torrent, attr, getattr(torrent, attr))
            # Auto-set 'include_md5'
            info = torrent.metainfo['info']
            torrent.include_md5 = ('length' in info and 'md5sum' in info) or \
                                  ('files' in info and all('md5sum' in fileinfo
                                                           for fileinfo in info['files']))
            if validate:
                torrent.validate()
            return torrent
return torrent | [
"def",
"read_stream",
"(",
"cls",
",",
"stream",
",",
"validate",
"=",
"True",
")",
":",
"try",
":",
"content",
"=",
"stream",
".",
"read",
"(",
"cls",
".",
"MAX_TORRENT_FILE_SIZE",
")",
"except",
"OSError",
"as",
"e",
":",
"raise",
"error",
".",
"Read... | 40.098361 | 22.786885 |
def defadj(self, singular, plural):
"""
Set the adjective plural of singular to plural.
"""
self.checkpat(singular)
self.checkpatplural(plural)
self.pl_adj_user_defined.extend((singular, plural))
return 1 | [
"def",
"defadj",
"(",
"self",
",",
"singular",
",",
"plural",
")",
":",
"self",
".",
"checkpat",
"(",
"singular",
")",
"self",
".",
"checkpatplural",
"(",
"plural",
")",
"self",
".",
"pl_adj_user_defined",
".",
"extend",
"(",
"(",
"singular",
",",
"plura... | 28.111111 | 13 |
def wallet_frontiers(self, wallet):
"""
Returns a list of pairs of account and block hash representing the
head block starting for accounts from **wallet**
:param wallet: Wallet to return frontiers for
:type wallet: str
:raises: :py:exc:`nano.rpc.RPCException`
>>> rpc.wallet_frontiers(
... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
... )
{
"xrb_3e3j5tkog48pnny9dmfzj1r16pg8t1e76dz5tmac6iq689wyjfpi00000000": "000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F"
}
"""
wallet = self._process_value(wallet, 'wallet')
payload = {"wallet": wallet}
resp = self.call('wallet_frontiers', payload)
return resp.get('frontiers') or {} | [
"def",
"wallet_frontiers",
"(",
"self",
",",
"wallet",
")",
":",
"wallet",
"=",
"self",
".",
"_process_value",
"(",
"wallet",
",",
"'wallet'",
")",
"payload",
"=",
"{",
"\"wallet\"",
":",
"wallet",
"}",
"resp",
"=",
"self",
".",
"call",
"(",
"'wallet_fro... | 30.653846 | 26.730769 |
def _get_hosts_from_names(self, names):
""" validate hostnames from a list of names
"""
result = set()
hosts = map(lambda x: x.strip(), names.split(','))
for h in hosts:
if valid_hostname(h.split(':')[0]):
result.add(h if ':' in h else '%s:%d' % (h, self.PORT))
else:
raise conferr('Invalid hostname: %s' % h.split(':')[0])
return list(result) | [
"def",
"_get_hosts_from_names",
"(",
"self",
",",
"names",
")",
":",
"result",
"=",
"set",
"(",
")",
"hosts",
"=",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"strip",
"(",
")",
",",
"names",
".",
"split",
"(",
"','",
")",
")",
"for",
"h",
"in",
... | 39.727273 | 14.454545 |
def split_command(cmd, posix=None):
'''
- cmd is string list -> nothing to do
- cmd is string -> split it using shlex
:param cmd: string ('ls -l') or list of strings (['ls','-l'])
:rtype: string list
'''
if not isinstance(cmd, string_types):
# cmd is string list
pass
else:
if not PY3:
# cmd is string
# The shlex module currently does not support Unicode input (in
# 2.x)!
if isinstance(cmd, unicode):
try:
cmd = unicodedata.normalize(
'NFKD', cmd).encode('ascii', 'strict')
except UnicodeEncodeError:
raise EasyProcessUnicodeError('unicode command "%s" can not be processed.' % cmd +
'Use string list instead of string')
log.debug('unicode is normalized')
if posix is None:
posix = 'win' not in sys.platform
cmd = shlex.split(cmd, posix=posix)
return cmd | [
"def",
"split_command",
"(",
"cmd",
",",
"posix",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"cmd",
",",
"string_types",
")",
":",
"# cmd is string list",
"pass",
"else",
":",
"if",
"not",
"PY3",
":",
"# cmd is string",
"# The shlex module currentl... | 36.785714 | 19.285714 |
def cache_file(self, path, saltenv='base', cachedir=None, source_hash=None):
'''
Pull a file down from the file server and store it in the minion
file cache
'''
return self.get_url(
path, '', True, saltenv, cachedir=cachedir, source_hash=source_hash) | [
"def",
"cache_file",
"(",
"self",
",",
"path",
",",
"saltenv",
"=",
"'base'",
",",
"cachedir",
"=",
"None",
",",
"source_hash",
"=",
"None",
")",
":",
"return",
"self",
".",
"get_url",
"(",
"path",
",",
"''",
",",
"True",
",",
"saltenv",
",",
"cached... | 42.285714 | 28.571429 |
def discover(email, credentials):
"""
Performs the autodiscover dance and returns the primary SMTP address of the account and a Protocol on success. The
autodiscover and EWS server might not be the same, so we use a different Protocol to do the autodiscover request,
and return a hopefully-cached Protocol to the callee.
"""
log.debug('Attempting autodiscover on email %s', email)
if not isinstance(credentials, Credentials):
raise ValueError("'credentials' %r must be a Credentials instance" % credentials)
domain = get_domain(email)
# We may be using multiple different credentials and changing our minds on TLS verification. This key combination
# should be safe.
autodiscover_key = (domain, credentials)
# Use lock to guard against multiple threads competing to cache information
log.debug('Waiting for _autodiscover_cache_lock')
with _autodiscover_cache_lock:
# Don't recurse while holding the lock!
log.debug('_autodiscover_cache_lock acquired')
if autodiscover_key in _autodiscover_cache:
protocol = _autodiscover_cache[autodiscover_key]
if not isinstance(protocol, AutodiscoverProtocol):
raise ValueError('Unexpected autodiscover cache contents: %s' % protocol)
log.debug('Cache hit for domain %s credentials %s: %s', domain, credentials, protocol.server)
try:
# This is the main path when the cache is primed
return _autodiscover_quick(credentials=credentials, email=email, protocol=protocol)
except AutoDiscoverFailed:
# Autodiscover no longer works with this domain. Clear cache and try again after releasing the lock
del _autodiscover_cache[autodiscover_key]
except AutoDiscoverRedirect as e:
log.debug('%s redirects to %s', email, e.redirect_email)
if email.lower() == e.redirect_email.lower():
raise_from(AutoDiscoverCircularRedirect('Redirect to same email address: %s' % email), None)
# Start over with the new email address after releasing the lock
email = e.redirect_email
else:
log.debug('Cache miss for domain %s credentials %s', domain, credentials)
log.debug('Cache contents: %s', _autodiscover_cache)
try:
# This eventually fills the cache in _autodiscover_hostname
return _try_autodiscover(hostname=domain, credentials=credentials, email=email)
except AutoDiscoverRedirect as e:
if email.lower() == e.redirect_email.lower():
raise_from(AutoDiscoverCircularRedirect('Redirect to same email address: %s' % email), None)
log.debug('%s redirects to %s', email, e.redirect_email)
# Start over with the new email address after releasing the lock
email = e.redirect_email
log.debug('Released autodiscover_cache_lock')
# We fell out of the with statement, so either cache was filled by someone else, or autodiscover redirected us to
# another email address. Start over after releasing the lock.
return discover(email=email, credentials=credentials) | [
"def",
"discover",
"(",
"email",
",",
"credentials",
")",
":",
"log",
".",
"debug",
"(",
"'Attempting autodiscover on email %s'",
",",
"email",
")",
"if",
"not",
"isinstance",
"(",
"credentials",
",",
"Credentials",
")",
":",
"raise",
"ValueError",
"(",
"\"'cr... | 63.254902 | 29.215686 |
def setValue(self, newText):
"""Sets a text value (string) into the text field."""
newText = str(newText) # attempt to convert to string (might be int or float ...)
if self.text == newText:
return # nothing to change
self.text = newText # save the new text
textLines = self.text.splitlines()
nLines = len(textLines)
surfacesList = [] # build up a list of surfaces, one for each line of original text
actualWidth = 0 # will eventually be set the width of longest line
for line in textLines:
lineSurface = self.font.render(line, True, self.textColor)
surfacesList.append(lineSurface)
thisRect = lineSurface.get_rect()
if thisRect.width > actualWidth:
actualWidth = thisRect.width
heightOfOneLine = self.fontHeight
actualHeight = nLines * heightOfOneLine
self.rect = pygame.Rect(self.loc[0], self.loc[1], actualWidth, actualHeight)
# Create one larger surface, then blit all line surfaces into it
# Special flags are needed to set the background alpha as transparent
self.textImage = pygame.Surface((actualWidth, actualHeight), flags=SRCALPHA)
if self.backgroundColor is not None:
self.textImage.fill(self.backgroundColor)
thisLineTop = 0
for lineSurface in surfacesList:
if self.justified == 'left':
self.textImage.blit(lineSurface, (0, thisLineTop))
else:
thisSurfaceWidth = lineSurface.get_rect()[2] # element 2 is the width
if self.justified == 'center':
theLeft = (actualWidth - thisSurfaceWidth) / 2
elif self.justified == 'right': # right justified
theLeft = actualWidth - thisSurfaceWidth
else:
raise Exception('Value of justified was: ' + self.justified + '. Must be left, center, or right')
self.textImage.blit(lineSurface, (theLeft, thisLineTop))
thisLineTop = thisLineTop + heightOfOneLine
if self.useSpecifiedArea:
# Fit the text image into a user specified area, may truncate the text off left, right, or bottom
textRect = self.textImage.get_rect()
if self.userWidth is None:
theWidth = textRect.width
else:
theWidth = self.userWidth
if self.userHeight is None:
theHeight = textRect.height
else:
theHeight = self.userHeight
# Create a surface that is the size that the user asked for
userSizedImage = pygame.Surface((theWidth, theHeight), flags=SRCALPHA)
self.rect = pygame.Rect(self.loc[0], self.loc[1], theWidth, theHeight)
if self.backgroundColor is not None:
userSizedImage.fill(self.backgroundColor)
# Figure out the appropriate left edge within the userSizedImage
if self.justified == 'left':
theLeft = 0
elif self.justified == 'center':
theLeft = (theWidth - textRect.width) / 2
else: # right justified
theLeft = theWidth - textRect.width
# Copy the appropriate part from the text image into the user sized image
# Then re-name it to the textImage so it can be drawn later
userSizedImage.blit(self.textImage, (theLeft, 0))
self.textImage = userSizedImage
self.textImage = pygame.Surface.convert_alpha(self.textImage) | [
"def",
"setValue",
"(",
"self",
",",
"newText",
")",
":",
"newText",
"=",
"str",
"(",
"newText",
")",
"# attempt to convert to string (might be int or float ...)\r",
"if",
"self",
".",
"text",
"==",
"newText",
":",
"return",
"# nothing to change\r",
"self",
".",
... | 47.311688 | 22.246753 |
def operator(self, lhs, min_precedence):
"""Climb operator precedence as long as there are operators.
This function implements a basic precedence climbing parser to deal
with binary operators in a sane fashion. The outer loop will keep
spinning as long as the next token is an operator with a precedence
of at least 'min_precedence', parsing operands as atoms (which,
in turn, recurse into 'expression' which recurses back into 'operator').
This supports both left- and right-associativity. The only part of the
code that's not a regular precedence-climber deals with mixfix
operators. A mixfix operator in DottySQL consists of an infix part
and a suffix (they are still binary, they just have a terminator).
"""
# Spin as long as the next token is an operator of higher
# precedence. (This may not do anything, which is fine.)
while self.accept_operator(precedence=min_precedence):
operator = self.tokens.matched.operator
# If we're parsing a mixfix operator we can keep going until
# the suffix.
if operator.suffix:
rhs = self.expression()
self.tokens.expect(common_grammar.match_tokens(operator.suffix))
rhs.end = self.tokens.matched.end
elif operator.name == ".":
# The dot operator changes the meaning of RHS.
rhs = self.dot_rhs()
else:
# The right hand side is an atom, which might turn out to be
# an expression. Isn't recursion exciting?
rhs = self.atom()
# Keep going as long as the next token is an infix operator of
# higher precedence.
next_min_precedence = operator.precedence
if operator.assoc == "left":
next_min_precedence += 1
while self.tokens.match(grammar.infix):
if (self.tokens.matched.operator.precedence
< next_min_precedence):
break
rhs = self.operator(rhs,
self.tokens.matched.operator.precedence)
lhs = operator.handler(lhs, rhs, start=lhs.start, end=rhs.end,
source=self.original)
return lhs | [
"def",
"operator",
"(",
"self",
",",
"lhs",
",",
"min_precedence",
")",
":",
"# Spin as long as the next token is an operator of higher",
"# precedence. (This may not do anything, which is fine.)",
"while",
"self",
".",
"accept_operator",
"(",
"precedence",
"=",
"min_precedence... | 45.72549 | 22.45098 |
def kaczmarz(ops, x, rhs, niter, omega=1, projection=None, random=False,
callback=None, callback_loop='outer'):
r"""Optimized implementation of Kaczmarz's method.
Solves the inverse problem given by the set of equations::
A_n(x) = rhs_n
This is also known as the Landweber-Kaczmarz's method, since the method
coincides with the Landweber method for a single operator.
Parameters
----------
ops : sequence of `Operator`'s
Operators in the inverse problem. ``op[i].derivative(x).adjoint`` must
be well-defined for ``x`` in the operator domain and for all ``i``.
x : ``op.domain`` element
Element to which the result is written. Its initial value is
used as starting point of the iteration, and its values are
updated in each iteration step.
rhs : sequence of ``ops[i].range`` elements
Right-hand side of the equation defining the inverse problem.
niter : int
Number of iterations.
omega : positive float or sequence of positive floats, optional
Relaxation parameter in the iteration. If a single float is given the
same step is used for all operators, otherwise separate steps are used.
projection : callable, optional
Function that can be used to modify the iterates in each iteration,
for example enforcing positivity. The function should take one
argument and modify it in-place.
random : bool, optional
If `True`, the order of the operators is randomized in each iteration.
callback : callable, optional
Object executing code per iteration, e.g. plotting each iterate.
callback_loop : {'inner', 'outer'}
Whether the callback should be called in the inner or outer loop.
Notes
-----
This method calculates an approximate least-squares solution of
the inverse problem of the first kind
.. math::
\mathcal{A}_i (x) = y_i \quad 1 \leq i \leq n,
for a given :math:`y_n \in \mathcal{Y}_n`, i.e. an approximate
solution :math:`x^*` to
.. math::
\min_{x\in \mathcal{X}}
\sum_{i=1}^n \| \mathcal{A}_i(x) - y_i \|_{\mathcal{Y}_i}^2
for a (Frechet-) differentiable operator
:math:`\mathcal{A}: \mathcal{X} \to \mathcal{Y}` between Hilbert
spaces :math:`\mathcal{X}` and :math:`\mathcal{Y}`. The method
starts from an initial guess :math:`x_0` and uses the
iteration
.. math::
x_{k+1} = x_k - \omega_{[k]} \ \partial \mathcal{A}_{[k]}(x_k)^*
(\mathcal{A}_{[k]}(x_k) - y_{[k]}),
where :math:`\partial \mathcal{A}_{[k]}(x_k)` is the Frechet derivative
of :math:`\mathcal{A}_{[k]}` at :math:`x_k`, :math:`\omega_{[k]}` is a
relaxation parameter and :math:`[k] := k \text{ mod } n`.
For linear problems, a choice
:math:`0 < \omega_i < 2/\lVert \mathcal{A}_{i}^2\rVert` guarantees
convergence, where :math:`\|\mathcal{A}_{i}\|` stands for the
operator norm of :math:`\mathcal{A}_{i}`.
This implementation uses a minimum amount of memory copies by
applying re-usable temporaries and in-place evaluation.
The method is also described in a
`Wikipedia article
<https://en.wikipedia.org/wiki/Kaczmarz_method>`_. and in Natterer, F.
Mathematical Methods in Image Reconstruction, section 5.3.2.
See Also
--------
landweber
"""
domain = ops[0].domain
if any(domain != opi.domain for opi in ops):
raise ValueError('domains of `ops` are not all equal')
if x not in domain:
raise TypeError('`x` {!r} is not in the domain of `ops` {!r}'
''.format(x, domain))
if len(ops) != len(rhs):
raise ValueError('`number of `ops` {} does not match number of '
'`rhs` {}'.format(len(ops), len(rhs)))
omega = normalized_scalar_param_list(omega, len(ops), param_conv=float)
# Reusable elements in the range, one per type of space
ranges = [opi.range for opi in ops]
unique_ranges = set(ranges)
tmp_rans = {ran: ran.element() for ran in unique_ranges}
# Single reusable element in the domain
tmp_dom = domain.element()
# Iteratively find solution
for _ in range(niter):
if random:
rng = np.random.permutation(range(len(ops)))
else:
rng = range(len(ops))
for i in rng:
# Find residual
tmp_ran = tmp_rans[ops[i].range]
ops[i](x, out=tmp_ran)
tmp_ran -= rhs[i]
# Update x
ops[i].derivative(x).adjoint(tmp_ran, out=tmp_dom)
x.lincomb(1, x, -omega[i], tmp_dom)
if projection is not None:
projection(x)
if callback is not None and callback_loop == 'inner':
callback(x)
if callback is not None and callback_loop == 'outer':
callback(x) | [
"def",
"kaczmarz",
"(",
"ops",
",",
"x",
",",
"rhs",
",",
"niter",
",",
"omega",
"=",
"1",
",",
"projection",
"=",
"None",
",",
"random",
"=",
"False",
",",
"callback",
"=",
"None",
",",
"callback_loop",
"=",
"'outer'",
")",
":",
"domain",
"=",
"op... | 36.679389 | 23.480916 |
def create_guest_screen_info(self, display, status, primary, change_origin, origin_x, origin_y, width, height, bits_per_pixel):
"""Make a IGuestScreenInfo object with the provided parameters.
in display of type int
The number of the guest display.
in status of type :class:`GuestMonitorStatus`
@c True, if this guest screen is enabled,
@c False otherwise.
in primary of type bool
Whether this guest monitor must be primary.
in change_origin of type bool
@c True, if the origin of the guest screen should be changed,
@c False otherwise.
in origin_x of type int
The X origin of the guest screen.
in origin_y of type int
The Y origin of the guest screen.
in width of type int
The width of the guest screen.
in height of type int
The height of the guest screen.
in bits_per_pixel of type int
The number of bits per pixel of the guest screen.
return guest_screen_info of type :class:`IGuestScreenInfo`
The created object.
"""
if not isinstance(display, baseinteger):
raise TypeError("display can only be an instance of type baseinteger")
if not isinstance(status, GuestMonitorStatus):
raise TypeError("status can only be an instance of type GuestMonitorStatus")
if not isinstance(primary, bool):
raise TypeError("primary can only be an instance of type bool")
if not isinstance(change_origin, bool):
raise TypeError("change_origin can only be an instance of type bool")
if not isinstance(origin_x, baseinteger):
raise TypeError("origin_x can only be an instance of type baseinteger")
if not isinstance(origin_y, baseinteger):
raise TypeError("origin_y can only be an instance of type baseinteger")
if not isinstance(width, baseinteger):
raise TypeError("width can only be an instance of type baseinteger")
if not isinstance(height, baseinteger):
raise TypeError("height can only be an instance of type baseinteger")
if not isinstance(bits_per_pixel, baseinteger):
raise TypeError("bits_per_pixel can only be an instance of type baseinteger")
guest_screen_info = self._call("createGuestScreenInfo",
in_p=[display, status, primary, change_origin, origin_x, origin_y, width, height, bits_per_pixel])
guest_screen_info = IGuestScreenInfo(guest_screen_info)
return guest_screen_info | [
"def",
"create_guest_screen_info",
"(",
"self",
",",
"display",
",",
"status",
",",
"primary",
",",
"change_origin",
",",
"origin_x",
",",
"origin_y",
",",
"width",
",",
"height",
",",
"bits_per_pixel",
")",
":",
"if",
"not",
"isinstance",
"(",
"display",
",... | 44.672414 | 23.293103 |
def stage(self, name):
"""
Method for searching specific stage by it's name.
:param name: name of the stage to search.
:return: found stage or None.
:rtype: yagocd.resources.stage.StageInstance
"""
for stage in self.stages():
if stage.data.name == name:
return stage | [
"def",
"stage",
"(",
"self",
",",
"name",
")",
":",
"for",
"stage",
"in",
"self",
".",
"stages",
"(",
")",
":",
"if",
"stage",
".",
"data",
".",
"name",
"==",
"name",
":",
"return",
"stage"
] | 31 | 10.636364 |
def is_ip_valid(self, ip_to_check=None):
"""
Check if the given IP is a valid IPv4.
:param ip_to_check: The IP to test.
:type ip_to_check: str
:return: The validity of the IP.
:rtype: bool
.. note::
We only test IPv4 because for now we only them for now.
"""
# We initate our regex which will match for valid IPv4.
regex_ipv4 = r"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?|[0-9]{1,}\/[0-9]{1,})$" # pylint: disable=line-too-long
if ip_to_check:
# An element is localy given.
# We consider it as the element to test.
to_test = ip_to_check
elif self.element:
# An element is given globally.
# We consider it as the element to test.
to_test = self.element
else:
# An element is not localy given.
# We consider the global element to test as the element to test.
to_test = PyFunceble.INTERN["to_test"]
# We check if it passes our IPv4 regex.
# * True: It's a valid IPv4.
# * False: It's an invalid IPv4.
return Regex(to_test, regex_ipv4, return_data=False).match() | [
"def",
"is_ip_valid",
"(",
"self",
",",
"ip_to_check",
"=",
"None",
")",
":",
"# We initate our regex which will match for valid IPv4.",
"regex_ipv4",
"=",
"r\"^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-... | 35.27027 | 23.216216 |
def _spectrum(self, photon_energy):
"""Compute intrinsic synchrotron differential spectrum for energies in
``photon_energy``
Compute synchrotron for random magnetic field according to
approximation of Aharonian, Kelner, and Prosekin 2010, PhysRev D 82,
3002 (`arXiv:1006.1045 <http://arxiv.org/abs/1006.1045>`_).
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` instance
Photon energy array.
"""
outspecene = _validate_ene(photon_energy)
from scipy.special import cbrt
def Gtilde(x):
"""
AKP10 Eq. D7
Factor ~2 performance gain in using cbrt(x)**n vs x**(n/3.)
Invoking crbt only once reduced time by ~40%
"""
cb = cbrt(x)
gt1 = 1.808 * cb / np.sqrt(1 + 3.4 * cb ** 2.0)
gt2 = 1 + 2.210 * cb ** 2.0 + 0.347 * cb ** 4.0
gt3 = 1 + 1.353 * cb ** 2.0 + 0.217 * cb ** 4.0
return gt1 * (gt2 / gt3) * np.exp(-x)
log.debug("calc_sy: Starting synchrotron computation with AKB2010...")
# strip units, ensuring correct conversion
# astropy units do not convert correctly for gyroradius calculation
# when using cgs (SI is fine, see
# https://github.com/astropy/astropy/issues/1687)
CS1_0 = np.sqrt(3) * e.value ** 3 * self.B.to("G").value
CS1_1 = (
2
* np.pi
* m_e.cgs.value
* c.cgs.value ** 2
* hbar.cgs.value
* outspecene.to("erg").value
)
CS1 = CS1_0 / CS1_1
# Critical energy, erg
Ec = (
3
* e.value
* hbar.cgs.value
* self.B.to("G").value
* self._gam ** 2
)
Ec /= 2 * (m_e * c).cgs.value
EgEc = outspecene.to("erg").value / np.vstack(Ec)
dNdE = CS1 * Gtilde(EgEc)
# return units
spec = (
trapz_loglog(np.vstack(self._nelec) * dNdE, self._gam, axis=0)
/ u.s
/ u.erg
)
spec = spec.to("1/(s eV)")
return spec | [
"def",
"_spectrum",
"(",
"self",
",",
"photon_energy",
")",
":",
"outspecene",
"=",
"_validate_ene",
"(",
"photon_energy",
")",
"from",
"scipy",
".",
"special",
"import",
"cbrt",
"def",
"Gtilde",
"(",
"x",
")",
":",
"\"\"\"\n AKP10 Eq. D7\n\n ... | 30.724638 | 20.623188 |
def call(name, function, *args, **kwargs):
'''
Executes a Salt function inside a running container
.. versionadded:: 2016.11.0
The container does not need to have Salt installed, but Python is required.
name
Container name or ID
function
Salt execution module function
CLI Example:
.. code-block:: bash
salt myminion docker.call test.ping
salt myminion test.arg arg1 arg2 key1=val1
salt myminion dockerng.call compassionate_mirzakhani test.arg arg1 arg2 key1=val1
'''
# where to put the salt-thin
thin_dest_path = _generate_tmp_path()
mkdirp_thin_argv = ['mkdir', '-p', thin_dest_path]
# make thin_dest_path in the container
ret = run_all(name, subprocess.list2cmdline(mkdirp_thin_argv))
if ret['retcode'] != 0:
return {'result': False, 'comment': ret['stderr']}
if function is None:
raise CommandExecutionError('Missing function parameter')
# move salt into the container
thin_path = __utils__['thin.gen_thin'](
__opts__['cachedir'],
extra_mods=__salt__['config.option']("thin_extra_mods", ''),
so_mods=__salt__['config.option']("thin_so_mods", '')
)
ret = copy_to(name, thin_path, os.path.join(
thin_dest_path, os.path.basename(thin_path)))
# untar archive
untar_cmd = ["python", "-c", (
"import tarfile; "
"tarfile.open(\"{0}/{1}\").extractall(path=\"{0}\")"
).format(thin_dest_path, os.path.basename(thin_path))]
ret = run_all(name, subprocess.list2cmdline(untar_cmd))
if ret['retcode'] != 0:
return {'result': False, 'comment': ret['stderr']}
try:
salt_argv = [
'python{0}'.format(sys.version_info[0]),
os.path.join(thin_dest_path, 'salt-call'),
'--metadata',
'--local',
'--log-file', os.path.join(thin_dest_path, 'log'),
'--cachedir', os.path.join(thin_dest_path, 'cache'),
'--out', 'json',
'-l', 'quiet',
'--',
function
] + list(args) + ['{0}={1}'.format(key, value) for (key, value) in kwargs.items() if not key.startswith('__')]
ret = run_all(name, subprocess.list2cmdline(map(str, salt_argv)))
# python not found
if ret['retcode'] != 0:
raise CommandExecutionError(ret['stderr'])
# process "real" result in stdout
try:
data = __utils__['json.find_json'](ret['stdout'])
local = data.get('local', data)
if isinstance(local, dict):
if 'retcode' in local:
__context__['retcode'] = local['retcode']
return local.get('return', data)
except ValueError:
return {'result': False,
'comment': 'Can\'t parse container command output'}
finally:
# delete the thin dir so that it does not end in the image
rm_thin_argv = ['rm', '-rf', thin_dest_path]
run_all(name, subprocess.list2cmdline(rm_thin_argv)) | [
"def",
"call",
"(",
"name",
",",
"function",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# where to put the salt-thin",
"thin_dest_path",
"=",
"_generate_tmp_path",
"(",
")",
"mkdirp_thin_argv",
"=",
"[",
"'mkdir'",
",",
"'-p'",
",",
"thin_dest_path"... | 34.287356 | 21.528736 |
def _ParseJournalEntry(self, file_object, file_offset):
"""Parses a journal entry.
This method will generate an event per ENTRY object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry object relative to the start
of the file-like object.
Returns:
dict[str, objects]: entry items per key.
Raises:
ParseError: when an object offset is out of bounds.
"""
entry_object = self._ParseEntryObject(file_object, file_offset)
# The data is read separately for performance reasons.
entry_item_map = self._GetDataTypeMap('systemd_journal_entry_item')
file_offset += 64
data_end_offset = file_offset + entry_object.data_size - 64
fields = {'real_time': entry_object.real_time}
while file_offset < data_end_offset:
try:
entry_item, entry_item_data_size = self._ReadStructureFromFileObject(
file_object, file_offset, entry_item_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse entry item at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
file_offset += entry_item_data_size
if entry_item.object_offset < self._maximum_journal_file_offset:
raise errors.ParseError(
'object offset should be after hash tables ({0:d} < {1:d})'.format(
entry_item.object_offset, self._maximum_journal_file_offset))
event_data = self._ParseDataObject(file_object, entry_item.object_offset)
event_string = event_data.decode('utf-8')
key, value = event_string.split('=', 1)
fields[key] = value
return fields | [
"def",
"_ParseJournalEntry",
"(",
"self",
",",
"file_object",
",",
"file_offset",
")",
":",
"entry_object",
"=",
"self",
".",
"_ParseEntryObject",
"(",
"file_object",
",",
"file_offset",
")",
"# The data is read separately for performance reasons.",
"entry_item_map",
"=",... | 35.041667 | 24.104167 |
def update_free_shipping_by_id(cls, free_shipping_id, free_shipping, **kwargs):
"""Update FreeShipping
Update attributes of FreeShipping
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_free_shipping_by_id(free_shipping_id, free_shipping, async=True)
>>> result = thread.get()
:param async bool
:param str free_shipping_id: ID of freeShipping to update. (required)
:param FreeShipping free_shipping: Attributes of freeShipping to update. (required)
:return: FreeShipping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._update_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs)
else:
(data) = cls._update_free_shipping_by_id_with_http_info(free_shipping_id, free_shipping, **kwargs)
return data | [
"def",
"update_free_shipping_by_id",
"(",
"cls",
",",
"free_shipping_id",
",",
"free_shipping",
",",
"*",
"*",
"kwargs",
")",
":",
"kwargs",
"[",
"'_return_http_data_only'",
"]",
"=",
"True",
"if",
"kwargs",
".",
"get",
"(",
"'async'",
")",
":",
"return",
"c... | 48.909091 | 25.772727 |
def download_object(self, object_name):
"""
Download an object.
:param str object_name: The object to fetch.
"""
return self._client.download_object(
self._instance, self.name, object_name) | [
"def",
"download_object",
"(",
"self",
",",
"object_name",
")",
":",
"return",
"self",
".",
"_client",
".",
"download_object",
"(",
"self",
".",
"_instance",
",",
"self",
".",
"name",
",",
"object_name",
")"
] | 29.375 | 10.125 |
def plot_circular(widths, colors, curviness=0.2, mask=True, topo=None, topomaps=None, axes=None, order=None):
"""Circluar connectivity plot.
Topos are arranged in a circle, with arrows indicating connectivity
Parameters
----------
widths : float or array, shape (n_channels, n_channels)
Width of each arrow. Can be a scalar to assign the same width to all arrows.
colors : array, shape (n_channels, n_channels, 3) or (3)
RGB color values for each arrow or one RGB color value for all arrows.
curviness : float, optional
Factor that determines how much arrows tend to deviate from a straight line.
mask : array, dtype = bool, shape (n_channels, n_channels)
Enable or disable individual arrows
topo : :class:`~eegtopo.topoplot.Topoplot`
This object draws the topo plot
topomaps : array, shape = [w_pixels, h_pixels]
Scalp-projected map
axes : axis, optional
Axis to draw into. A new figure is created by default.
order : list of int
Rearrange channels.
Returns
-------
axes : Axes object
The axes into which was plotted.
"""
colors = np.asarray(colors)
widths = np.asarray(widths)
mask = np.asarray(mask)
colors = np.maximum(colors, 0)
colors = np.minimum(colors, 1)
if len(widths.shape) > 2:
[n, m] = widths.shape
elif len(colors.shape) > 3:
[n, m, c] = widths.shape
elif len(mask.shape) > 2:
[n, m] = mask.shape
else:
n = len(topomaps)
m = n
if not order:
order = list(range(n))
#a = np.asarray(a)
#[n, m] = a.shape
assert(n == m)
if axes is None:
fig = new_figure()
axes = fig.add_subplot(111)
axes.set_yticks([])
axes.set_xticks([])
axes.set_frame_on(False)
if len(colors.shape) < 3:
colors = np.tile(colors, (n,n,1))
if len(widths.shape) < 2:
widths = np.tile(widths, (n,n))
if len(mask.shape) < 2:
mask = np.tile(mask, (n,n))
np.fill_diagonal(mask, False)
if topo:
alpha = 1.5 if n < 10 else 1.25
r = alpha * topo.head_radius / (np.sin(np.pi/n))
else:
r = 1
for i in range(n):
if topo:
o = (r*np.sin(i*2*np.pi/n), r*np.cos(i*2*np.pi/n))
plot_topo(axes, topo, topomaps[order[i]], offset=o)
for i in range(n):
for j in range(n):
if not mask[order[i], order[j]]:
continue
a0 = j*2*np.pi/n
a1 = i*2*np.pi/n
x0, y0 = r*np.sin(a0), r*np.cos(a0)
x1, y1 = r*np.sin(a1), r*np.cos(a1)
ex = (x0 + x1) / 2
ey = (y0 + y1) / 2
en = np.sqrt(ex**2 + ey**2)
if en < 1e-10:
en = 0
ex = y0 / r
ey = -x0 / r
w = -r
else:
ex /= en
ey /= en
w = np.sqrt((x1-x0)**2 + (y1-y0)**2) / 2
if x0*y1-y0*x1 < 0:
w = -w
d = en*(1-curviness)
h = en-d
t = np.linspace(-1, 1, 100)
dist = (t**2+2*t+1)*w**2 + (t**4-2*t**2+1)*h**2
tmask1 = dist >= (1.4*topo.head_radius)**2
tmask2 = dist >= (1.2*topo.head_radius)**2
tmask = np.logical_and(tmask1, tmask2[::-1])
t = t[tmask]
x = (h*t*t+d)*ex - w*t*ey
y = (h*t*t+d)*ey + w*t*ex
# Arrow Head
s = np.sqrt((x[-2] - x[-1])**2 + (y[-2] - y[-1])**2)
width = widths[order[i], order[j]]
x1 = 0.1*width*(x[-2] - x[-1] + y[-2] - y[-1])/s + x[-1]
y1 = 0.1*width*(y[-2] - y[-1] - x[-2] + x[-1])/s + y[-1]
x2 = 0.1*width*(x[-2] - x[-1] - y[-2] + y[-1])/s + x[-1]
y2 = 0.1*width*(y[-2] - y[-1] + x[-2] - x[-1])/s + y[-1]
x = np.concatenate([x, [x1, x[-1], x2]])
y = np.concatenate([y, [y1, y[-1], y2]])
axes.plot(x, y, lw=width, color=colors[order[i], order[j]], solid_capstyle='round', solid_joinstyle='round')
return axes | [
"def",
"plot_circular",
"(",
"widths",
",",
"colors",
",",
"curviness",
"=",
"0.2",
",",
"mask",
"=",
"True",
",",
"topo",
"=",
"None",
",",
"topomaps",
"=",
"None",
",",
"axes",
"=",
"None",
",",
"order",
"=",
"None",
")",
":",
"colors",
"=",
"np"... | 28.792857 | 21.4 |
def ManagerMock(manager, *return_value):
"""
Set the results to two items:
>>> objects = ManagerMock(Post.objects, 'queryset', 'result')
>>> assert objects.filter() == objects.all()
Force an exception:
>>> objects = ManagerMock(Post.objects, Exception())
See QuerySetMock for more about how this works.
"""
def make_get_query_set(self, model):
def _get(*a, **k):
return QuerySetMock(model, *return_value)
return _get
actual_model = getattr(manager, 'model', None)
if actual_model:
model = mock.MagicMock(spec=actual_model())
else:
model = mock.MagicMock()
m = SharedMock()
m.model = model
m.get_query_set = make_get_query_set(m, actual_model)
m.get = m.get_query_set().get
m.count = m.get_query_set().count
m.exists = m.get_query_set().exists
m.__iter__ = m.get_query_set().__iter__
m.__getitem__ = m.get_query_set().__getitem__
return m | [
"def",
"ManagerMock",
"(",
"manager",
",",
"*",
"return_value",
")",
":",
"def",
"make_get_query_set",
"(",
"self",
",",
"model",
")",
":",
"def",
"_get",
"(",
"*",
"a",
",",
"*",
"*",
"k",
")",
":",
"return",
"QuerySetMock",
"(",
"model",
",",
"*",
... | 27.617647 | 17.676471 |
def add(self, key, value):
    """
    Append a value, or every element of a list of values, under ``key``.

    QueryDict.appendlist(), if given a list, will append the list,
    which creates nested lists. In most cases, we want to be able
    to pass in a list (for convenience) but have it appended into
    a flattened list.
    TODO: Possibly throw an error if add() is used on a non-list param.
    """
    items = value if isinstance(value, list) else [value]
    for item in items:
        self.appendlist(key, item)
"def",
"add",
"(",
"self",
",",
"key",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"list",
")",
":",
"for",
"val",
"in",
"value",
":",
"self",
".",
"appendlist",
"(",
"key",
",",
"val",
")",
"else",
":",
"self",
".",
"appendlis... | 41.357143 | 15.928571 |
def run_main(args: argparse.Namespace, do_exit=True) -> "Optional[int]":
    """Runs the checks and exits.

    To extend this tool, use this function and set do_exit to False
    to get returned the status code.

    :param args: parsed command line arguments.
    :param do_exit: when True, terminate the process via ``handler.exit()``;
        when False, the final status code is returned instead.
    :return: the handler's status code when ``do_exit`` is False, or
        ``None`` when ``--init`` only generated a configuration.
    """
    # Fix: the return annotation previously claimed ``None`` although the
    # function returns ``handler.status_code`` (as documented above).
    if args.init:
        generate()
        return None  # exit after generate instead of starting to lint
    handler = CheckHandler(
        file=args.config_file, out_json=args.json, files=args.files)
    for style in get_stylers():
        handler.run_linter(style())
    for linter in get_linters():
        handler.run_linter(linter())
    for security in get_security():
        handler.run_linter(security())
    for tool in get_tools():
        tool = tool()
        # Only run pypi if everything else passed
        if tool.name == "pypi" and handler.status_code != 0:
            continue
        handler.run_linter(tool)
    if do_exit:
        handler.exit()
    return handler.status_code
"def",
"run_main",
"(",
"args",
":",
"argparse",
".",
"Namespace",
",",
"do_exit",
"=",
"True",
")",
"->",
"None",
":",
"if",
"args",
".",
"init",
":",
"generate",
"(",
")",
"return",
"None",
"# exit after generate instead of starting to lint",
"handler",
"=",... | 26.117647 | 20.647059 |
def parse_duration(duration, timestamp=None):
    """
    Interprets a ISO8601 duration value relative to a given timestamp.
    :param duration: The duration, as a string.
    :type: string
    :param timestamp: The unix timestamp we should apply the duration to.
        Optional, default to the current time.
    :type: string
    :return: The new timestamp, after the duration is applied.
    :rtype: int
    """
    # NOTE(review): `basestring` only exists on Python 2; these asserts are
    # also stripped under -O, so they are debugging aids, not validation.
    assert isinstance(duration, basestring)
    assert timestamp is None or isinstance(timestamp, int)
    # `timedelta` here shadows datetime.timedelta — it holds the parsed offset.
    timedelta = duration_parser(duration)
    if timestamp is None:
        # No base given: apply the duration to the current UTC time.
        data = datetime.utcnow() + timedelta
    else:
        data = datetime.utcfromtimestamp(timestamp) + timedelta
return calendar.timegm(data.utctimetuple()) | [
"def",
"parse_duration",
"(",
"duration",
",",
"timestamp",
"=",
"None",
")",
":",
"assert",
"isinstance",
"(",
"duration",
",",
"basestring",
")",
"assert",
"timestamp",
"is",
"None",
"or",
"isinstance",
"(",
"timestamp",
",",
"int",
")",
"timedelta",
"=",
... | 35.73913 | 20.608696 |
def get_badge(self):
    """
    The related ``Badge`` object, or ``None`` when no badge matches the slug.
    """
    try:
        badge = Badge.objects.using(self.db_read).get(slug=self.slug)
    except Badge.DoesNotExist:
        return None
    logger.debug('✓ Badge %s: fetched from db (%s)', badge.slug, self.db_read)
    return badge
"def",
"get_badge",
"(",
"self",
")",
":",
"try",
":",
"obj",
"=",
"Badge",
".",
"objects",
".",
"using",
"(",
"self",
".",
"db_read",
")",
".",
"get",
"(",
"slug",
"=",
"self",
".",
"slug",
")",
"logger",
".",
"debug",
"(",
"'✓ Badge %s: fetched fro... | 32 | 17.2 |
def get_speed_steering(self, steering, speed):
    """
    Calculate the speed_sp for each motor in a pair to achieve the specified
    steering. Calling this alone does not move the motors; a run_* call must
    follow.

    steering [-100, 100]:
        * -100 turns left on the spot (right motor 100% forward, left 100% backward),
        * 0 drives straight, and
        * 100 turns right on the spot (left motor 100% forward, right 100% backward).
    speed:
        Speed applied to the outmost (faster) motor; the other motor's
        speed is derived from it.

    Returns a ``(left_speed, right_speed)`` tuple in native motor units.
    """
    assert steering >= -100 and steering <= 100,\
        "{} is an invalid steering, must be between -100 and 100 (inclusive)".format(steering)
    # We don't have a good way to make this generic for the pair, so the
    # left motor's speed stats are assumed to match the right motor's.
    native_speed = self.left_motor._speed_native_units(speed)
    # Scale factor for the slower motor: 1.0 at steering 0, 0.0 at +/-50,
    # -1.0 at +/-100 (counter-rotation).
    scale = (50 - abs(float(steering))) / 50
    if steering >= 0:
        return (native_speed, native_speed * scale)
    return (native_speed * scale, native_speed)
"def",
"get_speed_steering",
"(",
"self",
",",
"steering",
",",
"speed",
")",
":",
"assert",
"steering",
">=",
"-",
"100",
"and",
"steering",
"<=",
"100",
",",
"\"{} is an invalid steering, must be between -100 and 100 (inclusive)\"",
".",
"format",
"(",
"steering",
... | 42.114286 | 25.771429 |
def render_koji(self):
    """
    if there is yum repo specified, don't pick stuff from koji
    """
    phase = 'prebuild_plugins'
    plugin = 'koji'
    # Nothing to do when the koji plugin is absent from the build json.
    if not self.dj.dock_json_has_plugin_conf(phase, plugin):
        return
    if self.spec.yum_repourls.value:
        # Explicit yum repos take precedence over koji.
        logger.info("removing koji from request "
                    "because there is yum repo specified")
        self.dj.remove_plugin(phase, plugin)
    elif not (self.spec.koji_target.value and
              self.spec.kojiroot.value and
              self.spec.kojihub.value):
        # koji requires target, root and hub; drop the plugin if any is missing.
        logger.info("removing koji from request as not specified")
        self.dj.remove_plugin(phase, plugin)
    else:
        # Fully specified: wire the koji parameters into the plugin args.
        self.dj.dock_json_set_arg(phase, plugin,
                                  "target", self.spec.koji_target.value)
        self.dj.dock_json_set_arg(phase, plugin,
                                  "root", self.spec.kojiroot.value)
        self.dj.dock_json_set_arg(phase, plugin,
                                  "hub", self.spec.kojihub.value)
        # Proxy is optional and only set when provided.
        if self.spec.proxy.value:
            self.dj.dock_json_set_arg(phase, plugin,
"proxy", self.spec.proxy.value) | [
"def",
"render_koji",
"(",
"self",
")",
":",
"phase",
"=",
"'prebuild_plugins'",
"plugin",
"=",
"'koji'",
"if",
"not",
"self",
".",
"dj",
".",
"dock_json_has_plugin_conf",
"(",
"phase",
",",
"plugin",
")",
":",
"return",
"if",
"self",
".",
"spec",
".",
"... | 44.964286 | 16.535714 |
def _split_list(cls, items, separator=",", last_separator=" and "):
"""
Splits a string listing elements into an actual list.
Parameters
----------
items: :class:`str`
A string listing elements.
separator: :class:`str`
The separator between each item. A comma by default.
last_separator: :class:`str`
The separator used for the last item. ' and ' by default.
Returns
-------
:class:`list` of :class:`str`
A list containing each one of the items.
"""
if items is None:
return None
items = items.split(separator)
last_item = items[-1]
last_split = last_item.split(last_separator)
if len(last_split) > 1:
items[-1] = last_split[0]
items.append(last_split[1])
return [e.strip() for e in items] | [
"def",
"_split_list",
"(",
"cls",
",",
"items",
",",
"separator",
"=",
"\",\"",
",",
"last_separator",
"=",
"\" and \"",
")",
":",
"if",
"items",
"is",
"None",
":",
"return",
"None",
"items",
"=",
"items",
".",
"split",
"(",
"separator",
")",
"last_item"... | 32.777778 | 14.407407 |
def cacheLock(self):
    """
    This is a context manager to acquire a lock on the Lock file that will be used to
    prevent synchronous cache operations between workers.
    :yields: File descriptor for cache lock file in w mode
    """
    # presumably decorated with @contextmanager at the definition site —
    # TODO confirm; the yield below only makes sense under that decorator.
    cacheLockFile = open(self.cacheLockFile, 'w')
    try:
        # Exclusive (blocking) lock shared across worker processes.
        flock(cacheLockFile, LOCK_EX)
        logger.debug("CACHE: Obtained lock on file %s" % self.cacheLockFile)
        yield cacheLockFile
    except IOError:
        logger.critical('CACHE: Unable to acquire lock on %s' % self.cacheLockFile)
        raise
    finally:
        # Closing the file releases the flock. Note this path also logs
        # "Released lock" when acquisition failed with IOError above.
        cacheLockFile.close()
        logger.debug("CACHE: Released lock")
logger.debug("CACHE: Released lock") | [
"def",
"cacheLock",
"(",
"self",
")",
":",
"cacheLockFile",
"=",
"open",
"(",
"self",
".",
"cacheLockFile",
",",
"'w'",
")",
"try",
":",
"flock",
"(",
"cacheLockFile",
",",
"LOCK_EX",
")",
"logger",
".",
"debug",
"(",
"\"CACHE: Obtained lock on file %s\"",
"... | 40.882353 | 19.352941 |
def reset(self, new_damping=None):
    """
    Keeps all user supplied options the same, but resets counters etc.
    """
    self._num_iter = self._inner_run_counter = 0
    self._J_update_counter = self.update_J_frequency
    self._fresh_JTJ = self._has_run = False
    # Only replace the damping when the caller supplied a new value.
    if new_damping is not None:
        self.damping = np.array(new_damping).astype('float')
    self._set_err_paramvals()
"def",
"reset",
"(",
"self",
",",
"new_damping",
"=",
"None",
")",
":",
"self",
".",
"_num_iter",
"=",
"0",
"self",
".",
"_inner_run_counter",
"=",
"0",
"self",
".",
"_J_update_counter",
"=",
"self",
".",
"update_J_frequency",
"self",
".",
"_fresh_JTJ",
"=... | 36.583333 | 10.916667 |
def rank_motifs(stats, metrics=("roc_auc", "recall_at_fdr")):
    """Determine mean rank of motifs based on metrics."""
    motifs = list(stats)
    backgrounds = list(next(iter(stats.values())))
    # One rank vector per metric; each motif's score is its mean over
    # all backgrounds, converted to a rank across motifs.
    per_metric_ranks = []
    for metric in metrics:
        mean_scores = [
            np.mean([stats[motif][bg][metric] for bg in backgrounds])
            for motif in motifs
        ]
        per_metric_ranks.append(rankdata(mean_scores))
    # Average the ranks across metrics to get the final rank per motif.
    return dict(zip(motifs, np.mean(per_metric_ranks, 0)))
"def",
"rank_motifs",
"(",
"stats",
",",
"metrics",
"=",
"(",
"\"roc_auc\"",
",",
"\"recall_at_fdr\"",
")",
")",
":",
"rank",
"=",
"{",
"}",
"combined_metrics",
"=",
"[",
"]",
"motif_ids",
"=",
"stats",
".",
"keys",
"(",
")",
"background",
"=",
"list",
... | 36.875 | 19.1875 |
def random_mixed_actions(nums_actions, random_state=None):
    """
    Return a tuple of random mixed actions (vectors of floats).

    Parameters
    ----------
    nums_actions : tuple(int)
        Tuple of the numbers of actions, one for each player.
    random_state : int or np.random.RandomState, optional
        Random seed (integer) or np.random.RandomState instance to set
        the initial state of the random number generator for
        reproducibility. If None, a randomly initialized RandomState is
        used.

    Returns
    -------
    action_profile : tuple(ndarray(float, ndim=1))
        Tuple of mixed_actions, one for each player.
    """
    rng = check_random_state(random_state)
    # One probability vector per player, flattened to 1-D.
    return tuple(probvec(1, num_actions, rng).ravel()
                 for num_actions in nums_actions)
"def",
"random_mixed_actions",
"(",
"nums_actions",
",",
"random_state",
"=",
"None",
")",
":",
"random_state",
"=",
"check_random_state",
"(",
"random_state",
")",
"action_profile",
"=",
"tuple",
"(",
"[",
"probvec",
"(",
"1",
",",
"num_actions",
",",
"random_s... | 31.518519 | 21.444444 |
def delete(config, username, type):
    """Delete an LDAP user."""
    ldap_client = Client()
    ldap_client.prepare_connection()
    API(ldap_client).delete(username, type)
"def",
"delete",
"(",
"config",
",",
"username",
",",
"type",
")",
":",
"client",
"=",
"Client",
"(",
")",
"client",
".",
"prepare_connection",
"(",
")",
"user_api",
"=",
"API",
"(",
"client",
")",
"user_api",
".",
"delete",
"(",
"username",
",",
"type... | 33 | 6 |
def send_data(self):
    """Send data packets from the local file to the server"""
    # Guard against a closed/absent connection before touching the file.
    if not self.connection._sock:
        raise err.InterfaceError("(0, '')")
    conn = self.connection
    try:
        with open(self.filename, 'rb') as open_file:
            packet_size = min(conn.max_allowed_packet, 16*1024) # 16KB is efficient enough
            # Stream the file in fixed-size chunks, one packet per chunk.
            while True:
                chunk = open_file.read(packet_size)
                if not chunk:
                    break
                conn.write_packet(chunk)
    except IOError:
        # NOTE(review): any IOError (not only a missing file) is reported
        # as MySQL error 1017 "Can't find file".
        raise err.OperationalError(1017, "Can't find file '{0}'".format(self.filename))
    finally:
        # send the empty packet to signify we are done sending data
conn.write_packet(b'') | [
"def",
"send_data",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"connection",
".",
"_sock",
":",
"raise",
"err",
".",
"InterfaceError",
"(",
"\"(0, '')\"",
")",
"conn",
"=",
"self",
".",
"connection",
"try",
":",
"with",
"open",
"(",
"self",
".",
... | 41.315789 | 18.842105 |
def write(self, offset, data):
    """Write a string of bytes to the specified `offset` in bytes, relative
    to the base physical address of the MMIO region.
    Args:
        offset (int, long): offset from base physical address, in bytes.
        data (bytes, bytearray, list): a byte array or list of 8-bit
            integers to write.
    Raises:
        TypeError: if `offset` or `data` type are invalid.
        ValueError: if `offset` is out of bounds, or if data is not valid bytes.
    """
    # `long` implies Python 2 support; on Python 3 a compatibility alias
    # for `long` must exist at module level — TODO confirm.
    if not isinstance(offset, (int, long)):
        raise TypeError("Invalid offset type, should be integer.")
    if not isinstance(data, (bytes, bytearray, list)):
        raise TypeError("Invalid data type, expected bytes, bytearray, or list.")
    # Translate the caller-relative offset into a mapping-relative one and
    # bounds-check the whole write before touching the mapping.
    offset = self._adjust_offset(offset)
    self._validate_offset(offset, len(data))
    # Normalize lists of ints (and bytearrays) to immutable bytes.
    data = bytes(bytearray(data))
self.mapping[offset:offset + len(data)] = data | [
"def",
"write",
"(",
"self",
",",
"offset",
",",
"data",
")",
":",
"if",
"not",
"isinstance",
"(",
"offset",
",",
"(",
"int",
",",
"long",
")",
")",
":",
"raise",
"TypeError",
"(",
"\"Invalid offset type, should be integer.\"",
")",
"if",
"not",
"isinstanc... | 41.75 | 23.416667 |
def disconnected(self, client):
    """Call this method when a client disconnected."""
    if client not in self.clients:
        return  # already disconnected; nothing to clean up
    self.clients.remove(client)
    self._log_disconnected(client)
    self._close(client)
"def",
"disconnected",
"(",
"self",
",",
"client",
")",
":",
"if",
"client",
"not",
"in",
"self",
".",
"clients",
":",
"# already disconnected.",
"return",
"self",
".",
"clients",
".",
"remove",
"(",
"client",
")",
"self",
".",
"_log_disconnected",
"(",
"c... | 35 | 7.25 |
def from_ssl_socket(cls, ssl_socket):
    """Load certificate data from an SSL socket.
    """
    cert = cls()
    try:
        data = ssl_socket.getpeercert()
    except AttributeError:
        # PyPy doesn't have .getpeercert
        return cert
    logger.debug("Certificate data from ssl module: {0!r}".format(data))
    if not data:
        return cert
    # getpeercert() only returns populated data for validated certificates.
    cert.validated = True
    cert.subject_name = data.get('subject')
    cert.alt_names = defaultdict(list)
    if 'subjectAltName' in data:
        # Group alt names by type, e.g. {'DNS': [...], 'IP Address': [...]}.
        for name, value in data['subjectAltName']:
            cert.alt_names[name].append(value)
    if 'notAfter' in data:
        # Convert the OpenSSL time string to a naive UTC datetime.
        tstamp = ssl.cert_time_to_seconds(data['notAfter'])
        cert.not_after = datetime.utcfromtimestamp(tstamp)
    if sys.version_info.major < 3:
        cert._decode_names() # pylint: disable=W0212
    # Collect every commonName from the subject RDN sequence.
    cert.common_names = []
    if cert.subject_name:
        for part in cert.subject_name:
            for name, value in part:
                if name == 'commonName':
                    cert.common_names.append(value)
return cert | [
"def",
"from_ssl_socket",
"(",
"cls",
",",
"ssl_socket",
")",
":",
"cert",
"=",
"cls",
"(",
")",
"try",
":",
"data",
"=",
"ssl_socket",
".",
"getpeercert",
"(",
")",
"except",
"AttributeError",
":",
"# PyPy doesn't have .getppercert",
"return",
"cert",
"logger... | 38.566667 | 11.4 |
def info(self):
    """Returns the name and version of the current shell"""
    zsh = Popen(['zsh', '-c', 'echo $ZSH_VERSION'],
                stdout=PIPE, stderr=DEVNULL)
    raw_version = zsh.stdout.read()
    return u'ZSH {}'.format(raw_version.decode('utf-8').strip())
"def",
"info",
"(",
"self",
")",
":",
"proc",
"=",
"Popen",
"(",
"[",
"'zsh'",
",",
"'-c'",
",",
"'echo $ZSH_VERSION'",
"]",
",",
"stdout",
"=",
"PIPE",
",",
"stderr",
"=",
"DEVNULL",
")",
"version",
"=",
"proc",
".",
"stdout",
".",
"read",
"(",
")... | 47.166667 | 11.666667 |
def put(self, transfer_id, amount, created_timestamp, receipt):
    """
    Claim a transfer via the 'account/transfer/claim' endpoint.

    :param transfer_id: int id of the transfer to claim (previous doc said
        "account_id to deposit the money to" — presumably a copy/paste slip)
    :param amount: float of the amount to transfer
    :param created_timestamp: str timestamp associated with the transfer
    :param receipt: str of the receipt that money has been received
    :return: Transfer dict
    """
    return self.connection.put('account/transfer/claim',
                               data=dict(transfer_id=transfer_id,
                                         amount=amount,
                                         created_timestamp=created_timestamp,
receipt=receipt)) | [
"def",
"put",
"(",
"self",
",",
"transfer_id",
",",
"amount",
",",
"created_timestamp",
",",
"receipt",
")",
":",
"return",
"self",
".",
"connection",
".",
"put",
"(",
"'account/transfer/claim'",
",",
"data",
"=",
"dict",
"(",
"transfer_id",
"=",
"transfer_i... | 58.076923 | 22.538462 |
def enumerate_zones(self):
    """ Return a list of (zone_id, zone_name) tuples """
    # Old-style coroutine ("yield from" + return) — presumably decorated
    # with @asyncio.coroutine at the definition site; TODO confirm.
    zones = []
    # Probe controllers 1-7 and zones 1-16; a CommandException while
    # reading a zone's name ends the scan of that controller.
    for controller in range(1, 8):
        for zone in range(1, 17):
            zone_id = ZoneID(zone, controller)
            try:
                name = yield from self.get_zone_variable(zone_id, 'name')
                # Only zones that report a non-empty name are kept.
                if name:
                    zones.append((zone_id, name))
            except CommandException:
                break
return zones | [
"def",
"enumerate_zones",
"(",
"self",
")",
":",
"zones",
"=",
"[",
"]",
"for",
"controller",
"in",
"range",
"(",
"1",
",",
"8",
")",
":",
"for",
"zone",
"in",
"range",
"(",
"1",
",",
"17",
")",
":",
"zone_id",
"=",
"ZoneID",
"(",
"zone",
",",
... | 37.846154 | 12.923077 |
def check_cdims(cls, ops, kwargs):
    """Check that all operands (`ops`) have equal channel dimension."""
    distinct_cdims = {operand.cdim for operand in ops}
    if len(distinct_cdims) != 1:
        raise ValueError("Not all operands have the same cdim:" + str(ops))
    return ops, kwargs
"def",
"check_cdims",
"(",
"cls",
",",
"ops",
",",
"kwargs",
")",
":",
"if",
"not",
"len",
"(",
"{",
"o",
".",
"cdim",
"for",
"o",
"in",
"ops",
"}",
")",
"==",
"1",
":",
"raise",
"ValueError",
"(",
"\"Not all operands have the same cdim:\"",
"+",
"str"... | 49 | 12.4 |
def copy(self, request, **kwargs):
    # pylint: disable=unused-argument
    '''
    Copy instance with deps.
    '''
    # Duplicate the current object, then persist it through a partial
    # serializer seeded with any overrides from the request body.
    instance = self.copy_instance(self.get_object())
    serializer = self.get_serializer(instance, data=request.data, partial=True)
    # NOTE(review): is_valid() result is ignored (no raise_exception), so
    # an invalid payload would still be saved — confirm this is intended.
    serializer.is_valid()
    serializer.save()
return Response(serializer.data, status.HTTP_201_CREATED).resp | [
"def",
"copy",
"(",
"self",
",",
"request",
",",
"*",
"*",
"kwargs",
")",
":",
"# pylint: disable=unused-argument",
"instance",
"=",
"self",
".",
"copy_instance",
"(",
"self",
".",
"get_object",
"(",
")",
")",
"serializer",
"=",
"self",
".",
"get_serializer"... | 39.2 | 18.8 |
def limit_text_to_be_path_element(text, max_length=None, separator='_'):
    """ Replace characters that are not in the valid character set of RAFCON.

    :param text: the string to be cleaned
    :param max_length: the maximum length of the output string
    :param separator: the separator used for rafcon.core.storage.storage.limit_text_max_length
    :return: the cleaned (and possibly truncated) string
    """
    # TODO: Should there not only be one method i.e. either this one or "clean_path_element"
    for forbidden in (' ', '*'):
        text = text.replace(forbidden, '_')
    # Strip every remaining character outside the allowed set.
    text = re.sub('[^a-zA-Z0-9-_]', '', text)
    if max_length is None:
        return text
    return limit_text_max_length(text, max_length, separator)
"def",
"limit_text_to_be_path_element",
"(",
"text",
",",
"max_length",
"=",
"None",
",",
"separator",
"=",
"'_'",
")",
":",
"# TODO: Should there not only be one method i.e. either this one or \"clean_path_element\"",
"elements_to_replace",
"=",
"{",
"' '",
":",
"'_'",
","... | 47.6875 | 20.3125 |
def set_bpf_filter_on_all_devices(filterstr):
    '''
    Long method name, but self-explanatory. Set the bpf
    filter on all devices that have been opened.
    '''
    # Hold the class-level lock so the set of open devices cannot change
    # while we iterate and apply the filter to each one.
    with PcapLiveDevice._lock:
        for dev in PcapLiveDevice._OpenDevices.values():
_PcapFfi.instance()._set_filter(dev, filterstr) | [
"def",
"set_bpf_filter_on_all_devices",
"(",
"filterstr",
")",
":",
"with",
"PcapLiveDevice",
".",
"_lock",
":",
"for",
"dev",
"in",
"PcapLiveDevice",
".",
"_OpenDevices",
".",
"values",
"(",
")",
":",
"_PcapFfi",
".",
"instance",
"(",
")",
".",
"_set_filter",... | 42 | 18 |
def storage_factory(storage_service, trajectory=None, **kwargs):
    """Creates a storage service, to be extended if new storage services are added
    :param storage_service:
        Storage Service instance of constructor or a string pointing to a file
    :param trajectory:
        A trajectory instance
    :param kwargs:
        Arguments passed to the storage service
    :return:
        A storage service and a set of not used keyword arguments from kwargs
    """
    # No service given: infer it from the filename extension in kwargs.
    if 'filename' in kwargs and storage_service is None:
        filename = kwargs['filename']
        _, ext = os.path.splitext(filename)
        if ext in ('.hdf', '.h4', '.hdf4', '.he2', '.h5', '.hdf5', '.he5'):
            storage_service = HDF5StorageService
        else:
            raise ValueError('Extension `%s` of filename `%s` not understood.' %
                             (ext, filename))
    elif isinstance(storage_service, str):
        # Dotted-path string: resolve it to a class object.
        class_name = storage_service.split('.')[-1]
        storage_service = create_class(class_name, [storage_service, HDF5StorageService])
    # A class gets instantiated (consuming kwargs); an instance is returned
    # as-is together with the set of unused keyword argument names.
    if inspect.isclass(storage_service):
        return _create_storage(storage_service, trajectory, **kwargs)
    else:
return storage_service, set(kwargs.keys()) | [
"def",
"storage_factory",
"(",
"storage_service",
",",
"trajectory",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'filename'",
"in",
"kwargs",
"and",
"storage_service",
"is",
"None",
":",
"filename",
"=",
"kwargs",
"[",
"'filename'",
"]",
"_",
",... | 32.702703 | 24.513514 |
def register(self, *magic_objects):
    """Register one or more instances of Magics.

    Take one or more classes or instances of classes that subclass the main
    `core.Magic` class, and register them with IPython to use the magic
    functions they provide. The registration process will then ensure that
    any methods that have decorated to provide line and/or cell magics will
    be recognized with the `%x`/`%%x` syntax as a line/cell magic
    respectively.

    If classes are given, they will be instantiated with the default
    constructor. If your classes need a custom constructor, you should
    instanitate them first and pass the instance.

    The provided arguments can be an arbitrary mix of classes and instances.

    Parameters
    ----------
    magic_objects : one or more classes or instances
    """
    # Start by validating them to ensure they have all had their magic
    # methods registered at the instance level
    for m in magic_objects:
        if not m.registered:
            # Fix: the %r placeholder was never interpolated before; feed
            # the offending object into the message.
            raise ValueError("Class of magics %r was constructed without "
                             "the @register_magics class decorator" % m)
        if type(m) in (type, MetaHasTraits):
            # If we're given an uninstantiated class
            m = m(shell=self.shell)
        # Now that we have an instance, we can register it and update the
        # table of callables
        self.registry[m.__class__.__name__] = m
        for mtype in magic_kinds:
            self.magics[mtype].update(m.magics[mtype])
"def",
"register",
"(",
"self",
",",
"*",
"magic_objects",
")",
":",
"# Start by validating them to ensure they have all had their magic",
"# methods registered at the instance level",
"for",
"m",
"in",
"magic_objects",
":",
"if",
"not",
"m",
".",
"registered",
":",
"rais... | 45.942857 | 23.4 |
def __parse(self):
"""
Parse
Accept the text file. We'll open it, read it, and return a compiled dictionary to write to a json file
May write a chronology CSV and a data CSV if those sections are available
:return:
"""
logger_noaa_lpd.info("enter parse")
# Strings
missing_str = ''
data_filename = ''
# Counters
grant_id = 0
funding_id = 0
data_col_ct = 1
line_num = 0
# Boolean markers
description_on = False
publication_on = False
abstract_on = False
site_info_on = False
chronology_on = False
chron_vals_on = False
variables_on = False
data_vals_on = False
data_on = False
# Lists
lat = []
lon = []
elev = []
pub = []
funding = []
temp_abstract = []
temp_description = []
data_var_names = []
data_col_list = []
data_tables = []
# All dictionaries needed to create JSON structure
temp_funding = OrderedDict()
temp_pub = OrderedDict()
core_len = OrderedDict()
geo_properties = OrderedDict()
chron_dict = OrderedDict()
data_dict_upper = OrderedDict()
final_dict = OrderedDict()
try:
# Open the text file in read mode. We'll read one line at a time until EOF
with open(self.filename_txt, 'r') as f:
logger_noaa_lpd.info("opened noaa file: {}".format(self.filename_txt))
for line in iter(f):
line_num += 1
# PUBLICATION
# There can be multiple publications. Create a dictionary for each one.
if publication_on:
# End of the section. Add the dictionary for this one publication to the overall list
if '-----' in line:
temp_pub = self.__reorganize_doi(temp_pub)
pub.append(temp_pub.copy())
temp_abstract.clear()
temp_pub.clear()
publication_on = False
logger_noaa_lpd.info("end section: Publication")
elif abstract_on:
# End of abstract: possibly more variables after.
if "#" in line:
abstract_on = False
temp_pub['abstract'] = ''.join(temp_abstract)
logger_noaa_lpd.info("end section: Abstract")
line = self.__str_cleanup(line)
key, value = self.__slice_key_val(line)
temp_pub[self.__camel_case(key)] = value
else:
temp_abstract.append(self.__str_cleanup(line))
# Add all info into the current publication dictionary
else:
line = self.__str_cleanup(line)
key, value = self.__slice_key_val(line)
if key in ("Author", "Authors"):
temp_pub["author"] = self.__reorganize_authors(value)
else:
temp_pub[self.__camel_case(key)] = value
if key == 'Abstract':
logger_noaa_lpd.info("reading section: Abstract")
abstract_on = True
temp_abstract.append(value)
# DESCRIPTION AND NOTES
# Descriptions are often long paragraphs spanning multiple lines, but don't follow the key/value format
elif description_on:
# End of the section. Turn marker off and combine all the lines in the section
if '-------' in line:
description_on = False
value = ''.join(temp_description)
final_dict['description'] = value
logger_noaa_lpd.info("end section: Description_and_Notes")
# The first line in the section. Split into key, value
elif 'Description:' in line:
key, val = self.__slice_key_val(line)
temp_description.append(val)
# Keep a running list of all lines in the section
else:
line = self.__str_cleanup(line)
temp_description.append(line)
# SITE INFORMATION (Geo)
elif site_info_on:
if '-------' in line:
site_info_on = False
logger_noaa_lpd.info("end section: Site_Information")
else:
line = self.__str_cleanup(line)
key, value = self.__slice_key_val(line)
if key.lower() in ["northernmost_latitude", "southernmost_latitude"]:
lat.append(self.__convert_num(value))
elif key.lower() in ["easternmost_longitude", "westernmost_longitude"]:
lon.append(self.__convert_num(value))
elif key.lower() in ["site_name", "location", "country", "elevation"]:
if key.lower() == 'elevation':
val, unit = self.__split_name_unit(value)
elev.append(val)
else:
geo_properties[self.__camel_case(key)] = value
# CHRONOLOGY
elif chronology_on:
"""
HOW IT WORKS:
Chronology will be started at "Chronology:" section header
Every line starting with a "#" will be ignored
The first line without a "#" is considered the variable header line. Variable names are parsed.
Each following line will be considered column data and sorted accordingly.
Once the "-----" barrier is reached, we exit the chronology section.
"""
# When reaching the end of the chron section, set the marker to off and close the CSV file
if '-------' in line:
# Turn off markers to exit section
chronology_on = False
chron_vals_on = False
try:
# If nothing between the chronology start and the end barrier, then there won't be a CSV
if chron_start_line != line_num - 1:
try:
chron_csv.close()
logger_noaa_lpd.info("parse: chronology: no data found in chronology section")
except NameError:
logger_noaa_lpd.debug(
"parse: chronology_on: NameError: chron_csv ref before assignment, {}".format(
self.filename_txt))
print(
"Chronology section is incorrectly formatted. "
"Section data will not be converted")
logger_noaa_lpd.info("end section: Chronology")
except NameError:
logger_noaa_lpd.debug(
"parse: chronology_on: NameError: chron_start_line ref before assignment, {}".format(
self.filename_txt))
print("Chronology section is incorrectly formatted. Section data will not be converted")
# Data values line. Split, then write to CSV file
elif chron_vals_on:
values = line.split()
try:
cw.writerow(values)
except NameError:
logger_noaa_lpd.debug(
"parse: chronology_on: NameError: csv writer ref before assignment, {}".format(
self.filename_txt))
print("Chronology section is incorrectly formatted. Section data will not be converted")
else:
try:
# Chron variable headers line
if line and line[0] != "#":
chron_filename = self.dsn + '.chron1.measurementTable1.csv'
# Organize the var header into a dictionary
variables = self.__reorganize_chron_header(line)
# Create a dictionary of info for each column
chron_col_list = self.__create_chron_cols(variables)
chron_dict['filename'] = chron_filename
chron_dict['chronTableName'] = 'Chronology'
chron_dict['columns'] = chron_col_list
# Open CSV for writing
csv_path = os.path.join(self.dir_bag, chron_filename)
chron_csv = open(csv_path, 'w+', newline='')
logger_noaa_lpd.info("opened csv file: {}".format(chron_filename))
cw = csv.writer(chron_csv)
# Turn the marker on to start processing the values columns
chron_vals_on = True
except IndexError:
logger_noaa_lpd.debug("parse: chronology: IndexError when attempting chron var header")
# VARIABLES
elif variables_on:
"""
HOW IT WORKS:
Variable lines are the only lines that have a "##" in front of them.
Ignore all lines that don't match the "##" regex.
Once there's a match, start parsing the variable lines, and create a column entry for each line.
"""
process_line = False
# End of the section. Turn marker off
if "------" in line:
variables_on = False
logger_noaa_lpd.info("end section: Variables")
for item in NOAA_VAR_LINES:
if item.lower() in line.lower():
process_line = False
for item in NOAA_EMPTY:
if item == line:
process_line = False
m = re.match(re_var, line)
if m:
process_line = True
# If the line isn't in the ignore list, then it's a variable line
if process_line:
# Split the line items, and cleanup
cleaned_line = self.__separate_data_vars(line)
# Add the items into a column dictionary
data_col_dict = self.__create_paleo_col(cleaned_line, data_col_ct)
# Keep a list of all variable names
try:
# Use this list later to cross check with the variable line in the Data section
data_var_names.append(data_col_dict['variableName'])
except KeyError:
data_var_names.append('')
logger_noaa_lpd.warn("parse: variables: "
"KeyError: {} not found in {}".format("variableName", "data_col_dict"))
# Add the column dictionary into a final dictionary
data_col_list.append(data_col_dict)
data_col_ct += 1
# DATA
# Missing Value, Create data columns, and output Data CSV
elif data_on:
"""
HOW IT WORKS:
Capture the "Missing Value" entry, if it exists.
Data lines should not have a "#" in front of them.
The first line without a "#" should be the variable header line
All lines that follow should have column data.
"""
# Do not process blank or template lines
process_line = True
for item in NOAA_DATA_LINES:
if item in line:
process_line = False
for item in NOAA_EMPTY:
if item == line:
process_line = False
for item in ALTS_MV:
# Missing value found. Store entry
if item in line.lower():
process_line = False
line = self.__str_cleanup(line)
key, missing_str = self.__slice_key_val(line)
if process_line:
# Split the line at each space (There SHOULD one space between each variable. Not always true)
values = line.split()
# Write all data values to CSV
if data_vals_on:
try:
dw.writerow(values)
except NameError:
logger_noaa_lpd.debug(
"parse: data_on: NameError: csv writer ref before assignment, {}".format(
self.filename_txt))
# Check for the line of variables
else:
var = self.__str_cleanup(values[0].lstrip())
# Check if a variable name is in the current line
if var.lower() in line.lower():
data_vals_on = True
logger_noaa_lpd.info("start section: Data_Values")
# Open CSV for writing
data_filename = "{}.paleoData1.measurementTable1.csv".format(self.dsn)
csv_path = os.path.join(self.dir_bag, data_filename)
data_csv = open(csv_path, 'w+', newline='')
logger_noaa_lpd.info("opened csv file: {}".format(data_filename))
dw = csv.writer(data_csv)
# METADATA
else:
# Line Continuation: Sometimes there are items that span a few lines.
# If this happens, we want to combine them all properly into one entry.
if '#' not in line and line not in NOAA_EMPTY and old_val:
if old_key in ('funding', 'agency'):
try:
temp_funding[old_key] = old_val + line
except KeyError as e:
logger_noaa_lpd.debug(
"parse: metadata: line continuation: {} not found in {}, {}".format(old_key,
"temp_funding",
e))
else:
try:
final_dict[old_key] = old_val + line
except KeyError as e:
logger_noaa_lpd.debug(
"parse: metadata: line continuation: {} not found in {}, {}".format(old_key,
"temp_funding",
e))
# No Line Continuation: This is the start or a new entry
else:
line = self.__str_cleanup(line)
# Grab the key and value from the current line
try:
# Split the line into key, value pieces
key, value = self.__slice_key_val(line)
l_key = key.lower()
cc_key= self.__camel_case(key)
# If there is no value, then we are at a section header.
# Data often has a blank value, so that is a special check.
if not value or l_key == 'data':
# Turn on markers if we run into section headers
if l_key == 'description_and_notes':
description_on = True
logger_noaa_lpd.info("reading section: Description_and_Notes")
elif l_key == 'publication':
publication_on = True
logger_noaa_lpd.info("reading section: Publication")
elif l_key == 'site_information':
site_info_on = True
logger_noaa_lpd.info("reading section: Site_Information")
elif l_key == 'chronology':
chronology_on = True
logger_noaa_lpd.info("reading section: Chronology")
chron_start_line = line_num
elif l_key == 'variables':
variables_on = True
logger_noaa_lpd.info("reading section: Variables")
elif l_key == 'data':
data_on = True
logger_noaa_lpd.info("reading section: Data")
# For all
else:
# Ignore any entries that are specified in the skip list
_ignore = [item.lower() for item in NOAA_KEYS_BY_SECTION["Ignore"]]
if l_key not in _ignore:
# There can be multiple funding agencies and grants. Keep a list of dict entries
_funding = [item.lower() for item in NOAA_KEYS_BY_SECTION["Funding_Agency"]]
if l_key in _funding:
if l_key == 'funding_agency_name':
funding_id += 1
key = 'agency'
elif l_key == 'grant':
grant_id += 1
key = 'grant'
temp_funding[key] = value
# If both counters are matching, we are ready to add content to the funding list
if grant_id == funding_id:
funding.append(temp_funding.copy())
temp_funding.clear()
else:
# There's likely two "Online_Resource"s, and we need both, so check and concat
if cc_key == "onlineResource":
# If it exists, append. If not, add entry as a list
if cc_key in final_dict:
final_dict[cc_key].append(value)
else:
final_dict[cc_key] = [value]
else:
final_dict[cc_key] = value
# Keep track of old key in case we have a line continuation
old_key = key
old_val = value.strip()
except TypeError as e:
logger_noaa_lpd.warn(
"parse: TypeError: none type received from slice_key_val, {}".format(e))
# Wait to close the data CSV until we reached the end of the text file
try:
data_csv.close()
logger_noaa_lpd.info("end section: Data_Values")
logger_noaa_lpd.info("end section: Data")
except NameError as e:
print("Error: NOAA text file is contains format errors. Unable to process.")
logger_noaa_lpd.debug(
"parse: NameError: failed to close csv, invalid formatting in NOAA txt file, {}".format(e))
# Piece together measurements block
logger_noaa_lpd.info("compiling final paleoData")
data_dict_upper['filename'] = data_filename
data_dict_upper['paleoDataTableName'] = 'Data'
data_dict_upper['missingValue'] = missing_str
data_dict_upper['columns'] = data_col_list
data_tables.append(data_dict_upper)
# Piece together geo block
logger_noaa_lpd.info("compiling final geo")
geo = self.__create_coordinates(lat, lon, elev)
geo['properties'] = geo_properties
# Piece together final dictionary
logger_noaa_lpd.info("compiling final master")
final_dict['pub'] = pub
final_dict['funding'] = funding
final_dict['geo'] = geo
final_dict['coreLength'] = core_len
final_dict['chronData'] = [{"chronMeasurementTable": chron_dict}]
final_dict['paleoData'] = data_tables
self.metadata = final_dict
logger_noaa_lpd.info("final dictionary compiled")
# Start cleaning up the metadata
logger_noaa_lpd.info("removing empty fields")
self.metadata = rm_empty_fields(self.metadata)
logger_noaa_lpd.info("removing empty doi")
self.metadata = rm_empty_doi(self.metadata)
logger_noaa_lpd.info("removing irrelevant keys")
self.__remove_irr_fields()
except Exception as e:
logger_noaa_lpd.debug("parse: {}".format(e))
logger_noaa_lpd.info("exit parse")
return | [
"def",
"__parse",
"(",
"self",
")",
":",
"logger_noaa_lpd",
".",
"info",
"(",
"\"enter parse\"",
")",
"# Strings",
"missing_str",
"=",
"''",
"data_filename",
"=",
"''",
"# Counters",
"grant_id",
"=",
"0",
"funding_id",
"=",
"0",
"data_col_ct",
"=",
"1",
"lin... | 54.672686 | 26.889391 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.