code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def enable_tracing(self, thread_trace_func=None):
if self.frame_eval_func is not None:
self.frame_eval_func()
pydevd_tracing.SetTrace(self.dummy_trace_dispatch)
return
if thread_trace_func is None:
thread_trace_func = self.get_thread_local_trace_func()
else:
self._local_thread_trace_func.thread_trace_func = thread_trace_func
pydevd_tracing.SetTrace(thread_trace_func) | Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread. |
def _CreateFeedMapping(client, feed_details):
feed_mapping_service = client.GetService('FeedMappingService',
version='v201809')
operation = {
'operand': {
'criterionType': DSA_PAGE_FEED_CRITERION_TYPE,
'feedId': feed_details.feed_id,
'attributeFieldMappings': [
{
'feedAttributeId': feed_details.url_attribute_id,
'fieldId': DSA_PAGE_URLS_FIELD_ID
},
{
'feedAttributeId': feed_details.label_attribute_id,
'fieldId': DSA_LABEL_FIELD_ID
}
]
},
'operator': 'ADD'
}
feed_mapping_service.mutate([operation]) | Creates the feed mapping for DSA page feeds.
Args:
client: an AdWordsClient instance.
feed_details: a _DSAFeedDetails instance. |
def add_to_package_numpy(self, root, ndarray, node_path, target, source_path, transform, custom_meta):
filehash = self.save_numpy(ndarray)
metahash = self.save_metadata(custom_meta)
self._add_to_package_contents(root, node_path, [filehash], target, source_path, transform, metahash) | Save a Numpy array to the store. |
def _reconnect(self):
if self.idle or self.closed:
LOGGER.debug('Attempting RabbitMQ reconnect in %s seconds',
self.reconnect_delay)
self.io_loop.call_later(self.reconnect_delay, self.connect)
return
LOGGER.warning('Reconnect called while %s', self.state_description) | Schedule the next connection attempt if the class is not currently
closing. |
def workspace_backup_add(ctx):
backup_manager = WorkspaceBackupManager(Workspace(ctx.resolver, directory=ctx.directory, mets_basename=ctx.mets_basename, automatic_backup=ctx.automatic_backup))
backup_manager.add() | Create a new backup |
def _atomicModification(func):
def wrapper(*args, **kwargs):
self = args[0]
with self._qpart:
func(*args, **kwargs)
return wrapper | Decorator
Make document modification atomic |
def dt_avg(self, print_output=True, output_file="dt_query.csv"):
avg = self.dt.mean(axis=2)
if print_output:
np.savetxt(output_file, avg, delimiter=",")
return avg | Compute average document-topic matrix,
and print to file if print_output=True. |
def get_wrong_answer_ids(self):
id_list = []
for answer in self.get_wrong_answers():
id_list.append(answer.get_id())
return IdList(id_list) | provide this method to return only wrong answer ids |
def parse_inline(self, text):
element_list = self._build_inline_element_list()
return inline_parser.parse(
text, element_list, fallback=self.inline_elements['RawText']
) | Parses text into inline elements.
RawText is not considered in parsing but created as a wrapper of holes
that don't match any other elements.
:param text: the text to be parsed.
:returns: a list of inline elements. |
def _serialize_rules(rules):
result = [(rule_name, str(rule)) for rule_name, rule in rules.items()]
return sorted(result, key=lambda rule: rule[0]) | Serialize all the Rule object as string.
New string is used to compare the rules list. |
def add_selected(self, ):
browser = self.shot_browser if self.browser_tabw.currentIndex() == 1 else self.asset_browser
selelements = browser.selected_indexes(2)
if not selelements:
return
seltypes = browser.selected_indexes(3)
if not seltypes:
return
elementi = selelements[0]
typi = seltypes[0]
if not elementi.isValid() or not typi.isValid():
return
element = elementi.internalPointer().internal_data()
typ = typi.internalPointer().internal_data()[0]
reftrack.Reftrack(self.root, self.refobjinter, typ=typ, element=element) | Create a new reftrack with the selected element and type and add it to the root.
:returns: None
:rtype: None
:raises: NotImplementedError |
def with_more_selectors(self, selectors):
if self.headers and self.headers[-1].is_selector:
new_selectors = extend_unique(
self.headers[-1].selectors,
selectors)
new_headers = self.headers[:-1] + (
BlockSelectorHeader(new_selectors),)
return RuleAncestry(new_headers)
else:
new_headers = self.headers + (BlockSelectorHeader(selectors),)
return RuleAncestry(new_headers) | Return a new ancestry that also matches the given selectors. No
nesting is done. |
def ensure_on():
if get_status() == 'not-running':
if config.dbserver.multi_user:
sys.exit('Please start the DbServer: '
'see the documentation for details')
subprocess.Popen([sys.executable, '-m', 'openquake.server.dbserver',
'-l', 'INFO'])
waiting_seconds = 30
while get_status() == 'not-running':
if waiting_seconds == 0:
sys.exit('The DbServer cannot be started after 30 seconds. '
'Please check the configuration')
time.sleep(1)
waiting_seconds -= 1 | Start the DbServer if it is off |
def nested_dict_to_list(path, dic, exclusion=None):
result = []
exclusion = ['__self'] if exclusion is None else exclusion
for key, value in dic.items():
if not any([exclude in key for exclude in exclusion]):
if isinstance(value, dict):
aux = path + key + "/"
result.extend(nested_dict_to_list(aux, value))
else:
if path.endswith("/"):
path = path[:-1]
result.append([path, key, value])
return result | Transform nested dict to list |
def build_annotation_dict_any_filter(annotations: Mapping[str, Iterable[str]]) -> EdgePredicate:
if not annotations:
return keep_edge_permissive
@edge_predicate
def annotation_dict_any_filter(edge_data: EdgeData) -> bool:
return _annotation_dict_any_filter(edge_data, query=annotations)
return annotation_dict_any_filter | Build an edge predicate that passes for edges whose data dictionaries match the given dictionary.
If the given dictionary is empty, will always evaluate to true.
:param annotations: The annotation query dict to match |
def print_file_details_as_csv(self, fname, col_headers):
line = ''
qu = '"'
d = ','
for fld in col_headers:
if fld == "fullfilename":
line = line + qu + fname + qu + d
if fld == "name":
line = line + qu + os.path.basename(fname) + qu + d
if fld == "date":
line = line + qu + self.GetDateAsString(fname) + qu + d
if fld == "size":
line = line + qu + self.get_size_as_string(fname) + qu + d
if fld == "path":
try:
line = line + qu + os.path.dirname(fname) + qu + d
except IOError:
line = line + qu + 'ERROR_PATH' + qu + d
return line | saves as csv format |
def contracts_version_expects_deposit_limits(contracts_version: Optional[str]) -> bool:
if contracts_version is None:
return True
if contracts_version == '0.3._':
return False
return compare(contracts_version, '0.9.0') > -1 | Answers whether TokenNetworkRegistry of the contracts_version needs deposit limits |
def _reference_rmvs(self, removes):
print("")
self.msg.template(78)
msg_pkg = "package"
if len(removes) > 1:
msg_pkg = "packages"
print("| Total {0} {1} removed".format(len(removes), msg_pkg))
self.msg.template(78)
for pkg in removes:
if not GetFromInstalled(pkg).name():
print("| Package {0} removed".format(pkg))
else:
print("| Package {0} not found".format(pkg))
self.msg.template(78)
print("") | Prints all removed packages |
def iter_dialogs(
self, limit=None, *, offset_date=None, offset_id=0,
offset_peer=types.InputPeerEmpty(), ignore_migrated=False
):
return _DialogsIter(
self,
limit,
offset_date=offset_date,
offset_id=offset_id,
offset_peer=offset_peer,
ignore_migrated=ignore_migrated
) | Returns an iterator over the dialogs, yielding 'limit' at most.
Dialogs are the open "chats" or conversations with other people,
groups you have joined, or channels you are subscribed to.
Args:
limit (`int` | `None`):
How many dialogs to be retrieved as maximum. Can be set to
``None`` to retrieve all dialogs. Note that this may take
whole minutes if you have hundreds of dialogs, as Telegram
will tell the library to slow down through a
``FloodWaitError``.
offset_date (`datetime`, optional):
The offset date to be used.
offset_id (`int`, optional):
The message ID to be used as an offset.
offset_peer (:tl:`InputPeer`, optional):
The peer to be used as an offset.
ignore_migrated (`bool`, optional):
Whether :tl:`Chat` that have ``migrated_to`` a :tl:`Channel`
should be included or not. By default all the chats in your
dialogs are returned, but setting this to ``True`` will hide
them in the same way official applications do.
Yields:
Instances of `telethon.tl.custom.dialog.Dialog`. |
def get_generic_subseq_3D(protein, cutoff, prop, condition):
if not protein.representative_structure:
log.error('{}: no representative structure, cannot search for subseq'.format(protein.id))
return {'subseq_len': 0, 'subseq': None, 'subseq_resnums': []}
subseq, subseq_resnums = protein.get_seqprop_subsequence_from_structchain_property(property_key=prop,
property_value=cutoff,
condition=condition,
use_representatives=True,
return_resnums=True) or (
None, [])
return {'subseq_len': len(subseq_resnums), 'subseq': subseq, 'subseq_resnums': subseq_resnums} | Get a subsequence from REPSEQ based on a property stored in REPSTRUCT.REPCHAIN.letter_annotations |
def visit_ListComp(self, node: ast.ListComp) -> None:
if node in self._recomputed_values:
value = self._recomputed_values[node]
text = self._atok.get_text(node)
self.reprs[text] = value
self.generic_visit(node=node) | Represent the list comprehension by dumping its source code. |
def parse_iptables_rule(line):
bits = line.split()
definition = {}
key = None
args = []
not_arg = False
def add_args():
arg_string = ' '.join(args)
if key in IPTABLES_ARGS:
definition_key = (
'not_{0}'.format(IPTABLES_ARGS[key])
if not_arg
else IPTABLES_ARGS[key]
)
definition[definition_key] = arg_string
else:
definition.setdefault('extras', []).extend((key, arg_string))
for bit in bits:
if bit == '!':
if key:
add_args()
args = []
key = None
not_arg = True
elif bit.startswith('-'):
if key:
add_args()
args = []
not_arg = False
key = bit
else:
args.append(bit)
if key:
add_args()
if 'extras' in definition:
definition['extras'] = set(definition['extras'])
return definition | Parse one iptables rule. Returns a dict where each iptables code argument
is mapped to a name using IPTABLES_ARGS. |
def renderer_doc(*args):
renderers_ = salt.loader.render(__opts__, [])
docs = {}
if not args:
for func in six.iterkeys(renderers_):
docs[func] = renderers_[func].__doc__
return _strip_rst(docs)
for module in args:
if '*' in module or '.' in module:
for func in fnmatch.filter(renderers_, module):
docs[func] = renderers_[func].__doc__
else:
moduledot = module + '.'
for func in six.iterkeys(renderers_):
if func.startswith(moduledot):
docs[func] = renderers_[func].__doc__
return _strip_rst(docs) | Return the docstrings for all renderers. Optionally, specify a renderer or a
function to narrow the selection.
The strings are aggregated into a single document on the master for easy
reading.
Multiple renderers can be specified.
.. versionadded:: 2015.5.0
CLI Example:
.. code-block:: bash
salt '*' sys.renderer_doc
salt '*' sys.renderer_doc cheetah
salt '*' sys.renderer_doc jinja json
Renderer names can be specified as globs.
.. code-block:: bash
salt '*' sys.renderer_doc 'c*' 'j*' |
def _create_binary_trigger(trigger):
ops = {
0: ">",
1: "<",
2: ">=",
3: "<=",
4: "==",
5: 'always'
}
op_codes = {y: x for x, y in ops.items()}
source = 0
if isinstance(trigger, TrueTrigger):
op_code = op_codes['always']
elif isinstance(trigger, FalseTrigger):
raise ArgumentError("Cannot express a never trigger in binary descriptor", trigger=trigger)
else:
op_code = op_codes[trigger.comp_string]
if trigger.use_count:
source = 1
return (op_code << 1) | source | Create an 8-bit binary trigger from an InputTrigger, TrueTrigger, FalseTrigger. |
def match(self, p_todo):
operand1 = self.value
operand2 = p_todo.priority() or 'ZZ'
return self.compare_operands(operand1, operand2) | Performs a match on a priority in the todo.
It gets priority from p_todo and compares it with user-entered
expression based on the given operator (default ==). It does that however
in reversed order to obtain more intuitive result. Example: (>B) will
match todos with priority (A).
Items without priority are designated with corresponding operand set to
'ZZ', because python doesn't allow NoneType() and str() comparisons. |
def _validate_file_format(self, file_format):
if file_format not in self.valid_file_formats:
raise InvalidFileFormatError(
"{} is not a valid file format".format(file_format)
)
return file_format | Validates file format, raising error if invalid. |
def from_json(cls, data, json_schema_class=None):
schema = cls.json_schema if json_schema_class is None else json_schema_class()
return schema.load(data) | JSON deserialization method that retrieves a genome instance from its json representation
If a specific json schema class is provided, it is used; otherwise, the class-specific default schema is used |
def upload_content(self, synchronous=True, **kwargs):
kwargs = kwargs.copy()
kwargs.update(self._server_config.get_client_kwargs())
response = client.post(self.path('upload_content'), **kwargs)
json = _handle_response(response, self._server_config, synchronous)
if json['status'] != 'success':
raise APIResponseError(
'Received error when uploading file {0} to repository {1}: {2}'
.format(kwargs.get('files'), self.id, json)
)
return json | Upload a file or files to the current repository.
Here is an example of how to upload content::
with open('my_content.rpm') as content:
repo.upload_content(files={'content': content})
This method accepts the same keyword arguments as Requests. As a
result, the following examples can be adapted for use here:
* `POST a Multipart-Encoded File`_
* `POST Multiple Multipart-Encoded Files`_
:param synchronous: What should happen if the server returns an HTTP
202 (accepted) status code? Wait for the task to complete if
``True``. Immediately return the server's response otherwise.
:param kwargs: Arguments to pass to requests.
:returns: The server's response, with all JSON decoded.
:raises: ``requests.exceptions.HTTPError`` If the server responds with
an HTTP 4XX or 5XX message.
:raises nailgun.entities.APIResponseError: If the response has a status
other than "success".
.. _POST a Multipart-Encoded File:
http://docs.python-requests.org/en/latest/user/quickstart/#post-a-multipart-encoded-file
.. _POST Multiple Multipart-Encoded Files:
http://docs.python-requests.org/en/latest/user/advanced/#post-multiple-multipart-encoded-files |
def copy(self, deep=True):
from copy import copy, deepcopy
if deep:
return deepcopy(self)
else:
return copy(self) | Make a copy of this object
Parameters
----------
deep : boolean, default True
Make a deep copy, i.e. also copy data
Returns
-------
copy : type of caller |
def _parse_file():
file_name = path.join(
path.abspath(path.dirname(path.dirname(__file__))), 'config.json'
)
logger.info('loading configuration from file: %r', file_name)
try:
data = _read_file(file_name)
except FileNotFoundError:
logger.error('no configuration available, set FLASH_CONFIG or '
'provide config.json')
exit()
for service in data.get('services', []):
for key, val in service.items():
if isinstance(val, str) and re.match(r'^\$[A-Z_]+$', val):
env_val = getenv(val[1:])
if env_val is None:
logger.warning('environment variable %r not found', val[1:])
service[key] = env_val or val
return data | Parse the config from a file.
Note:
Assumes any value that looks like ``"$LOOKS_LIKE_THIS"`` in a service
definition refers to an environment variable, and attempts to get
it accordingly. |
def tmp(p_queue, host=None):
if host is not None:
return _path(_c.FSQ_TMP, root=_path(host, root=hosts(p_queue)))
return _path(p_queue, _c.FSQ_TMP) | Construct a path to the tmp dir for a queue |
def _get_request_fields_from_parent(self):
if not self.parent:
return None
if not getattr(self.parent, 'request_fields'):
return None
if not isinstance(self.parent.request_fields, dict):
return None
return self.parent.request_fields.get(self.field_name) | Get request fields from the parent serializer. |
def release_subnet(self, cidr, direc):
if direc == 'in':
self.service_in_ip.release_subnet(cidr)
else:
self.service_out_ip.release_subnet(cidr) | Routine to release a subnet from the DB. |
def list(self, request):
max_items_per_page = getattr(self, 'max_per_page',
getattr(settings, 'AJAX_MAX_PER_PAGE', 100))
requested_items_per_page = request.POST.get("items_per_page", 20)
items_per_page = min(max_items_per_page, requested_items_per_page)
current_page = request.POST.get("current_page", 1)
if not self.can_list(request.user):
raise AJAXError(403, _("Access to this endpoint is forbidden"))
objects = self.get_queryset(request)
paginator = Paginator(objects, items_per_page)
try:
page = paginator.page(current_page)
except PageNotAnInteger:
page = paginator.page(1)
except EmptyPage:
page = EmptyPageResult()
data = [encoder.encode(record) for record in page.object_list]
return EnvelopedResponse(data=data, metadata={'total': paginator.count}) | List objects of a model. By default will show page 1 with 20 objects on it.
**Usage**::
params = {"items_per_page":10,"page":2} //all params are optional
$.post("/ajax/{app}/{model}/list.json"),params) |
def parse_skypos(ra, dec):
rval = make_val_float(ra)
dval = make_val_float(dec)
if rval is None:
rval, dval = radec_hmstodd(ra, dec)
return rval, dval | Function to parse RA and Dec input values and turn them into decimal
degrees
Input formats could be:
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
nn.nnnnnnnn
"nn.nnnnnnn" |
def mfpt(totflux, pi, qminus):
r
return dense.tpt.mfpt(totflux, pi, qminus) | r"""Mean first passage time for reaction A to B.
Parameters
----------
totflux : float
The total flux between reactant and product
pi : (M,) ndarray
Stationary distribution
qminus : (M,) ndarray
Backward comittor
Returns
-------
tAB : float
The mean first-passage time for the A to B reaction
See also
--------
rate
Notes
-----
Equal to the inverse rate, see [1].
References
----------
.. [1] F. Noe, Ch. Schuette, E. Vanden-Eijnden, L. Reich and
T. Weikl: Constructing the Full Ensemble of Folding Pathways
from Short Off-Equilibrium Simulations.
Proc. Natl. Acad. Sci. USA, 106, 19011-19016 (2009) |
def is_supported():
on_supported_platform = False
if salt.utils.platform.is_sunos():
on_supported_platform = True
elif salt.utils.platform.is_freebsd() and _check_retcode('kldstat -q -m zfs'):
on_supported_platform = True
elif salt.utils.platform.is_linux() and os.path.exists('/sys/module/zfs'):
on_supported_platform = True
elif salt.utils.platform.is_linux() and salt.utils.path.which('zfs-fuse'):
on_supported_platform = True
elif salt.utils.platform.is_darwin() and \
os.path.exists('/Library/Extensions/zfs.kext') and \
os.path.exists('/dev/zfs'):
on_supported_platform = True
return (salt.utils.path.which('zpool') and on_supported_platform) is True | Check the system for ZFS support |
def write_temp_file(self, content, filename=None, mode='w'):
if filename is None:
filename = str(uuid.uuid4())
fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)
with open(fqpn, mode) as fh:
fh.write(content)
return fqpn | Write content to a temporary file.
Args:
content (bytes|str): The file content. If passing binary data the mode needs to be set
to 'wb'.
filename (str, optional): The filename to use when writing the file.
mode (str, optional): The file write mode which could be either 'w' or 'wb'.
Returns:
str: Fully qualified path name for the file. |
def url_view(url_pattern, name=None, priority=None):
def meta_wrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.urljects_view = True
wrapper.url = url_pattern
wrapper.url_name = name or func.__name__
wrapper.url_priority = priority
return wrapper
return meta_wrapper | Decorator for registering functional views.
Meta decorator syntax has to be used in order to accept arguments.
This decorator does not really do anything that magical:
This:
>>> from urljects import U, url_view
>>> @url_view(U / 'my_view')
... def my_view(request)
... pass
is equivalent to this:
>>> def my_view(request)
... pass
>>> my_view.urljects_view = True
>>> my_view.url = U / 'my_view'
>>> my_view.url_name = 'my_view'
Those view are then supposed to be used with ``view_include`` which will
register all views that have ``urljects_view`` set to ``True``.
:param url_pattern: regex or URLPattern or anything passable to url()
:param name: name of the view, __name__ will be used otherwise.
:param priority: priority of the view, the lower the better |
def _get_meaning(value_pb, is_list=False):
meaning = None
if is_list:
if len(value_pb.array_value.values) == 0:
return None
all_meanings = [
_get_meaning(sub_value_pb) for sub_value_pb in value_pb.array_value.values
]
unique_meanings = set(all_meanings)
if len(unique_meanings) == 1:
meaning = unique_meanings.pop()
else:
meaning = all_meanings
elif value_pb.meaning:
meaning = value_pb.meaning
return meaning | Get the meaning from a protobuf value.
:type value_pb: :class:`.entity_pb2.Value`
:param value_pb: The protobuf value to be checked for an
associated meaning.
:type is_list: bool
:param is_list: Boolean indicating if the ``value_pb`` contains
a list value.
:rtype: int
:returns: The meaning for the ``value_pb`` if one is set, else
:data:`None`. For a list value, if there are disagreeing
means it just returns a list of meanings. If all the
list meanings agree, it just condenses them. |
def find_element(driver, elem_path, by=CSS, timeout=TIMEOUT, poll_frequency=0.5):
wait = WebDriverWait(driver, timeout, poll_frequency)
return wait.until(EC.presence_of_element_located((by, elem_path))) | Find and return an element once located
find_element locates an element on the page, waiting
for up to timeout seconds. The element, when located,
is returned. If not located, a TimeoutException is raised.
Args:
driver (selenium webdriver or element): A driver or element
elem_path (str): String used to located the element
by (selenium By): Selenium By reference
timeout (int): Selenium Wait timeout, in seconds
poll_frequency (float): Selenium Wait polling frequency, in seconds
Returns:
element: Selenium element
Raises:
TimeoutException: Raised when target element isn't located |
def exit_statistics(hostname, start_time, count_sent, count_received, min_time, avg_time, max_time, deviation):
end_time = datetime.datetime.now()
duration = end_time - start_time
duration_sec = float(duration.seconds * 1000)
duration_ms = float(duration.microseconds / 1000)
duration = duration_sec + duration_ms
package_loss = 100 - ((float(count_received) / float(count_sent)) * 100)
print(f'\b\b--- {hostname} ping statistics ---')
try:
print(f'{count_sent} packages transmitted, {count_received} received, {package_loss}% package loss, time {duration}ms')
except ZeroDivisionError:
print(f'{count_sent} packets transmitted, {count_received} received, 100% packet loss, time {duration}ms')
print(
'rtt min/avg/max/dev = %.2f/%.2f/%.2f/%.2f ms' % (
min_time.seconds*1000 + float(min_time.microseconds)/1000,
float(avg_time) / 1000,
max_time.seconds*1000 + float(max_time.microseconds)/1000,
float(deviation)
)
) | Print ping exit statistics |
def _get_subelements(self, node):
items = node.find('rdf:Alt', self.NS)
if items is not None:
try:
return items[0].text
except IndexError:
return ''
for xmlcontainer, container, insertfn in XMP_CONTAINERS:
items = node.find('rdf:{}'.format(xmlcontainer), self.NS)
if items is None:
continue
result = container()
for item in items:
insertfn(result, item.text)
return result
return '' | Gather the sub-elements attached to a node
Gather rdf:Bag and and rdf:Seq into set and list respectively. For
alternate languages values, take the first language only for
simplicity. |
def _sigma_pi_midE(self, Tp):
m_p = self._m_p
Qp = (Tp - self._Tth) / m_p
multip = -6e-3 + 0.237 * Qp - 0.023 * Qp ** 2
return self._sigma_inel(Tp) * multip | Geant 4.10.0 model for 2 GeV < Tp < 5 GeV |
def ts_stats_significance(ts, ts_stat_func, null_ts_func, B=1000, permute_fast=False):
stats_ts = ts_stat_func(ts)
if permute_fast:
null_ts = map(np.random.permutation, np.array([ts, ] * B))
else:
null_ts = np.vstack([null_ts_func(ts) for i in np.arange(0, B)])
stats_null_ts = np.vstack([ts_stat_func(nts) for nts in null_ts])
pvals = []
nums = []
for i in np.arange(0, len(stats_ts)):
num_samples = np.sum((stats_null_ts[:, i] >= stats_ts[i]))
nums.append(num_samples)
pval = num_samples / float(B)
pvals.append(pval)
return stats_ts, pvals, nums | Compute the statistical significance of a test statistic at each point
of the time series. |
def breathe_identifier(self):
if self.kind == "function":
return "{name}({parameters})".format(
name=self.name,
parameters=", ".join(self.parameters)
)
return self.name | The unique identifier for breathe directives.
.. note::
This method is currently assumed to only be called for nodes that are
in :data:`exhale.utils.LEAF_LIKE_KINDS` (see also
:func:`exhale.graph.ExhaleRoot.generateSingleNodeRST` where it is used).
**Return**
:class:`python:str`
Usually, this will just be ``self.name``. However, for functions in
particular the signature must be included to distinguish overloads. |
def table(name=None, mode='create', use_cache=True, priority='interactive',
allow_large_results=False):
output = QueryOutput()
output._output_type = 'table'
output._table_name = name
output._table_mode = mode
output._use_cache = use_cache
output._priority = priority
output._allow_large_results = allow_large_results
return output | Construct a query output object where the result is a table
Args:
name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority:one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a name to be specified) (default False). |
def get_version(filename):
with open(filename) as in_fh:
for line in in_fh:
if line.startswith('__version__'):
return line.split('=')[1].strip()[1:-1]
raise ValueError("Cannot extract version from %s" % filename) | Extract the package version |
def load_notebook(fullname: str):
shell = InteractiveShell.instance()
path = fullname
with open(path, 'r', encoding='utf-8') as f:
notebook = read(f, 4)
mod = types.ModuleType(fullname)
mod.__file__ = path
mod.__dict__['get_ipython'] = get_ipython
sys.modules[fullname] = mod
save_user_ns = shell.user_ns
shell.user_ns = mod.__dict__
try:
for cell in notebook.cells:
if cell.cell_type == 'code':
try:
ast.parse(cell.source)
except SyntaxError:
continue
try:
exec(cell.source, mod.__dict__)
except NameError:
print(cell.source)
raise
finally:
shell.user_ns = save_user_ns
return mod | Import a notebook as a module. |
def display_element_selected(self, f):
self._display_element_selected_func = f
@wraps(f)
def wrapper(*args, **kw):
self._flask_view_func(*args, **kw)
return f | Decorator routes Alexa Display.ElementSelected request to the wrapped view function.
@ask.display_element_selected
def eval_element():
return "", 200
The wrapped function is registered as the display_element_selected view function
and renders the response for requests.
Arguments:
f {function} -- display_element_selected view function |
def file_w_create_directories(filepath):
dirname = os.path.dirname(filepath)
if dirname and dirname != os.path.curdir and not os.path.isdir(dirname):
os.makedirs(dirname)
return open(filepath, 'w') | Recursively create some directories if needed so that the directory where
@filepath must be written exists, then open it in "w" mode and return the
file object. |
def get_start_time(self):
start_time = c_double()
self.library.get_start_time.argtypes = [POINTER(c_double)]
self.library.get_start_time.restype = None
self.library.get_start_time(byref(start_time))
return start_time.value | returns start time |
def duration(self):
self._duration = self.lib.iperf_get_test_duration(self._test)
return self._duration | The test duration in seconds. |
def sync_and_deploy_gateway(collector):
configuration = collector.configuration
aws_syncr = configuration['aws_syncr']
find_gateway(aws_syncr, configuration)
artifact = aws_syncr.artifact
aws_syncr.artifact = ""
sync(collector)
aws_syncr.artifact = artifact
deploy_gateway(collector) | Do a sync followed by deploying the gateway |
def generate_key(block_size=32):
random_seq = os.urandom(block_size)
random_key = base64.b64encode(random_seq)
return random_key | Generate random key for ope cipher.
Parameters
----------
block_size : int, optional
Length of random bytes.
Returns
-------
random_key : str
A random key for encryption.
Notes:
------
Implementation follows https://github.com/pyca/cryptography |
def save_proficiency(self, proficiency_form, *args, **kwargs):
if proficiency_form.is_for_update():
return self.update_proficiency(proficiency_form, *args, **kwargs)
else:
return self.create_proficiency(proficiency_form, *args, **kwargs) | Pass through to provider ProficiencyAdminSession.update_proficiency |
def sort_resources(cls, request, resources, fail_enum, header_proto=None):
if not request.sorting:
return resources
value_handlers = cls._get_handler_set(request, fail_enum, header_proto)
def sorter(resource_a, resource_b):
for handler in value_handlers:
val_a, val_b = handler.get_sort_values(resource_a, resource_b)
if val_a < val_b:
return handler.xform_result(-1)
if val_a > val_b:
return handler.xform_result(1)
return 0
return sorted(resources, key=cmp_to_key(sorter)) | Sorts a list of resources based on a list of sort controls
Args:
request (object): The parsed protobuf request object
resources (list of objects): The resources to be sorted
fail_enum (int, enum): The enum status to raise with invalid keys
header_proto(class): Class to decode a resources header
Returns:
list: The sorted list of resources |
def generate_token(self):
response = self._make_request()
self.auth = response
self.token = response['token'] | Make request in API to generate a token. |
def on_interesting_rts_change(self, new_global_rts, removed_global_rts):
if new_global_rts:
LOG.debug(
'Sending route_refresh to all neighbors that'
' did not negotiate RTC capability.'
)
pm = self._core_service.peer_manager
pm.schedule_rr_to_non_rtc_peers()
if removed_global_rts:
LOG.debug(
'Cleaning up global tables as some interested RTs were removed'
)
self._clean_global_uninteresting_paths() | Update global tables as interested RTs changed.
Adds `new_global_rts` and removes `removed_global_rts` rt nlris. Does not
check if they are already present. Schedules refresh
request to peers that do not participate in RTC address-family. |
def circular_pores(target, pore_diameter='pore.diameter',
throat_diameter='throat.diameter',
throat_centroid='throat.centroid'):
r
return spherical_pores(target=target, pore_diameter=pore_diameter,
throat_diameter=throat_diameter) | r"""
Calculate the coordinates of throat endpoints, assuming circular pores.
This model accounts for the overlapping lens between pores and throats.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
pore_diameter : string
Dictionary key of the pore diameter values.
throat_diameter : string
Dictionary key of the throat diameter values.
throat_centroid : string, optional
Dictionary key of the throat centroid values. See the notes.
Returns
-------
EP : dictionary
Coordinates of throat endpoints stored in Dict form. Can be accessed
via the dict keys 'head' and 'tail'.
Notes
-----
(1) This model should only be applied to true 2D networks.
(2) By default, this model assumes that throat centroid and pore
coordinates are colinear. If that's not the case, such as in extracted
networks, `throat_centroid` could be passed as an optional argument, and
the model takes care of the rest. |
def do_diff(self, params):
    """\x1b[1mNAME\x1b[0m
    diff - Display the differences between two paths

    \x1b[1mSYNOPSIS\x1b[0m
    diff <src> <dst>

    \x1b[1mDESCRIPTION\x1b[0m
    The output is interpreted as:
    -- means the znode exists only in <src>
    ++ means the znode exists only in <dst>
    -+ means the znode's content differs between <src> and <dst>
    """
    # Map the diff codes coming from the ZK client to display prefixes.
    prefixes = {-1: "--", 0: "-+", 1: "++"}
    seen = 0
    for seen, (state, path) in enumerate(
            self._zk.diff(params.path_a, params.path_b), 1):
        prefix = prefixes.get(state)
        if prefix is not None:
            self.show_output(prefix + " %s", path)
    if seen == 0:
        self.show_output("Branches are equal.")
def config_keys(self, sortkey=False):
    """Yield all configuration keys in this node, including those of
    child nodes; nested keys are joined with '.'.

    :param sortkey: when True, keys are yielded in sorted order.
    """
    pairs = sorted(self.items()) if sortkey else self.items()
    for key, value in pairs:
        if isinstance(value, ConfigTree):
            # Recurse into sub-trees and prefix their keys.
            for child_key in value.config_keys(sortkey):
                yield '%s.%s' % (key, child_key)
        else:
            yield key
def validate(data):
    """Validate incoming data.

    Args:
        data(dict): the incoming data

    Returns:
        True if the data is valid

    Raises:
        ValueError: the data is not valid
    """
    text = data.get('text')
    # 'text' must be a non-empty string.
    if not (isinstance(text, _string_types) and text):
        raise ValueError('text field is required and should not be empty')
    if 'markdown' in data and type(data['markdown']) is not bool:
        raise ValueError('markdown field should be bool')
    if 'attachments' in data:
        if not isinstance(data['attachments'], (list, tuple)):
            raise ValueError('attachments field should be list or tuple')
        for attachment in data['attachments']:
            # Each attachment needs at least one of text/title.
            if 'text' not in attachment and 'title' not in attachment:
                raise ValueError('text or title is required in attachment')
    return True
def surface_measure(self, param):
    """Return the arc length measure at ``param``.

    This is a constant function evaluating to `radius` everywhere.

    Parameters
    ----------
    param : float or `array-like`
        Parameter value(s) at which to evaluate.

    Returns
    -------
    measure : float or `numpy.ndarray`
        Constant value(s) of the arc length measure at ``param``.
        If ``param`` is a single parameter, a float is returned,
        otherwise an array of shape ``param.shape``.
    """
    scalar_out = (np.shape(param) == ())
    # np.array(..., copy=False) raises under NumPy >= 2.0 whenever a
    # copy is actually required (e.g. for list input); asarray +
    # atleast_1d gives the same no-copy-when-possible behavior safely.
    param = np.atleast_1d(np.asarray(param, dtype=float))
    if self.check_bounds and not is_inside_bounds(param, self.params):
        raise ValueError('`param` {} not in the valid range '
                         '{}'.format(param, self.params))
    if scalar_out:
        return self.radius
    else:
        return self.radius * np.ones(param.shape)
def upload_path(instance, filename):
    """Sanitize the user-provided file name and prefix it with today's
    ISO date for uniqueness.

    `instance` is required by Django's ``upload_to`` signature but unused.
    """
    sanitized = unicodedata.normalize('NFKD', filename.replace(' ', '_')).lower()
    return os.path.join(timezone.now().date().isoformat(), sanitized)
def get_session(username, password, cookie_path=COOKIE_PATH, cache=True,
                cache_expiry=300, cache_path=CACHE_PATH, driver='phantomjs'):
    """Return a requests session, reusing stored cookies when available,
    otherwise logging in to obtain fresh ones.
    """
    class USPSAuth(AuthBase):
        """Auth object that carries credentials; the request is untouched."""
        def __init__(self, username, password, cookie_path, driver):
            self.username = username
            self.password = password
            self.cookie_path = cookie_path
            self.driver = driver

        def __call__(self, r):
            return r

    # Optionally wrap the session in an on-disk response cache.
    if cache:
        session = requests_cache.core.CachedSession(cache_name=cache_path,
                                                    expire_after=cache_expiry)
    else:
        session = requests.Session()
    session.auth = USPSAuth(username, password, cookie_path, driver)
    session.headers.update({'User-Agent': USER_AGENT})
    if os.path.exists(cookie_path):
        _LOGGER.debug("cookie found at: %s", cookie_path)
        session.cookies = _load_cookies(cookie_path)
    else:
        _login(session)
    return session
def _identity(self, *args, **kwargs):
    """Local users and groups.

    disabled
        True (or False, default) to return only disabled accounts.
    """
    local = self._get_local_users(disabled=kwargs.get('disabled'))
    return {
        'local accounts': local,
        'external accounts': self._get_external_accounts(local.keys()) or 'N/A',
        'local groups': self._get_local_groups(),
    }
def bulk_create(self, *args, **kwargs):
    """Perform Django's bulk_create, then emit a post_bulk_operation
    signal so listeners can react to the bulk change.
    """
    created = super(ManagerUtilsQuerySet, self).bulk_create(*args, **kwargs)
    post_bulk_operation.send(sender=self.model, model=self.model)
    return created
def get_actions(self, request):
    """Disable the "delete selected" admin action.

    The action would otherwise be present even though
    has_delete_permission is False -- it just would not work.
    """
    available = super(CertificateMixin, self).get_actions(request)
    available.pop('delete_selected', '')
    return available
def square_off(series, time_delta=None, transition_seconds=1):
    """Insert samples into regularly sampled data so ramps plot as
    stair steps.

    Each value is duplicated at ``index + time_delta`` minus
    ``transition_seconds``.  When ``time_delta`` is None, the spacing of
    the index itself is used (the last gap is repeated for the final
    sample).
    """
    if time_delta:
        # Numeric deltas are interpreted as seconds.
        if isinstance(time_delta, (int, float)):
            time_delta = datetime.timedelta(0, time_delta)
        shifted = series.index + time_delta
    else:
        gaps = np.diff(series.index)
        shifted = series.index + np.append(gaps, [gaps[-1]])
    shifted = pd.DatetimeIndex(shifted) - datetime.timedelta(0, transition_seconds)
    duplicated = pd.Series(series.values, index=shifted)
    return pd.concat([series, duplicated]).sort_index()
def write(text, delay=0, restore_state_after=True, exact=None):
    """Send artificial keyboard events to the OS, simulating the typing
    of `text`.  Characters not available on the keyboard are typed as
    explicit unicode characters using OS-specific functionality.

    - `delay`: seconds to wait between keypresses (default: none).
    - `restore_state_after`: re-press the keys that were released at the
      beginning once the text has been typed.  Defaults to True.
    - `exact`: force typing every character as explicit unicode; when
      None a platform-specific default is used.
    """
    if exact is None:
        exact = _platform.system() == 'Windows'

    # Release currently pressed keys so modifiers cannot corrupt the
    # typed text; optionally restored below.
    state = stash_state()

    if exact:
        for char in text:
            # Newline/backspace go through the normal event path.
            if char in '\n\b':
                send(char)
            else:
                _os_keyboard.type_unicode(char)
            if delay:
                _time.sleep(delay)
    else:
        for char in text:
            try:
                entries = _os_keyboard.map_name(normalize_name(char))
                scan_code, modifiers = next(iter(entries))
            except (KeyError, ValueError):
                # Not mappable on this layout; fall back to unicode.
                _os_keyboard.type_unicode(char)
                continue

            for modifier in modifiers:
                press(modifier)
            _os_keyboard.press(scan_code)
            _os_keyboard.release(scan_code)
            for modifier in modifiers:
                release(modifier)

            if delay:
                _time.sleep(delay)

    if restore_state_after:
        restore_modifiers(state)
def shutdown(opts):
    """Shutdown hook for this dummy proxy: remove the backing state file
    if one was recorded; otherwise a no-op.
    """
    log.debug('dummy proxy shutdown() called...')
    details = _load_state()
    if 'filename' in details:
        os.unlink(details['filename'])
def _diff_interface_lists(old, new):
    """Compare network interface definitions to extract the changes.

    :param old: list of ElementTree nodes representing the old interfaces
    :param new: list of ElementTree nodes representing the new interfaces
    """
    diff = _diff_lists(old, new, _nics_equal)
    # MAC addresses kept by unchanged NICs; a new NIC reusing one of
    # these gets its <mac> element stripped.
    kept_macs = {nic.find('mac').get('address') for nic in diff['unchanged']}
    for nic in diff['new']:
        mac = nic.find('mac')
        if mac.get('address') in kept_macs:
            nic.remove(mac)
    return diff
def _project_eigenvectors(self):
    """Project eigenvectors along the Cartesian direction stored in
    ``self._projection_direction``; result is stored in
    ``self._p_eigenvectors``.
    """
    projected = []
    for vectors_at_q in self._eigenvectors:
        # Each column is one band's eigenvector; fold its (x, y, z)
        # components onto the projection direction.
        per_band = [np.dot(band.reshape(-1, 3), self._projection_direction)
                    for band in vectors_at_q.T]
        projected.append(np.transpose(per_band))
    self._p_eigenvectors = np.array(projected)
def setMAC(self, xEUI):
    """Set the extended address of the Thread device.

    Args:
        xEUI: extended address, as a hex string or an integer/long

    Returns:
        True: successful to set the extended address
        False: fail to set the extended address
        NOTE(review): the except path returns None implicitly.
    """
    # NOTE: Python 2 code (print statements, `except Exception, e`).
    print '%s call setMAC' % self.port
    address64 = ''
    try:
        if not xEUI:
            # Fall back to the device's current MAC.
            address64 = self.mac
        # NOTE(review): this branch also runs when xEUI is falsy (a
        # falsy non-str overwrites the fallback above) -- confirm the
        # intended precedence.
        if not isinstance(xEUI, str):
            address64 = self.__convertLongToString(xEUI)
            # Pad to the full 16 hex characters of an EUI-64.
            if len(address64) < 16:
                address64 = address64.zfill(16)
            print address64
        else:
            address64 = xEUI
        cmd = WPANCTL_CMD + 'setprop NCP:MACAddress %s' % address64
        if self.__sendCommand(cmd)[0] != 'Fail':
            self.mac = address64
            return True
        else:
            return False
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger('setMAC() Error: ' + str(e))
def _to_json_type(obj, classkey=None):
    """Recursively convert the object instance into a valid JSON type.

    Dicts are converted value by value; objects exposing ``_ast()`` are
    converted via that AST; other iterables become lists; objects with a
    ``__dict__`` become dicts of their public, non-callable attributes.
    When `classkey` is given, the class name is stored under that key.

    NOTE: Python 2 code (``iteritems``).  On Python 2, str has no
    ``__iter__``, which keeps strings out of the list branch.
    """
    if isinstance(obj, dict):
        data = {}
        for (k, v) in obj.items():
            data[k] = _to_json_type(v, classkey)
        return data
    elif hasattr(obj, "_ast"):
        # NOTE(review): classkey is not forwarded through the _ast
        # branch -- confirm that is intentional.
        return _to_json_type(obj._ast())
    elif hasattr(obj, "__iter__"):
        return [_to_json_type(v, classkey) for v in obj]
    elif hasattr(obj, "__dict__"):
        data = dict([
            (key, _to_json_type(value, classkey))
            for key, value in obj.__dict__.iteritems()
            if not callable(value) and not key.startswith('_')
        ])
        if classkey is not None and hasattr(obj, "__class__"):
            data[classkey] = obj.__class__.__name__
        return data
    else:
        return obj
def _costfcn(self, x):
    """Evaluate the OPF objective function, its gradient and Hessian at
    ``x``; returns the ``(f, df, d2f)`` triple.
    """
    return self._f(x), self._df(x), self._d2f(x)
def label(self, input_grid):
    """Label input grid using the enhanced watershed algorithm.

    Args:
        input_grid (numpy.ndarray): Grid to be labeled.
    Returns:
        Array of labeled pixels.
    """
    maxima = self.find_local_maxima(input_grid)
    # Binarize: non-negative cells belong to some object.
    binary = np.where(maxima >= 0, 1, 0)
    markers, _num_features = splabel(binary)
    return markers
def check_input_files(self,
                      return_found=True,
                      return_missing=True):
    """Check whether the chain's input files exist on disk.

    Combines this chain's input files with those of its sub-files and
    delegates the existence check to `check_files`.

    Parameters
    ----------
    return_found : bool
        If True, include the list of found files in the result.
    return_missing : bool
        If True, include the list of missing files in the result.

    Returns
    -------
    found : list
        List of the found files, if requested, otherwise `None`
    missing : list
        List of the missing files, if requested, otherwise `None`
    """
    all_input_files = self.files.chain_input_files + self.sub_files.chain_input_files
    return check_files(all_input_files, self._file_stage,
                       return_found, return_missing)
def _post_activity(self, activity, unserialize=True):
    """Post an activity to the user's feed.

    Returns False when the server sends no data back; raises
    PumpException when the response carries an error; otherwise returns
    True, optionally unserializing the response into this object first.
    """
    feed_url = "{proto}://{server}/api/user/{username}/feed".format(
        proto=self._pump.protocol,
        server=self._pump.client.server,
        username=self._pump.client.nickname
    )

    data = self._pump.request(feed_url, method="POST", data=activity)
    if not data:
        return False
    if "error" in data:
        raise PumpException(data["error"])

    if unserialize:
        if "target" in data:
            self.unserialize(data["target"])
        else:
            obj = data["object"]
            # The activity wrapper, not the object, carries the author
            # and addressing fields; copy them down before unserializing.
            if "author" not in obj:
                obj["author"] = data["actor"]
            for key in ("to", "cc", "bto", "bcc"):
                if key not in obj and key in data:
                    obj[key] = data[key]
            self.unserialize(obj)
    return True
def __getAvatar(self, web):
    """Scrape the avatar URL from a GitHub profile page.

    :param web: parsed web page.
    :type web: BeautifulSoup node.
    """
    try:
        # [:-10] strips the trailing size suffix from the avatar URL.
        self.avatar = web.find("img", {"class": "avatar"})['src'][:-10]
    # find() returns None when no avatar <img> exists (subscripting it
    # raises TypeError) and a missing 'src' attribute raises KeyError;
    # the previous IndexError/AttributeError handlers never matched
    # those failures, so include the types that actually occur.
    except (TypeError, KeyError, IndexError, AttributeError) as error:
        print("There was an error with the user " + self.name)
        print(error)
def get_output_margin(self, status=None):
    """Get the output margin (number of rows for the prompt, footer and
    timing message).
    """
    margin = 1 + self.get_reserved_space() + self.get_prompt(self.prompt).count('\n')
    if special.is_timing_enabled():
        margin += 1
    if status:
        margin += 1 + status.count('\n')
    return margin
def _save(self, url, path, data):
    """Save `data` of downloaded `url` in its registered path, then tear
    down the worker: emit its finished signals and drop the url from the
    request/worker/path registries regardless of write success.
    """
    worker = self._workers[url]
    # NOTE(review): the `path` argument is immediately replaced by the
    # registered path -- confirm that is intentional.
    path = self._paths[url]

    if len(data):
        try:
            with open(path, 'wb') as f:
                f.write(data)
        except Exception:
            # Best-effort write: log which download failed but continue
            # so the bookkeeping below still runs.
            logger.error((url, path))

    worker.finished = True
    worker.sig_download_finished.emit(url, path)
    worker.sig_finished.emit(worker, path, None)

    self._get_requests.pop(url)
    self._workers.pop(url)
    self._paths.pop(url)
def etree_to_string(tree):
    """Serialize an ElementTree with an XML declaration and UTF-8
    encoding.

    :param tree: the instance of ElementTree
    :return: the serialized XML as bytes.
    """
    with BytesIO() as buff:
        tree.write(buff, xml_declaration=True, encoding='UTF-8')
        return buff.getvalue()
def is_newer_file(a, b):
    """Return True when file `a` was modified more recently than `b`.

    Returns False when either path does not exist.
    """
    if not (op.exists(a) and op.exists(b)):
        return False
    return os.stat(a).st_mtime > os.stat(b).st_mtime
def main():
    """Periodically fault a virtual zone on an AlarmDecoder device and
    let it be restored.

    Requires zone expander emulation to be enabled both on the
    AlarmDecoder device ('!' configuration prompt) and in the panel
    programming; faulted/restored events are then visible to other
    consumers such as home automation platforms.
    """
    try:
        device = AlarmDecoder(SerialDevice(interface=SERIAL_DEVICE))

        # Observe zone transitions.
        device.on_zone_fault += handle_zone_fault
        device.on_zone_restore += handle_zone_restore

        with device.open(baudrate=BAUDRATE):
            last_update = time.time()
            while True:
                # Re-fault the target zone every WAIT_TIME seconds.
                if time.time() - last_update > WAIT_TIME:
                    last_update = time.time()
                    device.fault_zone(TARGET_ZONE)
                time.sleep(1)
    except Exception as ex:
        print('Exception:', ex)
def alarm_on_segfault(self, alarm):
    """Raise the specified alarm(s), with a backtrace, when the
    segmentation fault handler is executed.

    :param AlarmType|list[AlarmType] alarm: Alarm.
    """
    self.register_alarm(alarm)
    for entry in listify(alarm):
        self._set('alarm-segfault', entry.alias, multi=True)
    return self._section
def add_host(self, host_id=None, host='localhost', port=6379,
             unix_socket_path=None, db=0, password=None,
             ssl=False, ssl_options=None):
    """Register a new host with the cluster.

    Mostly useful for unittests; hosts are normally supplied through the
    constructor, and changes after the cluster has been used for the
    first time are unlikely to make sense.
    """
    if host_id is None:
        raise RuntimeError('Host ID is required')
    if not isinstance(host_id, (int, long)):
        raise ValueError('The host ID has to be an integer')
    host_id = int(host_id)
    with self._lock:
        if host_id in self.hosts:
            raise TypeError('Two hosts share the same host id (%r)' %
                            (host_id,))
        self.hosts[host_id] = HostInfo(host_id=host_id, host=host,
                                       port=port, db=db,
                                       unix_socket_path=unix_socket_path,
                                       password=password, ssl=ssl,
                                       ssl_options=ssl_options)
        # Bump the generation counter so cached routing is refreshed.
        self._hosts_age += 1
async def tile(tile_number):
    """Handle GET requests for a tile number.

    :param int tile_number: Number of the tile between 0 and `max_tiles`^2.
    :raises HTTPError: 404 if tile exceeds `max_tiles`^2.
    """
    try:
        tile = get_tile(tile_number)
    except TileOutOfBoundsError:
        abort(404)
    # Encode to JPEG into an empty buffer.  The previous code seeded the
    # buffer with tile.tobytes(), so raw pixel bytes beyond the length
    # of the JPEG stream remained in the response payload.
    buf = BytesIO()
    tile.save(buf, 'JPEG')
    content = buf.getvalue()
    response = await make_response(content)
    # 'image/jpeg' is the IANA-registered JPEG media type.
    response.headers['Content-Type'] = 'image/jpeg'
    response.headers['Accept-Ranges'] = 'bytes'
    response.headers['Content-Length'] = str(len(content))
    return response
def check_fieldsets(*args, **kwargs):
    """A Django system check warning when CONFIG_FIELDSETS (if defined)
    is missing entries that exist in settings.CONFIG.
    """
    if not getattr(settings, "CONFIG_FIELDSETS", None):
        return []
    inconsistent_fieldnames = get_inconsistent_fieldnames()
    if not inconsistent_fieldnames:
        return []
    return [
        checks.Warning(
            _(
                "CONSTANCE_CONFIG_FIELDSETS is missing "
                "field(s) that exists in CONSTANCE_CONFIG."
            ),
            hint=", ".join(sorted(inconsistent_fieldnames)),
            obj="settings.CONSTANCE_CONFIG",
            id="constance.E001",
        )
    ]
def load_fixture(self, body, attachment_bodies=None):
    """Load a document into the database from a JSON string, faking
    attachments where necessary.

    :param body: JSON-encoded document; must contain '_id'.
    :param attachment_bodies: optional mapping of attachment name to
        body; attachments listed in the document but absent here get the
        placeholder body 'stub'.
    """
    # None default instead of the mutable-default-argument pitfall.
    if attachment_bodies is None:
        attachment_bodies = {}
    doc = json.loads(body)
    self._documents[doc['_id']] = doc
    self._attachments[doc['_id']] = dict()
    for name in doc.get('_attachments', list()):
        attachment_body = attachment_bodies.get(name, 'stub')
        self._attachments[doc['_id']][name] = attachment_body
def operator_complexity(self):
    """Operator complexity of this multigrid hierarchy.

    Defined as the number of nonzeros in the matrices on all levels
    divided by the number of nonzeros on the finest level.
    """
    total_nnz = sum(level.A.nnz for level in self.levels)
    return total_nnz / float(self.levels[0].A.nnz)
def _set_implied_name(self):
    """Allow the name of this handler to default to the function name."""
    chosen = getattr(self, 'name', None)
    if chosen is None:
        chosen = self.func.__name__
    self.name = chosen.lower()
def validate_number(self, number):
    """Check that `number` looks somewhat like an E.164 number.

    Not an exhaustive check -- the API takes care of that.
    """
    if not isinstance(number, str):
        raise ElksException('Recipient phone number may not be empty')
    if number[0] == '+' and 2 < len(number) < 16:
        return True
    raise ElksException("Phone number must be of format +CCCXXX...")
def cmServiceRequest(PriorityLevel_presence=0):
    """CM SERVICE REQUEST Section 9.2.9"""
    a = TpPd(pd=0x5)
    b = MessageType(mesType=0x24)
    c = CmServiceTypeAndCiphKeySeqNr()
    e = MobileStationClassmark2()
    f = MobileId()
    packet = a / b / c / e / f
    # Use equality, not identity: `is 1` relies on CPython's small-int
    # caching and raises a SyntaxWarning on Python >= 3.8.
    if PriorityLevel_presence == 1:
        g = PriorityLevelHdr(ieiPL=0x8, eightBitPL=0x0)
        packet = packet / g
    return packet
def secp256k1():
    """Create the secp256k1 curve."""
    # Prime field modulus p = 2^256 - 2^32 - 977.
    field = FiniteField(2 ** 256 - 2 ** 32 - 977)
    # Curve equation: y^2 = x^3 + 7 over GF(p).
    curve = EllipticCurve(field, 0, 7)
    generator = curve.point(
        0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
        0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)
    order = 2 ** 256 - 432420386565659656852420866394968145599
    return ECDSA(curve, generator, order)
def StartElement(self, name, attributes):
    """Expat start element event handler: creates a View for each <node>
    element and links it into the view tree.
    """
    # NOTE: Python 2 code (`print >> sys.stderr`).
    if name == 'hierarchy':
        pass
    elif name == 'node':
        attributes['uniqueId'] = 'id/no_id/%d' % self.idCount
        # 'bounds' arrives as "[x1,y1][x2,y2]"; split on '[' ']' ',' and
        # convert to a pair of integer coordinate tuples.
        bounds = re.split('[\][,]', attributes['bounds'])
        attributes['bounds'] = ((int(bounds[1]), int(bounds[2])), (int(bounds[4]), int(bounds[5])))
        if DEBUG_BOUNDS:
            print >> sys.stderr, "bounds=", attributes['bounds']
        self.idCount += 1
        child = View.factory(attributes, self.device, version=self.version, uiAutomatorHelper=self.uiAutomatorHelper)
        self.views.append(child)
        if not self.nodeStack:
            # First node seen becomes the tree root.
            self.root = child
        else:
            # Attach to the innermost currently-open node.
            self.parent = self.nodeStack[-1]
            self.parent.add(child)
        self.nodeStack.append(child)
def _transform(xsl_filename, xml, **kwargs):
    """Transform `xml` using the stylesheet loaded from `xsl_filename`."""
    stylesheet = _make_xsl(xsl_filename)
    return stylesheet(xml, **kwargs)
def diff(candidate, running, *models):
    """Return the difference between two configuration entities
    structured according to the YANG model.

    .. note::
        This function is recommended to be used mostly as a state helper.

    candidate
        First model to compare.
    running
        Second model to compare.
    models
        A list of models to be used when comparing; may be passed either
        as varargs or as a single list argument.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm_yang.diff {} {} models.openconfig_interfaces
    """
    # Unwrap a single-list invocation.  The truthiness check also avoids
    # an IndexError when no models are supplied (models is always a
    # tuple here, so the old isinstance(models, tuple) test was moot).
    if models and isinstance(models[0], list):
        models = models[0]
    first = _get_root_object(models)
    first.load_dict(candidate)
    second = _get_root_object(models)
    second.load_dict(running)
    return napalm_yang.utils.diff(first, second)
def normalize_name(self, name):
    """Normalize a field or level name.

    :param name: The field or level name (a string).
    :returns: The normalized name (a string).

    Transforms the string to lowercase and resolves level name aliases
    (e.g. 'warn' -> 'warning') to their canonical name.
    """
    lowered = name.lower()
    return self.aliases.get(lowered, lowered)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.