code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def pop(self):
    """Pop a request from the queue.

    Fetches one message via ``basic_get`` and decodes it when a body is
    present; returns ``None`` when the queue is empty.
    """
    _frame, _header, payload = self.server.basic_get(queue=self.key)
    if payload:
        return self._decode_request(payload)
def should_exclude(self, filename) -> bool:
    """Should we exclude this file from consideration?

    Returns True when *filename* matches any configured skip glob.
    """
    return any(
        self.filename_matches_glob(filename, pattern)
        for pattern in self.skip_globs
    )
def get_forces(self):
    """Get a list of all police forces. Uses the forces_ API call.

    .. _forces: https://data.police.uk/docs/method/forces/

    :rtype: list
    :return: A list of :class:`forces.Force` objects (one for each police
        force represented in the API)
    """
    return [
        Force(self, id=entry['id'], name=entry['name'])
        for entry in self.service.request('GET', 'forces')
    ]
def get_geostationary_mask(area):
    """Compute a mask of the earth's shape as seen by a geostationary satellite.

    Args:
        area (pyresample.geometry.AreaDefinition): Corresponding area
            definition

    Returns:
        Boolean mask, True inside the earth's shape, False outside.
    """
    # Scale the angular extents by the satellite height to get the
    # projection-plane half-axes of the visible disc.
    height = area.proj_dict['h']
    xmax, ymax = get_geostationary_angle_extent(area)
    xmax, ymax = xmax * height, ymax * height
    x, y = area.get_proj_coords_dask()
    # Points inside the ellipse (x/xmax)^2 + (y/ymax)^2 <= 1 see the earth.
    return (x / xmax) ** 2 + (y / ymax) ** 2 <= 1
def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.

    :param value: a string; anything else raises
    :raises ValueError: for non-strings or unrecognised strings
    """
    # The original relied on the py2-era ``six.string_types``/``six.text_type``
    # round-trip; on Python 3 a plain isinstance(str) check is equivalent.
    if not isinstance(value, str):
        raise ValueError(
            "Unable to interpret non-string value '%s' as boolean" % (value,))
    normalized = value.strip().lower()
    if normalized in ('y', 'yes', 'true', 't', 'on'):
        return True
    if normalized in ('n', 'no', 'false', 'f', 'off'):
        return False
    raise ValueError(
        "Unable to interpret string value '%s' as boolean" % (value,))
def multiget(self, keys, r=None, pr=None, timeout=None,
             basic_quorum=None, notfound_ok=None,
             head_only=False):
    """Retrieves a list of keys belonging to this bucket in parallel.

    :param keys: the keys to fetch
    :type keys: list
    :param r: R-Value for the requests (defaults to bucket's R)
    :type r: integer
    :param pr: PR-Value for the requests (defaults to bucket's PR)
    :type pr: integer
    :param timeout: a timeout value in milliseconds
    :type timeout: int
    :param basic_quorum: whether to use the "basic quorum" policy
        for not-founds
    :type basic_quorum: bool
    :param notfound_ok: whether to treat not-found responses as successful
    :type notfound_ok: bool
    :param head_only: whether to fetch without value, so only metadata
        (only available on PB transport)
    :type head_only: bool
    :rtype: list of :class:`RiakObjects <riak.riak_object.RiakObject>`,
        :class:`Datatypes <riak.datatypes.Datatype>`, or tuples of
        bucket_type, bucket, key, and the exception raised on fetch
    """
    # Expand each key into the fully-qualified (type, bucket, key) triple
    # expected by the client-level multiget.
    triples = [(self.bucket_type.name, self.name, key) for key in keys]
    return self._client.multiget(triples, r=r, pr=pr, timeout=timeout,
                                 basic_quorum=basic_quorum,
                                 notfound_ok=notfound_ok,
                                 head_only=head_only)
def colorize(string, stack):
    """Apply optimal ANSI escape sequences to the string."""
    codes = optimize(stack)
    if not codes:
        return string
    prefix = SEQ % ';'.join(map(str, codes))
    suffix = SEQ % STYLE.reset
    return prefix + string + suffix
def get_missing_languages(self, field_name, db_table):
    """Yield only the missing language codes.

    A language is "missing" when its localized column for *field_name*
    is absent from *db_table*.
    """
    existing = self.get_table_fields(db_table)
    for code in AVAILABLE_LANGUAGES:
        if build_localized_fieldname(field_name, code) not in existing:
            yield code
def safe_cast_to_index(array: Any) -> pd.Index:
    """Given an array, safely cast it to a pandas.Index.

    If it is already a pandas.Index, return it unchanged.

    Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,
    this function will not attempt to do automatic type conversion but will
    always return an index with dtype=object.
    """
    if isinstance(array, pd.Index):
        return _maybe_cast_to_cftimeindex(array)
    if hasattr(array, 'to_index'):
        return _maybe_cast_to_cftimeindex(array.to_index())
    # Force dtype=object so pandas does not coerce object arrays.
    is_object = hasattr(array, 'dtype') and array.dtype.kind == 'O'
    extra = {'dtype': object} if is_object else {}
    return _maybe_cast_to_cftimeindex(pd.Index(np.asarray(array), **extra))
def _to_key(d):
as_str = json.dumps(d, sort_keys=True)
return as_str.replace('"{{', '{{').replace('}}"', '}}') | Convert dict to str and enable Jinja2 template syntax. |
def encode(value, encoding='utf-8', encoding_errors='strict'):
    """Return a bytestring representation of the value.

    Bytes are returned unchanged; any other value is converted to ``str``
    if necessary and encoded using *encoding*/*encoding_errors*.
    """
    if isinstance(value, bytes):
        return value
    # The original used the Python 2 names ``basestring``/``unicode``,
    # which are NameErrors on Python 3; on Python 3 every str must be
    # encoded, which this preserves.
    if not isinstance(value, str):
        value = str(value)
    return value.encode(encoding, encoding_errors)
def validate(self, *args, **kwargs):
    """Validate a parameter dict against a parameter schema from an
    ocrd-tool.json.

    Args:
        obj (dict): parameter dict to validate
        schema (dict): schema to validate against
    """
    # Thin public wrapper around the inherited private validator.
    return super(ParameterValidator, self)._validate(*args, **kwargs)
def add_val(self, val):
    """Add value in form of dict.

    Reads the current configuration, merges *val* into it, and saves.

    :param val: mapping of settings to merge
    :raises ValueError: if *val* is not a dict
    """
    # The original checked ``isinstance(val, type({}))`` and raised
    # ``ValueError(type({}))`` — an opaque payload; use a readable message.
    if not isinstance(val, dict):
        raise ValueError("expected a dict, got %s" % type(val).__name__)
    self.read()
    self.config.update(val)
    self.save()
def set_log_type_flags(self, logType, stdoutFlag, fileFlag):
    """Set a defined log type's flags.

    :Parameters:
        #. logType (string): A defined logging type.
        #. stdoutFlag (boolean): Whether to log to the standard output stream.
        #. fileFlag (boolean): Whether to log to file.
    """
    assert logType in self.__logTypeStdoutFlags.keys(), "logType '%s' not defined" %logType
    assert isinstance(stdoutFlag, bool), "stdoutFlag must be boolean"
    assert isinstance(fileFlag, bool), "fileFlag must be boolean"
    # Record both destination flags for this log type.
    self.__logTypeStdoutFlags[logType] = stdoutFlag
    self.__logTypeFileFlags[logType] = fileFlag
def get_attribute(json, attr):
    """Gets the values of an attribute from JSON.

    Args:
        json: JSON data as a list of dicts, where the keys are
            the raw market statistics.
        attr: String of attribute in JSON file to collect.

    Returns:
        List of values of specified attribute from JSON.
    """
    # The original iterated ``for entry, _ in enumerate(json)`` and indexed
    # back into the list; iterating the items directly is equivalent.
    res = [item[attr] for item in json]
    logger.debug('{0}s (from JSON):\n{1}'.format(attr, res))
    return res
def new_children(self, **kwargs):
    """Create new children from kwargs; returns self for chaining."""
    for name, value in kwargs.items():
        self.new_child(name, value)
    return self
def query(self, query_data, k=10, queue_size=5.0):
    """Query the training data for the k nearest neighbors.

    Parameters
    ----------
    query_data: array-like, last dimension self.dim
        An array of points to query
    k: integer (default = 10)
        The number of nearest neighbors to return
    queue_size: float (default 5.0)
        The multiplier of the internal search queue. This controls the
        speed/accuracy tradeoff. Low values will search faster but with
        more approximate results. High values will search more
        accurately, but will require more computation to do so. Values
        should generally be in the range 1.0 to 10.0.

    Returns
    -------
    indices, distances: array (n_query_points, k), array (n_query_points, k)
        ``indices[i, j]`` is the index into the training data of the jth
        nearest neighbor of the ith query point; ``distances[i, j]`` is
        the corresponding distance.
    """
    points = np.asarray(query_data).astype(np.float32)
    # Seed the search heap with candidates from the RP-tree forest.
    heap = initialise_search(
        self._rp_forest,
        self._raw_data,
        points,
        int(k * queue_size),
        self._random_init,
        self._tree_init,
        self.rng_state,
    )
    # Refine candidates by graph search over the k-NN search graph.
    refined = self._search(
        self._raw_data,
        self._search_graph.indptr,
        self._search_graph.indices,
        heap,
        points,
    )
    indices, dists = deheap_sort(refined)
    # Keep only the k best neighbors per query point.
    return indices[:, :k], dists[:, :k]
def register_interaction(key=None):
    """Decorator registering an interaction class in the registry.

    If no key is provided, the class name is used as a key. A key is provided
    for each core bqplot interaction type so that the frontend can use this
    key regardless of the kernel language.
    """
    def wrap(interaction):
        if key is None:
            name = interaction.__module__ + interaction.__name__
        else:
            name = key
        interaction.types[name] = interaction
        return interaction
    return wrap
def value(self):
    """Read the value from BACnet network.

    Also records the reading in the trend history and caches a numeric
    and boolean representation of the binary state.

    :raises Exception: if the network read fails
    """
    try:
        res = self.properties.device.properties.network.read(
            "{} {} {} presentValue".format(
                self.properties.device.properties.address,
                self.properties.type,
                str(self.properties.address),
            )
        )
        self._trend(res)
    except Exception as error:
        # Chain the original failure instead of masking it entirely.
        raise Exception(
            "Problem reading : {}".format(self.properties.name)
        ) from error
    if res == "inactive":
        self._key = 0
        self._boolKey = False
    else:
        self._key = 1
        self._boolKey = True
    return res
def opendocs(where='index', how='default'):
    """Rebuild documentation and open it in your browser.

    Use the `how` argument to specify how it should be opened:

    `d` or `default`: Open in new tab or new window, using the default
    method of your browser.

    `t` or `tab`: Open documentation in new tab.

    `n`, `w` or `window`: Open documentation in new window.
    """
    import webbrowser
    docs_dir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), 'docs')
    index = os.path.join(docs_dir, '_build/html/%s.html' % where)
    builddocs('html')
    url = 'file://%s' % os.path.abspath(index)
    # Dispatch table replaces the original if/elif chain.
    openers = {
        'd': webbrowser.open, 'default': webbrowser.open,
        't': webbrowser.open_new_tab, 'tab': webbrowser.open_new_tab,
        'n': webbrowser.open_new, 'w': webbrowser.open_new,
        'window': webbrowser.open_new,
    }
    opener = openers.get(how)
    if opener is not None:
        opener(url)
def _get_title(self, prop, main_infos, info_dict):
result = main_infos.get('label')
if result is None:
result = info_dict.get('colanderalchemy', {}).get('title')
if result is None:
result = prop.key
return result | Return the title configured as in colanderalchemy |
def _ReferenceFromSerialized(serialized):
    """Construct a Reference from a serialized Reference.

    :param serialized: the wire-format Reference, as str or bytes
    :raises TypeError: if *serialized* is not a string type
    """
    # The original used the Python 2 ``basestring``/``unicode`` names,
    # which do not exist on Python 3; accept str/bytes and encode text.
    if not isinstance(serialized, (str, bytes)):
        raise TypeError('serialized must be a string; received %r' % serialized)
    if isinstance(serialized, str):
        serialized = serialized.encode('utf8')
    return entity_pb.Reference(serialized)
def set_text(self, text="YEAH."):
    """Sets the current value of the text box.

    Only touches the underlying widget when the value actually changes;
    returns self for chaining.
    """
    # The original converted ``text`` to str twice and used ``not ==``.
    value = str(text)
    if value != self.get_text():
        self._widget.setText(value)
    return self
def set_computer_name(name):
    """Set the Windows computer name.

    Args:
        name (str):
            The new name to give the computer. Requires a reboot to take effect.

    Returns:
        dict:
            Returns a dictionary containing the old and new names if successful.
            ``False`` if not.

    CLI Example:

    .. code-block:: bash

        salt 'minion-id' system.set_computer_name 'DavesComputer'
    """
    if six.PY2:
        name = _to_unicode(name)
    ok = windll.kernel32.SetComputerNameExW(
        win32con.ComputerNamePhysicalDnsHostname, name)
    if not ok:
        return False
    ret = {'Computer Name': {'Current': get_computer_name()}}
    pending = get_pending_computer_name()
    if pending not in (None, False):
        ret['Computer Name']['Pending'] = pending
    return ret
async def delete(
    self, name: str, *, force: bool = False, noprune: bool = False
) -> List:
    """Remove an image along with any untagged parent images
    that were referenced by that image.

    Args:
        name: name/id of the image to delete
        force: remove the image even if it is being used
            by stopped containers or has other tags
        noprune: don't delete untagged parent images

    Returns:
        List of deleted images
    """
    return await self.docker._query_json(
        "images/{name}".format(name=name),
        "DELETE",
        params={"force": force, "noprune": noprune},
    )
def rename_db_ref(stmts_in, ns_from, ns_to, **kwargs):
    """Rename an entry in the db_refs of each Agent.

    This is particularly useful when old Statements in pickle files
    need to be updated after a namespace was changed such as
    'BE' to 'FPLX'.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements whose Agents' db_refs need to be changed
    ns_from : str
        The namespace identifier to replace
    ns_to : str
        The namespace identifier to replace to
    save : Optional[str]
        The name of a pickle file to save the results (stmts_out) into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of Statements with Agents' db_refs changed.
    """
    logger.info('Remapping "%s" to "%s" in db_refs on %d statements...' %
                (ns_from, ns_to, len(stmts_in)))
    # Operate on deep copies so the inputs are left untouched.
    stmts_out = [deepcopy(st) for st in stmts_in]
    for stmt in stmts_out:
        for agent in stmt.agent_list():
            if agent is None:
                continue
            if ns_from in agent.db_refs:
                agent.db_refs[ns_to] = agent.db_refs.pop(ns_from)
    save_path = kwargs.get('save')
    if save_path:
        dump_statements(stmts_out, save_path)
    return stmts_out
def page_should_contain_element(self, locator, loglevel='INFO'):
    """Verifies that current page contains `locator` element.

    If this keyword fails, it automatically logs the page source
    using the log level specified with the optional `loglevel` argument.
    Giving `NONE` as level disables logging.
    """
    if self._is_element_present(locator):
        self._info("Current page contains element '%s'." % locator)
        return
    self.log_source(loglevel)
    raise AssertionError("Page should have contained element '%s' "
                         "but did not" % locator)
def transform_using_this_method(original_sample):
    """This function implements a log transformation on the data.

    Returns a copy of *original_sample* whose 'Y2-A' channel has been
    log-transformed, with NaN rows dropped.
    """
    sample = original_sample.copy()
    data = sample.data
    data['Y2-A'] = log(data['Y2-A'])
    sample.data = data.dropna()
    return sample
def snapshot_agents(self, force=False):
    """Snapshot agents if the number of entries since the last snapshot
    is greater than 1000. Use force=True to override."""
    for agent in self._agents:
        agent.check_if_should_snapshot(force)
def react_to_event(self, event):
    """Check whether the given event should be handled.

    Checks whether the editor widget has the focus and whether the selected
    state machine corresponds to the state machine of this editor.

    :param event: GTK event object
    :return: True if the event should be handled, else False
    :rtype: bool
    """
    if not react_to_event(self.view, self.view.editor, event):
        return False
    selected_id = rafcon.gui.singleton.state_machine_manager_model.selected_state_machine_id
    return selected_id == self.model.state_machine.state_machine_id
def _pidExists(pid):
assert pid > 0
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
else:
raise
else:
return True | This will return True if the process associated with pid is still running on the machine.
This is based on stackoverflow question 568271.
:param int pid: ID of the process to check for
:return: True/False
:rtype: bool |
def get_user_subadmin_groups(self, user_name):
    """Get a list of subadmin groups associated to a user.

    :param user_name: name of user
    :returns: list of subadmin groups
    :raises: HTTPResponseError in case an HTTP error status was returned
    """
    res = self._make_ocs_request(
        'GET',
        self.OCS_SERVICE_CLOUD,
        'users/' + user_name + '/subadmins',
    )
    if res.status_code != 200:
        raise HTTPResponseError(res)
    tree = ET.fromstring(res.content)
    self._check_ocs_status(tree, [100])
    return tree.find('data')
def set(self, varname, value, idx=0, units=None):
    """Set a variable value.

    Optionally converts *value* from *units* to the variable's native
    units, and clamps non-finite or out-of-range floats to 0.
    """
    if varname not in self.mapping.vars:
        raise fgFDMError('Unknown variable %s' % varname)
    var = self.mapping.vars[varname]
    if idx >= var.arraylength:
        raise fgFDMError('index of %s beyond end of array idx=%u arraylength=%u' % (
            varname, idx, var.arraylength))
    if units:
        value = self.convert(value, units, var.units)
    # The wire format cannot represent inf/nan or values beyond
    # single-precision float range, so clamp them to 0.
    if math.isinf(value) or math.isnan(value) or math.fabs(value) > 3.4e38:
        value = 0
    self.values[var.index + idx] = value
def scan(self, t, dt=None, aggfunc=None):
    """Returns the spectrum from a specific time.

    Parameters
    ----------
    t : float
        Time at which to extract the spectrum.
    dt : float, optional
        When given, aggregate all scans in the window [t, t + dt].
    aggfunc : callable, optional
        Aggregation applied over the window; defaults to a column sum.
    """
    start = (np.abs(self.index - t)).argmin()
    if dt is None:
        intensities = self.values[start, :].copy()
    else:
        stop = (np.abs(self.index - t - dt)).argmin()
        start, stop = min(start, stop), max(start, stop)
        window = self.values[start:stop + 1, :].copy()
        intensities = window.sum(axis=0) if aggfunc is None else aggfunc(window)
    # Sparse rows are densified to a flat vector.
    if isinstance(intensities, scipy.sparse.spmatrix):
        intensities = intensities.toarray()[0]
    return Scan(self.columns, intensities)
def scrape(url, params=None, user_agent=None):
    """Scrape a URL optionally with parameters.

    This is effectively a wrapper around urllib2.urlopen.

    :param url: URL to fetch
    :param params: optional mapping sent as a form-encoded POST body
    :param user_agent: optional User-Agent header value
    :return: the raw response body
    """
    headers = {}
    if user_agent:
        headers['User-Agent'] = user_agent
    # The original built ``data`` with the ``and/or`` ternary trick and
    # passed urlencode's text straight to urlopen; on Python 3 the request
    # body must be bytes, so encode the form data.
    if params:
        data = six.moves.urllib.parse.urlencode(params).encode('utf-8')
    else:
        data = None
    req = six.moves.urllib.request.Request(url, data=data, headers=headers)
    f = six.moves.urllib.request.urlopen(req)
    try:
        return f.read()
    finally:
        # Ensure the connection is released even if read() raises.
        f.close()
async def workerType(self, *args, **kwargs):
    """Get Worker Type.

    Retrieve a copy of the requested worker type definition.
    This copy contains a lastModified field as well as the worker
    type name. As such, it will require manipulation to be able to
    use the results of this method to submit date to the update
    method.

    This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``

    This method is ``stable``
    """
    info = self.funcinfo["workerType"]
    return await self._makeApiCall(info, *args, **kwargs)
def register(self, name, asymmetric=False):
    """Decorator for registering a measure with PyPhi.

    Args:
        name (string): The name of the measure.

    Keyword Args:
        asymmetric (boolean): ``True`` if the measure is asymmetric.
    """
    def register_func(func):
        # Track asymmetric measures separately before storing.
        if asymmetric:
            self._asymmetric.append(name)
        self.store[name] = func
        return func
    return register_func
def stop(self):
    """If the manager is running, tell it to stop its process."""
    res = self.send_request('manager/stop', post=True)
    if res.status_code != 200:
        raise UnexpectedResponse(
            'Attempted to stop manager. {res_code}: {res_text}'.format(
                res_code=res.status_code,
                res_text=res.text,
            )
        )
    if settings.VERBOSITY >= verbosity.PROCESS_STOP:
        print('Stopped {}'.format(self.get_name()))
    # Give the manager a brief moment to shut down before returning.
    time.sleep(0.05)
def _extractErrorString(request):
errorStr = ""
tag = None
try:
root = ET.fromstring(request.text.encode('utf-8'))
tag = root[0][0]
except:
return errorStr
for element in tag.getiterator():
tagName = element.tag.lower()
if tagName.endswith("string"):
errorStr += element.text + " "
elif tagName.endswith("description"):
errorStr += element.text + " "
return errorStr | Extract error string from a failed UPnP call.
:param request: the failed request result
:type request: requests.Response
:return: an extracted error text or empty str
:rtype: str |
def getEmailTemplate(request):
    """This function handles the Ajax call made when a user wants a
    specific email template."""
    if request.method != 'POST':
        return HttpResponse(_('Error, no POST data.'))
    if not hasattr(request, 'user'):
        return HttpResponse(_('Error, not authenticated.'))
    template_id = request.POST.get('template')
    if not template_id:
        return HttpResponse(_("Error, no template ID provided."))
    try:
        this_template = EmailTemplate.objects.get(id=template_id)
    except ObjectDoesNotExist:
        return HttpResponse(_("Error getting template."))
    # Template-level access restrictions: group membership and visibility.
    restricted_group = this_template.groupRequired
    if restricted_group and restricted_group not in request.user.groups.all():
        return HttpResponse(_("Error, no permission to access this template."))
    if this_template.hideFromForm:
        return HttpResponse(_("Error, no permission to access this template."))
    return JsonResponse({
        'subject': this_template.subject,
        'content': this_template.content,
        'html_content': this_template.html_content,
        'richTextChoice': this_template.richTextChoice,
    })
def update_text(self, mapping):
    """Iterate over nodes, replacing text/tail according to *mapping*.

    :raises KeyError: if no occurrence of any mapping key was found.
    """
    found = False
    for node in self._page.iter("*"):
        if not (node.text or node.tail):
            continue
        for old, new in mapping.items():
            if node.text and old in node.text:
                node.text = node.text.replace(old, new)
                found = True
            if node.tail and old in node.tail:
                node.tail = node.tail.replace(old, new)
                found = True
    if not found:
        raise KeyError("Updating text failed with mapping:{}".format(mapping))
def style(theme=None, context='paper', grid=True, gridlines=u'-', ticks=False,
          spines=True, fscale=1.2, figsize=(8., 7.)):
    """Main function for styling matplotlib according to theme.

    ::Arguments::
        theme (str): 'oceans16', 'grade3', 'chesterish', 'onedork',
            'monokai', 'solarizedl', 'solarizedd'. If no theme name is
            supplied the currently installed notebook theme will be used.
        context (str): 'paper' (Default), 'notebook', 'talk', or 'poster'
        grid (bool): removes axis grid lines if False
        gridlines (str): set grid linestyle (e.g., '--' for dashed grid)
        ticks (bool): make major x and y ticks visible
        spines (bool): removes x (bottom) and y (left) axis spines if False
        fscale (float): scale font size for axes labels, legend, etc.
        figsize (tuple): default figure size of matplotlib figures
    """
    rcdict = set_context(context=context, fscale=fscale, figsize=figsize)
    chosen_theme = theme if theme is not None else infer_theme()
    set_style(rcdict, theme=chosen_theme, grid=grid, gridlines=gridlines,
              ticks=ticks, spines=spines)
def is_subdir(a, b):
    """Return true if a is a subdirectory of b (or b itself)."""
    a_abs = os.path.abspath(a)
    b_abs = os.path.abspath(b)
    return os.path.commonpath([a_abs, b_abs]) == b_abs
def _state_invalid(self):
    """If the state is invalid for the transition, return details on what
    didn't match.

    :return: Tuple of (state manager, current state, label for current state)
    """
    for manager, conditions in self.statetransition.transitions.items():
        current = getattr(self.obj, manager.propname)
        if conditions['from'] is None:
            # No source-state restriction: the transition is allowed.
            ok = True
        else:
            checker = conditions['from'].get(current)
            ok = checker and checker(self.obj)
        if ok and conditions['if']:
            # Additional guard conditions must all hold.
            ok = all(cond(self.obj) for cond in conditions['if'])
        if not ok:
            return manager, current, manager.lenum.get(current)
def getfieldindex(data, commdct, objkey, fname):
    """Given objkey and fieldname, return its index.

    NOTE(review): like the original, when no field matches *fname* this
    returns the index of the last entry scanned.
    """
    objindex = data.dtls.index(objkey)
    objcomm = commdct[objindex]
    i_index = 0
    for i_index, item in enumerate(objcomm):
        # ``item`` may lack a 'field' key; ``.get`` replaces the
        # original's try/except-KeyError-pass with an unused error name.
        if item.get('field') == [fname]:
            break
    return i_index
def __parseParameters(self):
    """Parses the parameters of data into Parameter objects."""
    self.__parameters = [
        Parameter(raw) for raw in self.__data['parameters']
    ]
def validate(self):
    """Simultaneously checks for loops and unreachable nodes.

    Returns True when the graph is valid, otherwise a list of messages
    describing unreachable nodes and any loop detected.
    """
    msg = []
    previously_seen = set()
    currently_seen = {1}
    problemset = set()
    # Walk from node 1; revisiting a node implies a loop.
    while currently_seen:
        node = currently_seen.pop()
        if node in previously_seen:
            problemset.add(node)
            continue
        previously_seen.add(node)
        self.add_descendants(node, currently_seen)
    unreachable = self.all_nodes - previously_seen
    if unreachable:
        msg.append("%d unreachable nodes: " % len(unreachable))
        msg.extend(str(node) for node in unreachable)
    if problemset:
        msg.append("Loop involving %d nodes" % len(problemset))
        msg.extend(str(node) for node in problemset)
    return msg if msg else True
def _cast_to_pod(val):
bools = {"True": True, "False": False}
if val in bools:
return bools[val]
try:
return int(val)
except ValueError:
try:
return float(val)
except ValueError:
return tf.compat.as_text(val) | Try cast to int, float, bool, str, in that order. |
def add_model(self, model):
    """Add a model to the document.

    :raises ValueError: if a model with the same identity already exists.
    """
    # ``in self._models`` replaces the original's needless ``.keys()`` call,
    # and the guard clause replaces the inverted if/else.
    if model.identity in self._models:
        raise ValueError("{} has already been defined".format(model.identity))
    self._models[model.identity] = model
def get_full_url(self, url):
    """Get full url including any additional parameters.

    Args:
        url (str): URL for which to get full url

    Returns:
        str: Full url including any additional parameters
    """
    prepared = self.session.prepare_request(Request('GET', url))
    return prepared.url
def os_info():
    """Get OS info."""
    slack, ver = slack_ver()
    mir = mirror()
    stype = ""
    if mir:
        stype = "Current" if "current" in mir else "Stable"
    uname = os.uname()
    return ("User: {0}\n"
            "OS: {1}\n"
            "Version: {2}\n"
            "Type: {3}\n"
            "Arch: {4}\n"
            "Kernel: {5}\n"
            "Packages: {6}".format(getpass.getuser(), slack, ver, stype,
                                   uname[4], uname[2], ins_packages()))
def downsample_with_striding(array, factor):
    """Downsample array by factor using striding.

    @return: The downsampled array, of the same type as the input.
    """
    strides = tuple(slice(None, None, step) for step in factor)
    return array[strides]
def search_text(self, text, encoding="utf-16le",
                caseSensitive=False,
                minAddr=None,
                maxAddr=None):
    """Search for the given text within the process memory.

    @type  text: str or compat.unicode
    @param text: Text to search for.
    @type  encoding: str
    @param encoding: (Optional) Encoding for the text parameter.
        Only used when the text to search for is a Unicode string.
        Don't change unless you know what you're doing!
    @type  caseSensitive: bool
    @param caseSensitive: C{True} of the search is case sensitive,
        C{False} otherwise.
    @type  minAddr: int
    @param minAddr: (Optional) Start the search at this memory address.
    @type  maxAddr: int
    @param maxAddr: (Optional) Stop the search at this memory address.
    @rtype: iterator of tuple( int, str )
    @return: An iterator of (address where the pattern was found,
        matching text) tuples.
    @raise WindowsError: An error occurred when querying or reading the
        process memory.
    """
    pattern = TextPattern(text, encoding, caseSensitive)
    for address, _size, data in Search.search_process(self, pattern,
                                                      minAddr, maxAddr):
        yield address, data
def join(self, *args):
    """Returns the arguments in the list joined by the first argument.

    FIRST,JOIN_BY,ARG_1,...,ARG_N
    %{JOIN: ,A,...,F} -> 'A B C ... F'
    """
    joiner, *rest = args
    self.random.shuffle(rest)
    return joiner.join(rest)
def is_processed(self, db_versions):
    """Check if version is already applied in the database.

    :param db_versions: version records from the database
    :return: True if this version's number is among the completed ones
    """
    done_numbers = (v.number for v in db_versions if v.date_done)
    return self.number in done_numbers
def load():
    """Check available plugins and attempt to import them."""
    plugins = []
    for filename in os.listdir(PLUGINS_PATH):
        # Consider only importable python modules, skipping private ones.
        if not filename.endswith(".py") or filename.startswith("_"):
            continue
        if not os.path.isfile(os.path.join(PLUGINS_PATH, filename)):
            continue
        plugin = filename[:-3]
        if plugin in FAILED_PLUGINS:
            continue
        try:
            __import__(PLUGINS.__name__, {}, {}, [plugin])
        except (ImportError, SyntaxError) as error:
            message = "Failed to import {0} plugin ({1})".format(plugin, error)
            # Warn loudly only when the plugin is actually configured.
            if Config().sections(kind=plugin):
                log.warn(message)
            else:
                log.debug(message)
            FAILED_PLUGINS.append(plugin)
        else:
            plugins.append(plugin)
            log.debug("Successfully imported {0} plugin".format(plugin))
    return plugins
def get_prefix_envname(self, name, log=False):
    """Return full prefix path of environment defined by `name`."""
    prefix = self.ROOT_PREFIX if name == 'root' else None
    # NOTE(review): keeps the original semantics of returning the LAST
    # environment whose basename matches (no early break).
    for path in self.get_envs():
        if basename(path) == name:
            prefix = path
    return prefix
def check_config(config):
    """Check config file inputs and overwrite bad values with the defaults.

    :raises ValueError: if a required key is missing.
    """
    for key in ['number_earthquakes']:
        if key not in config:
            raise ValueError('For Kijko Nonparametric Gaussian the key %s '
                             'needs to be set in the configuation' % key)
    # (key, bad-value predicate, fallback when absent, default) table
    corrections = (
        ('tolerance', lambda v: v <= 0.0, 0.0, 0.05),
        ('maximum_iterations', lambda v: v < 1, 0, 100),
        ('number_samples', lambda v: v < 2, 0, 51),
    )
    for key, is_bad, fallback, default in corrections:
        if is_bad(config.get(key, fallback)):
            config[key] = default
    return config
def get(self, key):
    """Regenerates a MultiValueDict instance containing the files related
    to all file states stored for the given key.
    """
    files = MultiValueDict()
    files_states = self.backend.get(key)
    if not files_states:
        return files
    for name, state in files_states.items():
        buf = BytesIO()
        buf.write(state['content'])
        # Large payloads go to a temporary-file wrapper, small ones stay
        # in memory.  (The original rebuilt and re-filled the BytesIO a
        # second time in the in-memory branch; once is enough.)
        if state['size'] > settings.FILE_UPLOAD_MAX_MEMORY_SIZE:
            upload = TemporaryUploadedFile(
                state['name'],
                state['content_type'],
                state['size'],
                state['charset'],
            )
            upload.file = buf
        else:
            upload = InMemoryUploadedFile(
                file=buf,
                field_name=name,
                name=state['name'],
                content_type=state['content_type'],
                size=state['size'],
                charset=state['charset'],
            )
        files[name] = upload
        # Rewind so consumers read from the start of the payload.
        upload.file.seek(0)
    return files
def rename_tabs_after_change(self, given_name):
    """Rename tabs after a change in name."""
    client = self.get_current_client()
    # A name is usable only when no other client already carries it.
    repeated = any(
        id(other) != id(client) and other.given_name == given_name
        for other in self.get_clients()
    )
    # The original evaluated this compound condition twice; compute once.
    renamable = client.allow_rename and u'/' not in given_name and not repeated
    self.rename_client_tab(client, given_name if renamable else None)
    if renamable:
        for other in self.get_related_clients(client):
            self.rename_client_tab(other, given_name)
def longest_run(da, dim='time'):
    """Return the length of the longest consecutive run of True values.

    Parameters
    ----------
    arr : N-dimensional array (boolean)
        Input array
    dim : Xarray dimension (default = 'time')
        Dimension along which to calculate consecutive run

    Returns
    -------
    N-dimensional array (int)
        Length of longest run of True values along dimension
    """
    run_lengths = rle(da, dim=dim)
    return run_lengths.max(dim=dim)
def get_piis(query_str):
    """Search ScienceDirect through the API for articles and return PIIs.

    Note that ScienceDirect has a limitation in which a maximum of 6,000
    PIIs can be retrieved for a given search and therefore this call is
    internally broken up into multiple queries by a range of years and the
    results are combined.

    Parameters
    ----------
    query_str : str
        The query string to search with

    Returns
    -------
    piis : list[str]
        The list of PIIs identifying the papers returned by the search
    """
    # NOTE(review): range() stops one short of the current year — confirm
    # whether excluding the current year is intentional.
    years = range(1960, datetime.datetime.now().year)
    return flatten([get_piis_for_date(query_str, year) for year in years])
def _get_string_match_value(self, string, string_match_type):
    """Gets the match value for the given match type.

    Returns the raw string for EXACT, a compiled regex for the other
    known types, and (implicitly) None for unrecognised types.
    """
    if string_match_type == Type(**get_type_data('EXACT')):
        return string
    if string_match_type == Type(**get_type_data('IGNORECASE')):
        return re.compile('^' + string, re.I)
    if string_match_type == Type(**get_type_data('WORD')):
        return re.compile('.*' + string + '.*')
    if string_match_type == Type(**get_type_data('WORDIGNORECASE')):
        return re.compile('.*' + string + '.*', re.I)
def validate_json(f):
    """Validate that the call is JSON.

    Responds with a 400 error payload when the request body is absent or
    not parseable as JSON.
    """
    @wraps(f)
    def wrapper(*args, **kw):
        instance = args[0]

        def reject():
            # Shared error path for both missing and malformed JSON
            # (the original duplicated this block verbatim).
            ret_dict = instance._create_ret_object(instance.FAILURE, None,
                                                   True, instance.MUST_JSON)
            instance.logger.error(instance.MUST_JSON)
            return jsonify(ret_dict), 400

        try:
            if request.get_json() is None:
                return reject()
        except BadRequest:
            return reject()
        instance.logger.debug("JSON is valid")
        return f(*args, **kw)
    return wrapper
def _parse_ipmi_nic_capacity(nic_out):
if (("Device not present" in nic_out)
or ("Unknown FRU header" in nic_out) or not nic_out):
return None
capacity = None
product_name = None
data = nic_out.split('\n')
for item in data:
fields = item.split(':')
if len(fields) > 1:
first_field = fields[0].strip()
if first_field == "Product Name":
product_name = ':'.join(fields[1:])
break
if product_name:
product_name_array = product_name.split(' ')
for item in product_name_array:
if 'Gb' in item:
capacity_int = item.strip('Gb')
if capacity_int.isdigit():
capacity = item
return capacity | Parse the FRU output for NIC capacity
Parses the FRU output. Seraches for the key "Product Name"
in FRU output and greps for maximum speed supported by the
NIC adapter.
:param nic_out: the FRU output for NIC adapter.
:returns: the max capacity supported by the NIC adapter. |
def make_hash_id():
    """Compute the `datetime.now` based SHA-1 hash of a string.

    :return: Returns the sha1 hash as a string.
    :rtype: str
    """
    stamp = datetime.datetime.now().strftime(DATETIME_FORMAT)
    return hashlib.sha1(stamp.encode('utf-8')).hexdigest()
def replace_model(refwcs, newcoeffs):
    """Replace the distortion model in a current WCS with a new model.

    Start by creating a linear WCS, then apply the new coefficients.

    NOTE(review): the original ended with ``refwcs = outwcs.deepcopy()``,
    which only rebound the local name and never reached the caller; the
    dead assignment has been dropped.
    """
    print('WARNING:')
    print(' Replacing existing distortion model with one')
    print(' not necessarily matched to the observation!')
    wcslin = stwcs.distortion.utils.undistortWCS(refwcs)
    outwcs = refwcs.deepcopy()
    outwcs.wcs.cd = wcslin.wcs.cd
    outwcs.wcs.set()
    outwcs.setOrient()
    outwcs.setPscale()
    add_model(outwcs, newcoeffs)
    apply_model(outwcs)
def disablingBuidCache(self):
    """Disable and invalidate the layer buid cache for migration."""
    # Swap in a zero-size LRU (drops all entries), yield control to the
    # caller, then restore a full-size cache afterwards.
    self.buidcache = s_cache.LruDict(0)
    yield
    self.buidcache = s_cache.LruDict(BUID_CACHE_SIZE)
def _fold_line(self, line):
if len(line) <= self._cols:
self._output_file.write(line)
self._output_file.write(self._line_sep)
else:
pos = self._cols
self._output_file.write(line[0:self._cols])
self._output_file.write(self._line_sep)
while pos < len(line):
self._output_file.write(b' ')
end = min(len(line), pos + self._cols - 1)
self._output_file.write(line[pos:end])
self._output_file.write(self._line_sep)
pos = end | Write string line as one or more folded lines. |
def event_return(events):
    """Return events to a postgres server.

    Requires that configuration be enabled via the 'event_return'
    option in the master config.

    :param events: list of event dicts, each with optional 'tag' and 'data'.
    """
    conn = _get_conn()
    if conn is None:
        return None
    cur = conn.cursor()
    # NOTE(review): the original SQL string literal was lost (the line read
    # bare ``sql =``, a syntax error). Reconstructed from the three bound
    # parameters (tag, serialized data, master id) -- confirm table and
    # column names against the actual salt_events schema.
    sql = '''INSERT INTO salt_events (tag, data, master_id, alter_time)
             VALUES (%s, %s, %s, NOW())'''
    for event in events:
        tag = event.get('tag', '')
        data = event.get('data', '')
        cur.execute(sql, (tag, salt.utils.json.dumps(data), __opts__['id']))
    _close_conn(conn)
def get_snippet_content(snippet_name, **format_kwargs):
    """Load the content of a snippet file located in ``SNIPPETS_ROOT``.

    :param snippet_name: base name of the snippet (``.snippet`` is appended).
    :param format_kwargs: optional ``str.format`` substitutions applied to
        the loaded content.
    :raises ValueError: if no snippet file with that name exists.
    """
    filename = snippet_name + '.snippet'
    snippet_path = os.path.join(SNIPPETS_ROOT, filename)
    if not os.path.isfile(snippet_path):
        raise ValueError('could not find snippet with name ' + filename)
    content = helpers.get_file_content(snippet_path)
    return content.format(**format_kwargs) if format_kwargs else content
def from_array(array):
    """Deserialize a new ChosenInlineResult from a given dictionary.

    :return: new ChosenInlineResult instance, or None for an empty input.
    :rtype: ChosenInlineResult
    """
    if array is None or not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    from ..receivable.media import Location
    from ..receivable.peer import User
    location = array.get('location')
    inline_message_id = array.get('inline_message_id')
    data = {
        'result_id': u(array.get('result_id')),
        'from_peer': User.from_array(array.get('from')),
        'query': u(array.get('query')),
        'location': Location.from_array(location) if location is not None else None,
        'inline_message_id': u(inline_message_id) if inline_message_id is not None else None,
        '_raw': array,
    }
    return ChosenInlineResult(**data)
def get_files(dirname=None, pattern='*.*', recursive=True):
    """Get all file names within a given directory whose names match a
    given pattern.

    Parameters
    ----------
    dirname : str | None
        Directory containing the data files.
        If None is given, open a dialog box.
    pattern : str
        Return only files whose names match the specified pattern.
    recursive : bool
        True : Search recursively within all sub-directories.
        False : Search only in given directory.

    Returns
    -------
    matches : list
        List of file names (including full path).
    """
    if dirname is None:
        from FlowCytometryTools.gui import dialogs
        dirname = dialogs.select_directory_dialog('Select a directory')
    if not recursive:
        return glob.glob(os.path.join(dirname, pattern))
    matches = []
    for root, _dirs, filenames in os.walk(dirname):
        matches.extend(os.path.join(root, fname)
                       for fname in fnmatch.filter(filenames, pattern))
    return matches
def printAllElements(self):
    """Print an indexed table of all modeled elements."""
    print("{id:<3s}: {name:<12s} {type:<10s} {classname:<10s}"
          .format(id='ID', name='Name', type='Type', classname='Class Name'))
    # enumerate() replaces the original hand-maintained counter.
    for cnt, e in enumerate(self._lattice_eleobjlist, start=1):
        print("{cnt:>03d}: {name:<12s} {type:<10s} {classname:<10s}"
              .format(cnt=cnt, name=e.name, type=e.typename,
                      classname=e.__class__.__name__))
def find_nearest(self, hex_code, system, filter_set=None):
    """Find a color name that's most similar to a given sRGB hex code.

    In normalization terms, this method implements "normalize an arbitrary
    sRGB value to a well-defined color name".

    Args:
        hex_code (string): sRGB hex code, e.g. ``"54e6e4"``.
        system (string): The color system. Currently, ``"en"`` is the only
            default system.
        filter_set (iterable of string, optional): Limits the output choices
            to fewer color names. The names (e.g. ``["black", "white"]``)
            must be present in the given system. If omitted, all color names
            of the system are considered. Defaults to None.

    Returns:
        A named tuple with the members `color_name` and `distance`.

    Raises:
        ValueError: If argument `system` is not a registered color system.
    """
    if system not in self._colors_by_system_hex:
        raise ValueError(
            "%r is not a registered color system. Try one of %r"
            % (system, self._colors_by_system_hex.keys())
        )
    hex_code = hex_code.lower().strip()
    # Exact hex match short-circuits the expensive perceptual search.
    if hex_code in self._colors_by_system_hex[system]:
        color_name = self._colors_by_system_hex[system][hex_code]
        if filter_set is None or color_name in filter_set:
            return FindResult(color_name, 0)
    colors = self._colors_by_system_lab[system]
    if filter_set is not None:
        # BUGFIX: build the membership set ONCE. The original evaluated
        # set(filter_set) inside the generator for every candidate color
        # (O(n*m)), and would also exhaust a generator-valued filter_set.
        allowed = set(filter_set)
        colors = (pair for pair in colors if pair[1] in allowed)
    lab_color = _hex_to_lab(hex_code)
    min_distance = sys.float_info.max
    min_color_name = None
    for current_lab_color, current_color_name in colors:
        distance = colormath.color_diff.delta_e_cie2000(lab_color, current_lab_color)
        if distance < min_distance:
            min_distance = distance
            min_color_name = current_color_name
    return FindResult(min_color_name, min_distance)
def filter(self, filter_fn=None, desc=None, **kwargs):
    """Return a copy of this query, with some values removed.

    Example usages:

    .. code:: python

        # Returns a query that matches even numbers
        q.filter(filter_fn=lambda x: x % 2)

        # Returns a query that matches elements with el.description == "foo"
        q.filter(description="foo")

    Keyword Args:
        filter_fn (callable): If specified, a function that accepts one
            argument (the element) and returns a boolean indicating whether
            to include that element in the results.
        kwargs: Specify attribute values that an element must have to be
            included in the results.
        desc (str): A description of the filter, for use in log messages.
            Defaults to the name of the filter function or attribute.

    Raises:
        TypeError: neither or both of `filter_fn` and `kwargs` are provided.
    """
    # Exactly one of the two filtering mechanisms must be supplied.
    if filter_fn is not None and kwargs:
        raise TypeError('Must supply either a filter_fn or attribute filter parameters to filter(), but not both.')
    if filter_fn is None and not kwargs:
        raise TypeError('Must supply one of filter_fn or one or more attribute filter parameters to filter().')
    if desc is None:
        if filter_fn is not None:
            desc = getattr(filter_fn, '__name__', '')
        elif kwargs:
            desc = u", ".join(u"{}={!r}".format(key, value) for key, value in kwargs.items())
    desc = u"filter({})".format(desc)
    if kwargs:
        # Build a predicate from the attribute criteria; the closure below
        # captures this rebinding of filter_fn.
        def filter_fn(elem):
            return all(
                getattr(elem, filter_key) == filter_value
                for filter_key, filter_value in kwargs.items()
            )
    return self.transform(lambda elems: (e for e in elems if filter_fn(e)), desc=desc)
def _update_transforms(self):
if len(self._fb_stack) == 0:
fb_size = fb_rect = None
else:
fb, origin, fb_size = self._fb_stack[-1]
fb_rect = origin + fb_size
if len(self._vp_stack) == 0:
viewport = None
else:
viewport = self._vp_stack[-1]
self.transforms.configure(viewport=viewport, fbo_size=fb_size,
fbo_rect=fb_rect) | Update the canvas's TransformSystem to correct for the current
canvas size, framebuffer, and viewport. |
def _get_v4_signed_headers(self):
    """Return SigV4-signed headers for an STS get-caller-identity request.

    :raises CerberusClientException: when no AWS credentials can be located.
    """
    if self.aws_session is None:
        creds = session.Session().get_credentials()
    else:
        creds = self.aws_session.get_credentials()
    if creds is None:
        raise CerberusClientException("Unable to locate AWS credentials")
    frozen_creds = creds.get_frozen_credentials()
    body = OrderedDict((('Action', 'GetCallerIdentity'), ('Version', '2011-06-15')))
    sts_url = 'https://sts.{}.amazonaws.com/'.format(self.region)
    request_object = awsrequest.AWSRequest(method='POST', url=sts_url, data=body)
    auth.SigV4Auth(frozen_creds, 'sts', self.region).add_auth(request_object)
    return request_object.headers
def quoteattrs(data):
    """Return the HTML attribute representation of a dict of attributes.

    Values are quoted with ``quoteattr``; pairs are joined with spaces.
    """
    return ' '.join('{}={}'.format(key, quoteattr(value))
                    for key, value in data.items())
def send(self, filenames=None):
    """Send each file to the remote host and archive the sent file locally.

    Optionally records each sent file in the history model.

    :raises TransactionFileSenderError: wrapping any SSH/SFTP client error.
    :returns: the given filenames.
    """
    try:
        with self.ssh_client.connect() as ssh_conn:
            with self.sftp_client.connect(ssh_conn) as sftp_conn:
                for filename in filenames:
                    sftp_conn.copy(filename=filename)
                    self.archive(filename=filename)
                    if self.update_history_model:
                        self.update_history(filename=filename)
    except (SSHClientError, SFTPClientError) as e:
        # Both handlers were identical; a single tuple clause preserves
        # the exact wrapping and chaining behavior.
        raise TransactionFileSenderError(e) from e
    return filenames
def edit(self, pk):
    """Simple hack to redirect to the explore view after saving."""
    resp = super(TableModelView, self).edit(pk)
    # A string response means the parent view rendered a page
    # (e.g. the edit form again) -- pass it straight through.
    return resp if isinstance(resp, str) else redirect(
        '/superset/explore/table/{}/'.format(pk))
def display_upstream_structure(structure_dict):
    """Display the pipeline structure in the Jupyter notebook.

    Args:
        structure_dict (dict): dict returned by
            :func:`~steppy.base.Step.upstream_structure`.
    """
    graph = _create_graph(structure_dict)
    display(Image(graph.create_png()))
def get_ticket(self, ticket_id):
    """Fetch the ticket for the given ticket ID."""
    payload = self._api._get('tickets/%d' % ticket_id)
    return Ticket(**payload)
def schedule(identifier, **kwargs):
    """Schedule a harvest job to run periodically."""
    source = actions.schedule(identifier, **kwargs)
    log.info('Scheduled {source.name} with the following crontab: {cron}'.format(
        source=source, cron=source.periodic_task.crontab))
def get_word_under_cursor(self):
    """Return the document word under the cursor.

    :return: Word under cursor (empty QString when the previous character
        is not a word character).
    :rtype: QString
    """
    previous = foundations.strings.to_string(self.get_previous_character())
    if not re.match(r"^\w+$", previous):
        return QString()
    cursor = self.textCursor()
    cursor.movePosition(QTextCursor.PreviousWord, QTextCursor.MoveAnchor)
    cursor.movePosition(QTextCursor.EndOfWord, QTextCursor.KeepAnchor)
    return cursor.selectedText()
def instances(cls):
    """Return all instances of this class and subclasses.

    :returns: all instances of the current class and subclasses
    :rtype: list
    :raises: None
    """
    return [obj for obj in cls.allinstances() if isinstance(obj, cls)]
def get_dock_json(self):
    """Return the dock json (ATOMIC_REACTOR_PLUGINS) from the existing
    build json.

    :raises RuntimeError: when "env" is not iterable or lacks the
        ATOMIC_REACTOR_PLUGINS entry.
    """
    env_json = self.build_json['spec']['strategy']['customStrategy']['env']
    try:
        matches = [env for env in env_json
                   if env["name"] == "ATOMIC_REACTOR_PLUGINS"]
    except TypeError:
        raise RuntimeError("\"env\" is not iterable")
    if not matches:
        raise RuntimeError("\"env\" misses key ATOMIC_REACTOR_PLUGINS")
    return json.loads(matches[0]['value'])
def run_migration_list(self, path, migrations, pretend=False):
    """Run a list of migrations as a single batch.

    :type migrations: list
    :type pretend: bool
    """
    if not migrations:
        self._note("<info>Nothing to migrate</info>")
        return
    # All migrations in this call share one batch number.
    batch = self._repository.get_next_batch_number()
    for migration_file in migrations:
        self._run_up(path, migration_file, batch, pretend)
def lookupByName(self, name):
    """Look up an existing predefined domain by name.

    Runs the blocking libvirt lookup in a thread and wraps the result
    in this connection's DomainClass.
    """
    domain = yield queue.executeInThread(self.connection.lookupByName, name)
    return self.DomainClass(self, domain)
def clipandmerge_general_stats_table(self):
    """Take the parsed stats from the ClipAndMerge report and add them to
    the basic stats table at the top of the report.
    """
    merged_col = {
        'title': '% Merged',
        'description': 'Percentage of reads merged',
        'min': 0,
        'max': 100,
        'suffix': '%',
        'scale': 'Greens',
        'format': '{:,.2f}',
    }
    headers = OrderedDict([('percentage', merged_col)])
    self.general_stats_addcols(self.clipandmerge_data, headers)
def keep_everything_scorer(checked_ids):
    """Return every query id and every match id in *checked_ids*.

    :param checked_ids: mapping of query id -> mapping of match id -> score.
    :returns: de-duplicated collection of all query ids followed by all
        match ids, in first-seen order.
    """
    # BUGFIX: in Python 3, dict.keys() returns a view object with no
    # .extend(); collect the ids into a real list instead.
    result = list(checked_ids)
    for matches in checked_ids.values():
        result.extend(matches)
    # dict.fromkeys de-duplicates while preserving first-seen order.
    return list(dict.fromkeys(result))
def trailing_stop_loss(self, accountID, **kwargs):
    """Shortcut to create a Trailing Stop Loss Order in an Account.

    Args:
        accountID : The ID of the Account
        kwargs : The arguments to create a TrailingStopLossOrderRequest

    Returns:
        v20.response.Response containing the results from submitting
        the request
    """
    order_request = TrailingStopLossOrderRequest(**kwargs)
    return self.create(accountID, order=order_request)
def mouse_button_callback(self, window, button, action, mods):
    """Handle GLFW mouse button events and forward them to the example.

    GLFW's 0-based button index is shifted to 1-based; only the left (1)
    and right (2) buttons are forwarded.
    """
    button += 1
    if button not in (1, 2):
        return
    xpos, ypos = glfw.get_cursor_pos(self.window)
    if action == glfw.PRESS:
        self.example.mouse_press_event(xpos, ypos, button)
    else:
        self.example.mouse_release_event(xpos, ypos, button)
def readPhenoFile(pfile, idx=None):
    """Read in a phenotype file.

    :param pfile: root of the file containing the phenotypes as an NxP
        matrix (N = number of samples, P = number of traits); the data is
        read from ``pfile + '.phe'``.
    :param idx: optional comma-separated string of column indices to load.
    :returns: the phenotype matrix, standardized column-wise to zero mean
        and unit variance.
    """
    usecols = None
    # BUGFIX: compare against None with 'is not', not '!='.
    if idx is not None:
        usecols = [int(x) for x in idx.split(',')]
    phenoFile = pfile + '.phe'
    assert os.path.exists(phenoFile), '%s is missing.' % phenoFile
    Y = SP.loadtxt(phenoFile, usecols=usecols)
    # A single selected column comes back 1-D; restore the Nx1 shape.
    if (usecols is not None) and (len(usecols) == 1):
        Y = Y[:, SP.newaxis]
    # Standardize each trait; assumes non-constant columns (std > 0).
    Y -= Y.mean(0)
    Y /= Y.std(0)
    return Y
def static_filename(self, repo: str, branch: str, relative_path: Union[str, Path], *,
                    depth: DepthDefinitionType=1,
                    reference: ReferenceDefinitionType=None
                    ) -> Path:
    """Return an absolute path to where a file from the repo was cloned to.

    :param repo: Repo URL
    :param branch: Branch name
    :param relative_path: Relative path to the requested file
    :param depth: See :meth:`run`
    :param reference: See :meth:`run`
    :return: Absolute path to the file in the target repository
    :raise FileOutOfRangeError: If the relative path leads out of the repository path
    :raise FileNotFoundError: If the file doesn't exist in the repository.
    """
    self.validate_repo_url(repo)
    depth = self.validate_depth(depth)
    reference = self.validate_reference(reference)
    if not isinstance(relative_path, Path):
        relative_path = Path(relative_path)
    _, repo_path = self.get_files(repo, branch, depth=depth, reference=reference)
    # resolve() collapses any '..' components, so a path escaping the
    # repository will not have repo_path among its parents.
    resolved = (repo_path / relative_path).resolve()
    if repo_path not in resolved.parents:
        raise FileOutOfRangeError(f"{relative_path} is not inside the repository.")
    if not resolved.exists():
        raise FileNotFoundError(f"{relative_path} does not exist in the repository.")
    logger.info("Static path for %s is %s", relative_path, resolved)
    return resolved
def recent_update_frequencies(self):
    """Return the recent short-term update frequencies.

    Frequencies are the reciprocals of the gaps between consecutive
    recorded update timestamps; element 0 of the returned list
    corresponds to the most recent update.
    """
    gaps = numpy.diff(self._recent_updates)
    return [1.0 / gap for gap in gaps][::-1]
def _from_dict(cls, _dict):
    """Initialize a DocStructure object from a json dictionary."""
    args = {}
    if 'section_titles' in _dict:
        args['section_titles'] = [
            SectionTitles._from_dict(item)
            for item in _dict.get('section_titles')
        ]
    if 'leading_sentences' in _dict:
        args['leading_sentences'] = [
            LeadingSentence._from_dict(item)
            for item in _dict.get('leading_sentences')
        ]
    return cls(**args)
def tasks(self):
    """Generate a list of all Tasks."""
    response = self.get_request('tasks/')
    return [Task(self, entry['task']) for entry in response]
def ensure_default_namespace(self) -> Namespace:
    """Get or create the BEL default namespace (and its entries)."""
    namespace = self.get_namespace_by_keyword_version(BEL_DEFAULT_NAMESPACE, BEL_DEFAULT_NAMESPACE_VERSION)
    if namespace is not None:
        # Already present -- nothing to create or commit.
        return namespace
    namespace = Namespace(
        name='BEL Default Namespace',
        contact='charles.hoyt@scai.fraunhofer.de',
        keyword=BEL_DEFAULT_NAMESPACE,
        version=BEL_DEFAULT_NAMESPACE_VERSION,
        url=BEL_DEFAULT_NAMESPACE_URL,
    )
    for name in set(chain(pmod_mappings, gmod_mappings, activity_mapping, compartment_mapping)):
        self.session.add(NamespaceEntry(name=name, namespace=namespace))
    self.session.add(namespace)
    self.session.commit()
    return namespace
def address(self) -> str:
    """Generate a random full address.

    :return: Full address.
    """
    fmt = self._data['address_fmt']
    # Street number/name are drawn BEFORE the locale checks on purpose:
    # this preserves the RNG consumption order of the original code.
    st_num = self.street_number()
    st_name = self.street_name()
    if self.locale in SHORTENED_ADDRESS_FMT:
        return fmt.format(st_num=st_num, st_name=st_name)
    if self.locale == 'ja':
        # Japanese addresses use city plus three block numbers instead.
        return fmt.format(
            self.random.choice(self._data['city']),
            *self.random.randints(amount=3, a=1, b=100),
        )
    return fmt.format(st_num=st_num, st_name=st_name,
                      st_sfx=self.street_suffix())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.