code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def get_command(self, command_input, docker_object=None, buffer=None, size=None):
    """Return a command instance ready to be executed.

    :param command_input: str, command name and its args: "command arg arg2=val opt"
    :param docker_object: object the command operates on
    :param buffer: buffer the command operates on
    :param size: tuple, so we can call urwid.keypress(size, ...)
    :return: instance of Command, or None for empty input
    :raises NoSuchCommand: when the command name is unknown
    """
    logger.debug("get command for command input %r", command_input)
    if not command_input:
        return
    # a leading "/" is itself the command name; the rest is its arguments
    if command_input[0] in ["/"]:
        command_name = command_input[0]
        raw_args = shlex.split(command_input[1:])
    else:
        tokens = shlex.split(command_input)
        command_name, raw_args = tokens[0], tokens[1:]
    if command_name not in commands_mapping:
        logger.info("no such command: %r", command_name)
        raise NoSuchCommand("There is no such command: %s" % command_name)
    command = commands_mapping[command_name](
        ui=self.ui, docker_backend=self.docker_backend,
        docker_object=docker_object, buffer=buffer, size=size)
    command.process_args(raw_args)
    return command
def p2x(self, p):
    """Map parameters ``p`` to a vector in x-space.

    x-space is a vector space of dimension ``p.size``. Its axes are in the
    directions specified by the eigenvectors of ``p``'s covariance matrix,
    and distance along an axis is in units of the standard deviation in
    that direction.
    """
    if hasattr(p, 'keys'):
        # dict-like input: flatten through a BufferDict sharing this fit's keys
        flat = BufferDict(p, keys=self.g.keys())._buf[:self.meanflat.size]
        delta = flat - self.meanflat
    else:
        delta = numpy.asarray(p).reshape(-1) - self.meanflat
    return self.vec_isig.dot(delta)
def parse_multi_object_delete_response(data):
    """Parse a Multi-Object Delete API response.

    :param data: XML response body content from service.
    :return: list of :class:`MultiDeleteError`, one per object whose
        deletion failed.
    """
    root = S3Element.fromstring('MultiObjectDeleteResult', data)
    errors = []
    for errtag in root.findall('Error'):
        errors.append(
            MultiDeleteError(errtag.get_child_text('Key'),
                             errtag.get_child_text('Code'),
                             errtag.get_child_text('Message')))
    return errors
def has_read_permission(self, request, path):
    """Return True if the user is an authenticated staff member or superuser.

    Extensions could base the permissions on the path too.
    """
    user = request.user
    if not user.is_authenticated():
        return False
    # superusers and staff read everything; everyone else reads nothing
    return bool(user.is_superuser or user.is_staff)
def tofile(self, filepath=None):
    """Save the configuration into a file and return its path.

    Convenience method.

    :param str|unicode filepath: Filepath to save configuration into.
        If not provided a temporary file will be automatically generated.
        If a directory is given, ``<alias>.ini`` is created inside it.
    :rtype: str|unicode
    """
    if filepath is None:
        # generate a unique temp filename carrying the alias for readability
        with NamedTemporaryFile(prefix='%s_' % self.alias, suffix='.ini', delete=False) as tmp:
            filepath = tmp.name
    else:
        filepath = os.path.abspath(filepath)
        if os.path.isdir(filepath):
            filepath = os.path.join(filepath, '%s.ini' % self.alias)
    with open(filepath, 'w') as fh:
        fh.write(self.format())
        fh.flush()
    return filepath
def _api_path(self, item):
    """Get the API path for the current cursor position.

    :raises NotImplementedError: when ``base_url`` has not been configured.
    """
    if self.base_url is None:
        raise NotImplementedError("base_url not set")
    segments = [node.blob["id"] for node in item.path]
    # join segments first so an empty path still yields "<base_url>/"
    return "/".join([self.base_url, "/".join(segments)])
def xml_to_str(tree, encoding=None, xml_declaration=False):
    """Serialize an XML tree.

    Returns unicode when ``encoding`` is falsy, encoded ``bytes`` otherwise.
    An XML declaration can only be emitted for encoded output.
    """
    if not encoding:
        if xml_declaration:
            raise ValueError("'xml_declaration' is not supported when 'encoding' is None")
        return tostring(tree, encoding=text_type, xml_declaration=False)
    return tostring(tree, encoding=encoding, xml_declaration=True)
def is_little_endian(array):
    """Return True if array is little endian, False otherwise.

    Parameters
    ----------
    array: numpy array
        A numerical python array.

    Returns
    -------
    bool
        True for little-endian data.

    Notes
    -----
    Strings are neither big nor little endian. The input must be a simple
    numpy array, not an array with fields.
    """
    byteorder = array.dtype.base.byteorder
    if byteorder == '<':
        return True
    # '=' means native order, which is little-endian iff the machine is
    return byteorder == '=' and bool(numpy.little_endian)
def versioned_bucket_lister(bucket, prefix='', delimiter='',
                            key_marker='', version_id_marker='', headers=None):
    """A generator function for listing versions in a bucket.

    Pages through ``bucket.get_all_versions`` results, yielding each
    version and following the pagination markers until the listing is no
    longer truncated.
    """
    while True:
        batch = bucket.get_all_versions(prefix=prefix, key_marker=key_marker,
                                        version_id_marker=version_id_marker,
                                        delimiter=delimiter, headers=headers,
                                        max_keys=999)
        for version in batch:
            yield version
        if not batch.is_truncated:
            break
        # continue from where this page left off
        key_marker = batch.next_key_marker
        version_id_marker = batch.next_version_id_marker
def call_cur(f):
    """Decorator that opens a connection and passes a cursor to *f*.

    The wrapped function is invoked as ``f(self, cur, *args, **kwargs)``
    with ``cur`` obtained from ``self.withcur()``.
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        with self.withcur() as cur:
            return f(self, cur, *args, **kwargs)
    return wrapper
def selectrangeopenleft(table, field, minv, maxv, complement=False):
    """Select rows where *field* lies in the half-open interval [minv, maxv).

    I.e., greater than or equal to `minv` and less than `maxv`.
    """
    lo = Comparable(minv)
    hi = Comparable(maxv)
    return select(table, field, lambda v: lo <= v < hi,
                  complement=complement)
def __notify(self, sender, content):
    """Call back the registered listener when a message is received."""
    listener = self.handle_message
    if listener is None:
        return
    try:
        listener(sender, content)
    except Exception as ex:
        # listener failures must not kill the receive loop
        logging.exception("Error calling message listener: %s", ex)
def stub_batch(cls, size, **kwargs):
    """Stub a batch of instances of the given class, with overridden attrs.

    Args:
        size (int): the number of instances to stub

    Returns:
        list: the stubbed instances
    """
    batch = []
    for _ in range(size):
        batch.append(cls.stub(**kwargs))
    return batch
def _decrypt_object(obj, translate_newlines=False):
    """Recursively try to decrypt any object.

    Recur on objects that are not strings, decrypt strings that are valid
    Fernet tokens, and return everything else unchanged.
    """
    if salt.utils.stringio.is_readable(obj):
        # StringIO-like: decrypt the buffered text
        return _decrypt_object(obj.getvalue(), translate_newlines)
    if isinstance(obj, six.string_types):
        try:
            return _decrypt_ciphertext(obj, translate_newlines=translate_newlines)
        except (fernet.InvalidToken, TypeError):
            # not a valid token -- leave the string as-is
            return obj
    if isinstance(obj, dict):
        for key in list(obj):
            obj[key] = _decrypt_object(obj[key], translate_newlines=translate_newlines)
        return obj
    if isinstance(obj, list):
        for idx, item in enumerate(obj):
            obj[idx] = _decrypt_object(item, translate_newlines=translate_newlines)
        return obj
    return obj
def generate_host_passthrough(self, vcpu_num):
    """Generate a host-passthrough XML cpu node.

    Args:
        vcpu_num(str): number of virtual CPUs
            (NOTE(review): compared with ``> 1`` below, which implies an
            int -- confirm the declared type)

    Returns:
        lxml.etree.Element: CPU XML node
    """
    cpu_node = ET.Element('cpu', mode='host-passthrough')
    cpu_node.append(self.generate_topology(vcpu_num))
    # a NUMA layout only makes sense with more than one vCPU
    if vcpu_num > 1:
        cpu_node.append(self.generate_numa(vcpu_num))
    return cpu_node
def database_to_excel(engine, excel_file_path):
    """Export a database to Excel, one sheet per table.

    :param engine: SQLAlchemy engine bound to the source database.
    :param excel_file_path: destination Excel file path.
    """
    from sqlalchemy import MetaData, select

    metadata = MetaData()
    metadata.reflect(engine)
    writer = pd.ExcelWriter(excel_file_path)
    for table in metadata.tables.values():
        frame = pd.read_sql(select([table]), engine)
        frame.to_excel(writer, table.name, index=False)
    writer.save()
def _set_WorkingDir(self, path):
    """Set the working directory, creating it if it does not exist."""
    self._curr_working_dir = path
    try:
        mkdir(self.WorkingDir)
    except OSError:
        # directory already exists (or cannot be created) -- proceed anyway
        pass
def _get_nonce(self, url):
    # Get a nonce to use in a request, removing it from the nonces on hand.
    # Returns a Deferred firing with the nonce; when the local pool is
    # empty, a HEAD request is issued to obtain a fresh one.
    action = LOG_JWS_GET_NONCE()
    if len(self._nonces) > 0:
        # fast path: consume a cached nonce
        with action:
            nonce = self._nonces.pop()
            action.add_success_fields(nonce=nonce)
            return succeed(nonce)
    else:
        with action.context():
            # HEAD the URL; _add_nonce stashes the server-provided nonce,
            # which the next callback pops from the pool
            return (
                DeferredContext(self.head(url))
                .addCallback(self._add_nonce)
                .addCallback(lambda _: self._nonces.pop())
                .addCallback(tap(
                    lambda nonce: action.add_success_fields(nonce=nonce)))
                .addActionFinish()) | Get a nonce to use in a request, removing it from the nonces on hand. |
def validate_auth_mechanism_properties(option, value):
    """Validate authMechanismProperties.

    *value* must be a comma-separated list of ``key:value`` pairs whose
    keys are supported auth mechanism properties; returns them as a dict.
    """
    value = validate_string(option, value)
    props = {}
    for pair in value.split(','):
        try:
            key, val = pair.split(':')
        except ValueError:
            raise ValueError("auth mechanism properties must be "
                             "key:value pairs like SERVICE_NAME:"
                             "mongodb, not %s." % (pair,))
        if key not in _MECHANISM_PROPS:
            raise ValueError("%s is not a supported auth "
                             "mechanism property. Must be one of "
                             "%s." % (key, tuple(_MECHANISM_PROPS)))
        # CANONICALIZE_HOST_NAME accepts booleans or boolean-ish strings
        if key == 'CANONICALIZE_HOST_NAME':
            props[key] = validate_boolean_or_string(key, val)
        else:
            props[key] = val
    return props
def draw_scores(self):
    """Draw the current and best score boxes; return their geometry."""
    width, height = 100, 60
    x1 = self.WIDTH - self.BORDER - 200 - 2 * self.BORDER
    y1 = self.BORDER
    # clear the strip behind both boxes before redrawing
    self.screen.fill((255, 255, 255), (x1, 0, self.WIDTH - x1, height + y1))
    self._draw_score_box(self.score_label, self.score, (x1, y1), (width, height))
    x2 = x1 + width + self.BORDER
    self._draw_score_box(self.best_label, self.manager.score, (x2, y1), (width, height))
    return (x1, y1), (x2, y1), width, height
def render(opts, functions, states=None, proxy=None, context=None):
    # Returns the render modules as a FilterDictWrapper (keys without the
    # trailing ".render"); raises LoaderError when the configured renderer
    # pipeline cannot be satisfied.
    if context is None:
        context = {}
    # dunder values injected into each renderer module
    pack = {'__salt__': functions,
            '__grains__': opts.get('grains', {}),
            '__context__': context}
    if states:
        pack['__states__'] = states
    pack['__proxy__'] = proxy or {}
    ret = LazyLoader(
        _module_dirs(
            opts,
            'renderers',
            'render',
            ext_type_dirs='render_dirs',
        ),
        opts,
        tag='render',
        pack=pack,
    )
    rend = FilterDictWrapper(ret, '.render')
    # fail fast if any renderer named in the configured pipe is missing
    if not check_render_pipe_str(opts['renderer'], rend, opts['renderer_blacklist'], opts['renderer_whitelist']):
        err = ('The renderer {0} is unavailable, this error is often because '
               'the needed software is unavailable'.format(opts['renderer']))
        log.critical(err)
        raise LoaderError(err)
    return rend | Returns the render modules |
def get_dates_file(path):
    """Parse a dates file where each line is "<date> <probability>".

    Returns a list of (converted_date, probability) tuples.
    """
    with open(path) as f:
        lines = f.readlines()
    result = []
    for line in lines:
        fields = line.split(" ")
        result.append((convert_time_string(fields[0]), float(fields[1])))
    return result
def thread_setup(read_and_decode_fn, example_serialized, num_threads):
    """Set up the decode op once per reader thread.

    Returns a list of ``num_threads`` results of calling
    ``read_and_decode_fn(example_serialized)``.
    """
    return [read_and_decode_fn(example_serialized) for _ in range(num_threads)]
def _get_index(self, beacon_config, label):
    """Return the index of the first config item containing *label*, or -1."""
    for index, item in enumerate(beacon_config):
        if label in item:
            return index
    return -1
def edges(self, tail_head_iter):
    """Create a bunch of edges.

    Args:
        tail_head_iter: Iterable of ``(tail_name, head_name)`` pairs.
    """
    template = self._edge_plain
    quote = self._quote_edge
    for tail, head in tail_head_iter:
        self.body.append(template % (quote(tail), quote(head)))
def cmd_led(self, args):
    """Send an LED pattern as an override."""
    if len(args) < 3:
        print("Usage: led RED GREEN BLUE <RATE>")
        return
    pattern = [0] * 24
    # three RGB components, plus an optional fourth rate value when
    # exactly four arguments are supplied
    plen = 4 if len(args) == 4 else 3
    for idx in range(plen):
        pattern[idx] = int(args[idx])
    self.master.mav.led_control_send(self.settings.target_system,
                                     self.settings.target_component,
                                     0, 0, plen, pattern)
def _wait_for_js(self):
    """Manually re-check JavaScript dependencies for a decorated class.

    Expects ``self`` to have been decorated with `js_defined` or
    `requirejs` and to expose a `browser` property; does nothing when
    either condition is unmet.
    """
    if not hasattr(self, 'browser'):
        return
    js_vars = getattr(self, '_js_vars', None)
    if js_vars:
        EmptyPromise(
            lambda: _are_js_vars_defined(self.browser, js_vars),
            u"JavaScript variables defined: {0}".format(", ".join(js_vars))
        ).fulfill()
    requirejs_deps = getattr(self, '_requirejs_deps', None)
    if requirejs_deps:
        EmptyPromise(
            lambda: _are_requirejs_deps_loaded(self.browser, requirejs_deps),
            u"RequireJS dependencies loaded: {0}".format(", ".join(requirejs_deps)),
            try_limit=5
        ).fulfill()
def iter_starred(self, sort=None, direction=None, number=-1, etag=None):
    """Iterate over repositories starred by this user.

    .. versionchanged:: 0.5
        Added sort and direction parameters (optional) as per the change
        in GitHub's API.

    :param int number: (optional), number of starred repos to return.
        Default: -1, returns all available repos
    :param str sort: (optional), either 'created' (when the star was
        created) or 'updated' (when the repository was last pushed to)
    :param str direction: (optional), either 'asc' or 'desc'. Default:
        'desc'
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`Repository <github3.repos.Repository>`
    """
    from .repos import Repository
    params = {'sort': sort, 'direction': direction}
    # drop unset parameters before issuing the request
    self._remove_none(params)
    url = self.starred_urlt.expand(owner=None, repo=None)
    return self._iter(int(number), url, Repository, params, etag)
def on_draw(self):
    """Draw the visual, skipping quietly when the program isn't built yet."""
    if not self.program:
        logger.debug("Skipping drawing visual `%s` because the program "
                     "has not been built yet.", self)
        return
    self.program.draw(self.gl_primitive_type)
def fit(self, features, classes):
    """Construct the DistanceClassifier from the provided training data.

    Parameters
    ----------
    features: array-like {n_samples, n_features}
        Feature matrix
    classes: array-like {n_samples}
        List of class labels for prediction

    Returns
    -------
    self
    """
    classes = self.le.fit_transform(classes)
    X = []
    self.mu = []
    self.Z = []
    # label encoding guarantees labels are 0..k-1, so appends stay aligned
    for label in np.unique(classes):
        subset = features[classes == label]
        X.append(subset)
        self.mu.append(np.mean(subset, axis=0))
        if self.d == 'mahalanobis':
            # per-class covariance needed for the Mahalanobis metric
            self.Z.append(np.cov(subset.transpose()))
    return self
async def mark_fixed(self, *, comment: str = None):
    """Mark the machine as fixed.

    :param comment: Reason machine is fixed.
    :type comment: `str`
    """
    params = {"system_id": self.system_id}
    if comment:
        params["comment"] = comment
    self._data = await self._handler.mark_fixed(**params)
    return self
def parents(self) -> List[str]:
    """Return the list of parent commit SHAs.

    :return: List[str] parents
    """
    return [parent.hexsha for parent in self._c_object.parents]
def genhash(self, package, code):
    """Generate a hex hash string from *package* and *code*.

    The checksum also covers the serializer version and this instance's
    ``__reduce__`` state so caches invalidate when either changes.
    """
    parts = (VERSION_STR,) + self.__reduce__()[1] + (package, code)
    payload = hash_sep.join(str(item) for item in parts)
    return hex(checksum(payload.encode(default_encoding),))
def total(self):
    """Get a list of counts for all of Unsplash.

    :return [Stat]: The Unsplash Stat.
    """
    return StatModel.parse(self._get("/stats/total"))
def _get_files(self) -> Iterator[str]:
    """Yield paths to all requirements files."""
    start = os.path.abspath(self.path)
    # a file path means "search from its containing directory"
    if os.path.isfile(start):
        start = os.path.dirname(start)
    for parent in self._get_parents(start):
        yield from self._get_files_from_dir(parent)
def get_users():
    """Return logged-in users.

    CLI Example:

    .. code-block:: bash

        salt '*' ps.get_users
    """
    try:
        return [dict(rec._asdict()) for rec in psutil.users()]
    except AttributeError:
        # older psutil without users(); fall back to the utmp database
        try:
            import utmp
        except ImportError:
            # no backend available; callers receive a falsey value
            return False
        result = []
        while True:
            rec = utmp.utmpaccess.getutent()
            if rec is None:
                return result
            if rec[0] == 7:
                # entry type 7 marks an active user login session
                started = rec[8]
                if isinstance(started, tuple):
                    started = started[0]
                result.append({'name': rec[4], 'terminal': rec[2],
                               'started': started, 'host': rec[5]})
def suspend(self):
    """Suspend this router if it is currently running."""
    status = yield from self.get_status()
    if status != "running":
        return
    yield from self._hypervisor.send('vm suspend "{name}"'.format(name=self._name))
    self.status = "suspended"
    log.info('Router "{name}" [{id}] has been suspended'.format(name=self._name, id=self._id))
def render_linked_js(self, js_files: Iterable[str]) -> str:
    """Render the final js ``<script>`` links for the rendered webpage.

    Relative paths are routed through ``static_url`` and duplicates are
    dropped while preserving first-seen order. Override this method in a
    sub-classed controller to change the output.
    """
    seen = set()
    ordered = []
    for path in js_files:
        if not is_absolute(path):
            path = self.static_url(path)
        if path in seen:
            continue
        seen.add(path)
        ordered.append(path)
    return "".join(
        '<script src="'
        + escape.xhtml_escape(p)
        + '" type="text/javascript"></script>'
        for p in ordered
    )
def tables_list(self, dataset_name, max_results=0, page_token=None):
    """Issue a request to retrieve a list of tables.

    Args:
        dataset_name: the name of the dataset to enumerate.
        max_results: an optional maximum number of tables to retrieve.
        page_token: an optional token to continue the retrieval.
    Returns:
        A parsed result object.
    Raises:
        Exception if there is an error performing the operation.
    """
    path = Api._TABLES_PATH % (dataset_name.project_id, dataset_name.dataset_id, '', '')
    url = Api._ENDPOINT + path
    args = {}
    if max_results != 0:
        args['maxResults'] = max_results
    if page_token is not None:
        args['pageToken'] = page_token
    return google.datalab.utils.Http.request(url, args=args, credentials=self.credentials)
async def _sasl_abort(self, timeout=False):
    """Abort SASL authentication, cancelling any pending timeout timer."""
    if timeout:
        self.logger.error('SASL authentication timed out: aborting.')
    else:
        self.logger.error('SASL authentication aborted.')
    timer = self._sasl_timer
    if timer:
        timer.cancel()
        self._sasl_timer = None
    # tell the server we gave up, then finish the capability negotiation
    await self.rawmsg('AUTHENTICATE', ABORT_MESSAGE)
    await self._capability_negotiated('sasl')
def interpolate(self, df):
    """Interpolate this `FrequencySeries` to a new resolution.

    Parameters
    ----------
    df : `float`
        desired frequency resolution of the interpolated `FrequencySeries`,
        in Hz

    Returns
    -------
    out : `FrequencySeries`
        the interpolated version of the input `FrequencySeries`

    See Also
    --------
    numpy.interp
        for the underlying 1-D linear interpolation scheme
    """
    f0 = self.f0.decompose().value
    # number of samples spanning the same band at the new resolution
    nsamples = (self.size - 1) * (self.df.decompose().value / df) + 1
    fsamples = numpy.arange(0, numpy.rint(nsamples), dtype=self.dtype) * df + f0
    out = type(self)(numpy.interp(fsamples, self.frequencies.value,
                                  self.value))
    # propagate metadata from self, then record the new frequency spacing
    out.__array_finalize__(self)
    out.f0 = f0
    out.df = df
    return out
def register_onchain_secret(
        channel_state: NettingChannelState,
        secret: Secret,
        secrethash: SecretHash,
        secret_reveal_block_number: BlockNumber,
        delete_lock: bool = True,
) -> None:
    """Register the onchain secret and set the lock to the unlocked state.

    Even though the lock is unlocked it is *not* claimed. The capacity will
    increase once the next balance proof is received.
    """
    # apply the same registration to both ends of the channel
    for end_state in (channel_state.our_state, channel_state.partner_state):
        register_onchain_secret_endstate(
            end_state,
            secret,
            secrethash,
            secret_reveal_block_number,
            delete_lock,
        )
def messages(self):
    """Return the number of messages remaining before limiting kicks in."""
    remaining_fraction = (self.limit.unit_value - self.level) / self.limit.unit_value
    return int(math.floor(remaining_fraction * self.limit.value))
def parent(self):
    # Return the parent scope (a FoldScope), or None when this trigger is
    # already at the outermost fold level or on the first block.
    if TextBlockHelper.get_fold_lvl(self._trigger) > 0 and \
        self._trigger.blockNumber():
        block = self._trigger.previous()
        ref_lvl = self.trigger_level - 1
        # walk backwards until we find a fold trigger at a shallower level
        while (block.blockNumber() and
            (not TextBlockHelper.is_fold_trigger(block) or
            TextBlockHelper.get_fold_lvl(block) > ref_lvl)):
            block = block.previous()
        try:
            return FoldScope(block)
        except ValueError:
            # the found block is not a valid fold trigger
            return None
    return None | Return the parent scope.
:return: FoldScope or None |
def clean_file(configuration, filename):
    """Strip the English-source warning from a translated po file.

    Replaces the warning with a note about coming from Transifex.
    """
    pofile = polib.pofile(filename)
    if pofile.header.find(EDX_MARKER) == -1:
        return
    new_header = get_new_header(configuration, pofile)
    pofile.header = pofile.header.replace(EDX_MARKER, new_header)
    pofile.save()
def setnx(self, key, value):
    """Set the value of a key, only if the key does not exist."""
    return wait_convert(self.execute(b'SETNX', key, value), bool)
def flatten_list(l: List[list]) -> list:
    """Take a list of lists and return a single flat list."""
    flat = []
    for inner in l:
        flat.extend(inner)
    return flat
def __label_cmp(self, other):
    """Comparison function; sorts labels by name, then by color.

    Returns -1/0/1 like a classic ``cmp`` function. ``None`` always
    sorts after any label.
    """
    if other is None:
        return -1
    label_name = strip_accents(self.name).lower()
    other_name = strip_accents(other.name).lower()
    if label_name < other_name:
        return -1
    if label_name > other_name:
        return 1
    # BUG FIX: the color comparison below used to be unreachable because
    # every branch of the name comparison returned (names equal returned 0).
    # Fall through to colors only when the names compare equal.
    if self.get_color_str() < other.get_color_str():
        return -1
    elif self.get_color_str() == other.get_color_str():
        return 0
    else:
        return 1
def setup_db(self, couch, dbname):
    """Create (if needed) and configure the DB *dbname*, returning it.

    :raises RuntimeError: when a concurrent creator wins the race.
    """
    self.log.debug('Setting up DB: %s' % dbname)
    if dbname in couch:
        return couch[dbname]
    self.log.info("DB doesn't exist so creating DB: %s", dbname)
    try:
        my_db = couch.create(dbname)
    except Exception:
        # another process created the DB between our check and the create
        self.log.critical("Race condition caught")
        raise RuntimeError("Race condition caught when creating DB")
    try:
        auth_doc = {
            '_id': '_design/auth',
            'language': 'javascript',
            # NOTE(review): the JS body of validate_doc_update is missing
            # from this copy (the original line was a dangling assignment,
            # i.e. a SyntaxError); restore the real validation function
            # from upstream -- TODO confirm
            'validate_doc_update': '',
        }
        my_db.save(auth_doc)
    except Exception:
        self.log.error('Could not set permissions of %s' % dbname)
    return my_db
def property(self, property_name, default=Ellipsis):
    """Return the value of the named property.

    :param property_name: key to look up in the tag mapping.
    :param default: value returned when the property is missing; when
        left as the ``Ellipsis`` sentinel, a missing property raises
        ``KeyError`` instead.
    """
    try:
        return self._a_tags[property_name]
    except KeyError:
        # BUG FIX: use identity, not equality -- an arbitrary default
        # value may implement __eq__ in ways that falsely match Ellipsis
        if default is not Ellipsis:
            return default
        raise
def do_aprint(self, statement):
    """Print the argument string this basic command is called with."""
    for line in ('aprint was called with argument: {!r}'.format(statement),
                 'statement.raw = {!r}'.format(statement.raw),
                 'statement.argv = {!r}'.format(statement.argv),
                 'statement.command = {!r}'.format(statement.command)):
        self.poutput(line)
def set_attribute(self, name, value):
    """Set an attribute of the element to a specified value.

    @type name: str
    @param name: the name of the attribute
    @type value: str
    @param value: the attribute of the value
    """
    js_executor = self.driver_wrapper.js_executor

    def apply_attribute():
        js_executor.execute_template('setAttributeTemplate', {
            'attribute_name': str(name),
            'attribute_value': str(value)}, self.element)
        return True

    self.execute_and_handle_webelement_exceptions(
        apply_attribute,
        'set attribute "' + str(name) + '" to "' + str(value) + '"')
    return self
def predict(rf_model, features):
    """Return the predicted label, its probability, and a quality flag.

    Parameters
    ----------
    rf_model : sklearn.ensemble.RandomForestClassifier
        The UPSILoN random forests model.
    features : array_like
        A list of features estimated by UPSILoN.

    Returns
    -------
    label : str
        A predicted label (i.e. class).
    probability : float
        Class probability.
    flag : int
        Classification flag (1 when the period is suspicious).
    """
    import numpy as np
    from upsilon.extract_features.feature_set import get_feature_set

    feature_set = get_feature_set()
    # keep only known features, in a deterministic (sorted) order
    cols = sorted(name for name in features if name in feature_set)
    filtered_features = np.array([features[name] for name in cols]).reshape(1, -1)
    classes = rf_model.classes_
    probabilities = rf_model.predict_proba(filtered_features)[0]
    # flag suspicious periods: low signal-to-noise or a known alias
    flag = 1 if (features['period_SNR'] < 20. or is_period_alias(features['period'])) else 0
    max_index = np.where(probabilities == np.max(probabilities))
    return classes[max_index][0], probabilities[max_index][0], flag
def before_request():
    """Validate the session and the user's CSRF token before each request.

    Returns `None` when the request may proceed, an unauth response when
    the session is missing, and aborts with 403 on a CSRF mismatch.
    """
    # SAML / auth endpoints must stay reachable without a session
    if request.path.startswith('/saml') or request.path.startswith('/auth'):
        return
    if 'accounts' not in session:
        logger.debug('Missing \'accounts\' from session object, sending user to login page')
        return BaseView.make_unauth_response()
    # only state-changing methods require a CSRF token
    if request.method in ('POST', 'PUT', 'DELETE',):
        if session['csrf_token'] != request.headers.get('X-Csrf-Token'):
            logger.info('CSRF Token is missing or incorrect, sending user to login page')
            abort(403)
def load_local_config(filename):
    """Load the pylint.config.py file.

    Args:
        filename (str): The python file containing the local configuration.

    Returns:
        module: The loaded Python module (an empty placeholder module when
        no filename is given).
    """
    # BUG FIX: the ``imp`` module was removed in Python 3.12; use the
    # importlib / types equivalents of imp.new_module / imp.load_source.
    import types
    if not filename:
        return types.ModuleType('local_pylint_config')
    import importlib.util
    spec = importlib.util.spec_from_file_location('local_pylint_config', filename)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def update_firewall(self, firewall, body=None):
    """Update a firewall via a PUT to its resource path."""
    path = self.firewall_path % (firewall)
    return self.put(path, body=body)
def postComponents(self, name, status, **kwargs):
    """Create a new component.

    :param name: Name of the component
    :param status: Status of the component; 1-4
    :param description: (optional) Description of the component
    :param link: (optional) A hyperlink to the component
    :param order: (optional) Order of the component
    :param group_id: (optional) The group id that the component is within
    :param enabled: (optional)
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    kwargs.update(name=name, status=status)
    return self.__postRequest('/components', kwargs)
def get_distance(a, b, xaxis=True):
    """Return the end-to-end distance between two blast HSPs.

    :param xaxis: measure along the query (True) or subject (False) axis.
    """
    if xaxis:
        arange = ("0", a.qstart, a.qstop, a.orientation)
        brange = ("0", b.qstart, b.qstop, b.orientation)
    else:
        arange = ("0", a.sstart, a.sstop, a.orientation)
        brange = ("0", b.sstart, b.sstop, b.orientation)
    dist, _ = range_distance(arange, brange, distmode="ee")
    return abs(dist)
def reload(self, *fields, **kw):
    # Reload the entire document from the database, or refresh specific
    # named top-level fields.
    # NOTE(review): assumes _prepare_find returns (Doc, collection, query,
    # options) and that ``~Doc.id`` resolves to the primary-key field
    # name -- confirm against the ODM's query API.
    Doc, collection, query, options = self._prepare_find(id=self.id, projection=fields, **kw)
    result = collection.find_one(query, **options)
    if fields:
        # partial refresh: overwrite only the projected fields
        for k in result:
            if k == ~Doc.id: continue
            self.__data__[k] = result[k]
    else:
        # full refresh: replace the entire backing document
        self.__data__ = result
    return self | Reload the entire document from the database, or refresh specific named top-level fields. |
def create_tags(self, entry):
    """Build and associate ``Tag`` objects from an ``Entry``'s tag_string.

    The tag string is split on commas; each tag is lower-cased and
    stripped before being created (if necessary) and attached.
    """
    for raw_tag in entry.tag_string.split(','):
        tag, _created = self.get_or_create(name=raw_tag.lower().strip())
        entry.tags.add(tag)
def genesis(chain_class: BaseChain,
            db: BaseAtomicDB=None,
            params: Dict[str, HeaderParams]=None,
            state: GeneralState=None) -> BaseChain:
    """Initialize the given chain class with the given genesis header
    parameters and chain state.
    """
    genesis_state = {} if state is None else _fill_and_normalize_state(state)
    # explicit params override the defaults derived from the state
    genesis_params = _get_default_genesis_params(genesis_state)
    if params is not None:
        genesis_params = merge(genesis_params, params)
    base_db = AtomicDB() if db is None else db
    return chain_class.from_genesis(base_db, genesis_params, genesis_state)
def run_file(self, path, all_errors_exit=True):
    """Execute a Python file, merging its top-level names into our vars."""
    path = fixpath(path)
    with self.handling_errors(all_errors_exit):
        # run the module, absorb its globals, and record the wildcard import
        module_vars = run_file(path)
        self.vars.update(module_vars)
        self.store("from " + splitname(path)[1] + " import *")
def Distance(lat0, lng0, lat1, lng1):
    """Compute the geodesic distance in meters between two points on the
    surface of the Earth. The latitude and longitude angles are in degrees.

    Approximate geodesic distance function (Haversine Formula) assuming a
    perfect sphere of radius 6367 km (see "What are some algorithms for
    calculating the distance between 2 points?" in the GIS Faq at
    http://www.census.gov/geo/www/faq-index.html). The approximate radius
    is adequate for our needs here, but a more sophisticated geodesic
    function should be used if greater accuracy is required.
    """
    phi0 = math.radians(lat0)
    phi1 = math.radians(lat1)
    dphi = phi1 - phi0
    dlam = math.radians(lng1) - math.radians(lng0)
    # haversine of the central angle
    sin_dphi = math.sin(dphi * 0.5)
    sin_dlam = math.sin(dlam * 0.5)
    h = sin_dphi * sin_dphi + math.cos(phi0) * math.cos(phi1) * sin_dlam * sin_dlam
    central_angle = 2.0 * math.atan2(math.sqrt(h), math.sqrt(1.0 - h))
    return 6367000.0 * central_angle
def parse_bulk_create(prs, conn):
    """Register the ``bulk_create`` sub-command (create bulk records).

    Arguments:
        prs: parser object of argparse
        conn: dictionary of connection information
    """
    prs_create = prs.add_parser(
        'bulk_create', help='create bulk records of specific zone')
    set_option(prs_create, 'infile')
    conn_options(prs_create, conn)
    prs_create.add_argument('--domain', action='store',
                            help='create records with specify zone')
    prs_create.set_defaults(func=create)
def complete_opt_display(self, text, *_):
    """Autocomplete candidates for the display option."""
    return [candidate + " " for candidate in DISPLAYS if candidate.startswith(text)]
def get_child(self, child_name):
    """Return the child object with the supplied name.

    :raises ValueError: when no (truthy) child of that name exists.
    """
    child = self.children.get(child_name)
    # note: a present-but-falsy child is treated the same as a missing one
    if not child:
        raise ValueError("Value {} not in this tree".format(child_name))
    return child
def run_program(program, *args):
    """Wrap subprocess.check_output to make life easier."""
    argv = [program]
    argv.extend(args)
    logging.debug(_('check_output arguments: %s'), argv)
    check_output(argv, universal_newlines=True)
def should_close(http_version, connection_field):
    """Return whether the connection should be closed.

    Args:
        http_version (str): The HTTP version string like ``HTTP/1.0``.
        connection_field (str): The value for the ``Connection`` header.
    """
    token = (connection_field or '').lower()
    if http_version == 'HTTP/1.0':
        # HTTP/1.0 closes by default unless keep-alive is requested
        return token.replace('-', '') != 'keepalive'
    # later versions keep the connection open unless told otherwise
    return token == 'close'
def has_verified_email(self):
    """Has the user verified that the email he has given is legit?

    Verified e-mail is required to the gallery. Confirmation happens by
    sending an email to the user and the owner of the email user verifying
    that he is the same as the Imgur user.
    """
    url = "{0}/3/account/{1}/verifyemail".format(self._imgur._base_url, self.name)
    return self._imgur._send_request(url, needs_auth=True)
def single_violation(self, column=None, value=None, **kwargs):
    """A single event violation is a one-time event that occurred on a
    fixed date, and is associated with one permitted facility.

    >>> PCS().single_violation('single_event_viol_date', '16-MAR-01')
    """
    return self._resolve_call('PCS_SINGLE_EVENT_VIOL', column, value, **kwargs)
def eval_one(self, e, **kwargs):
    """Evaluate an expression to get the only possible solution.

    Errors if either no or more than one solution is returned. A kwarg
    parameter `default` can be specified to be returned instead of failure.

    :param e: the expression to get a solution for
    :param default: A value can be passed as a kwarg here. It will be
        returned in case of failure.
    :param kwargs: Any additional kwargs will be passed down to `eval_exact`
    :raise SimUnsatError: if no solution could be found satisfying the
        given constraints
    :raise SimValueError: if more than one solution was found to satisfy
        the given constraints
    :return: The value for `e`
    """
    passthrough = {k: v for k, v in kwargs.items() if k != 'default'}
    try:
        return self.eval_exact(e, 1, **passthrough)[0]
    except (SimUnsatError, SimValueError, SimSolverModeError):
        if 'default' in kwargs:
            return kwargs['default']
        raise
def potcar_spec(filename):
    """Return a dictionary specifying the pseudopotentials in a POTCAR file.

    Args:
        filename (Str): The name of the POTCAR file to process.

    Returns:
        (Dict): A dictionary of pseudopotential filename: dataset pairs,
            e.g. { 'Fe_pv': 'PBE_54', 'O': 'PBE_54' }

    Raises:
        ValueError: if one or more contained POTCARs have no matching
            md5 hash in the known datasets.
    """
    with open(filename, 'r') as f:
        # the capturing group keeps each "End of Dataset" delimiter so it
        # can be re-joined with its preceding potcar body
        potcars = re.split('(End of Dataset\n)', f.read())
    potcar_md5sums = [md5sum(''.join(pair))
                      for pair in zip(potcars[::2], potcars[1:-1:2])]
    p_spec = {}
    for this_md5sum in potcar_md5sums:
        for ps in potcar_sets:
            for p, p_md5sum in potcar_md5sum_data[ps].items():
                if this_md5sum == p_md5sum:
                    p_spec[p] = ps
    if len(p_spec) != len(potcar_md5sums):
        raise ValueError('One or more POTCARs did not have matching md5 hashes')
    return p_spec
def update(self, conf_dict):
    """Update this configuration with a dictionary.

    :param conf_dict: A python dictionary (or an iterable of key/value
        pairs) to update this configuration with.
    :raises ValueError: when a key is not a valid identifier.
    """
    if isinstance(conf_dict, dict):
        iterator = six.iteritems(conf_dict)
    else:
        iterator = iter(conf_dict)
    for k, v in iterator:
        if not IDENTIFIER.match(k):
            # BUG FIX: corrected the "indentifier" typo in the message
            raise ValueError('\'%s\' is not a valid identifier' % k)
        cur_val = self.__values__.get(k)
        if isinstance(cur_val, Config):
            # BUG FIX: recurse/assign with the iterated value ``v`` --
            # indexing ``conf_dict[k]`` broke for non-dict iterables of
            # (key, value) pairs, which the iterator branch explicitly
            # supports
            cur_val.update(v)
        else:
            self[k] = v
def enable_mp_crash_reporting():
global mp_crash_reporting_enabled
multiprocessing.Process = multiprocessing.process.Process = CrashReportingProcess
mp_crash_reporting_enabled = True | Monkey-patch the multiprocessing.Process class with our own CrashReportingProcess.
Any subsequent imports of multiprocessing.Process will reference CrashReportingProcess instead.
This function must be called before any imports to mulitprocessing in order for the monkey-patching to work. |
def transform_series(series, force_list=False, buffers=None):
if isinstance(series, pd.PeriodIndex):
vals = series.to_timestamp().values
else:
vals = series.values
return transform_array(vals, force_list=force_list, buffers=buffers) | Transforms a Pandas series into serialized form
Args:
series (pd.Series) : the Pandas series to transform
force_list (bool, optional) : whether to only output to standard lists
This function can encode some dtypes using a binary encoding, but
setting this argument to True will override that and cause only
standard Python lists to be emitted. (default: False)
buffers (set, optional) :
If binary buffers are desired, the buffers parameter may be
provided, and any columns that may be sent as binary buffers
will be added to the set. If None, then only base64 encoding
will be used (default: None)
If force_list is True, then this value will be ignored, and
no buffers will be generated.
**This is an "out" parameter**. The values it contains will be
modified in-place.
Returns:
list or dict |
def as_cache_key(self, ireq):
extras = tuple(sorted(ireq.extras))
if not extras:
extras_string = ""
else:
extras_string = "[{}]".format(",".join(extras))
name = key_from_req(ireq.req)
version = get_pinned_version(ireq)
return name, "{}{}".format(version, extras_string) | Given a requirement, return its cache key.
This behavior is a little weird in order to allow backwards
compatibility with cache files. For a requirement without extras, this
will return, for example::
("ipython", "2.1.0")
For a requirement with extras, the extras will be comma-separated and
appended to the version, inside brackets, like so::
("ipython", "2.1.0[nbconvert,notebook]") |
def end_timing(self):
if self._callback != None:
elapsed = time.perf_counter() * 1000 - self._start
self._callback.end_timing(self._counter, elapsed) | Ends timing of an execution block, calculates elapsed time and updates the associated counter. |
def parse_value(cls, value: int, default: T = None) -> T:
return next((item for item in cls if value == item.value), default) | Parse specified value for IntEnum; return default if not found. |
def _defaultdict(dct, fallback=_illegal_character):
out = defaultdict(lambda: fallback)
for k, v in six.iteritems(dct):
out[k] = v
return out | Wraps the given dictionary such that the given fallback function will be called when a nonexistent key is
accessed. |
def vb_get_max_network_slots():
sysprops = vb_get_box().systemProperties
totals = [
sysprops.getMaxNetworkAdapters(adapter_type)
for adapter_type in [
1,
2
]
]
return sum(totals) | Max number of slots any machine can have
@return:
@rtype: number |
def get_socket(self):
import eventlet
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
ssl_args = {}
for arg in ('keyfile', 'certfile', 'server_side', 'cert_reqs',
'ssl_version', 'ca_certs', 'do_handshake_on_connect',
'suppress_ragged_eofs', 'ciphers'):
try:
ssl_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
sock = eventlet.listen(address, **socket_args)
except TypeError:
sock = eventlet.listen(address)
if ssl_args:
sock = eventlet.wrap_ssl(sock, **ssl_args)
return sock | Create listener socket based on bottle server parameters. |
def aux_dict(self):
if self._aux_dict is None:
self._aux_dict = Executor._get_dict(
self._symbol.list_auxiliary_states(), self.aux_arrays)
return self._aux_dict | Get dictionary representation of auxiliary states arrays.
Returns
-------
aux_dict : dict of str to NDArray
The dictionary that maps name of auxiliary states to NDArrays.
Raises
------
ValueError : if there are duplicated names in the auxiliary states. |
def list_inputs(self):
doc = []
for inp, typ in self.input_types.items():
if isinstance(typ, six.string_types):
typ = "'{}'".format(typ)
doc.append('{}: {}'.format(inp, typ))
return '\n'.join(doc) | Return a string listing all the Step's input names and their types.
The types are returned in a copy/pastable format, so if the type is
`string`, `'string'` (with single quotes) is returned.
Returns:
str containing all input names and types. |
def set_color(self, group, color, pct=1):
if not self.leds:
return
color_tuple = color
if isinstance(color, str):
assert color in self.led_colors, \
"%s is an invalid LED color, valid choices are %s" % \
(color, ', '.join(self.led_colors.keys()))
color_tuple = self.led_colors[color]
assert group in self.led_groups, \
"%s is an invalid LED group, valid choices are %s" % \
(group, ', '.join(self.led_groups.keys()))
for led, value in zip(self.led_groups[group], color_tuple):
led.brightness_pct = value * pct | Sets brightness of LEDs in the given group to the values specified in
color tuple. When percentage is specified, brightness of each LED is
reduced proportionally.
Example::
my_leds = Leds()
my_leds.set_color('LEFT', 'AMBER')
With a custom color::
my_leds = Leds()
my_leds.set_color('LEFT', (0.5, 0.3)) |
def dump(self, stream):
items = (
('time', self.time),
('inc', self.inc),
)
ts = collections.OrderedDict(items)
json.dump(dict(ts=ts), stream) | Serialize self to text stream.
Matches convention of mongooplog. |
def cursor():
try:
cur = conn.cursor()
yield cur
except (db.Error, Exception) as e:
cur.close()
if conn:
conn.rollback()
print(e.message)
raise
else:
conn.commit()
cur.close() | Database cursor generator. Commit on context exit. |
def randomZ(maximum=None, bits=256):
result = BigInt()
if maximum:
maximum = coerceBigInt(maximum)
librelic.bn_rand_mod(byref(result), byref(maximum))
else:
librelic.bn_rand_abi(byref(result), BigInt.POSITIVE_FLAG, c_int(bits))
return result | Retrieve a random BigInt.
@maximum: If specified, the value will be no larger than this modulus.
@bits: If no maximum is specified, the value will have @bits. |
def get_filename(self, year):
res = self.fldr + os.sep + self.type + year + '.' + self.user
return res | returns the filename |
def set_prewarp(self, prewarp):
prewarp = _convert_to_charp(prewarp)
self._set_prewarp_func(self.alpr_pointer, prewarp) | Updates the prewarp configuration used to skew images in OpenALPR before
processing.
:param prewarp: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
:return: None |
def simBirth(self,which_agents):
IndShockConsumerType.simBirth(self,which_agents)
if not self.global_markov:
N = np.sum(which_agents)
base_draws = drawUniform(N,seed=self.RNG.randint(0,2**31-1))
Cutoffs = np.cumsum(np.array(self.MrkvPrbsInit))
self.MrkvNow[which_agents] = np.searchsorted(Cutoffs,base_draws).astype(int) | Makes new Markov consumer by drawing initial normalized assets, permanent income levels, and
discrete states. Calls IndShockConsumerType.simBirth, then draws from initial Markov distribution.
Parameters
----------
which_agents : np.array(Bool)
Boolean array of size self.AgentCount indicating which agents should be "born".
Returns
-------
None |
def add_watcher(self, issue, watcher):
url = self._get_url('issue/' + str(issue) + '/watchers')
self._session.post(
url, data=json.dumps(watcher)) | Add a user to an issue's watchers list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to add to the watchers list |
def check_client_key(self, client_key):
lower, upper = self.client_key_length
return (set(client_key) <= self.safe_characters and
lower <= len(client_key) <= upper) | Check that the client key only contains safe characters
and is no shorter than lower and no longer than upper. |
def liveReceivers(receivers):
for receiver in receivers:
if isinstance( receiver, WEAKREF_TYPES):
receiver = receiver()
if receiver is not None:
yield receiver
else:
yield receiver | Filter sequence of receivers to get resolved, live receivers
This is a generator which will iterate over
the passed sequence, checking for weak references
and resolving them, then returning all live
receivers. |
def call(self, action_name, container, instances=None, map_name=None, **kwargs):
return self.run_actions(action_name, container, instances=instances, map_name=map_name, **kwargs) | Generic function for running container actions based on a policy.
:param action_name: Action name.
:type action_name: unicode | str
:param container: Container name.
:type container: unicode | str
:param instances: Instance names to remove. If not specified, runs on all instances as specified in the
configuration (or just one default instance).
:type instances: collections.Iterable[unicode | str | NoneType]
:param map_name: Container map name. Optional - if not provided the default map is used.
:type map_name: unicode | str
:param kwargs: Additional kwargs for the policy method.
:return: Return values of actions.
:rtype: list[dockermap.map.runner.ActionOutput] |
def as_poly(self, margin_width=0, margin_height=0):
v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
c = np.array([self.cx, self.cy])
return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert]) | Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]]) |
def clear(self):
self.redis_conn.delete(self.window_key)
self.redis_conn.delete(self.moderate_key)
self.queue.clear() | Clears all data associated with the throttled queue |
def routes(cls, application=None):
if application:
for route in cls._routes:
application.add_handlers(route['host'], route['spec'])
else:
return [route['spec'] for route in cls._routes] | Method for adding the routes to the `tornado.web.Application`. |
def do_grep(self, path, match):
try:
children = self.get_children(path)
except (NoNodeError, NoAuthError):
children = []
for child in children:
full_path = os.path.join(path, child)
try:
value, _ = self.get(full_path)
except (NoNodeError, NoAuthError):
value = ""
if value is not None:
matches = [line for line in value.split("\n") if match.search(line)]
if len(matches) > 0:
yield (full_path, matches)
for mpath, matches in self.do_grep(full_path, match):
yield (mpath, matches) | grep's work horse |
def grad_desc_update(x, a, c, step=0.01):
return x - step * gradient(x,a,c) | Given a value of x, return a better x
using gradient descent |
def build_embedding_weights(word_index, embeddings_index):
logger.info('Loading embeddings for all words in the corpus')
embedding_dim = list(embeddings_index.values())[0].shape[-1]
embedding_weights = np.zeros((len(word_index), embedding_dim))
for word, i in word_index.items():
word_vector = embeddings_index.get(word)
if word_vector is not None:
embedding_weights[i] = word_vector
return embedding_weights | Builds an embedding matrix for all words in vocab using embeddings_index |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.