code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
# Return an MD5 hash for the locally cached copy of a requested fileserver
# file; returns an empty dict when the request is incomplete or not cached.
def file_hash(load, fnd):
    # 'env' is a legacy alias for 'saltenv'; drop it so only 'saltenv' is used.
    if 'env' in load:
        load.pop('env')
    ret = {}
    if 'saltenv' not in load:
        return ret
    if 'path' not in fnd or 'bucket' not in fnd or not fnd['path']:
        return ret
    cached_file_path = _get_cached_file_name(
        fnd['bucket'],
        load['saltenv'],
        fnd['path'])
    # Only hash files already present in the local cache.
    if os.path.isfile(cached_file_path):
        # NOTE(review): assumes get_hash's default algorithm is MD5 —
        # confirm against salt.utils.hashutils.get_hash.
        ret['hsum'] = salt.utils.hashutils.get_hash(cached_file_path)
        ret['hash_type'] = 'md5'
    return ret | Return an MD5 file hash |
# Return standard-deviation modification tables for the given rupture
# context and intensity measure type, one per requested stddev type.
def get_sigma_tables(self, imt, rctx, stddev_types):
    output_tables = []
    for stddev_type in stddev_types:
        # NOTE(review): substring membership — any imt.name that happens to
        # be a substring of 'PGA PGV' would match; presumably only 'PGA' or
        # 'PGV' reach this branch. Confirm with callers.
        if imt.name in 'PGA PGV':
            # Scalar IMTs: interpolate directly over magnitude.
            interpolator = interp1d(self.magnitudes,
                                    self.sigma[stddev_type][imt.name],
                                    axis=2)
            output_tables.append(
                interpolator(rctx.mag).reshape(self.shape[0],
                                               self.shape[3]))
        else:
            # Spectral acceleration: interpolate over log10(period) first,
            # then over magnitude.
            interpolator = interp1d(numpy.log10(self.periods),
                                    self.sigma[stddev_type]["SA"],
                                    axis=1)
            period_table = interpolator(numpy.log10(imt.period))
            mag_interpolator = interp1d(self.magnitudes,
                                        period_table,
                                        axis=1)
            output_tables.append(mag_interpolator(rctx.mag))
    return output_tables | Returns modification factors for the standard deviations, given the
rupture and intensity measure type.
:returns:
List of standard deviation modification tables, each as an array
of [Number Distances, Number Levels] |
# Build the full path to the configuration-schema file.
def schema_file(self):
    # NOTE(review): plain string concatenation — assumes self.lazy_folder
    # ends with a path separator (no separator is inserted before
    # schema_filename). Confirm against how these attributes are set.
    path = os.getcwd() + '/' + self.lazy_folder
    return path + self.schema_filename | Gets the full path to the file in which to load configuration schema. |
# Command the drone to move backwards at the configured speed.
def move_backward(self):
    # NOTE(review): pcmd(progressive=True, roll=0, pitch=self.speed, ...);
    # assumes positive pitch means backward in this API — confirm.
    self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0) | Make the drone move backwards. |
def navigation(self, id=None):
    """Function decorator for navbar registration.

    Convenience factory: the returned decorator calls
    :meth:`.register_element` with ``id`` and the decorated function.

    :param id: ID to pass on. If ``None``, uses the decorated function's
        name.
    """
    def register(func):
        # Fall back to the function's own name when no explicit id is given.
        self.register_element(id or func.__name__, func)
        return func
    return register
Convenience function, calls :meth:`.register_element` with ``id`` and
the decorated function as ``elem``.
:param id: ID to pass on. If ``None``, uses the decorated functions
name. |
# Read up to num_items entries from the J-Link trace buffer, starting at
# the given offset; returns the items actually read.
def trace_read(self, offset, num_items):
    # buf_size is passed by reference and updated with the count read.
    buf_size = ctypes.c_uint32(num_items)
    buf = (structs.JLinkTraceData * num_items)()
    res = self._dll.JLINKARM_TRACE_Read(buf, int(offset), ctypes.byref(buf_size))
    # A return value of 1 indicates failure.
    if (res == 1):
        raise errors.JLinkException('Failed to read from trace buffer.')
    return list(buf)[:int(buf_size.value)] | Reads data from the trace buffer and returns it.
Args:
self (JLink): the ``JLink`` instance.
offset (int): the offset from which to start reading from the trace
buffer.
num_items (int): number of items to read from the trace buffer.
Returns:
A list of ``JLinkTraceData`` instances corresponding to the items
read from the trace buffer. Note that this list may have size less
than ``num_items`` in the event that there are not ``num_items``
items in the trace buffer.
Raises:
JLinkException: on error. |
# POST a JWT's jti (and optional aud claim) to the blacklist endpoint.
def create(self, jti, aud=''):
    return self.client.post(self.url, data={'jti': jti, 'aud': aud}) | Adds a token to the blacklist.
Args:
jti (str): the jti of the JWT to blacklist.
aud (str, optional): The JWT's aud claim. The client_id of the
application for which it was issued.
body (dict):
See: https://auth0.com/docs/api/management/v2#!/Blacklists/post_tokens |
# Build a runtime subclass of spider_cls whose start_requests yields the
# given URLs/Request objects, optionally rebinding their callback.
def override_start_requests(spider_cls, start_urls, callback=None, **attrs):
    def start_requests():
        for url in start_urls:
            # Accept both URL strings and prebuilt Request objects.
            req = Request(url, dont_filter=True) if isinstance(url, six.string_types) else url
            if callback is not None:
                req.callback = callback
            yield req
    # staticmethod: the generated start_requests takes no self argument.
    attrs['start_requests'] = staticmethod(start_requests)
    return type(spider_cls.__name__, (spider_cls, ), attrs) | Returns a new spider class overriding the ``start_requests``.
This function is useful to replace the start requests of an existing spider
class on runtime.
Parameters
----------
spider_cls : scrapy.Spider
Spider class to be used as base class.
start_urls : iterable
Iterable of URLs or ``Request`` objects.
callback : callable, optional
Callback for the start URLs.
attrs : dict, optional
Additional class attributes.
Returns
-------
out : class
A subclass of ``spider_cls`` with overrided ``start_requests`` method. |
# Parse an English description of an integer, e.g. "two hundred and five".
def parseInt(self, words):
    # Normalize: drop "and", lower-case, and rewrite the article "a" as "one".
    words = words.replace(" and ", " ").lower()
    words = re.sub(r'(\b)a(\b)', '\g<1>one\g<2>', words)
    def textToNumber(s):
        a = re.split(r"[\s-]+", s)
        n = 0
        # g accumulates the current group (units/tens/hundreds) until a
        # magnitude word (thousand, million, ...) folds it into n.
        g = 0
        for w in a:
            x = NumberService.__small__.get(w, None)
            if x is not None:
                g += x
            elif w == "hundred":
                g *= 100
            else:
                x = NumberService.__magnitude__.get(w, None)
                if x is not None:
                    n += g * x
                    g = 0
                else:
                    raise NumberService.NumberException(
                        "Unknown number: " + w)
        return n + g
    return textToNumber(words) | Parses words to the integer they describe.
Args:
words (str): Description of the integer.
Returns:
An integer representation of the words. |
# Publish a GitHub release as a record via a newly created deposit.
def publish(self):
    # Nested transaction so a failure mid-publish rolls back cleanly.
    with db.session.begin_nested():
        deposit = self.deposit_class.create(self.metadata)
        deposit['_deposit']['created_by'] = self.event.user_id
        deposit['_deposit']['owners'] = [self.event.user_id]
        # Stream each release asset straight from the GitHub API.
        for key, url in self.files:
            deposit.files[key] = self.gh.api.session.get(
                url, stream=True).raw
        deposit.publish()
    recid, record = deposit.fetch_published()
    self.model.recordmetadata = record.model | Publish GitHub release as record. |
# HParams for the PPO-based model-based RL base configuration.
def rlmb_ppo_base():
  hparams = _rlmb_base()
  ppo_params = dict(
      base_algo="ppo",
      base_algo_params="ppo_original_params",
      real_batch_size=1,
      simulated_batch_size=16,
      eval_batch_size=32,
      # No PPO training epochs on real data within the joint loop.
      real_ppo_epochs_num=0,
      ppo_epochs_num=1000,
      ppo_epoch_length=hparams.simulated_rollout_length,
      ppo_eval_every_epochs=0,
      ppo_learning_rate_constant=1e-4,
      real_ppo_epoch_length=16 * 200,
      real_ppo_learning_rate_constant=1e-4,
      real_ppo_effective_num_agents=16,
      real_ppo_eval_every_epochs=0,
      simulation_flip_first_random_for_beginning=True,
  )
  update_hparams(hparams, ppo_params)
  return hparams | HParams for PPO base. |
def bcs_parameters(n_site, n_fermi, u, t):
    """Generate the parameters for the BCS ground state, i.e. the
    superconducting gap and the rotational angles in the Bogoliubov
    transformation.

    Args:
        n_site: the number of sites in the Hubbard model
        n_fermi: the number of fermions
        u: the interaction strength
        t: the tunneling strength

    Returns:
        float delta, List[float] bog_theta
    """
    momenta = np.linspace(0, 1, n_site, endpoint=False)
    # Tight-binding dispersion, shifted so the Fermi level sits at zero.
    hop_erg = -2 * t * np.cos(2 * np.pi * momenta)
    hop_erg = hop_erg - hop_erg[n_fermi // 2]

    def gap_equation(x):
        # 1 + (u / 2N) * sum_k 1/sqrt(eps_k^2 + x^2); its root is the gap.
        total = 0.
        for k in range(n_site):
            total += 1. / np.sqrt(hop_erg[k] ** 2 + x ** 2)
        return 1 + total * u / (2 * n_site)

    delta = scipy.optimize.bisect(gap_equation, 0.01, 10000. * abs(u))
    bcs_v = np.sqrt(0.5 * (1 - hop_erg / np.sqrt(hop_erg ** 2 + delta ** 2)))
    return delta, np.arcsin(bcs_v)
superconducting gap and the rotational angles in the Bogoliubov
transformation.
Args:
n_site: the number of sites in the Hubbard model
n_fermi: the number of fermions
u: the interaction strength
t: the tunneling strength
Returns:
float delta, List[float] bog_theta |
def is_vimball(fd):
    """Test for vimball archive format compliance.

    Simple check: does the first line of the file start with the standard
    vimball archive header?
    """
    fd.seek(0)
    try:
        first_line = fd.readline()
    except UnicodeDecodeError:
        # Undecodable (binary) content cannot be a vimball archive.
        return False
    return re.match('^" Vimball Archiver', first_line) is not None
Simple check to see if the first line of the file starts with standard
vimball archive header. |
# Given a ticket pk and its line pks, resolve the originating order and its
# lines, then delegate invoice creation to create_invoice_from_order.
# Returns that result, or a context dict with an 'error' message.
def create_invoice_from_ticket(pk, list_lines):
    context = {}
    if list_lines:
        # Map the ticket lines back to the order lines they came from.
        new_list_lines = [x[0] for x in SalesLineTicket.objects.values_list('line_order__pk').filter(pk__in=[int(x) for x in list_lines])]
        if new_list_lines:
            lo = SalesLineOrder.objects.values_list('order__pk').filter(pk__in=new_list_lines)[:1]
            if lo and lo[0] and lo[0][0]:
                new_pk = lo[0][0]
                return GenLineProduct.create_invoice_from_order(new_pk, new_list_lines)
            else:
                error = _('Pedido no encontrado')
        else:
            error = _('Lineas no relacionadas con pedido')
    else:
        error = _('Lineas no seleccionadas')
    context['error'] = error
    return context | la pk y list_lines son de ticket, necesitamos la info de las lineas de pedidos |
# Extract PROGRAM definitions from Fortran source text and attach any
# parsed documentation to the resulting module objects.
def _parse_programs(self, string, parent, filepath=None):
    moddocs = self.docparser.parse_docs(string)
    matches = self.RE_PROGRAM.finditer(string)
    result = []
    for rmodule in matches:
        name = rmodule.group("name").lower()
        # Join Fortran continuation lines ("&" at end of line) first.
        contents = re.sub("&[ ]*\n", "", rmodule.group("contents"))
        module = self._process_module(name, contents, parent, rmodule, filepath)
        if name in moddocs:
            module.docstring = self.docparser.to_doc(moddocs[name][0], name)
            module.docstart, module.docend = module.absolute_charindex(string, moddocs[name][1],
                                                                      moddocs[name][2])
        result.append(module)
    return result | Extracts a PROGRAM from the specified fortran code file. |
# Re-enable the connected device, allowing user activity on it again.
def enable_device(self):
    cmd_response = self.__send_command(const.CMD_ENABLEDEVICE)
    if cmd_response.get('status'):
        self.is_enabled = True
        return True
    else:
        raise ZKErrorResponse("Can't enable device") | re-enable the connected device and allow user activity in device again
:return: bool |
# Change the miner's pool configuration (up to three pools); this call
# restarts cgminer on the device.
def updatePools(self,
               pool1,
               username1,
               password1,
               pool2=None,
               username2=None,
               password2=None,
               pool3=None,
               username3=None,
               password3=None):
    return self.__post('/api/updatePools',
                       data={
                           'Pool1': pool1,
                           'UserName1': username1,
                           'Password1': password1,
                           'Pool2': pool2,
                           'UserName2': username2,
                           'Password2': password2,
                           'Pool3': pool3,
                           'UserName3': username3,
                           'Password3': password3,
                       }) | Change the pools of the miner. This call will restart cgminer. |
# Collect attribute callbacks for a service from the deprecated setting
# plus all configured backends.
def get_callbacks(service):
    callbacks = list(getattr(settings, 'MAMA_CAS_ATTRIBUTE_CALLBACKS', []))
    if callbacks:
        warnings.warn(
            'The MAMA_CAS_ATTRIBUTE_CALLBACKS setting is deprecated. Service callbacks '
            'should be configured using MAMA_CAS_SERVICES.', DeprecationWarning)
    for backend in _get_backends():
        try:
            callbacks.extend(backend.get_callbacks(service))
        except AttributeError:
            # NOTE(review): an AttributeError raised *inside* a backend's
            # get_callbacks would also land here and be misreported as
            # "not implemented".
            raise NotImplementedError("%s.%s.get_callbacks() not implemented" % (
                backend.__class__.__module__, backend.__class__.__name__)
            )
    return callbacks | Get configured callbacks list for a given service identifier. |
def get_assessment_ids_by_bank(self, bank_id):
    """Gets the list of ``Assessment`` ``Ids`` associated with a ``Bank``.

    arg: bank_id (osid.id.Id): ``Id`` of the ``Bank``
    return: (osid.id.IdList) - list of related assessment ``Ids``
    raise: NotFound - ``bank_id`` is not found
    raise: NullArgument - ``bank_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    return IdList([assessment.get_id()
                   for assessment in self.get_assessments_by_bank(bank_id)])
arg: bank_id (osid.id.Id): ``Id`` of the ``Bank``
return: (osid.id.IdList) - list of related assessment ``Ids``
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* |
# Run the worker without closing connections — useful when testing.
async def async_run(self) -> None:
    # Keep a handle on the task so it can be awaited/cancelled externally.
    self.main_task = self.loop.create_task(self.main())
    await self.main_task | Asynchronously run the worker, does not close connections. Useful when testing. |
# Render each metric series as a one-line unicode sparkline.
def output_metric(gandi, metrics, key, justify=10):
    for metric in metrics:
        key_name = metric[key].pop()
        values = [point.get('value', 0) for point in metric['points']]
        # Only draw a sparkline when at least one point is non-zero.
        graph = sparks(values) if max(values) else ''
        # (2, 8) is effectively "any Python 2": encode unicode for output.
        if sys.version_info < (2, 8):
            graph = graph.encode('utf-8')
        output_line(gandi, key_name, graph, justify) | Helper to output metrics. |
def substitute_filename(fn, variables):
    """Substitute ``+variable+`` placeholders in file/directory names.

    Each key in *variables* replaces its ``+key+`` placeholder with the
    string form of its value.
    """
    for name, value in variables.items():
        placeholder = '+%s+' % name
        fn = fn.replace(placeholder, str(value))
    return fn
def content_type(self, lst):
    """Set the CoRE Link Format ``ct`` attribute of the resource.

    :param lst: either a single content-type name (str), looked up in
        ``defines.Content_types``, or a list of ct values added as-is.
    """
    # Removed the unused local ``value = []`` from the original body.
    if isinstance(lst, str):
        # Translate the symbolic name into its numeric content-type code.
        ct = defines.Content_types[lst]
        self.add_content_type(ct)
    elif isinstance(lst, list):
        for ct in lst:
            self.add_content_type(ct)
:param lst: the list of CoRE Link Format ct attribute of the resource |
def get_klass(self):
    """Return the agnocomplete class to be used with the eventual query.

    Raises Http404 when the requested class name is not registered.
    """
    requested = self.kwargs.get('klass', None)
    found = self.registry.get(requested, None)
    if not found:
        raise Http404("Unknown autocomplete class `{}`".format(requested))
    return found
# Record an error (or skip) for a test in the Xunit report data, updating
# both global and per-group statistics.
def addError(self, test, err, capt=None):
    exc_type, exc_val, tb = err
    # Rebuild a traceback string, wrapping non-exception values so
    # format_exception always receives a proper exception instance.
    tb = ''.join(traceback.format_exception(
        exc_type,
        exc_val if isinstance(exc_val, exc_type) else exc_type(exc_val),
        tb
    ))
    name = id_split(test.id())
    group = self.report_data[name[0]]
    # SkipTest-derived errors count as skips, not errors.
    if issubclass(err[0], SkipTest):
        type = 'skipped'  # NOTE(review): shadows the builtin `type`.
        self.stats['skipped'] += 1
        group.stats['skipped'] += 1
    else:
        type = 'error'
        self.stats['errors'] += 1
        group.stats['errors'] += 1
    group.tests.append({
        'name': name[-1],
        'failed': True,
        'type': type,
        'errtype': nice_classname(err[0]),
        'message': exc_message(err),
        'tb': tb,
    }) | Add error output to Xunit report. |
def peekView(self, newLength):
    """Return a zero-copy memoryview of the next *newLength* bytes."""
    start = self.offset
    view = memoryview(self.buf)
    return view[start:start + newLength]
# Attach permissive CORS headers (any origin, GET/OPTIONS only) needed by
# the web app implementation.
def add_cors_headers(request, response):
    response.headerlist.append(('Access-Control-Allow-Origin', '*'))
    response.headerlist.append(
        ('Access-Control-Allow-Methods', 'GET, OPTIONS'))
    response.headerlist.append(
        ('Access-Control-Allow-Headers',
         ','.join(DEFAULT_ACCESS_CONTROL_ALLOW_HEADERS))) | Add cors headers needed for web app implementation. |
# Check whether every constraint in the tuple appears in the whitelist of
# acceptable interpreter constraints.
def _constraints_are_whitelisted(self, constraint_tuple):
    # An empty whitelist means "accept everything".
    if self._acceptable_interpreter_constraints == []:
        return True
    return all(version.parse(constraint) in self._acceptable_interpreter_constraints
               for constraint in constraint_tuple) | Detect whether a tuple of compatibility constraints
def is_program_installed(basename):
    """Return the absolute path of *basename* if found on PATH.

    Otherwise, return None (implicitly).
    """
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = osp.join(directory, basename)
        if osp.isfile(candidate):
            return candidate
Otherwise, return None |
def declassify(to_remove, *args, **kwargs):
    """Flatten the return values of the mite api.

    Wraps a function so that dict results (or lists of dicts) are unwrapped
    by the *to_remove* key; results lacking that key pass through unchanged.
    """
    def argdecorate(fn):
        @wraps(fn)
        def declassed(*inner_args, **inner_kwargs):
            result = fn(*inner_args, **inner_kwargs)
            try:
                # Exact list check (not isinstance) preserved deliberately.
                if type(result) is list:
                    return [item[to_remove] for item in result]
                return result[to_remove]
            except KeyError:
                # Key absent: hand back the raw result untouched.
                return result
        return declassed
    return argdecorate
# Assert a GraphQL schema is valid; raise TypeError with all messages.
def assert_valid_schema(schema: GraphQLSchema) -> None:
    errors = validate_schema(schema)
    if errors:
        # Join every validation message into one TypeError.
        raise TypeError("\n\n".join(error.message for error in errors)) | Utility function which asserts a schema is valid.
Throws a TypeError if the schema is invalid. |
# Import and cache the per-namespace foreign-structs submodule; raise
# ForeignError when the namespace is unsupported.
def get_foreign_module(namespace):
    if namespace not in _MODULES:
        try:
            module = importlib.import_module("." + namespace, __package__)
        except ImportError:
            # Cache the failure too, so the import is not retried each call.
            module = None
        _MODULES[namespace] = module
    module = _MODULES.get(namespace)
    if module is None:
        raise ForeignError("Foreign %r structs not supported" % namespace)
    return module | Returns the module or raises ForeignError |
# Validate *data* against this object's schema; the validator raises on
# failure, so a silent return means the data is valid.
def validate(self, data):
    validator = self._schema.validator(self._id)
    validator.validate(data) | Validate the data against the schema. |
# Combine several WSGI apps as servers in one tornado event loop process.
def tornado_combiner(configs, use_gevent=False, start=True, monkey_patch=None,
                     Container=None, Server=None, threadpool=None):
    servers = []
    # Default monkey-patching to whether gevent is in use.
    if monkey_patch is None:
        monkey_patch = use_gevent
    if use_gevent:
        if monkey_patch:
            from gevent import monkey
            monkey.patch_all()
    # Accept either a ThreadPool instance or a worker count.
    if threadpool is not None:
        from multiprocessing.pool import ThreadPool
        if not isinstance(threadpool, ThreadPool):
            threadpool = ThreadPool(threadpool)
    for config in configs:
        app = config['app']
        port = config.get('port', 5000)
        address = config.get('address', '')
        # Each server is created unstarted; the shared loop starts below.
        server = tornado_run(app, use_gevent=use_gevent, port=port,
                             monkey_patch=False, address=address, start=False,
                             Container=Container,
                             Server=Server, threadpool=threadpool)
        servers.append(server)
    if start:
        tornado_start()
    return servers | Combine servers in one tornado event loop process
:param configs: [
{
'app': Microservice Application or another wsgi application, required
'port': int, default: 5000
'address': str, default: ""
},
{ ... }
]
:param use_gevent: if True, app.wsgi will be run in gevent.spawn
:param start: if True, will be call utils.tornado_start()
:param Container: your class, bases on tornado.wsgi.WSGIContainer, default: tornado.wsgi.WSGIContainer
:param Server: your class, bases on tornado.httpserver.HTTPServer, default: tornado.httpserver.HTTPServer
:param monkey_patch: boolean, use gevent.monkey.patch_all() for patching standard modules, default: use_gevent
:return: list of tornado servers |
def get_all_function_definitions(base_most_function):
    """Obtains all function definitions given a base-most function: the
    provided function plus any overrides of it in derived contracts.

    Returns:
        (list): the provided function followed by matching overrides.
    """
    target_name = base_most_function.full_name
    overrides = []
    for derived in base_most_function.contract.derived_contracts:
        for candidate in derived.functions:
            if candidate.full_name == target_name:
                overrides.append(candidate)
    return [base_most_function] + overrides
overrides of that function.
Returns:
(list): Returns any the provided function and any overriding functions defined for it. |
def get_paths(folder, ignore_endswith=ignore_endswith):
    """Return hologram file paths under *folder*, sorted.

    Parameters
    ----------
    folder: str or pathlib.Path
        Path to search folder
    ignore_endswith: list
        Filename-ending strings indicating which files to ignore
        (defaults to the module-level ``ignore_endswith`` list).
    """
    root = pathlib.Path(folder).resolve()
    candidates = root.rglob("*")
    for suffix in ignore_endswith:
        candidates = [p for p in candidates if not p.name.endswith(suffix)]
    return sorted(candidates)
Parameters
----------
folder: str or pathlib.Path
Path to search folder
ignore_endswith: list
List of filename ending strings indicating which
files should be ignored. |
# Split an ident_hash into (id, version), resolving the latest version
# when the hash does not carry one.
def get_id_n_version(ident_hash):
    try:
        id, version = split_ident_hash(ident_hash)
    except IdentHashMissingVersion:
        # Local imports avoid a hard dependency / cycle at module load time.
        from pyramid.httpexceptions import HTTPNotFound
        from cnxarchive.views.helpers import get_latest_version
        try:
            version = get_latest_version(ident_hash)
        except HTTPNotFound:
            raise NotFound(ident_hash)
        id, version = split_ident_hash(join_ident_hash(ident_hash, version))
    else:
        verify_id_n_version(id, version)
    return id, version | From the given ``ident_hash`` return the id and version. |
# Fit the imputer on X: validate input, resolve the fill value, and
# compute per-feature statistics (sparse or dense path).
def fit(self, X, y=None):
    X = self._validate_input(X)
    # Default fill value depends on dtype: 0 for numeric data, a string
    # sentinel for object/categorical data.
    if self.fill_value is None:
        if X.dtype.kind in ("i", "u", "f"):
            fill_value = 0
        else:
            fill_value = "missing_value"
    else:
        fill_value = self.fill_value
    # The "constant" strategy on numeric data requires a numeric fill.
    if (self.strategy == "constant" and
            X.dtype.kind in ("i", "u", "f") and
            not isinstance(fill_value, numbers.Real)):
        raise ValueError("'fill_value'={0} is invalid. Expected a "
                         "numerical value when imputing numerical "
                         "data".format(fill_value))
    if sparse.issparse(X):
        # Sparse matrices make implicit zeros indistinguishable from
        # missing values when missing_values == 0.
        if self.missing_values == 0:
            raise ValueError("Imputation not possible when missing_values "
                             "== 0 and input is sparse. Provide a dense "
                             "array instead.")
        else:
            self.statistics_ = self._sparse_fit(X,
                                                self.strategy,
                                                self.missing_values,
                                                fill_value)
    else:
        self.statistics_ = self._dense_fit(X,
                                           self.strategy,
                                           self.missing_values,
                                           fill_value)
    return self | Fit the imputer on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data, where ``n_samples`` is the number of samples and
``n_features`` is the number of features.
Returns
-------
self : _SimpleImputer |
# Build a segmentlist from the segments of the entries in a LAL cache
# file object; coltype controls the boundary type of each segment.
def fromlalcache(cachefile, coltype = int):
    return segments.segmentlist(lal.CacheEntry(l, coltype = coltype).segment for l in cachefile) | Construct a segmentlist representing the times spanned by the files
identified in the LAL cache contained in the file object file. The
segmentlist will be created with segments whose boundaries are of
type coltype, which should raise ValueError if it cannot convert
its string argument.
Example:
>>> from pycbc_glue.lal import LIGOTimeGPS
>>> cache_seglists = fromlalcache(open(filename), coltype = LIGOTimeGPS).coalesce()
See also:
pycbc_glue.lal.CacheEntry |
def memoize(func):
    """Classic memoize decorator for non-class methods.

    Results are cached on the string form of the positional arguments,
    which allows unhashable arguments (at some collision risk).
    """
    cache = {}

    @functools.wraps(func)
    def wrapper(*args):
        cache_key = "__".join(str(a) for a in args)
        if cache_key not in cache:
            cache[cache_key] = func(*args)
        return cache[cache_key]

    return wrapper
# Parse the rlbot.cfg file on disk into a MatchConfig datastructure.
def read_match_config_from_file(match_config_path: Path) -> MatchConfig:
    config_obj = create_bot_config_layout()
    config_obj.parse_file(match_config_path, max_index=MAX_PLAYERS)
    return parse_match_config(config_obj, match_config_path, {}, {}) | Parse the rlbot.cfg file on disk into the python datastructure. |
# Wrap a Ray ObjectID in a Future that completes when the object is
# available in the plasma store.
def as_future(self, object_id, check_ready=True):
    if not isinstance(object_id, ray.ObjectID):
        raise TypeError("Input should be an ObjectID.")
    plain_object_id = plasma.ObjectID(object_id.binary())
    fut = PlasmaObjectFuture(loop=self._loop, object_id=plain_object_id)
    if check_ready:
        # Fast path: complete immediately when the object already exists.
        ready, _ = ray.wait([object_id], timeout=0)
        if ready:
            if self._loop.get_debug():
                logger.debug("%s has been ready.", plain_object_id)
            self._complete_future(fut)
            return fut
    # Otherwise queue the future on the per-object waiting list, creating
    # the list (with its cleanup callback) on first use.
    if plain_object_id not in self._waiting_dict:
        linked_list = PlasmaObjectLinkedList(self._loop, plain_object_id)
        linked_list.add_done_callback(self._unregister_callback)
        self._waiting_dict[plain_object_id] = linked_list
    self._waiting_dict[plain_object_id].append(fut)
    if self._loop.get_debug():
        logger.debug("%s added to the waiting list.", fut)
    return fut | Turn an object_id into a Future object.
Args:
object_id: A Ray's object_id.
check_ready (bool): If true, check if the object_id is ready.
Returns:
PlasmaObjectFuture: A future object that waits the object_id. |
# Configure Windows firewall logging via the netsh/LGPO helper.
def set_logging_settings(profile, setting, value, store='local'):
    # NOTE(review): the bare `r` below is a dataset artifact of the r'''
    # raw docstring having been split off; kept byte-identical here.
    r
    return salt.utils.win_lgpo_netsh.set_logging_settings(profile=profile,
                                                          setting=setting,
                                                          value=value,
                                                          store=store) | r'''
store=store) | r'''
Configure logging settings for the Windows firewall.
.. versionadded:: 2018.3.4
.. versionadded:: 2019.2.0
Args:
profile (str):
The firewall profile to configure. Valid options are:
- domain
- public
- private
setting (str):
The logging setting to configure. Valid options are:
- allowedconnections
- droppedconnections
- filename
- maxfilesize
value (str):
The value to apply to the setting. Valid values are dependent upon
the setting being configured. Valid options are:
allowedconnections:
- enable
- disable
- notconfigured
droppedconnections:
- enable
- disable
- notconfigured
filename:
- Full path and name of the firewall log file
- notconfigured
maxfilesize:
- 1 - 32767
- notconfigured
.. note::
``notconfigured`` can only be used when using the lgpo store
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
bool: ``True`` if successful
Raises:
CommandExecutionError: If an error occurs
ValueError: If the parameters are incorrect
CLI Example:
.. code-block:: bash
# Log allowed connections and set that in local group policy
salt * firewall.set_logging_settings domain allowedconnections enable lgpo
# Don't log dropped connections
salt * firewall.set_logging_settings profile=private setting=droppedconnections value=disable
# Set the location of the log file
salt * firewall.set_logging_settings domain filename C:\windows\logs\firewall.log
# You can also use environment variables
salt * firewall.set_logging_settings domain filename %systemroot%\system32\LogFiles\Firewall\pfirewall.log
# Set the max file size of the log to 2048 Kb
salt * firewall.set_logging_settings domain maxfilesize 2048 |
# Return a matplotlib PathPatch outlining the region bounded by the arcs.
def make_patch(self):
    path = [self.arcs[0].start_point()]
    for a in self.arcs:
        if a.direction:
            vertices = Path.arc(a.from_angle, a.to_angle).vertices
        else:
            # Reverse-direction arcs: take the arc backwards by flipping
            # the vertex order.
            vertices = Path.arc(a.to_angle, a.from_angle).vertices
            vertices = vertices[np.arange(len(vertices) - 1, -1, -1)]
            vertices = vertices * a.radius + a.center
            path = path + list(vertices[1:])
    # Code 1 = MOVETO for the first point, 4 = CURVE4 for the rest.
    codes = [1] + [4] * (len(path) - 1)
    return PathPatch(Path(path, codes)) | Retuns a matplotlib PathPatch representing the current region. |
def _opts_to_dict(*opts):
ret = {}
for key, val in opts:
if key[:2] == '--':
key = key[2:]
elif key[:1] == '-':
key = key[1:]
if val == '':
val = True
ret[key.replace('-','_')] = val
return ret | Convert a tuple of options returned from getopt into a dictionary. |
# %prog bed contigfile — write each contig's associated reads as BED lines.
def bed(args):
    p = OptionParser(main.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    contigfile, = args
    # Derive the output name by swapping the extension for .bed.
    bedfile = contigfile.rsplit(".", 1)[0] + ".bed"
    fw = open(bedfile, "w")
    c = ContigFile(contigfile)
    for rec in c.iter_records():
        for r in rec.reads:
            print(r.bedline, file=fw)
    logging.debug("File written to `{0}`.".format(bedfile))
    return bedfile | %prog bed contigfile
Prints out the contigs and their associated reads. |
# Create a pipeline whose context auto-executes on clean exit (i.e. when
# no exception was raised inside the `with` block).
def autoexec(pipe=None, name=None, exit_handler=None):
    return pipeline(pipe=pipe, name=name, autoexec=True,
                    exit_handler=exit_handler) | create a pipeline with a context that will automatically execute the
pipeline upon leaving the context if no exception was raised.
:param pipe:
:param name:
:return: |
def hmsm_to_days(hour=0, min=0, sec=0, micro=0):
    """Convert hours, minutes, seconds, and microseconds to fractional days.

    Parameters
    ----------
    hour : int, optional
        Hour number. Defaults to 0.
    min : int, optional
        Minute number. Defaults to 0.
    sec : int, optional
        Second number. Defaults to 0.
    micro : int, optional
        Microsecond number. Defaults to 0.

    Returns
    -------
    days : float
        Fractional days.

    Examples
    --------
    >>> hmsm_to_days(hour=6)
    0.25
    """
    # Fold each unit into the next larger one, then scale to days.
    total_seconds = sec + (micro / 1.e6)
    total_minutes = min + (total_seconds / 60.)
    total_hours = hour + (total_minutes / 60.)
    return total_hours / 24.
Parameters
----------
hour : int, optional
Hour number. Defaults to 0.
min : int, optional
Minute number. Defaults to 0.
sec : int, optional
Second number. Defaults to 0.
micro : int, optional
Microsecond number. Defaults to 0.
Returns
-------
days : float
Fractional days.
Examples
--------
>>> hmsm_to_days(hour=6)
0.25 |
# Convert x/y point coordinates to an OGR point geometry.
def xy2geom(x, y, t_srs=None):
    geom_wkt = 'POINT({0} {1})'.format(x, y)
    geom = ogr.CreateGeometryFromWkt(geom_wkt)
    # Transform only when a source SRS is given and differs from WGS84.
    if t_srs is not None and not wgs_srs.IsSame(t_srs):
        ct = osr.CoordinateTransformation(t_srs, wgs_srs)
        geom.Transform(ct)
    # NOTE(review): the geometry is transformed *into* WGS84 but then tagged
    # with the source SRS t_srs — confirm this is intended.
    geom.AssignSpatialReference(t_srs)
    return geom | Convert x and y point coordinates to geom |
# Strip the longest common suffix from the back of a string column,
# recording which rows matched when the suffix is not universal.
def smart_scrubb(df,col_name,error_rate = 0):
    scrubbed = ""
    while True:
        # Count each candidate suffix one character longer than current.
        valcounts = df[col_name].str[-len(scrubbed)-1:].value_counts()
        if not len(valcounts):
            break
        # NOTE(review): positional valcounts[0] relies on older pandas
        # label-vs-position behavior — confirm (iloc[0] may be intended).
        if not valcounts[0] >= (1-error_rate) * _utils.rows(df):
            break
        scrubbed=valcounts.index[0]
    if scrubbed == '':
        return None
    which = df[col_name].str.endswith(scrubbed)
    _basics.col_scrubb(df,col_name,which,len(scrubbed),True)
    # If only some rows matched, append a boolean indicator column.
    if not which.all():
        new_col_name = _basics.colname_gen(df,"{}_sb-{}".format(col_name,scrubbed))
        df[new_col_name] = which
    return scrubbed | Scrubs from the back of an 'object' column in a DataFrame
until the scrub would semantically alter the contents of the column. If only a
subset of the elements in the column are scrubbed, then a boolean array indicating which
elements have been scrubbed is appended to the dataframe. Returns the string that was scrubbed.
df - DataFrame
DataFrame to scrub
col_name - string
Name of column to scrub
error_rate - number, default 0
The maximum amount of values this function can ignore while scrubbing, expressed as a
fraction of the total amount of rows in the dataframe. |
# Nest the git checkout path inside the project path (eases docker builds).
def path(self) -> str:
    return os.path.join(self.project_path, self.project.name) | We need to nest the git path inside the project path to make it easier
to create docker images. |
# Forward a save_policy change to the wandb background process (socket or
# Jupyter agent); errors out if wandb.init has not been called.
def send_message(self, options):
    if not options.get("save_policy"):
        raise ValueError("Only configuring save_policy is supported")
    if self.socket:
        self.socket.send(options)
    elif self._jupyter_agent:
        # In Jupyter there is no socket; go through the run manager.
        self._jupyter_agent.start()
        self._jupyter_agent.rm.update_user_file_policy(
            options["save_policy"])
    else:
        wandb.termerror(
            "wandb.init hasn't been called, can't configure run") | Sends a message to the wandb process changing the policy
of saved files. This is primarily used internally by wandb.save |
# Word-level n-grams found in both strings (delegates to __matches).
def word_matches(s1, s2, n=3):
    return __matches(s1, s2, word_ngrams, n=n) | Word-level n-grams that match between two strings
Args:
s1: a string
s2: another string
n: an int for the n in n-gram
Returns:
set: the n-grams found in both strings |
# Set a Postgres run-time connection parameter via the pg_settings view.
def _configure_connection(self, name, value):
    self.update("pg_settings", dict(setting=value), dict(name=name)) | Sets a Postgres run-time connection configuration parameter.
:param name: the name of the parameter
:param value: a list of values matching the placeholders |
# Left-click handler for the marker canvas: dispatch to the active item's
# registered left_callback (no-op when nothing is active).
def _left_click(self, event):
    self.update_active()
    iid = self.current_iid
    if iid is None:
        return
    args = (iid, event.x_root, event.y_root)
    self.call_callbacks(iid, "left_callback", args) | Function bound to left click event for marker canvas |
# Subscribe all streams to their inputs, all plugins to theirs, and all
# outbound streams to the plugin outputs that feed them.
def _subscribe_all(self):
    for stream in (self.inbound_streams + self.outbound_streams):
        for input_ in stream.inputs:
            # NOTE(review): int inputs are skipped — presumably they denote
            # ports rather than named sources; confirm.
            if not type(input_) is int and input_ is not None:
                self._subscribe(stream, input_)
    for plugin in self.plugins:
        for input_ in plugin.inputs:
            self._subscribe(plugin, input_)
        for output in plugin.outputs:
            # Find the outbound stream named by this plugin output.
            subscriber = next((x for x in self.outbound_streams
                               if x.name == output), None)
            if subscriber is None:
                log.warn('The outbound stream {} does not '
                         'exist so will not receive messages '
                         'from {}'.format(output, plugin))
            else:
                self._subscribe(subscriber, plugin.name) | Subscribes all streams to their input.
Subscribes all plugins to all their inputs.
Subscribes all plugin outputs to the plugin. |
def get_args(obj):
    """Get a list of argument names for a callable.

    For bound methods, classes, and callable instances the implicit first
    argument (self/cls) is dropped.

    Raises:
        TypeError: if *obj* is not a supported callable.
    """
    # getfullargspec replaces inspect.getargspec, which was deprecated in
    # Python 3.0 and removed in Python 3.11; .args has the same meaning.
    if inspect.isfunction(obj):
        return inspect.getfullargspec(obj).args
    elif inspect.ismethod(obj):
        return inspect.getfullargspec(obj).args[1:]
    elif inspect.isclass(obj):
        return inspect.getfullargspec(obj.__init__).args[1:]
    elif hasattr(obj, '__call__'):
        return inspect.getfullargspec(obj.__call__).args[1:]
    else:
        raise TypeError("Can't inspect signature of '%s' object." % obj)
# SQLAlchemy before-insert listener: require a sequence_id and enforce
# that only the 'id' column carries sequence_id 1, then reuse the
# before-update logic to build the ObjectNumber id.
def before_insert(mapper, conn, target):
    if target.sequence_id is None:
        # Local import keeps the exception module out of the hot path.
        from ambry.orm.exc import DatabaseError
        raise DatabaseError('Must have sequence_id before insertion')
    assert (target.name == 'id') == (target.sequence_id == 1), (target.name, target.sequence_id)
    Column.before_update(mapper, conn, target) | event.listen method for Sqlalchemy to set the seqience_id for this
object and create an ObjectNumber value for the id_ |
# Wrap each raw element and append it to the current section; the element
# name serves as its identifier.
def add_element_list(self, elt_list, **kwargs):
    for e in elt_list:
        self.add_element(Element(e, **kwargs)) | Helper to add a list of similar elements to the current section.
Element names will be used as an identifier. |
# Build the SCM tool for a component from its configured tool key.
def _make_scm(current_target):
    tool_key = devpipeline_core.toolsupport.choose_tool_key(
        current_target, devpipeline_scm._SCM_TOOL_KEYS
    )
    return devpipeline_core.toolsupport.tool_builder(
        current_target.config, tool_key, devpipeline_scm.SCMS, current_target
    ) | Create an Scm for a component.
Arguments
component - The component being operated on. |
# Enable a named menu effect; raises KeyError for unknown effect names.
def enableEffect(self, name, **kwargs):
    if name not in VALID_EFFECTS:
        raise KeyError("KezMenu doesn't know an effect of type %s" % name)
    # Dispatch to the matching _effectinit_<name> initializer (dashes in
    # effect names map to underscores in method names).
    self.__getattribute__(
        '_effectinit_{}'.format(name.replace("-", "_"))
    )(name, **kwargs) | Enable an effect in the KezMenu. |
# Query the /echo endpoint for the value of a variable ('*' = all).
def echo(self, variableName, verbose=False):
    PARAMS={"variableName":variableName}
    response=api(url=self.__url+"/echo", PARAMS=PARAMS, verbose=verbose)
    return response | The echo command will display the value of the variable specified by the
variableName argument, or all variables if variableName is not provided.
:param variableName: The name of the variable or '*' to display the value of all variables.
:param verbose: print more |
# Write the assembled model's SIF string to *fname*, UTF-8 encoded.
def save_model(self, fname, include_unsigned_edges=False):
    sif_str = self.print_model(include_unsigned_edges)
    with open(fname, 'wb') as fh:
        fh.write(sif_str.encode('utf-8')) | Save the assembled model's SIF string into a file.
Parameters
----------
fname : str
The name of the file to save the SIF into.
include_unsigned_edges : bool
If True, includes edges with an unknown activating/inactivating
relationship (e.g., most PTMs). Default is False. |
# Initialize baremetal switch interfaces and their DB host mappings.
# Interfaces are unknown until transactions arrive, so this runs lazily:
# unmapped interfaces on active switches are initialized on the driver,
# and every connection gets a (reserved) host mapping row.
def _init_baremetal_trunk_interfaces(self, port_seg, segment):
    list_to_init = []
    inactive_switch = []
    connections = self._get_baremetal_connections(
        port_seg, False, True)
    for switch_ip, intf_type, port, is_native, _ in connections:
        try:
            nxos_db.get_switch_if_host_mappings(
                switch_ip,
                nexus_help.format_interface_name(intf_type, port))
        except excep.NexusHostMappingNotFound:
            # No mapping yet: queue for init, split by switch liveness.
            if self.is_switch_active(switch_ip):
                list_to_init.append(
                    (switch_ip, intf_type, port, is_native, 0))
            else:
                inactive_switch.append(
                    (switch_ip, intf_type, port, is_native, 0))
    self.driver.initialize_baremetal_switch_interfaces(list_to_init)
    host_id = port_seg.get('dns_name')
    if host_id is None:
        host_id = const.RESERVED_PORT_HOST_ID
    # Record mappings for inactive-switch connections too; ch_grp 0 means
    # no port-channel was detected for the interface.
    list_to_init += inactive_switch
    for switch_ip, intf_type, port, is_native, ch_grp in list_to_init:
        nxos_db.add_host_mapping(
            host_id,
            switch_ip,
            nexus_help.format_interface_name(intf_type, port),
            ch_grp, False) | Initialize baremetal switch interfaces and DB entry.
With baremetal transactions, the interfaces are not
known during initialization so they must be initialized
when the transactions are received.
* Reserved switch entries are added if needed.
* Reserved port entries are added.
* Determine if port channel is configured on the
interface and store it so we know to create a port-channel
binding instead of that defined in the transaction.
In this case, the RESERVED binding is the ethernet interface
with port-channel stored in channel-group field.
When this channel-group is not 0, we know to create a port binding
as a port-channel instead of interface ethernet. |
def gen_shell(opts, **kwargs):
if kwargs['winrm']:
try:
import saltwinshell
shell = saltwinshell.Shell(opts, **kwargs)
except ImportError:
log.error('The saltwinshell library is not available')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
shell = Shell(opts, **kwargs)
return shell | Return the correct shell interface for the target system |
def validate_redirect_uri(value):
sch, netloc, path, par, query, fra = urlparse(value)
if not (sch and netloc):
raise InvalidRedirectURIError()
if sch != 'https':
if ':' in netloc:
netloc, port = netloc.split(':', 1)
if not (netloc in ('localhost', '127.0.0.1') and sch == 'http'):
raise InsecureTransportError() | Validate a redirect URI.
Redirect URIs must be a valid URL and use https unless the host is
localhost for which http is accepted.
:param value: The redirect URI. |
def QueryFields(r, what, fields=None):
query = {}
if fields is not None:
query["fields"] = ",".join(fields)
return r.request("get", "/2/query/%s/fields" % what, query=query) | Retrieves available fields for a resource.
@type what: string
@param what: Resource name, one of L{constants.QR_VIA_RAPI}
@type fields: list of string
@param fields: Requested fields
@rtype: string
@return: job id |
def interactive_shell():
print('You should be able to read and update the "counter[0]" variable from this shell.')
try:
yield from embed(globals=globals(), return_asyncio_coroutine=True, patch_stdout=True)
except EOFError:
loop.stop() | Coroutine that starts a Python REPL from which we can access the global
counter variable. |
def place_analysis_summary_report(feature, parent):
_ = feature, parent
analysis_dir = get_analysis_dir(exposure_place['key'])
if analysis_dir:
return get_impact_report_as_string(analysis_dir)
return None | Retrieve an HTML place analysis table report from a multi exposure
analysis. |
def program_rtr(self, args, rout_id, namespace=None):
if namespace is None:
namespace = self.find_rtr_namespace(rout_id)
if namespace is None:
LOG.error("Unable to find namespace for router %s", rout_id)
return False
final_args = ['ip', 'netns', 'exec', namespace] + args
try:
utils.execute(final_args, root_helper=self.root_helper)
except Exception as e:
LOG.error("Unable to execute %(cmd)s. "
"Exception: %(exception)s",
{'cmd': final_args, 'exception': e})
return False
return True | Execute the command against the namespace. |
def get_logger_data(self):
return {
address : stream_capturer[0].dump_handler_config_data()
for address, stream_capturer in self._stream_capturers.iteritems()
} | Return data on managed loggers.
Returns a dictionary of managed logger configuration data. The format
is primarily controlled by the
:func:`SocketStreamCapturer.dump_handler_config_data` function::
{
<capture address>: <list of handler config for data capturers>
} |
def hkdf(self, chaining_key, input_key_material, dhlen=64):
if len(chaining_key) != self.HASHLEN:
raise HashError("Incorrect chaining key length")
if len(input_key_material) not in (0, 32, dhlen):
raise HashError("Incorrect input key material length")
temp_key = self.hmac_hash(chaining_key, input_key_material)
output1 = self.hmac_hash(temp_key, b'\x01')
output2 = self.hmac_hash(temp_key, output1 + b'\x02')
return output1, output2 | Hash-based key derivation function
Takes a ``chaining_key'' byte sequence of len HASHLEN, and an
``input_key_material'' byte sequence with length either zero
bytes, 32 bytes or dhlen bytes.
Returns two byte sequences of length HASHLEN |
def intersection(self, other, sort=False):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
self_tuples = self._ndarray_values
other_tuples = other._ndarray_values
uniq_tuples = set(self_tuples) & set(other_tuples)
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names) | Form the intersection of two MultiIndex objects.
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default False
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default from ``True`` to ``False``, to match
behaviour from before 0.24.0
Returns
-------
Index |
def check(self, return_code=0):
ret = self.call().return_code
ok = ret == return_code
if not ok:
raise EasyProcessError(
self, 'check error, return code is not {0}!'.format(return_code))
return self | Run command with arguments. Wait for command to complete. If the
exit code was as expected and there is no exception then return,
otherwise raise EasyProcessError.
:param return_code: int, expected return code
:rtype: self |
def get_playback_callback(resampler, samplerate, params):
def callback(outdata, frames, time, _):
last_fmphase = getattr(callback, 'last_fmphase', 0)
df = params['fm_gain'] * resampler.read(frames)
df = np.pad(df, (0, frames - len(df)), mode='constant')
t = time.outputBufferDacTime + np.arange(frames) / samplerate
phase = 2 * np.pi * params['carrier_frequency'] * t
fmphase = last_fmphase + 2 * np.pi * np.cumsum(df) / samplerate
outdata[:, 0] = params['output_volume'] * np.cos(phase + fmphase)
callback.last_fmphase = fmphase[-1]
return callback | Return a sound playback callback.
Parameters
----------
resampler
The resampler from which samples are read.
samplerate : float
The sample rate.
params : dict
Parameters for FM generation. |
def open(self):
if self.fd is not None:
raise self.AlreadyOpened()
logger.debug("Opening %s..." % (TUN_KO_PATH, ))
self.fd = os.open(TUN_KO_PATH, os.O_RDWR)
logger.debug("Opening %s tunnel '%s'..." % (self.mode_name.upper(), self.pattern, ))
try:
ret = fcntl.ioctl(self.fd, self.TUNSETIFF, struct.pack("16sH", self.pattern, self.mode | self.no_pi))
except IOError, e:
if e.errno == 1:
logger.error("Cannot open a %s tunnel because the operation is not permitted." % (self.mode_name.upper(), ))
raise self.NotPermitted()
raise
self.name = ret[:16].strip("\x00")
logger.info("Tunnel '%s' opened." % (self.name, )) | Create the tunnel.
If the tunnel is already opened, the function will
raised an AlreadyOpened exception. |
def decode(self, data: bytes) -> bytes:
if CONTENT_TRANSFER_ENCODING in self.headers:
data = self._decode_content_transfer(data)
if CONTENT_ENCODING in self.headers:
return self._decode_content(data)
return data | Decodes data according the specified Content-Encoding
or Content-Transfer-Encoding headers value. |
def parse_url(url):
if not RE_PROTOCOL_SERVER.match(url):
raise Exception("URL should begin with `protocol://domain`")
protocol, server, path, _, _, _ = urlparse.urlparse(url)
return protocol, server | Takes a URL string and returns its protocol and server |
def calc_all_routes_info(self, npaths=3, real_time=True, stop_at_bounds=False, time_delta=0):
routes = self.get_route(npaths, time_delta)
results = {route['routeName']: self._add_up_route(route['results'], real_time=real_time, stop_at_bounds=stop_at_bounds) for route in routes}
route_time = [route[0] for route in results.values()]
route_distance = [route[1] for route in results.values()]
self.log.info('Time %.2f - %.2f minutes, distance %.2f - %.2f km.', min(route_time), max(route_time), min(route_distance), max(route_distance))
return results | Calculate all route infos. |
def _from_rest_ignore(model, props):
fields = model.all_fields
for prop in props.keys():
if prop not in fields:
del props[prop] | Purge fields that are completely unknown |
def get_directorship_heads(self, val):
__ldap_group_ou__ = "cn=groups,cn=accounts,dc=csh,dc=rit,dc=edu"
res = self.__con__.search_s(
__ldap_group_ou__,
ldap.SCOPE_SUBTREE,
"(cn=eboard-%s)" % val,
['member'])
ret = []
for member in res[0][1]['member']:
try:
ret.append(member.decode('utf-8'))
except UnicodeDecodeError:
ret.append(member)
except KeyError:
continue
return [CSHMember(self,
dn.split('=')[1].split(',')[0],
True)
for dn in ret] | Get the head of a directorship
Arguments:
val -- the cn of the directorship |
def rolling_max(self, window_start, window_end, min_observations=None):
min_observations = self.__check_min_observations(min_observations)
agg_op = '__builtin__max__'
return SArray(_proxy=self.__proxy__.builtin_rolling_apply(agg_op, window_start, window_end, min_observations)) | Calculate a new SArray of the maximum value of different subsets over
this SArray.
The subset that the maximum is calculated over is defined as an
inclusive range relative to the position to each value in the SArray,
using `window_start` and `window_end`. For a better understanding of
this, see the examples below.
Parameters
----------
window_start : int
The start of the subset to calculate the maximum relative to the
current value.
window_end : int
The end of the subset to calculate the maximum relative to the current
value. Must be greater than `window_start`.
min_observations : int
Minimum number of non-missing observations in window required to
calculate the maximum (otherwise result is None). None signifies that
the entire window must not include a missing value. A negative
number throws an error.
Returns
-------
out : SArray
Examples
--------
>>> import pandas
>>> sa = SArray([1,2,3,4,5])
>>> series = pandas.Series([1,2,3,4,5])
A rolling max with a window including the previous 2 entries including
the current:
>>> sa.rolling_max(-2,0)
dtype: int
Rows: 5
[None, None, 3, 4, 5]
Pandas equivalent:
>>> pandas.rolling_max(series, 3)
0 NaN
1 NaN
2 3
3 4
4 5
dtype: float64
Same rolling max operation, but 2 minimum observations:
>>> sa.rolling_max(-2,0,min_observations=2)
dtype: int
Rows: 5
[None, 2, 3, 4, 5]
Pandas equivalent:
>>> pandas.rolling_max(series, 3, min_periods=2)
0 NaN
1 2
2 3
3 4
4 5
dtype: float64
A rolling max with a size of 3, centered around the current:
>>> sa.rolling_max(-1,1)
dtype: int
Rows: 5
[None, 3, 4, 5, None]
Pandas equivalent:
>>> pandas.rolling_max(series, 3, center=True)
0 NaN
1 3
2 4
3 5
4 NaN
dtype: float64
A rolling max with a window including the current and the 2 entries
following:
>>> sa.rolling_max(0,2)
dtype: int
Rows: 5
[3, 4, 5, None, None]
A rolling max with a window including the previous 2 entries NOT
including the current:
>>> sa.rolling_max(-2,-1)
dtype: int
Rows: 5
[None, None, 2, 3, 4] |
def wait_for_ready(self, instance_id, limit=14400, delay=10, pending=False):
now = time.time()
until = now + limit
mask = "mask[id, lastOperatingSystemReload[id], activeTransaction, provisionDate]"
instance = self.get_hardware(instance_id, mask=mask)
while now <= until:
if utils.is_ready(instance, pending):
return True
transaction = utils.lookup(instance, 'activeTransaction', 'transactionStatus', 'friendlyName')
snooze = min(delay, until - now)
LOGGER.info("%s - %d not ready. Auto retry in %ds", transaction, instance_id, snooze)
time.sleep(snooze)
instance = self.get_hardware(instance_id, mask=mask)
now = time.time()
LOGGER.info("Waiting for %d expired.", instance_id)
return False | Determine if a Server is ready.
A server is ready when no transactions are running on it.
:param int instance_id: The instance ID with the pending transaction
:param int limit: The maximum amount of seconds to wait.
:param int delay: The number of seconds to sleep before checks. Defaults to 10. |
def list_commands(self, ctx):
commands_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'commands')
command_list = [name for __, name, ispkg in pkgutil.iter_modules([commands_path]) if ispkg]
command_list.sort()
return command_list | List CLI commands
@type ctx: Context
@rtype: list |
def extract_lzma(archive, compression, cmd, verbosity, interactive, outdir):
cmdlist = [util.shell_quote(cmd), '--format=lzma']
if verbosity > 1:
cmdlist.append('-v')
outfile = util.get_single_outfile(outdir, archive)
cmdlist.extend(['-c', '-d', '--', util.shell_quote(archive), '>',
util.shell_quote(outfile)])
return (cmdlist, {'shell': True}) | Extract an LZMA archive. |
def del_windows_env_var(key):
if not isinstance(key, text_type):
raise TypeError("%r not of type %r" % (key, text_type))
status = winapi.SetEnvironmentVariableW(key, None)
if status == 0:
raise ctypes.WinError() | Delete an env var.
Raises:
WindowsError |
def process_array(elt, ascii=False):
del ascii
chld = elt.getchildren()
if len(chld) > 1:
raise ValueError()
chld = chld[0]
try:
name, current_type, scale = CASES[chld.tag](chld)
size = None
except ValueError:
name, current_type, size, scale = CASES[chld.tag](chld)
del name
myname = elt.get("name") or elt.get("label")
if elt.get("length").startswith("$"):
length = int(VARIABLES[elt.get("length")[1:]])
else:
length = int(elt.get("length"))
if size is not None:
return (myname, current_type, (length, ) + size, scale)
else:
return (myname, current_type, (length, ), scale) | Process an 'array' tag. |
def limit(self, limit):
clone = self._clone()
if isinstance(limit, int):
clone._limit = limit
return clone | Limit number of records |
def _update_mask(self):
self._threshold_mask = self._data > self._theta
self._threshold_mask_v = self._data > self._theta/np.abs(self._v) | Pre-compute masks for speed. |
def read_csv(filename):
with open(filename, 'r') as f:
r = csv.reader(f)
next(r)
wm = list(r)
for i, row in enumerate(wm):
row[1:4] = map(int, row[1:4])
row[4] = float(row[4])
return wm | Read a list of week-matrices from a CSV file. |
def get_mount_targets(filesystemid=None,
mounttargetid=None,
keyid=None,
key=None,
profile=None,
region=None,
**kwargs):
result = None
client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
if filesystemid:
response = client.describe_mount_targets(FileSystemId=filesystemid)
result = response["MountTargets"]
while "NextMarker" in response:
response = client.describe_mount_targets(FileSystemId=filesystemid,
Marker=response["NextMarker"])
result.extend(response["MountTargets"])
elif mounttargetid:
response = client.describe_mount_targets(MountTargetId=mounttargetid)
result = response["MountTargets"]
return result | Get all the EFS mount point properties for a specific filesystemid or
the properties for a specific mounttargetid. One or the other must be
specified
filesystemid
(string) - ID of the file system whose mount targets to list
Must be specified if mounttargetid is not
mounttargetid
(string) - ID of the mount target to have its properties returned
Must be specified if filesystemid is not
returns
(list[dict]) - list of all mount point properties
CLI Example:
.. code-block:: bash
salt 'my-minion' boto_efs.get_mount_targets |
def plot_scatter_matrix(self, freq=None, title=None,
figsize=(10, 10), **kwargs):
if title is None:
title = self._get_default_plot_title(
freq, 'Return Scatter Matrix')
plt.figure()
ser = self._get_series(freq).to_returns().dropna()
pd.scatter_matrix(ser, figsize=figsize, **kwargs)
return plt.suptitle(title) | Wrapper around pandas' scatter_matrix.
Args:
* freq (str): Data frequency used for display purposes.
Refer to pandas docs for valid freq strings.
* figsize ((x,y)): figure size
* title (str): Title if default not appropriate
* kwargs: passed to pandas' scatter_matrix method |
def cleanup():
to_stop = STARTED_TASKS.copy()
if to_stop:
print "Cleaning up..."
for task in to_stop:
try:
task.stop()
except:
etype, value, trace = sys.exc_info()
if not (isinstance(value, OSError) and value.errno == 3):
print ''.join(format_exception(etype, value, trace, None))
continue | Stop all started tasks on system exit.
Note: This only handles signals caught by the atexit module by default.
SIGKILL, for instance, will not be caught, so cleanup is not guaranteed in
all cases. |
def rename(self, oldkey, newkey):
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment | Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments. |
def push_plugin(self, name):
url = self._url('/plugins/{0}/pull', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True) | Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful |
def arcfour_drop(key, n=3072):
af = arcfour(key)
[af.next() for c in range(n)]
return af | Return a generator for the RC4-drop pseudorandom keystream given by
the key and number of bytes to drop passed as arguments. Dropped bytes
default to the more conservative 3072, NOT the SCAN default of 768. |
def get_context_data(self, **kwargs):
data = {}
if self._contextual_vals.current_level == 1 and self.max_levels > 1:
data['sub_menu_template'] = self.sub_menu_template.template.name
data.update(kwargs)
return super().get_context_data(**data) | Include the name of the sub menu template in the context. This is
purely for backwards compatibility. Any sub menus rendered as part of
this menu will call `sub_menu_template` on the original menu instance
to get an actual `Template` |
def initialize_watcher(samples):
work_dir = dd.get_in_samples(samples, dd.get_work_dir)
ww = WorldWatcher(work_dir,
is_on=any([dd.get_cwl_reporting(d[0]) for d in samples]))
ww.initialize(samples)
return ww | check to see if cwl_reporting is set for any samples,
and if so, initialize a WorldWatcher object from a set of samples, |
def write_branch_data(self, file):
writer = self._get_writer(file)
writer.writerow(BRANCH_ATTRS)
for branch in self.case.branches:
writer.writerow([getattr(branch, a) for a in BRANCH_ATTRS]) | Writes branch data as CSV. |
def _load_preset(self, path):
try:
with open(path, 'r') as f:
presetBody = json.load(f)
except IOError as e:
raise PresetException("IOError: " + e.strerror)
except ValueError as e:
raise PresetException("JSON decoding error: " + str(e))
except Exception as e:
raise PresetException(str(e))
try:
preset = Preset(presetBody)
except PresetException as e:
e.message = "Bad format: " + e.message
raise
if(preset.id in self.presets):
raise PresetException("Duplicate preset id: " + preset.id)
else:
self.presets[preset.id] = preset | load, validate and store a single preset file |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.