code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def body(self):
if self._body is None:
if self._body_reader is None:
self._body = self.input.read(self.content_length or 0)
else:
self._body = self._body_reader(self.input)
return self._body | Reads and returns the entire request body.
On first access, reads `content_length` bytes from `input` and stores
the result on the request object. On subsequent access, returns the
cached value. |
def rm(path, service_names=None):
project = __load_project(path)
if isinstance(project, dict):
return project
else:
try:
project.remove_stopped(service_names)
except Exception as inst:
return __handle_except(inst)
return __standardize_result(True, 'Removin... | Remove stopped containers in the docker-compose file, service_names is a python
list, if omitted remove all stopped containers
path
Path where the docker-compose file is stored on the server
service_names
If specified will remove only the specified stopped services
CLI Example:
..... |
def wrap_inference_results(inference_result_proto):
inference_proto = inference_pb2.InferenceResult()
if isinstance(inference_result_proto,
classification_pb2.ClassificationResponse):
inference_proto.classification_result.CopyFrom(
inference_result_proto.result)
elif isinstance(inferen... | Returns packaged inference results from the provided proto.
Args:
inference_result_proto: The classification or regression response proto.
Returns:
An InferenceResult proto with the result from the response. |
def store_work_results(self, results, collection, md5):
results['md5'] = md5
results['__time_stamp'] = datetime.datetime.utcnow()
if 'mod_time' not in results:
results['mod_time'] = results['__time_stamp']
try:
self.database[collection].update({'md5':md5}, self.cl... | Store the output results of the worker.
Args:
results: a dictionary.
collection: the database collection to store the results in.
md5: the md5 of sample data to be updated. |
def create_logstash(self, **kwargs):
logstash = predix.admin.logstash.Logging(**kwargs)
logstash.create()
logstash.add_to_manifest(self)
logging.info('Install Kibana-Me-Logs application by following GitHub instructions')
logging.info('git clone https://github.com/cloudfoundry-com... | Creates an instance of the Logging Service. |
def add_reorganize_data(self, name, input_name, output_name, mode = 'SPACE_TO_DEPTH', block_size = 2):
spec = self.spec
nn_spec = self.nn_spec
spec_layer = nn_spec.layers.add()
spec_layer.name = name
spec_layer.input.append(input_name)
spec_layer.output.append(output_name... | Add a data reorganization layer of type "SPACE_TO_DEPTH" or "DEPTH_TO_SPACE".
Parameters
----------
name: str
The name of this layer.
input_name: str
The input blob name of this layer.
output_name: str
The output blob name of this layer.
... |
def _add_view_menu(self):
mainMenu = self.app.mainMenu()
viewMenu = AppKit.NSMenu.alloc().init()
viewMenu.setTitle_(localization["cocoa.menu.view"])
viewMenuItem = AppKit.NSMenuItem.alloc().init()
viewMenuItem.setSubmenu_(viewMenu)
mainMenu.addItem_(viewMenuItem)
... | Create a default View menu that shows 'Enter Full Screen'. |
def close(self):
for handle in self._handles:
if not handle.closed:
handle.close()
del self._handles[:]
for transport, _ in self.connections:
transport.close()
self._all_closed.wait() | Close the listening sockets and all accepted connections. |
def _re_raise_as(NewExc, *args, **kw):
etype, val, tb = sys.exc_info()
raise NewExc(*args, **kw), None, tb | Raise a new exception using the preserved traceback of the last one. |
def start(self):
logging.info("Fixedconf watcher plugin: Started")
cidr = self.conf['fixed_cidr']
hosts = self.conf['fixed_hosts'].split(":")
route_spec = {cidr : hosts}
try:
common.parse_route_spec_config(route_spec)
self.q_route_spec.put(route... | Start the config watch thread or process. |
def get_queues(*queue_names, **kwargs):
from .settings import QUEUES
if len(queue_names) <= 1:
return [get_queue(*queue_names, **kwargs)]
kwargs['job_class'] = get_job_class(kwargs.pop('job_class', None))
queue_params = QUEUES[queue_names[0]]
connection_params = filter_connection_params(queu... | Return queue instances from specified queue names.
All instances must use the same Redis connection. |
def purge_metadata_by_name(self, name):
meta_dir = self._get_metadata_dir_by_name(name, self._metadata_base_dir)
logger.debug('purging metadata directory: {}'.format(meta_dir))
try:
rm_rf(meta_dir)
except OSError as e:
raise ProcessMetadataManager.MetadataError('failed to purge metadata dire... | Purge a processes metadata directory.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal. |
def _validate_file_roots(file_roots):
if not isinstance(file_roots, dict):
log.warning('The file_roots parameter is not properly formatted,'
' using defaults')
return {'base': _expand_glob_path([salt.syspaths.BASE_FILE_ROOTS_DIR])}
return _normalize_roots(file_roots) | If the file_roots option has a key that is None then we will error out,
just replace it with an empty list |
def validate(tool_class, model_class):
if not hasattr(tool_class, 'name'):
raise ImproperlyConfigured("No 'name' attribute found for tool %s." % (
tool_class.__name__
))
if not hasattr(tool_class, 'label'):
raise ImproperlyConfigured("No 'label' attribute found for tool %s." ... | Does basic ObjectTool option validation. |
def get_structure(atoms, cls=None):
symbols = atoms.get_chemical_symbols()
positions = atoms.get_positions()
lattice = atoms.get_cell()
cls = Structure if cls is None else cls
return cls(lattice, symbols, positions,
coords_are_cartesian=True) | Returns pymatgen structure from ASE Atoms.
Args:
atoms: ASE Atoms object
cls: The Structure class to instantiate (defaults to pymatgen structure)
Returns:
Equivalent pymatgen.core.structure.Structure |
def cases(self, env, data):
for handler in self.handlers:
env._push()
data._push()
try:
result = handler(env, data)
finally:
env._pop()
data._pop()
if result is not None:
return result | Calls each nested handler until one of them returns nonzero result.
If any handler returns `None`, it is interpreted as
"request does not match, the handler has nothing to do with it and
`web.cases` should try to call the next handler". |
def show_command(endpoint_id, rule_id):
client = get_client()
rule = client.get_endpoint_acl_rule(endpoint_id, rule_id)
formatted_print(
rule,
text_format=FORMAT_TEXT_RECORD,
fields=(
("Rule ID", "id"),
("Permissions", "permissions"),
("Shared With... | Executor for `globus endpoint permission show` |
def PostUnregistration(method):
if not isinstance(method, types.FunctionType):
raise TypeError("@PostUnregistration can only be applied on functions")
validate_method_arity(method, "service_reference")
_append_object_entry(
method,
constants.IPOPO_METHOD_CALLBACKS,
constants.... | The service post-unregistration callback decorator is called after a service
of the component has been unregistered from the framework.
The decorated method must accept the
:class:`~pelix.framework.ServiceReference` of the registered
service as argument::
@PostUnregistration
def callback... |
def meth_list(args):
r = fapi.list_repository_methods(namespace=args.namespace,
name=args.method,
snapshotId=args.snapshot_id)
fapi._check_response_code(r, 200)
methods = r.json()
results = []
for m in methods:
ns = m[... | List workflows in the methods repository |
def std_blocksum(data, block_sizes, mask=None):
data = np.ma.asanyarray(data)
if mask is not None and mask is not np.ma.nomask:
mask = np.asanyarray(mask)
if data.shape != mask.shape:
raise ValueError('data and mask must have the same shape.')
data.mask |= mask
stds = []
... | Calculate the standard deviation of block-summed data values at
sizes of ``block_sizes``.
Values from incomplete blocks, either because of the image edges or
masked pixels, are not included.
Parameters
----------
data : array-like
The 2D array to block sum.
block_sizes : int, arra... |
def get_info(brain_or_object, endpoint=None, complete=False):
if not is_brain(brain_or_object):
brain_or_object = get_brain(brain_or_object)
if brain_or_object is None:
logger.warn("Couldn't find/fetch brain of {}".format(brain_or_object))
return {}
complete = True
... | Extract the data from the catalog brain or object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param endpoint: The named URL endpoint for the root of the items
:type endpoint: str/unicode
:param complete: Fl... |
def in_cache(self, zenpy_object):
object_type = get_object_type(zenpy_object)
cache_key_attr = self._cache_key_attribute(object_type)
return self.get(object_type, getattr(zenpy_object, cache_key_attr)) is not None | Determine whether or not this object is in the cache |
def charset_to_int(s, charset):
output = 0
for char in s:
output = output * len(charset) + charset.index(char)
return output | Turn a string into a non-negative integer.
>>> charset_to_int('0', B40_CHARS)
0
>>> charset_to_int('10', B40_CHARS)
40
>>> charset_to_int('abcd', B40_CHARS)
658093
>>> charset_to_int('', B40_CHARS)
0
>>> charset_to_int('muneeb.id', B40_CHARS)
149190078205533
>>> charset_to_i... |
def index_delete(index, hosts=None, profile=None):
es = _get_instance(hosts, profile)
try:
result = es.indices.delete(index=index)
return result.get('acknowledged', False)
except elasticsearch.exceptions.NotFoundError:
return True
except elasticsearch.TransportError as e:
... | Delete an index
index
Index name
CLI example::
salt myminion elasticsearch.index_delete testindex |
def set_perspective(self, fov, aspect, near, far):
self.matrix = transforms.perspective(fov, aspect, near, far) | Set the perspective
Parameters
----------
fov : float
Field of view.
aspect : float
Aspect ratio.
near : float
Near location.
far : float
Far location. |
def cummean(expr, sort=None, ascending=True, unique=False,
preceding=None, following=None):
data_type = _stats_type(expr)
return _cumulative_op(expr, CumMean, sort=sort, ascending=ascending,
unique=unique, preceding=preceding,
following=following, ... | Calculate cumulative mean of a sequence expression.
:param expr: expression for calculation
:param sort: name of the sort column
:param ascending: whether to sort in ascending order
:param unique: whether to eliminate duplicate entries
:param preceding: the start point of a window
:param follow... |
def save_context(context):
file_path = _get_context_filepath()
content = format_to_http_prompt(context, excluded_options=EXCLUDED_OPTIONS)
with io.open(file_path, 'w', encoding='utf-8') as f:
f.write(content) | Save a Context object to user data directory. |
def within(self, x, ctrs, kdtree=None):
if kdtree is None:
idxs = np.where(lalg.norm(ctrs - x, axis=1) <= self.radius)[0]
else:
idxs = kdtree.query_ball_point(x, self.radius, p=2.0, eps=0)
return idxs | Check which balls `x` falls within. Uses a K-D Tree to
perform the search if provided. |
def trcdep():
depth = ctypes.c_int()
libspice.trcdep_c(ctypes.byref(depth))
return depth.value | Return the number of modules in the traceback representation.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/trcdep_c.html
:return: The number of modules in the traceback.
:rtype: int |
async def client_event_handler(self, client_id, event_tuple, user_data):
conn_string, event_name, _event = event_tuple
self._logger.debug("Ignoring event %s from device %s forwarded for client %s",
event_name, conn_string, client_id)
return None | Method called to actually send an event to a client.
Users of this class should override this method to actually forward
device events to their clients. It is called with the client_id
passed to (or returned from) :meth:`setup_client` as well as the
user_data object that was included t... |
def passthrough_repl(self, inputstring, **kwargs):
out = []
index = None
for c in append_it(inputstring, None):
try:
if index is not None:
if c is not None and c in nums:
index += c
elif c == unwrapper an... | Add back passthroughs. |
def get_shutit_pexpect_session_environment(self, environment_id):
if not isinstance(environment_id, str):
self.fail('Wrong argument type in get_shutit_pexpect_session_environment')
for env in shutit_global.shutit_global_object.shutit_pexpect_session_environments:
if env.environment_id == environment_id:
r... | Returns the first shutit_pexpect_session object related to the given
environment-id |
def get_time():
time_request = '\x1b' + 47 * '\0'
now = struct.unpack("!12I", ntp_service.request(time_request, timeout=5.0).data.read())[10]
return time.ctime(now - EPOCH_START) | Get time from a locally running NTP server |
def gen_div(src1, src2, dst):
assert src1.size == src2.size
return ReilBuilder.build(ReilMnemonic.DIV, src1, src2, dst) | Return a DIV instruction. |
def _clean_dirty(self, obj=None):
obj = obj or self
obj.__dict__['_dirty_attributes'].clear()
obj._dirty = False
for key, val in vars(obj).items():
if isinstance(val, BaseObject):
self._clean_dirty(val)
else:
func = getattr(val, '_c... | Recursively clean self and all child objects. |
def _check_markers(task_ids, offset=10):
shuffle(task_ids)
has_errors = False
for index in xrange(0, len(task_ids), offset):
keys = [ndb.Key(FuriousAsyncMarker, id)
for id in task_ids[index:index + offset]]
markers = ndb.get_multi(keys)
if not all(markers):
... | Returns a flag for markers being found for the task_ids. If all task ids
have markers True will be returned. Otherwise it will return False as soon
as a None result is hit. |
def write_meta(self, role):
meta_file = utils.file_to_string(self.paths["meta"])
self.update_gen_report(role, "meta", meta_file) | Write out a new meta file. |
def prune_creds_json(creds: dict, cred_ids: set) -> str:
rv = deepcopy(creds)
for key in ('attrs', 'predicates'):
for attr_uuid, creds_by_uuid in rv[key].items():
rv[key][attr_uuid] = [cred for cred in creds_by_uuid if cred['cred_info']['referent'] in cred_ids]
empties = [attr_uuid f... | Strip all creds out of the input json structure that do not match any of the input credential identifiers.
:param creds: indy-sdk creds structure
:param cred_ids: the set of credential identifiers of interest
:return: the reduced creds json |
def get_page_children_dict(self, page_qs=None):
children_dict = defaultdict(list)
for page in page_qs or self.pages_for_display:
children_dict[page.path[:-page.steplen]].append(page)
return children_dict | Returns a dictionary of lists, where the keys are 'path' values for
pages, and the value is a list of children pages for that page. |
def get_queryset(self):
qs = VersionedQuerySet(self.model, using=self._db)
if hasattr(self, 'instance') and hasattr(self.instance, '_querytime'):
qs.querytime = self.instance._querytime
return qs | Returns a VersionedQuerySet capable of handling version time
restrictions.
:return: VersionedQuerySet |
def in_session(self):
session = self.get_session()
try:
yield session
session.commit()
except IntegrityError:
session.rollback()
raise DuplicateError("Duplicate unique value detected!")
except (OperationalError, DisconnectionError):
... | Provide a session scope around a series of operations. |
def value_series(self, key, start=None, end=None, interval=None,
namespace=None, cache=None):
return self.make_context(key=key, start=start, end=end,
interval=interval, namespace=namespace,
cache=cache).value_series() | Get a time series of gauge values |
def home_page(self, tld_type: Optional[TLDType] = None) -> str:
resource = self.random.choice(USERNAMES)
domain = self.top_level_domain(
tld_type=tld_type,
)
return 'http://www.{}{}'.format(
resource, domain) | Generate a random home page.
:param tld_type: TLD type.
:return: Random home page.
:Example:
http://www.fontir.info |
def imprint(self, path=None):
if self.version is not None:
with open(path or self.version_file, 'w') as h:
h.write(self.version + '\n')
else:
raise ValueError('Can not write null version to file.')
return self | Write the determined version, if any, to ``self.version_file`` or
the path passed as an argument. |
def progressive(image_field, alt_text=''):
if not isinstance(image_field, ImageFieldFile):
raise ValueError('"image_field" argument must be an ImageField.')
for engine in engines.all():
if isinstance(engine, BaseEngine) and hasattr(engine, 'env'):
env = engine.env
if isin... | Used as a Jinja2 filter, this function returns a safe HTML chunk.
Usage (in the HTML template):
{{ obj.image|progressive }}
:param django.db.models.fields.files.ImageFieldFile image_field: image
:param str alt_text: str
:return: a safe HTML template ready to be rendered |
def add_vcenter(self, **kwargs):
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter",
xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id = ET.SubElement(vcenter, "id")
id.text = kwargs.pop('id')
credentials = ET.SubElement(vcenter... | Add vCenter on the switch
Args:
id(str) : Name of an established vCenter
url (bool) : vCenter URL
username (str): Username of the vCenter
password (str): Password of the vCenter
callback (function): A function executed upon completion of the
... |
def _store_information(self):
print '<<< Generating Information Storage >>>'
for name, meth in inspect.getmembers(self, predicate=inspect.isroutine):
if not name.startswith('_'):
info = {'command': name, 'sig': str(funcsigs.signature(meth)), 'docstring': meth.__doc__}
... | Store infomation about Workbench and its commands |
def load_schema(schema_path):
with open(schema_path, 'r') as schema_file:
schema = simplejson.load(schema_file)
resolver = RefResolver('', '', schema.get('models', {}))
return build_request_to_validator_map(schema, resolver) | Prepare the api specification for request and response validation.
:returns: a mapping from :class:`RequestMatcher` to :class:`ValidatorMap`
for every operation in the api specification.
:rtype: dict |
def is_int(tg_type, inc_array=False):
global _scalar_int_types, _array_int_types
if tg_type in _scalar_int_types:
return True
if not inc_array:
return False
return tg_type in _array_int_types | Tells if the given tango type is integer
:param tg_type: tango type
:type tg_type: :class:`tango.CmdArgType`
:param inc_array: (optional, default is False) determines if include array
in the list of checked types
:type inc_array: :py:obj:`bool`
:return: True if the given tang... |
def get_description(cls) -> str:
if cls.__doc__ is None:
raise ValueError('No docstring found for {}'.format(cls.__name__))
return cls.__doc__.strip() | The description is expected to be the command class' docstring. |
def load(self, filename=None):
fields = []
with open(filename, 'r') as f:
format_data = f.read().strip()
lines = format_data.split('\n')
self._sql_version = lines.pop(0)
self._num_fields = int(lines.pop(0))
for line in lines:
line = re.sub(' +', ' ... | Reads a non-XML bcp FORMAT file and parses it into fields list used for creating bulk data file |
def stoptimes(self, start_date, end_date):
params = {
'start': self.format_date(start_date),
'end': self.format_date(end_date)
}
response = self._request(ENDPOINTS['STOPTIMES'], params)
return response | Return all stop times in the date range
:param start_date:
The starting date for the query.
:param end_date:
The end date for the query.
>>> import datetime
>>> today = datetime.date.today()
>>> trans.stoptimes(today - datetime.timedelta(days=1), today) |
def bind(self, typevar, its_type):
assert type(typevar) == tg.TypeVar
if self.is_generic_in(typevar):
self.bind_to_instance(typevar, its_type)
else:
self._ns[typevar] = its_type | Binds typevar to the type its_type.
Binding occurs on the instance if the typevar is a TypeVar of the
generic type of the instance, on call level otherwise. |
def prepare_input_data(config):
if not dd.get_disambiguate(config):
return dd.get_input_sequence_files(config)
work_bam = dd.get_work_bam(config)
logger.info("Converting disambiguated reads to fastq...")
fq_files = convert_bam_to_fastq(
work_bam, dd.get_work_dir(config), None, None, conf... | In case of disambiguation, we want to run fusion calling on
the disambiguated reads, which are in the work_bam file.
As EricScript accepts 2 fastq files as input, we need to convert
the .bam to 2 .fq files. |
def do_eni(self,args):
parser = CommandArgumentParser("eni")
parser.add_argument(dest='eni',help='eni index or name');
args = vars(parser.parse_args(args))
print "loading eni {}".format(args['eni'])
try:
index = int(args['eni'])
eniSummary = self.wrappedSt... | Go to the specified eni. eni -h for detailed help. |
def add_tlink(self,my_tlink):
if self.temporalRelations_layer is None:
self.temporalRelations_layer = CtemporalRelations()
self.root.append(self.temporalRelations_layer.get_node())
self.temporalRelations_layer.add_tlink(my_tlink) | Adds a tlink to the temporalRelations layer
@type my_tlink: L{Ctlink}
@param my_tlink: tlink object |
def worker_recover(name, workers=None, profile='default'):
if workers is None:
workers = []
return _bulk_state(
'modjk.bulk_recover', name, workers, profile
) | Recover all the workers in the modjk load balancer
Example:
.. code-block:: yaml
loadbalancer:
modjk.worker_recover:
- workers:
- app1
- app2 |
def exists(name, tags=None, region=None, key=None, keyid=None, profile=None):
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
return {'exists': bool(rds)}
except ClientError as e:
return {'error':... | Check to see if an RDS exists.
CLI example::
salt myminion boto_rds.exists myrds region=us-east-1 |
def add_arguments(self, parser):
parser.add_argument('app_label', nargs='*')
for argument in self.arguments:
parser.add_argument(*argument.split(' '), **self.arguments[argument]) | Unpack self.arguments for parser.add_arguments. |
def time_stamp():
fmt = '%Y-%m-%dT%H:%M:%S.%f'
date = datetime.datetime
date_delta = datetime.timedelta
now = datetime.datetime.utcnow()
return fmt, date, date_delta, now | Setup time functions
:returns: ``tuple`` |
def transplant(new_net, net, suffix=''):
for p in net.params:
p_new = p + suffix
if p_new not in new_net.params:
print 'dropping', p
continue
for i in range(len(net.params[p])):
if i > (len(new_net.params[p_new]) - 1):
print 'dropping', p, ... | Transfer weights by copying matching parameters, coercing parameters of
incompatible shape, and dropping unmatched parameters.
The coercion is useful to convert fully connected layers to their
equivalent convolutional layers, since the weights are the same and only
the shapes are different. In particu... |
def get_filetypes_info(editor_quote="`", flag_leaf=True):
NONE_REPL = ""
import f311
data = []
for attr in f311.classes_file(flag_leaf):
description = a99.get_obj_doc0(attr)
def_ = NONE_REPL if attr.default_filename is None else attr.default_filename
ee = attr.editors
if ... | Reports available data types
Args:
editor_quote: character to enclose the name of the editor script between.
flag_leaf: see tabulate_filetypes_rest()
Returns:
list: list of FileTypeInfo |
def _initialize(self, **resource_attributes):
super(APIResourceCollection, self)._initialize(**resource_attributes)
dict_list = self.data
self.data = []
for resource in dict_list:
self.data.append(self._expected_api_resource(**resource)) | Initialize the collection.
:param resource_attributes: API resource parameters |
def enclosure_groups(self):
if not self.__enclosure_groups:
self.__enclosure_groups = EnclosureGroups(self.__connection)
return self.__enclosure_groups | Gets the EnclosureGroups API client.
Returns:
EnclosureGroups: |
def pix2sky(self, pixel):
pixbox = numpy.array([pixel, pixel])
skybox = self.wcs.all_pix2world(pixbox, 1)
return [float(skybox[0][0]), float(skybox[0][1])] | Get the sky coordinates for a given image pixel.
Parameters
----------
pixel : (float, float)
Image coordinates.
Returns
-------
ra,dec : float
Sky coordinates (degrees) |
def _format_keyword(self, keyword):
import re
result = ''
if keyword:
result = re.sub(r"\W", "", keyword)
result = re.sub(r"_", "", result)
return result | Removing special character from a keyword. Analysis Services must have
this kind of keywords. E.g. if assay name from GeneXpert Instrument is
'Ebola RUO', an AS must be created on Bika with the keyword 'EbolaRUO' |
def from_flags(flags, ednsflags):
value = (flags & 0x000f) | ((ednsflags >> 20) & 0xff0)
if value < 0 or value > 4095:
raise ValueError('rcode must be >= 0 and <= 4095')
return value | Return the rcode value encoded by flags and ednsflags.
@param flags: the DNS flags
@type flags: int
@param ednsflags: the EDNS flags
@type ednsflags: int
@raises ValueError: rcode is < 0 or > 4095
@rtype: int |
def science_object_update(self, pid_old, path, pid_new, format_id=None):
self._queue_science_object_update(pid_old, path, pid_new, format_id) | Obsolete a Science Object on a Member Node with a different one. |
def validate_timeout_or_zero(option, value):
if value is None:
raise ConfigurationError("%s cannot be None" % (option, ))
if value == 0 or value == "0":
return 0
return validate_positive_float(option, value) / 1000.0 | Validates a timeout specified in milliseconds returning
a value in floating point seconds for the case where None is an error
and 0 is valid. Setting the timeout to nothing in the URI string is a
config error. |
def resample_multipitch(times, frequencies, target_times):
if target_times.size == 0:
return []
if times.size == 0:
return [np.array([])]*len(target_times)
n_times = len(frequencies)
frequency_index = np.arange(0, n_times)
new_frequency_index = scipy.interpolate.interp1d(
tim... | Resamples multipitch time series to a new timescale. Values in
``target_times`` outside the range of ``times`` return no pitch estimate.
Parameters
----------
times : np.ndarray
Array of time stamps
frequencies : list of np.ndarray
List of np.ndarrays of frequency values
target_... |
def run_nupack(kwargs):
run = NUPACK(kwargs['seq'])
output = getattr(run, kwargs['cmd'])(**kwargs['arguments'])
return output | Run picklable Nupack command.
:param kwargs: keyword arguments to pass to Nupack as well as 'cmd'.
:returns: Variable - whatever `cmd` returns. |
def allState(self, *args, **kwargs):
return self._makeApiCall(self.funcinfo["allState"], *args, **kwargs) | List out the entire internal state
This method is only for debugging the ec2-manager
This method is ``experimental`` |
def source_extraction(in1, tolerance, mode="cpu", store_on_gpu=False,
neg_comp=False):
if mode=="cpu":
return cpu_source_extraction(in1, tolerance, neg_comp)
elif mode=="gpu":
return gpu_source_extraction(in1, tolerance, store_on_gpu, neg_comp) | Convenience function for allocating work to cpu or gpu, depending on the selected mode.
INPUTS:
in1 (no default): Array containing the wavelet decomposition.
tolerance (no default): Percentage of maximum coefficient at which objects are deemed significant.
mode (default="cpu"):Mode... |
def relativize_classpath(classpath, root_dir, followlinks=True):
def relativize_url(url, root_dir):
url = os.path.realpath(url) if followlinks else url
root_dir = os.path.realpath(root_dir) if followlinks else root_dir
url_in_bundle = os.path.relpath(url, root_dir)
if os.path.isdir(url):
url_in_... | Convert into classpath relative to a directory.
This is eventually used by a jar file located in this directory as its manifest
attribute Class-Path. See
https://docs.oracle.com/javase/7/docs/technotes/guides/extensions/spec.html#bundled
:param list classpath: Classpath to be relativized.
:param string root... |
def condition_details_has_owner(condition_details, owner):
if 'subconditions' in condition_details:
result = condition_details_has_owner(condition_details['subconditions'], owner)
if result:
return True
elif isinstance(condition_details, list):
for subcondition in condition_d... | Check if the public_key of owner is in the condition details
as an Ed25519Fulfillment.public_key
Args:
condition_details (dict): dict with condition details
owner (str): base58 public key of owner
Returns:
bool: True if the public key is found in the condition details, False otherw... |
def request(community_id, record_id, accept):
c = Community.get(community_id)
assert c is not None
record = Record.get_record(record_id)
if accept:
c.add_record(record)
record.commit()
else:
InclusionRequest.create(community=c, record=record,
n... | Request a record acceptance to a community. |
def _init_relationships(self, relationships_arg):
if relationships_arg:
relationships_all = self._get_all_relationships()
if relationships_arg is True:
return relationships_all
else:
return relationships_all.intersection(relationships_arg)
... | Return a set of relationships found in all subset GO Terms. |
def load_each(*loaders):
def _load_each(metadata):
return merge(
loader(metadata)
for loader in loaders
)
return _load_each | Loader factory that combines a series of loaders. |
def is_ancestor(self, commit1, commit2, patch=False):
result = self.hg("log", "-r", "first(%s::%s)" % (commit1, commit2),
"--template", "exists", patch=patch)
return "exists" in result | Returns True if commit1 is a direct ancestor of commit2, or False
otherwise.
This method considers a commit to be a direct ancestor of itself |
def upload(self, path, engine, description=None):
if description is None:
head, tail = ntpath.split(path)
description = tail or ntpath.basename(head)
url = "http://quickslice.{}/config/raw/".format(self.config.host)
with open(path) as config_file:
content = co... | Create a new config resource in the slicing service and upload the path contents to it |
def scan(self, filetypes=None):
self.logger.debug("Scanning FS content.")
checksums = self.filetype_filter(self._filesystem.checksums('/'),
filetypes=filetypes)
self.logger.debug("Querying %d objects to VTotal.", len(checksums))
for files in chunk... | Iterates over the content of the disk and queries VirusTotal
to determine whether it's malicious or not.
filetypes is a list containing regular expression patterns.
If given, only the files which type will match with one or more of
the given patterns will be queried against VirusTotal.
... |
def json_description_metadata(description):
if description[:6] == 'shape=':
shape = tuple(int(i) for i in description[7:-1].split(','))
return dict(shape=shape)
if description[:1] == '{' and description[-1:] == '}':
return json.loads(description)
raise ValueError('invalid JSON image ... | Return metatata from JSON formated image description as dict.
Raise ValuError if description is of unknown format.
>>> description = '{"shape": [256, 256, 3], "axes": "YXS"}'
>>> json_description_metadata(description) # doctest: +SKIP
{'shape': [256, 256, 3], 'axes': 'YXS'}
>>> json_description_m... |
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
b_regex = "%s/*/bridge" % vnic_dir
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] | Return a list of bridges on the system. |
def _parse_args(cls):
cls.parser = argparse.ArgumentParser()
cls.parser.add_argument(
"symbol", help="Symbol for horizontal line", nargs="*")
cls.parser.add_argument(
"--color", "-c", help="Color of the line", default=None, nargs=1)
cls.parser.add_argument(
... | Method to parse command line arguments |
def scene_command(self, command):
self.logger.info("scene_command: Group %s Command %s", self.group_id, command)
command_url = self.hub.hub_url + '/0?' + command + self.group_id + "=I=0"
return self.hub.post_direct_command(command_url) | Wrapper to send posted scene command and get response |
def addResource(self, key, filePath, text):
url = self.root + "/addresource"
params = {
"f": "json",
"token" : self._securityHandler.token,
"key" : key,
"text" : text
}
files = {}
files['file'] = filePath
res = self._post(url=url,
... | The add resource operation allows the administrator to add a file
resource, for example, the organization's logo or custom banner.
The resource can be used by any member of the organization. File
resources use storage space from your quota and are scanned for
viruses.
Inputs:
... |
def populateFromDirectory(self, vcfDirectory):
pattern = os.path.join(vcfDirectory, "*.vcf.gz")
dataFiles = []
indexFiles = []
for vcfFile in glob.glob(pattern):
dataFiles.append(vcfFile)
indexFiles.append(vcfFile + ".tbi")
        self.populateFromFile(dataFiles,... | Populates this VariantSet by examining all the VCF files in the
specified directory. This is mainly used for as a convenience
for testing purposes. |
def end_profiling(profiler, filename, sorting=None):
profiler.disable()
s = six.StringIO()
ps = pstats.Stats(profiler, stream=s).sort_stats(sorting)
ps.print_stats()
with open(filename, "w+") as f:
_logger.info("[calculate_ts_features] Finished profiling of time series feature extraction")
... | Helper function to stop the profiling process and write out the profiled
data into the given filename. Before this, sort the stats by the passed sorting.
:param profiler: An already started profiler (probably by start_profiling).
:type profiler: cProfile.Profile
:param filename: The name of the output ... |
def init_types_collection(filter_filename=default_filter_filename):
global _filter_filename
_filter_filename = filter_filename
sys.setprofile(_trace_dispatch)
threading.setprofile(_trace_dispatch) | Setup profiler hooks to enable type collection.
Call this one time from the main thread.
The optional argument is a filter that maps a filename (from
code.co_filename) to either a normalized filename or None.
For the default filter see default_filter_filename(). |
def _cleanup_api(self):
resources = __salt__['boto_apigateway.describe_api_resources'](restApiId=self.restApiId,
**self._common_aws_args)
if resources.get('resources'):
res = resources.get('resources')[1:]
res... | Helper method to clean up resources and models if we detected a change in the swagger file
for a stage |
def add_contact_to_group(self, contact, group):
if isinstance(contact, basestring):
contact = self.get_contact(contact)
if isinstance(group, basestring):
group = self.get_group(group)
method, url = get_URL('contacts_add_to_group')
payload = {
'apikey':... | Add contact to group
:param contact: name or contact object
:param group: name or group object
:type contact: ``str``, ``unicode``, ``dict``
:type group: ``str``, ``unicode``, ``dict``
:rtype: ``bool`` |
def is_expired(self, time_offset_seconds=0):
now = datetime.datetime.utcnow()
if time_offset_seconds:
now = now + datetime.timedelta(seconds=time_offset_seconds)
ts = boto.utils.parse_ts(self.expiration)
delta = ts - now
return delta.total_seconds() <= 0 | Checks to see if the Session Token is expired or not. By default
it will check to see if the Session Token is expired as of the
moment the method is called. However, you can supply an
optional parameter which is the number of seconds of offset
into the future for the check. For exampl... |
def update_not_existing_kwargs(to_update, update_from):
if to_update is None:
to_update = {}
to_update.update({k:v for k,v in update_from.items() if k not in to_update})
    return to_update | This function updates the keyword arguments from update_from in
to_update, only if the keys are not set in to_update.
This is used for updating kwargs from the default dicts.
def from_interbase_coordinates(contig, start, end=None):
typechecks.require_string(contig)
typechecks.require_integer(start)
if end is None:
end = start + 1
typechecks.require_integer(end)
contig = pyensembl.locus.normalize_chromosome(contig)
return Locus(cont... | Given coordinates in 0-based interbase coordinates, return a Locus
instance. |
def _update_port_locations(self, initial_coordinates):
particles = list(self.particles())
for port in self.all_ports():
if port.anchor:
idx = particles.index(port.anchor)
shift = particles[idx].pos - initial_coordinates[idx]
port.translate(shif... | Adjust port locations after particles have moved
Compares the locations of Particles between 'self' and an array of
reference coordinates. Shifts Ports in accordance with how far anchors
have been moved. This conserves the location of Ports with respect to
their anchor Particles, but ... |
def log(self, level, *args, **kwargs):
return self._log_kw(level, args, kwargs) | Delegate a log call to the underlying logger. |
def list_icmp_block(zone, permanent=True):
cmd = '--zone={0} --list-icmp-blocks'.format(zone)
if permanent:
cmd += ' --permanent'
return __firewall_cmd(cmd).split() | List ICMP blocks on a zone
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' firewalld.list_icmp_block zone
def position(parser, token):
bits = token.split_contents()
nodelist = parser.parse(('end' + bits[0],))
parser.delete_first_token()
return _parse_position_tag(bits, nodelist) | Render a given position for category.
If some position is not defined for first category, position from its parent
category is used unless nofallback is specified.
Syntax::
{% position POSITION_NAME for CATEGORY [nofallback] %}{% endposition %}
{% position POSITION_NAME for CATEGORY using ... |
def timestamps(self):
timestamps = set()
for series in self.groups.itervalues():
timestamps |= set(series.timestamps)
return sorted(list(timestamps)) | Get all timestamps from all series in the group. |
def initialize_switch_endpoints(self):
self._switches = {}
self._port_group_info = {}
self._validate_config()
for s in cfg.CONF.ml2_arista.switch_info:
switch_ip, switch_user, switch_pass = s.split(":")
if switch_pass == "''":
switch_pass = ''
... | Initialize endpoints for switch communication |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.