code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def _send_cli_conf_string(self, nexus_host, cli_str):
starttime = time.time()
path_snip = snipp.PATH_USER_CMDS
body_snip = snipp.BODY_USER_CONF_CMDS % ('1', cli_str)
LOG.debug("NexusDriver CLI config for host %s: path: %s body: %s",
nexus_host, path_snip, body_snip)
self.nxapi_client.rest_post(path_snip, nexus_host, body_snip)
self.capture_and_print_timeshot(
starttime, "send_cliconf",
switch=nexus_host) | Sends CLI Config commands to Nexus switch using NXAPI. |
def add(self, component: Union[Component, Sequence[Component]]) -> None:
try:
self[Span(*self._available_cell())] = component
except NoUnusedCellsError:
span = list(self._spans.keys())[-1]
self._spans[span] += component | Add a widget to the grid in the next available cell.
Searches over columns then rows for available cells.
Parameters
----------
components : bowtie._Component
A Bowtie widget instance. |
def appdata_roaming_dir():
install = arcpy.GetInstallInfo('desktop')
app_data = arcpy.GetSystemEnvironment("APPDATA")
product_dir = ''.join((install['ProductName'], major_version()))
return os.path.join(app_data, 'ESRI', product_dir) | Returns the roaming AppData directory for the installed ArcGIS Desktop. |
def backtrack(self, source):
key = self.get_tok(source)
s = self[key]()
meta = s.metadata['original_source']
cls = meta['cls']
args = meta['args']
kwargs = meta['kwargs']
cls = import_name(cls)
sout = cls(*args, **kwargs)
sout.metadata = s.metadata['original_metadata']
sout.name = s.metadata['original_name']
return sout | Given a unique key in the store, recreate original source |
def _get_price_id_for_upgrade(self, package_items, option, value, public=True):
warnings.warn("use _get_price_id_for_upgrade_option() instead",
DeprecationWarning)
option_category = {
'memory': 'ram',
'cpus': 'guest_core',
'nic_speed': 'port_speed'
}
category_code = option_category[option]
for item in package_items:
is_private = (item.get('units') == 'PRIVATE_CORE')
for price in item['prices']:
if 'locationGroupId' in price and price['locationGroupId']:
continue
if 'categories' not in price:
continue
categories = price['categories']
for category in categories:
if not (category['categoryCode'] == category_code
and str(item['capacity']) == str(value)):
continue
if option == 'cpus':
if public and not is_private:
return price['id']
elif not public and is_private:
return price['id']
elif option == 'nic_speed':
if 'Public' in item['description']:
return price['id']
else:
return price['id'] | Find the price id for the option and value to upgrade.
Deprecated in favor of _get_price_id_for_upgrade_option()
:param list package_items: Contains all the items related to an VS
:param string option: Describes type of parameter to be upgraded
:param int value: The value of the parameter to be upgraded
:param bool public: CPU will be in Private/Public Node. |
def catch_mon_errors(conn, logger, hostname, cfg, args):
monmap = mon_status_check(conn, logger, hostname, args).get('monmap', {})
mon_initial_members = get_mon_initial_members(args, _cfg=cfg)
public_addr = cfg.safe_get('global', 'public_addr')
public_network = cfg.safe_get('global', 'public_network')
mon_in_monmap = [
mon.get('name')
for mon in monmap.get('mons', [{}])
if mon.get('name') == hostname
]
if mon_initial_members is None or not hostname in mon_initial_members:
logger.warning('%s is not defined in `mon initial members`', hostname)
if not mon_in_monmap:
logger.warning('monitor %s does not exist in monmap', hostname)
if not public_addr and not public_network:
logger.warning('neither `public_addr` nor `public_network` keys are defined for monitors')
logger.warning('monitors may not be able to form quorum') | Make sure we are able to catch up common mishaps with monitors
and use that state of a monitor to determine what is missing
and warn apropriately about it. |
def get_instance(self, payload):
return RoleInstance(self._version, payload, service_sid=self._solution['service_sid'], ) | Build an instance of RoleInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.chat.v2.service.role.RoleInstance
:rtype: twilio.rest.chat.v2.service.role.RoleInstance |
def raw_escape(pattern, unix=False):
pattern = util.norm_pattern(pattern, False, True)
return escape(pattern, unix) | Apply raw character transform before applying escape. |
def _init_credentials(self, oauth_token, oauth_token_secret):
"Depending on the state passed in, get self._oauth up and running"
if oauth_token and oauth_token_secret:
if self.verified:
self._init_oauth(oauth_token, oauth_token_secret)
else:
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
else:
oauth = OAuth1(
self.consumer_key,
client_secret=self.consumer_secret,
callback_uri=self.callback_uri,
rsa_key=self.rsa_key,
signature_method=self._signature_method
)
url = self.base_url + REQUEST_TOKEN_URL
headers = {'User-Agent': self.user_agent}
response = requests.post(url=url, headers=headers, auth=oauth)
self._process_oauth_response(response) | Depending on the state passed in, get self._oauth up and running |
def _find_keys(self, identity='image'):
prefix = add_prefix('', identity)
raw_keys = self._find_keys_raw(prefix) or []
for raw_key in raw_keys:
yield del_prefix(raw_key) | Finds and returns all keys for identity, |
def read_table(filename, sheetname, index_col=None):
if sheetname is None or \
(hasattr(sheetname, '__iter__') \
and not isinstance(sheetname, six.string_types)):
raise TypeError("sheetname should specify a single sheet")
if packaging.version.parse(pd.__version__) \
< packaging.version.parse('0.21'):
table = pd.read_excel(filename,
sheetname=sheetname,
index_col=index_col)
else:
table = pd.read_excel(filename,
sheet_name=sheetname,
index_col=index_col)
if index_col is not None:
table = table[pd.notnull(table.index)]
if table.index.has_duplicates:
raise ValueError("sheet {} on file {} contains duplicated values "
"for column {}".format(sheetname, filename, index_col))
return table | Return the contents of an Excel table as a pandas DataFrame.
Parameters
----------
filename : str
Name of the Excel file to read.
sheetname : str or int
Name or index of the sheet inside the Excel file to read.
index_col : str, optional
Column name or index to be used as row labels of the DataFrame. If
None, default index will be used.
Returns
-------
table : DataFrame
A DataFrame containing the data in the specified Excel table. If
`index_col` is not None, rows in which their `index_col` field
is empty will not be present in `table`.
Raises
------
ValueError
If `index_col` is specified and two rows contain the same
`index_col` field. |
def map(self, data, layout):
msg = "{} should implement this method."
raise NotImplementedError(
msg.format(self.__class.__name__)) | Assign a data points to panels
Parameters
----------
data : DataFrame
Data for a layer
layout : DataFrame
As returned by self.compute_layout
Returns
-------
data : DataFrame
Data with all points mapped to the panels
on which they will be plotted. |
def collect_cases(data_dir):
cases = {}
for root, dirs, files in os.walk(data_dir):
if not dirs:
split_case = os.path.relpath(root, data_dir).split(os.path.sep)
if split_case[0] not in cases:
cases[split_case[0]] = []
cases[split_case[0]].append("-".join(split_case[1:]))
return cases | Find all cases and subcases of a particular run type |
def embedManifestDllCheck(target, source, env):
if env.get('WINDOWS_EMBED_MANIFEST', 0):
manifestSrc = target[0].get_abspath() + '.manifest'
if os.path.exists(manifestSrc):
ret = (embedManifestDllAction) ([target[0]],None,env)
if ret:
raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
return ret
else:
print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
return 0 | Function run by embedManifestDllCheckAction to check for existence of manifest
and other conditions, and embed the manifest by calling embedManifestDllAction if so. |
def args(parsed_args, name=None):
strings = parsed_args.arg_strings(name)
files = [s for s in strings if os.path.isfile(s)]
if files:
streams = [open(f) for f in files]
else:
streams = []
if getattr(parsed_args, 'paste', not files):
streams.append(clipboard_stream())
if getattr(parsed_args, 'stdin', False):
streams.append(sys.stdin)
elif not streams:
streams = [sys.stdin]
return streams | Interpret parsed args to streams |
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context) | Create a new server instance that is either threaded, or forks
or just processes one request after another. |
def _feature_most_population(self, results):
try:
populations = [i['population'] for i in results['hits']['hits']]
most_pop = results['hits']['hits'][np.array(populations).astype("int").argmax()]
return most_pop['country_code3']
except Exception as e:
return "" | Find the placename with the largest population and return its country.
More population is a rough measure of importance.
Paramaters
----------
results: dict
output of `query_geonames`
Returns
-------
most_pop: str
ISO code of country of place with largest population,
or empty string if none |
def get_previous_tag(cls, el):
sibling = el.previous_sibling
while not cls.is_tag(sibling) and sibling is not None:
sibling = sibling.previous_sibling
return sibling | Get previous sibling tag. |
def events(self, event_id):
json = self.request('/events/%s' % event_id, method='GET')
status = json.get('status')
if status == 'OK':
event_json = json.get('event')
event = Event.from_json(event_json)
return event
else:
message = json.get('message')
raise DOPException('[%s]: %s' % (status, message)) | This method is primarily used to report on the progress of an event
by providing the percentage of completion.
Required parameters
event_id:
Numeric, this is the id of the event you would like more
information about |
def env(mounts):
f_mounts = [m.strip("/") for m in mounts]
root = local.path("/")
ld_libs = [root / m / "lib" for m in f_mounts]
ld_libs.extend([root / m / "lib64" for m in f_mounts])
paths = [root / m / "bin" for m in f_mounts]
paths.extend([root / m / "sbin" for m in f_mounts])
paths.extend([root / m for m in f_mounts])
return paths, ld_libs | Compute the environment of the change root for the user.
Args:
mounts: The mountpoints of the current user.
Return:
paths
ld_libs |
def dinfContributingArea(self,
contributing_area_grid,
flow_dir_grid,
outlet_shapefile=None,
weight_grid=None,
edge_contamination=False,
):
log("PROCESS: DinfContributingArea")
cmd = [os.path.join(self.taudem_exe_path, 'areadinf'),
'-ang', flow_dir_grid,
'-sca', contributing_area_grid,
]
if outlet_shapefile:
cmd += ['-o', outlet_shapefile]
if weight_grid:
cmd += ['-wg', weight_grid]
if not edge_contamination:
cmd = cmd + ['-nc']
self._run_mpi_cmd(cmd)
self._add_prj_file(flow_dir_grid,
contributing_area_grid) | Calculates contributing area with Dinf method. |
def update(self, other=(), **kwargs):
_kwargs = dict(kwargs)
_kwargs.update(other)
for key, value in _kwargs.items():
self[key] = value | Just like `dict.update` |
def song(self):
if self._song is None:
self._song = Song(self._song_data)
return self._song | the song associated with the project |
def get_monomers(self, ligands=True, pseudo_group=False):
base_filters = dict(ligands=ligands, pseudo_group=pseudo_group)
restricted_mol_types = [x[0] for x in base_filters.items() if not x[1]]
in_groups = [x for x in self.filter_mol_types(restricted_mol_types)]
monomers = itertools.chain(
*(p.get_monomers(ligands=ligands) for p in in_groups))
return monomers | Retrieves all the `Monomers` from the `Assembly` object.
Parameters
----------
ligands : bool, optional
If `true`, will include ligand `Monomers`.
pseudo_group : bool, optional
If `True`, will include pseudo atoms. |
def is_text_type(text):
if isinstance(text, six.text_type) or isinstance(text, six.string_types):
return True
return False | Check if given parameter is a string or not
Parameters
----------
text : *
Parameter to be checked for text type
Returns
-------
bool
Whether parameter is a string or not |
def initialize(self, params, repetition):
super(TinyCIFARExperiment, self).initialize(params, repetition)
self.network_type = params.get("network_type", "sparse") | Initialize experiment parameters and default values from configuration file.
Called at the beginning of each experiment and each repetition. |
def safe_request(
url,
method=None,
params=None,
data=None,
json=None,
headers=None,
allow_redirects=False,
timeout=30,
verify_ssl=True,
):
session = requests.Session()
kwargs = {}
if json:
kwargs['json'] = json
if not headers:
headers = {}
headers.setdefault('Content-Type', 'application/json')
if data:
kwargs['data'] = data
if params:
kwargs['params'] = params
if headers:
kwargs['headers'] = headers
if method is None:
method = 'POST' if (data or json) else 'GET'
response = session.request(
method=method,
url=url,
allow_redirects=allow_redirects,
timeout=timeout,
verify=verify_ssl,
**kwargs
)
return response | A slightly safer version of `request`. |
def load(self, path):
config = _Config.load(os.path.join(path, 'config.pkl'))
config.save_dir = path
self._vocab = vocab = ParserVocabulary.load(config.save_vocab_path)
with mx.Context(mxnet_prefer_gpu()):
self._parser = BiaffineParser(vocab, config.word_dims, config.tag_dims, config.dropout_emb,
config.lstm_layers,
config.lstm_hiddens, config.dropout_lstm_input, config.dropout_lstm_hidden,
config.mlp_arc_size,
config.mlp_rel_size, config.dropout_mlp, config.debug)
self._parser.load(config.save_model_path)
return self | Load from disk
Parameters
----------
path : str
path to the directory which typically contains a config.pkl file and a model.bin file
Returns
-------
DepParser
parser itself |
def stop_listener_thread(self):
self.should_listen = False
if self.sync_thread:
self.sync_thread.kill()
self.sync_thread.get()
if self._handle_thread is not None:
self._handle_thread.get()
self.sync_thread = None
self._handle_thread = None | Kills sync_thread greenlet before joining it |
def _dumpNdarrayToFile(filelike, ndarray):
bytedata = ndarray.tobytes('C')
start = filelike.tell()
end = start + len(bytedata)
metadata = {'start': start, 'end': end, 'size': ndarray.size,
'dtype': ndarray.dtype.name, 'shape': ndarray.shape
}
filelike.write(bytedata)
return metadata | Serializes an N-dimensional ``numpy.array`` to bytes, writes the bytes to
the filelike object and returns a dictionary with metadata, necessary to
restore the ``numpy.array`` from the file.
:param filelike: can be a file or a file-like object that provides the
methods ``.write()`` and ``.tell()``.
:param ndarray: a N-dimensional ``numpy.array``
:returns: a metadata dictionary ::
{'start': start position in the file, 'end': end position in the file,
'size': size of the array, 'dtype': numpy data type of the array,
'shape': description of the array shape
} |
def clone_with_git(repo_uri, dest_path):
log.info('Cloning git repo %s to %s', repo_uri, dest_path)
git.Repo.clone_from(repo_uri, dest_path, depth=1) | Create a clone by cloning a git repository.
Args:
repo_uri: The URI of the git repository to clone.
dest_path: The location to clone to. |
def delete_network_postcommit(self, context):
segments = context.network_segments
network_name = context.current['name']
for segment in segments:
if not self.check_segment(segment):
return
vlan_id = segment.get(api.SEGMENTATION_ID)
if not vlan_id:
return
port_profile = self.make_profile_name(vlan_id)
trunk_vlans = (
CONF.sriov_multivlan_trunk.network_vlans.get(network_name, []))
self.driver.delete_all_config_for_vlan(vlan_id, port_profile,
trunk_vlans) | Delete all configuration added to UCS Manager for the vlan_id. |
def make_full_qualified_url(self, path: str) -> str:
return self.application_uri.rstrip('/') + '/' + path.lstrip('/') | append application url to path |
def aws(product, tile, folder, redownload, info, entire, bands, l2a):
band_list = None if bands is None else bands.split(',')
data_source = DataSource.SENTINEL2_L2A if l2a else DataSource.SENTINEL2_L1C
if info:
if product is None:
click.echo(get_safe_format(tile=tile, entire_product=entire, data_source=data_source))
else:
click.echo(get_safe_format(product_id=product))
else:
if product is None:
download_safe_format(tile=tile, folder=folder, redownload=redownload, entire_product=entire,
bands=band_list, data_source=data_source)
else:
download_safe_format(product_id=product, folder=folder, redownload=redownload, bands=band_list) | Download Sentinel-2 data from Sentinel-2 on AWS to ESA SAFE format. Download uses multiple threads.
\b
Examples with Sentinel-2 L1C data:
sentinelhub.aws --product S2A_MSIL1C_20170414T003551_N0204_R016_T54HVH_20170414T003551
sentinelhub.aws --product S2A_MSIL1C_20170414T003551_N0204_R016_T54HVH_20170414T003551 -i
sentinelhub.aws --product S2A_MSIL1C_20170414T003551_N0204_R016_T54HVH_20170414T003551 -f /home/ESA_Products
sentinelhub.aws --product S2A_MSIL1C_20170414T003551_N0204_R016_T54HVH_20170414T003551 --bands B08,B11
sentinelhub.aws --tile T54HVH 2017-04-14
sentinelhub.aws --tile T54HVH 2017-04-14 -e
\b
Examples with Sentinel-2 L2A data:
sentinelhub.aws --product S2A_MSIL2A_20180402T151801_N0207_R068_T33XWJ_20180402T202222
sentinelhub.aws --tile T33XWJ 2018-04-02 --l2a |
def _pot_month_counts(self, pot_dataset):
periods = pot_dataset.continuous_periods()
result = [set() for x in range(12)]
for period in periods:
year = period.start_date.year
month = period.start_date.month
while True:
result[month - 1].add(year)
if year == period.end_date.year and month == period.end_date.month:
break
month += 1
if month == 13:
month = 1
year += 1
return result | Return a list of 12 sets. Each sets contains the years included in the POT record period.
:param pot_dataset: POT dataset (records and meta data)
:type pot_dataset: :class:`floodestimation.entities.PotDataset` |
def check_attr_dimension(attr_id, **kwargs):
attr_i = _get_attr(attr_id)
datasets = db.DBSession.query(Dataset).filter(Dataset.id == ResourceScenario.dataset_id,
ResourceScenario.resource_attr_id == ResourceAttr.id,
ResourceAttr.attr_id == attr_id).all()
bad_datasets = []
for d in datasets:
if attr_i.dimension_id is None and d.unit is not None or \
attr_i.dimension_id is not None and d.unit is None or \
units.get_dimension_by_unit_id(d.unit_id) != attr_i.dimension_id:
bad_datasets.append(d.id)
if len(bad_datasets) > 0:
raise HydraError("Datasets %s have a different dimension_id to attribute %s"%(bad_datasets, attr_id))
return 'OK' | Check that the dimension of the resource attribute data is consistent
with the definition of the attribute.
If the attribute says 'volume', make sure every dataset connected
with this attribute via a resource attribute also has a dimension
of 'volume'. |
def get_columns(self, index, columns=None, as_dict=False):
i = sorted_index(self._index, index) if self._sort else self._index.index(index)
return self.get_location(i, columns, as_dict) | For a single index and list of column names return a DataFrame of the values in that index as either a dict
or a DataFrame
:param index: single index value
:param columns: list of column names
:param as_dict: if True then return the result as a dictionary
:return: DataFrame or dictionary |
def validate(self):
if self.status_code == 200 and self.data.get("ok", False):
self._logger.debug("Received the following response: %s", self.data)
return self
msg = "The request to the Slack API failed."
raise e.SlackApiError(message=msg, response=self.data) | Check if the response from Slack was successful.
Returns:
(SlackResponse)
This method returns it's own object. e.g. 'self'
Raises:
SlackApiError: The request to the Slack API failed. |
def _validate_format(req):
for key in SLOJSONRPC._min_keys:
if not key in req:
logging.debug('JSONRPC: Fmt Error: Need key "%s"' % key)
raise SLOJSONRPCError(-32600)
for key in req.keys():
if not key in SLOJSONRPC._allowed_keys:
logging.debug('JSONRPC: Fmt Error: Not allowed key "%s"' % key)
raise SLOJSONRPCError(-32600)
if req['jsonrpc'] != '2.0':
logging.debug('JSONRPC: Fmt Error: "jsonrpc" needs to be "2.0"')
raise SLOJSONRPCError(-32600) | Validate jsonrpc compliance of a jsonrpc-dict.
req - the request as a jsonrpc-dict
raises SLOJSONRPCError on validation error |
def pre_run_hook(self, func, prefix=None):
cf = self.capture(func, prefix=prefix)
self.pre_run_hooks.append(cf)
return cf | Decorator to add a pre-run hook to this ingredient.
Pre-run hooks are captured functions that are run, just before the
main function is executed. |
def to_execution_plan(self,
domain,
default_screen,
start_date,
end_date):
if self._domain is not GENERIC and self._domain is not domain:
raise AssertionError(
"Attempted to compile Pipeline with domain {} to execution "
"plan with different domain {}.".format(self._domain, domain)
)
return ExecutionPlan(
domain=domain,
terms=self._prepare_graph_terms(default_screen),
start_date=start_date,
end_date=end_date,
) | Compile into an ExecutionPlan.
Parameters
----------
domain : zipline.pipeline.domain.Domain
Domain on which the pipeline will be executed.
default_screen : zipline.pipeline.term.Term
Term to use as a screen if self.screen is None.
all_dates : pd.DatetimeIndex
A calendar of dates to use to calculate starts and ends for each
term.
start_date : pd.Timestamp
The first date of requested output.
end_date : pd.Timestamp
The last date of requested output.
Returns
-------
graph : zipline.pipeline.graph.ExecutionPlan
Graph encoding term dependencies, including metadata about extra
row requirements. |
def update_leads_list(self, leads_list_id, name, team_id=None):
params = self.base_params
payload = {'name': name}
if team_id:
payload['team_id'] = team_id
endpoint = self.base_endpoint.format('leads_lists/' + str(leads_list_id))
return self._query_hunter(endpoint, params, 'put', payload) | Update a leads list.
:param name: Name of the list to update. Must be defined.
:param team_id: The id of the list to share this list with.
:return: 204 Response. |
def list_images(self):
r = self.get(self.registry_url + '/v2/_catalog', auth=self.auth)
return r.json()['repositories'] | List images stored in the registry.
Returns:
list[str]: List of image names. |
def upgradeBatch(self, n):
store = self.store
def _doBatch(itemType):
upgradedAnything = False
for theItem in store.query(itemType, limit=n):
upgradedAnything = True
try:
self.upgradeItem(theItem)
except:
f = Failure()
raise ItemUpgradeError(
f, theItem.storeID, itemType,
_typeNameToMostRecentClass[itemType.typeName])
return upgradedAnything
if self.upgradesPending:
didAny = False
while self._oldTypesRemaining:
t0 = self._oldTypesRemaining[0]
upgradedAnything = store.transact(_doBatch, t0)
if not upgradedAnything:
self._oldTypesRemaining.pop(0)
if didAny:
msg("%s finished upgrading %s" % (store.dbdir.path, qual(t0)))
continue
elif not didAny:
didAny = True
msg("%s beginning upgrade..." % (store.dbdir.path,))
yield None
if didAny:
msg("%s completely upgraded." % (store.dbdir.path,)) | Upgrade the entire store in batches, yielding after each batch.
@param n: Number of upgrades to perform per transaction
@type n: C{int}
@raise axiom.errors.ItemUpgradeError: if an item upgrade failed
@return: A generator that yields after each batch upgrade. This needs
to be consumed for upgrading to actually take place. |
def _warmup(self, num_updates):
assert self.base_lr is not None
if not self.warmup:
return self.base_lr
fraction = (num_updates + 1) * self.base_lr / (self.warmup + 1)
if num_updates > self.last_warmup_log and num_updates % self.log_warmup_every_t == 0:
self.last_warmup_log = num_updates
logger.info("Learning rate warmup: %3.0f%%", fraction / self.base_lr * 100.0)
return fraction | Returns linearly increasing fraction of base_lr. |
def list_subnetpools(self, retrieve_all=True, **_params):
return self.list('subnetpools', self.subnetpools_path, retrieve_all,
**_params) | Fetches a list of all subnetpools for a project. |
def require_foreign(namespace, symbol=None):
try:
if symbol is None:
get_foreign_module(namespace)
else:
get_foreign_struct(namespace, symbol)
except ForeignError as e:
raise ImportError(e) | Raises ImportError if the specified foreign module isn't supported or
the needed dependencies aren't installed.
e.g.: check_foreign('cairo', 'Context') |
def downloadSessionImages(server, filename=None, height=150, width=150,
opacity=100, saturation=100):
info = {}
for media in server.sessions():
url = None
for part in media.iterParts():
if media.thumb:
url = media.thumb
if part.indexes:
url = '/library/parts/%s/indexes/%s/%s' % (part.id, part.indexes.lower(), media.viewOffset)
if url:
if filename is None:
prettyname = media._prettyfilename()
filename = 'session_transcode_%s_%s_%s' % (media.usernames[0], prettyname, int(time.time()))
url = server.transcodeImage(url, height, width, opacity, saturation)
filepath = download(url, filename=filename)
info['username'] = {'filepath': filepath, 'url': url}
return info | Helper to download a bif image or thumb.url from plex.server.sessions.
Parameters:
filename (str): default to None,
height (int): Height of the image.
width (int): width of the image.
opacity (int): Opacity of the resulting image (possibly deprecated).
saturation (int): Saturating of the resulting image.
Returns:
{'hellowlol': {'filepath': '<filepath>', 'url': 'http://<url>'},
{'<username>': {filepath, url}}, ... |
async def set_as_default_gateway(self):
interface = self._data['interface']
await interface._handler.set_default_gateway(
system_id=interface.node.system_id, id=interface.id,
link_id=self.id) | Set this link as the default gateway for the node. |
def _flush_ndb_puts(self, items, options):
assert ndb is not None
ndb.put_multi(items, config=self._create_config(options)) | Flush all NDB puts to datastore. |
def prefetch_translations(instances, **kwargs):
from .mixins import ModelMixin
if not isinstance(instances, collections.Iterable):
instances = [instances]
populate_missing = kwargs.get("populate_missing", True)
grouped_translations = utils.get_grouped_translations(instances, **kwargs)
if not grouped_translations and populate_missing:
for instance in instances:
instance.populate_missing_translations()
for instance in instances:
if (
issubclass(instance.__class__, ModelMixin)
and instance.pk in grouped_translations
):
for translation in grouped_translations[instance.pk]:
instance._linguist.set_cache(instance=instance, translation=translation)
if populate_missing:
instance.populate_missing_translations() | Prefetches translations for the given instances.
Can be useful for a list of instances. |
def retry(self):
logger.info('Job {0} retrying all failed tasks'.format(self.name))
self.initialize_snapshot()
failed_task_names = []
for task_name, log in self.run_log['tasks'].items():
if log.get('success', True) == False:
failed_task_names.append(task_name)
if len(failed_task_names) == 0:
raise DagobahError('no failed tasks to retry')
self._set_status('running')
self.run_log['last_retry_time'] = datetime.utcnow()
logger.debug('Job {0} seeding run logs'.format(self.name))
for task_name in failed_task_names:
self._put_task_in_run_log(task_name)
self.tasks[task_name].start()
self._commit_run_log() | Restarts failed tasks of a job. |
def register_entry_points(self):
for entrypoint in entrypoints.get_group_all("papermill.engine"):
self.register(entrypoint.name, entrypoint.load()) | Register entrypoints for an engine
Load handlers provided by other packages |
def _promote_solitary_xvowel(self):
char_type = self.active_char_type
if char_type == VOWEL or char_type == CV or self.active_xvowel is None:
return
self._set_char(self.active_xvowel, XVOWEL)
self.active_xvowel = None
self.active_xvowel_info = None | "Promotes" the current xvowel to a regular vowel, in case
it is not otherwise connected to a character.
Used to print small vowels that would otherwise get lost;
normally small vowels always form a pair, but in case one is
by itself it should basically act like a regular vowel. |
def rule(ctxt, name):
if name in ctxt.rule_cache:
ctxt.stack.append(ctxt.rule_cache[name])
return
try:
rule = ctxt.policy[name]
except KeyError:
log = logging.getLogger('policies')
log.warn("Request to evaluate non-existant rule %r "
"while evaluating rule %r" % (name, ctxt.name))
ctxt.stack.append(False)
ctxt.rule_cache[name] = False
return
with ctxt.push_rule(name):
rule.instructions(ctxt, True)
ctxt.rule_cache[name] = ctxt.stack[-1] | Allows evaluation of another rule while evaluating a rule.
:param ctxt: The evaluation context for the rule.
:param name: The name of the rule to evaluate. |
def catalog(self, table='', column=''):
lookup_table = self.lookup_table
if lookup_table is not None:
if table:
if column:
column = column.upper()
return lookup_table[table][column]
return lookup_table[table]
return self.lookup_methods
return None | Lookup the values available for querying. |
def _check_pattern_list(patterns, key, default=None):
if not patterns:
return default
if isinstance(patterns, basestring):
return [patterns]
if isinstance(patterns, list):
if all(isinstance(p, basestring) for p in patterns):
return patterns
raise ValueError("Invalid file patterns in key '{}': must be a string or "
'list of strings'.format(key)) | Validates file search patterns from user configuration.
Acceptable input is a string (which will be converted to a singleton list),
a list of strings, or anything falsy (such as None or an empty dictionary).
Empty or unset input will be converted to a default.
Args:
patterns: input from user configuration (YAML).
key (str): name of the configuration key the input came from,
used for error display purposes.
Keyword Args:
default: value to return in case the input is empty or unset.
Returns:
list[str]: validated list of patterns
Raises:
ValueError: if the input is unacceptable. |
def _sanitize_resources(cls, resources):
try:
for resource in cls._loop_raw(resources):
cls._sanitize_resource(resource)
except (KeyError, TypeError):
_LOGGER.debug("no shade data available")
return None | Loops over incoming data looking for base64 encoded data and
converts them to a readable format. |
def make_wcs_data_from_hpx_data(self, hpx_data, wcs, normalize=True):
wcs_data = np.zeros(wcs.npix)
self.fill_wcs_map_from_hpx_data(hpx_data, wcs_data, normalize)
return wcs_data | Creates and fills a wcs map from the hpx data using the pre-calculated
mappings
hpx_data : the input HEALPix data
wcs : the WCS object
normalize : True -> perserve integral by splitting HEALPix values between bins |
def rouge_1(hypotheses, references):
rouge_1 = [
rouge_n([hyp], [ref], 1) for hyp, ref in zip(hypotheses, references)
]
rouge_1_f, _, _ = map(np.mean, zip(*rouge_1))
return rouge_1_f | Calculate ROUGE-1 F1, precision, recall scores |
def aggregate_data(self, start, end, aggregation, keys=[], tags=[],
attrs={}, rollup=None, period=None, interpolationf=None,
interpolation_period=None, tz=None, limit=1000):
url = 'segment'
vstart = check_time_param(start)
vend = check_time_param(end)
params = {
'start': vstart,
'end': vend,
'key': keys,
'tag': tags,
'attr': attrs,
'aggregation.fold': aggregation,
'rollup.fold': rollup,
'rollup.period': period,
'interpolation.function': interpolationf,
'interpolation.period': interpolation_period,
'tz': tz,
'limit': limit
}
url_args = endpoint.make_url_args(params)
url = '?'.join([url, url_args])
resp = self.session.get(url)
return resp | Read data from multiple series according to a filter and apply a
function across all the returned series to put the datapoints together
into one aggregrate series.
See the :meth:`list_series` method for a description of how the filter
criteria are applied, and the :meth:`read_data` method for how to
work with the start, end, and tz parameters.
Valid aggregation functions are the same as valid rollup functions.
:param string aggregation: the aggregation to perform
:param keys: (optional) filter by one or more series keys
:type keys: list or string
:param tags: (optional) filter by one or more tags
:type tags: list or string
:param dict attrs: (optional) filter by one or more key-value
attributes
:param start: the start time for the data points
:type start: string or Datetime
:param end: the end time for the data points
:type end: string or Datetime
:param string rollup: (optional) the name of a rollup function to use
:param string period: (optional) downsampling rate for the data
:param string interpolationf: (optional) an interpolation function
to run over the series
:param string interpolation_period: (optional) the period to
interpolate data into
:param string tz: (optional) the timezone to place the data into
:rtype: :class:`tempodb.protocol.cursor.DataPointCursor` with an
iterator over :class:`tempodb.protocol.objects.DataPoint`
objects |
def jd_to_datetime(jd):
year, month, day = jd_to_date(jd)
frac_days,day = math.modf(day)
day = int(day)
hour,min,sec,micro = days_to_hmsm(frac_days)
return datetime(year,month,day,hour,min,sec,micro) | Convert a Julian Day to an `jdutil.datetime` object.
Parameters
----------
jd : float
Julian day.
Returns
-------
dt : `jdutil.datetime` object
`jdutil.datetime` equivalent of Julian day.
Examples
--------
>>> jd_to_datetime(2446113.75)
datetime(1985, 2, 17, 6, 0) |
def generate_new_bracket(self):
logger.debug(
'start to create a new SuccessiveHalving iteration, self.curr_s=%d', self.curr_s)
if self.curr_s < 0:
logger.info("s < 0, Finish this round of Hyperband in BOHB. Generate new round")
self.curr_s = self.s_max
self.brackets[self.curr_s] = Bracket(s=self.curr_s, s_max=self.s_max, eta=self.eta,
max_budget=self.max_budget, optimize_mode=self.optimize_mode)
next_n, next_r = self.brackets[self.curr_s].get_n_r()
logger.debug(
'new SuccessiveHalving iteration, next_n=%d, next_r=%d', next_n, next_r)
generated_hyper_configs = self.brackets[self.curr_s].get_hyperparameter_configurations(
next_n, next_r, self.cg)
self.generated_hyper_configs = generated_hyper_configs.copy() | generate a new bracket |
def started(name):
ret = {'name': name,
'changes': {},
'comment': '',
'result': False}
volinfo = __salt__['glusterfs.info']()
if name not in volinfo:
ret['result'] = False
ret['comment'] = 'Volume {0} does not exist'.format(name)
return ret
if int(volinfo[name]['status']) == 1:
ret['comment'] = 'Volume {0} is already started'.format(name)
ret['result'] = True
return ret
elif __opts__['test']:
ret['comment'] = 'Volume {0} will be started'.format(name)
ret['result'] = None
return ret
vol_started = __salt__['glusterfs.start_volume'](name)
if vol_started:
ret['result'] = True
ret['comment'] = 'Volume {0} is started'.format(name)
ret['change'] = {'new': 'started', 'old': 'stopped'}
else:
ret['result'] = False
ret['comment'] = 'Failed to start volume {0}'.format(name)
return ret | Check if volume has been started
name
name of the volume
.. code-block:: yaml
mycluster:
glusterfs.started: [] |
def get_project(self, project_id):
try:
result = self._request('/getproject/',
{'projectid': project_id})
return TildaProject(**result)
except NetworkError:
return [] | Get project info |
def sumlogs(x, axis=None, out=None):
maxx = x.max(axis=axis, keepdims=True)
xnorm = x - maxx
np.exp(xnorm, out=xnorm)
out = np.sum(xnorm, axis=axis, out=out)
if isinstance(out, np.ndarray):
np.log(out, out=out)
else:
out = np.log(out)
out += np.squeeze(maxx)
return out | Sum of vector where numbers are represented by their logarithms.
Calculates ``np.log(np.sum(np.exp(x), axis=axis))`` in such a fashion that
it works even when elements have large magnitude. |
def _keep_this(self, name):
for keep_name in self.keep:
if name == keep_name:
return True
return False | Return True if there are to be no modifications to name. |
def _find_output_dependencies(self, outputs):
dependencies = []
for address in outputs:
dependencies.extend(
self._predecessor_tree.find_write_predecessors(address))
return dependencies | Use the predecessor tree to find dependencies based on outputs.
Returns: A list of transaction ids. |
def enable(self):
if not CrashReporter.active:
CrashReporter.active = True
self._excepthook = sys.excepthook
sys.excepthook = self.exception_handler
self.logger.info('CrashReporter: Enabled')
if self.report_dir:
if os.path.exists(self.report_dir):
if self.get_offline_reports():
self.submit_offline_reports()
remaining_reports = len(self.get_offline_reports())
if remaining_reports and self.watcher_enabled:
self.start_watcher()
else:
os.makedirs(self.report_dir) | Enable the crash reporter. CrashReporter is defaulted to be enabled on creation. |
def get_state(self, key=None, drop_defaults=False):
if key is None:
keys = self.keys
elif isinstance(key, string_types):
keys = [key]
elif isinstance(key, collections.Iterable):
keys = key
else:
raise ValueError("key must be a string, an iterable of keys, or None")
state = {}
traits = self.traits()
for k in keys:
to_json = self.trait_metadata(k, 'to_json', self._trait_to_json)
value = to_json(getattr(self, k), self)
if not PY3 and isinstance(traits[k], Bytes) and isinstance(value, bytes):
value = memoryview(value)
if not drop_defaults or not self._compare(value, traits[k].default_value):
state[k] = value
return state | Gets the widget state, or a piece of it.
Parameters
----------
key : unicode or iterable (optional)
A single property's name or iterable of property names to get.
Returns
-------
state : dict of states
metadata : dict
metadata for each field: {key: metadata} |
def get_nt_filename (path):
unc, rest = os.path.splitunc(path)
head, tail = os.path.split(rest)
if not tail:
return path
for fname in os.listdir(unc+head):
if fname.lower() == tail.lower():
return os.path.join(get_nt_filename(unc+head), fname)
log.error(LOG_CHECK, "could not find %r in %r", tail, head)
return path | Return case sensitive filename for NT path. |
def concat(self, axis, other, **kwargs):
return self._append_list_of_managers(other, axis, **kwargs) | Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects. |
def get_request(self):
request, client_addr = super(BoundedThreadingMixIn, self).get_request()
overload = False
with self._thread_guard:
if self._threads is not None and len(self._threads) + 1 > MAX_RPC_THREADS:
overload = True
if overload:
res = self.overloaded(client_addr)
request.sendall(res)
sys.stderr.write('{} - - [{}] "Overloaded"\n'.format(client_addr[0], time_str(time.time())))
self.shutdown_request(request)
return None, None
return request, client_addr | Accept a request, up to the given number of allowed threads.
Defer to self.overloaded if there are already too many pending requests. |
def new_media_status(self, media_status):
casts = self._casts
group_members = self._mz.members
for member_uuid in group_members:
if member_uuid not in casts:
continue
for listener in list(casts[member_uuid]['listeners']):
listener.multizone_new_media_status(
self._group_uuid, media_status) | Handle reception of a new MediaStatus. |
def _generate_energy_edges_single(ene):
midene = np.sqrt((ene[1:] * ene[:-1]))
elo, ehi = np.zeros(len(ene)) * ene.unit, np.zeros(len(ene)) * ene.unit
elo[1:] = ene[1:] - midene
ehi[:-1] = midene - ene[:-1]
elo[0] = ene[0] * (1 - ene[0] / (ene[0] + ehi[0]))
ehi[-1] = elo[-1]
return u.Quantity([elo, ehi]) | Generate energy edges for single group |
def on_close(self):
if self.id in self.funcserver.websocks:
self.funcserver.websocks[self.id] = None
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.add_callback(lambda: self.funcserver.websocks.pop(self.id, None))
psession = self.funcserver.pysessions.get(self.pysession_id, None)
if psession:
psession['socks'].remove(self.id)
if not psession['socks']:
del self.funcserver.pysessions[self.pysession_id] | Called when client closes this connection. Cleanup
is done here. |
def _clone(self, *args, **kwargs):
clone = super(VersionedQuerySet, self)._clone(**kwargs)
clone.querytime = self.querytime
return clone | Overrides the QuerySet._clone method by adding the cloning of the
VersionedQuerySet's query_time parameter
:param kwargs: Same as the original QuerySet._clone params
:return: Just as QuerySet._clone, this method returns a clone of the
original object |
def _init_structures(self, data, subjects):
x = []
mu = []
rho2 = np.zeros(subjects)
trace_xtx = np.zeros(subjects)
for subject in range(subjects):
mu.append(np.mean(data[subject], 1))
rho2[subject] = 1
trace_xtx[subject] = np.sum(data[subject] ** 2)
x.append(data[subject] - mu[subject][:, np.newaxis])
return x, mu, rho2, trace_xtx | Initializes data structures for SRM and preprocess the data.
Parameters
----------
data : list of 2D arrays, element i has shape=[voxels_i, samples]
Each element in the list contains the fMRI data of one subject.
subjects : int
The total number of subjects in `data`.
Returns
-------
x : list of array, element i has shape=[voxels_i, samples]
Demeaned data for each subject.
mu : list of array, element i has shape=[voxels_i]
Voxel means over samples, per subject.
rho2 : array, shape=[subjects]
Noise variance :math:`\\rho^2` per subject.
trace_xtx : array, shape=[subjects]
The squared Frobenius norm of the demeaned data in `x`. |
def crossvalidate_model(cls, clusterer, data, num_folds, rnd):
return javabridge.static_call(
"Lweka/clusterers/ClusterEvaluation;", "crossValidateModel",
"(Lweka/clusterers/DensityBasedClusterer;Lweka/core/Instances;ILjava/util/Random;)D",
clusterer.jobject, data.jobject, num_folds, rnd.jobject) | Cross-validates the clusterer and returns the loglikelihood.
:param clusterer: the clusterer instance to evaluate
:type clusterer: Clusterer
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:return: the cross-validated loglikelihood
:rtype: float |
def _clone_block_and_wires(block_in):
block_in.sanity_check()
block_out = block_in.__class__()
temp_wv_map = {}
with set_working_block(block_out, no_sanity_check=True):
for wirevector in block_in.wirevector_subset():
new_wv = clone_wire(wirevector)
temp_wv_map[wirevector] = new_wv
return block_out, temp_wv_map | This is a generic function to copy the WireVectors for another round of
synthesis This does not split a WireVector with multiple wires.
:param block_in: The block to change
:param synth_name: a name to prepend to all new copies of a wire
:return: the resulting block and a WireVector map |
def route(
self,
uri,
methods=frozenset({"GET"}),
host=None,
strict_slashes=None,
stream=False,
version=None,
name=None,
):
if strict_slashes is None:
strict_slashes = self.strict_slashes
def decorator(handler):
route = FutureRoute(
handler,
uri,
methods,
host,
strict_slashes,
stream,
version,
name,
)
self.routes.append(route)
return handler
return decorator | Create a blueprint route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:param methods: list of acceptable HTTP methods.
:param host: IP Address of FQDN for the sanic server to use.
:param strict_slashes: Enforce the API urls are requested with a
training */*
:param stream: If the route should provide a streaming support
:param version: Blueprint Version
:param name: Unique name to identify the Route
:return a decorated method that when invoked will return an object
of type :class:`FutureRoute` |
def parallel_scan(self, scan_id, target):
try:
ret = self.exec_scan(scan_id, target)
if ret == 0:
self.add_scan_host_detail(scan_id, name='host_status',
host=target, value='0')
elif ret == 1:
self.add_scan_host_detail(scan_id, name='host_status',
host=target, value='1')
elif ret == 2:
self.add_scan_host_detail(scan_id, name='host_status',
host=target, value='2')
else:
logger.debug('%s: No host status returned', target)
except Exception as e:
self.add_scan_error(scan_id, name='', host=target,
value='Host process failure (%s).' % e)
logger.exception('While scanning %s:', target)
else:
logger.info("%s: Host scan finished.", target) | Starts the scan with scan_id. |
def _get_access_type(self, mode):
access_type = None
for char in mode:
if char in 'bt':
if access_type is not None:
raise IOError('File mode "%s" contains contradictory flags' % mode)
access_type = char
elif char not in 'rbt':
raise NotImplementedError(
'%s objects are read-only; unsupported mode "%s"'%
(type(self), mode))
if access_type is None: access_type = 't'
return access_type | Make sure mode is appropriate; return 'b' for binary access and 't' for text |
def merge(filehandle_1, filehandle_2, output_filehandle):
line2 = filehandle_2.readline()
for line1 in filehandle_1.readlines():
while line2 != '' and line2 <= line1:
output_filehandle.write(line2)
line2 = filehandle_2.readline()
output_filehandle.write(line1)
while line2 != '':
output_filehandle.write(line2)
line2 = filehandle_2.readline() | Merges together two files maintaining sorted order. |
def network(n):
tpm(n.tpm)
connectivity_matrix(n.cm)
if n.cm.shape[0] != n.size:
raise ValueError("Connectivity matrix must be NxN, where N is the "
"number of nodes in the network.")
return True | Validate a |Network|.
Checks the TPM and connectivity matrix. |
def _connect_hive(self, hive):
try:
handle = self._remote_hives[hive]
except KeyError:
handle = win32.RegConnectRegistry(self._machine, hive)
self._remote_hives[hive] = handle
return handle | Connect to the specified hive of a remote Registry.
@note: The connection will be cached, to close all connections and
erase this cache call the L{close} method.
@type hive: int
@param hive: Hive to connect to.
@rtype: L{win32.RegistryKeyHandle}
@return: Open handle to the remote Registry hive. |
def reference(self, tkn: str):
return self.grammarelts[tkn] if tkn in self.grammarelts else UndefinedElement(tkn) | Return the element that tkn represents |
def _create_connection(self):
transport = SSL if self.ssl else TCP
return transport(self.server, self.port) | Creates a transport channel.
:return: transport channel instance
:rtype: :class:`fatbotslim.irc.tcp.TCP` or :class:`fatbotslim.irc.tcp.SSL` |
def complete_pool_name(arg):
search_string = '^'
if arg is not None:
search_string += arg
res = Pool.search({
'operator': 'regex_match',
'val1': 'name',
'val2': search_string
})
ret = []
for p in res['result']:
ret.append(p.name)
return ret | Returns list of matching pool names |
def validate(mcs, bases, attributes):
if bases[0] is object:
return None
mcs.check_model_cls(attributes)
mcs.check_include_exclude(attributes)
mcs.check_properties(attributes) | Check attributes. |
def array_bytes(array):
return np.product(array.shape)*np.dtype(array.dtype).itemsize | Estimates the memory of the supplied array in bytes |
def search(self, term):
r = requests.get(self.apiurl + "/users", params={"filter[name]": term}, headers=self.header)
if r.status_code != 200:
raise ServerError
jsd = r.json()
if jsd['meta']['count']:
return SearchWrapper(jsd['data'], jsd['links']['next'] if 'next' in jsd['links'] else None, self.header)
else:
return None | Search for a user by name.
:param str term: What to search for.
:return: The results as a SearchWrapper iterator or None if no results.
:rtype: SearchWrapper or None |
def _save_json(self, filename):
with open(filename, 'w') as file_handle:
json.dump(self._sensors, file_handle, cls=MySensorsJSONEncoder,
indent=4)
file_handle.flush()
os.fsync(file_handle.fileno()) | Save sensors to json file. |
def _link(self, base_dir: str, artifact_map: dict, conf: Config):
num_linked = 0
for dst, src in artifact_map.items():
abs_src = join(conf.project_root, src)
abs_dest = join(conf.project_root, base_dir, dst)
link_node(abs_src, abs_dest)
num_linked += 1
return num_linked | Link all artifacts in `artifact_map` under `base_dir` and return
the number of artifacts linked. |
def get_delimited_message_bytes(byte_stream, nr=4):
(length, pos) = decoder._DecodeVarint32(byte_stream.read(nr), 0)
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Delimited message length (pos %d): %d" % (pos, length))
delimiter_bytes = nr - pos
byte_stream.rewind(delimiter_bytes)
message_bytes = byte_stream.read(length)
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Delimited message bytes (%d): %s" % (len(message_bytes), format_bytes(message_bytes)))
total_len = length + pos
return (total_len, message_bytes) | Parse a delimited protobuf message. This is done by first getting a protobuf varint from
the stream that represents the length of the message, then reading that amount of
from the message and then parse it.
Since the int can be represented as max 4 bytes, first get 4 bytes and try to decode.
The decoder returns the value and the position where the value was found, so we need
to rewind the buffer to the position, because the remaining bytes belong to the message
after. |
def to_dict(self):
as_dict = dict(self.payload or ())
as_dict['message'] = self.message
return as_dict | Return a dictionary representation of the exception. |
def EmitProto(cls):
result = "message %s {\n" % cls.__name__
for _, desc in sorted(iteritems(cls.type_infos_by_field_number)):
result += desc.Definition()
result += "}\n"
return result | Emits .proto file definitions. |
def tofile(self, f):
f.write(pack(self.FILE_FMT, self.scale, self.ratio,
self.initial_capacity, self.error_rate))
f.write(pack(b'<l', len(self.filters)))
if len(self.filters) > 0:
headerpos = f.tell()
headerfmt = b'<' + b'Q'*(len(self.filters))
f.write(b'.' * calcsize(headerfmt))
filter_sizes = []
for filter in self.filters:
begin = f.tell()
filter.tofile(f)
filter_sizes.append(f.tell() - begin)
f.seek(headerpos)
f.write(pack(headerfmt, *filter_sizes)) | Serialize this ScalableBloomFilter into the file-object
`f'. |
def mb_filter(fastq, cores):
filter_mb = partial(umi_filter)
p = multiprocessing.Pool(cores)
chunks = tz.partition_all(10000, read_fastq(fastq))
bigchunks = tz.partition_all(cores, chunks)
for bigchunk in bigchunks:
for chunk in p.map(filter_mb, list(bigchunk)):
for read in chunk:
sys.stdout.write(read) | Filters umis with non-ACGT bases
Expects formatted fastq files. |
def ID_colored_tube(color):
tubing_data_path = os.path.join(os.path.dirname(__file__), "data",
"3_stop_tubing.txt")
df = pd.read_csv(tubing_data_path, delimiter='\t')
idx = df["Color"] == color
return df[idx]['Diameter (mm)'].values[0] * u.mm | Look up the inner diameter of Ismatec 3-stop tubing given its color code.
:param color: Color of the 3-stop tubing
:type color: string
:returns: Inner diameter of the 3-stop tubing (mm)
:rtype: float
:Examples:
>>> from aguaclara.research.peristaltic_pump import ID_colored_tube
>>> from aguaclara.core.units import unit_registry as u
>>> ID_colored_tube("yellow-blue")
<Quantity(1.52, 'millimeter')>
>>> ID_colored_tube("orange-yellow")
<Quantity(0.51, 'millimeter')>
>>> ID_colored_tube("purple-white")
<Quantity(2.79, 'millimeter')> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.