code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def create_vcf(isomirs, matures, gtf, vcf_file=None):
global STDOUT
isomirs['sv'] = [_get_reference_position(m) for m in isomirs["isomir"]]
mirna = isomirs.groupby(['chrom']).sum()
sv = isomirs.groupby(['chrom', 'mature', 'sv'], as_index=False).sum()
sv["diff"] = isomirs.groupby(['chrom', 'mature', 'sv'], as_index=False).size().reset_index().loc[:,0]
pass_pos = _get_pct(sv, mirna)
if vcf_file:
with open(vcf_file, 'w') as out_handle:
STDOUT = out_handle
pass_pos = liftover(pass_pos, matures)
if gtf:
vcf_genome_file = vcf_file.replace(".vcf", "_genome.vcf")
with open(vcf_genome_file, 'w') as out_handle:
STDOUT = out_handle
pass_pos = liftover_to_genome(pass_pos, gtf) | Create vcf file of changes for all samples.
PASS will be ones with > 3 isomiRs supporting the position
and > 30% of reads, otherwise LOW |
def get_95_percentile_bleak(games_nr, n_back=500):
    """Gets the 95th percentile of bleakest_eval from bigtable.

    Looks at the last ``n_back`` games (or all games when fewer exist) and
    returns the 5th percentile of the bleakest-move evaluations — i.e. the
    95th percentile of "bleakness".
    """
    last_game = int(games_nr.latest_game_number)
    first_game = last_game - n_back if last_game >= n_back else 0
    bleak_moves = games_nr.bleakest_moves(first_game, last_game)
    evaluations = np.array([move[2] for move in bleak_moves])
    return np.percentile(evaluations, 5)
def pattern(self, pattern):
    """Create a pattern channel.

    Returns ``_Sender`` object implementing
    :class:`~aioredis.abc.AbcChannel`.
    """
    encoded = _converters[type(pattern)](pattern)
    key = (encoded, True)
    # Cache one sender per encoded pattern; True marks it as a pattern ref.
    if key not in self._refs:
        self._refs[key] = _Sender(self, encoded, is_pattern=True)
    return self._refs[key]
def search(self, q, search_handler=None, **kwargs):
params = {'q': q}
params.update(kwargs)
response = self._select(params, handler=search_handler)
decoded = self.decoder.decode(response)
self.log.debug(
"Found '%s' search results.",
(decoded.get('response', {}) or {}).get('numFound', 0)
)
return self.results_cls(decoded) | Performs a search and returns the results.
Requires a ``q`` for a string version of the query to run.
Optionally accepts ``**kwargs`` for additional options to be passed
through the Solr URL.
Returns ``self.results_cls`` class object (defaults to
``pysolr.Results``)
Usage::
# All docs.
results = solr.search('*:*')
# Search with highlighting.
results = solr.search('ponies', **{
'hl': 'true',
'hl.fragsize': 10,
}) |
def lastindex(*args, **kwargs):
    """Search a list backwards for an exact element,
    or element satisfying a predicate.

    Usage::

        lastindex(element, list_)
        lastindex(of=element, in_=list_)
        lastindex(where=predicate, in_=list_)

    :param element, of: Element to search for (by equality comparison)
    :param where: Predicate defining an element to search for
    :param list_, in_: List to search in
    :return: Index of the last matching element, or -1 if none was found

    .. versionadded:: 0.0.3
    """
    # Delegate to _index, scanning from the end (start=maxsize, step=-1).
    _element, index = _index(*args, start=sys.maxsize, step=-1, **kwargs)
    return index
def handle_stream(self, stream, address):
log.trace('Req client %s connected', address)
self.clients.append((stream, address))
unpacker = msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
except StreamClosedError:
log.trace('req client disconnected %s', address)
self.clients.remove((stream, address))
except Exception as e:
log.trace('other master-side exception: %s', e)
self.clients.remove((stream, address))
stream.close() | Handle incoming streams and add messages to the incoming queue |
def list_firewall_rules(self, retrieve_all=True, **_params):
    """Fetches a list of all firewall rules for a project."""
    path = self.firewall_rules_path
    return self.list('firewall_rules', path, retrieve_all, **_params)
def _reindex_multi(self, axes, copy, fill_value):
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value) | We are guaranteed non-Nones in the axes. |
def process_pybel_graph(graph):
    """Return a PybelProcessor by processing a PyBEL graph.

    Parameters
    ----------
    graph : pybel.struct.BELGraph
        A PyBEL graph to process

    Returns
    -------
    bp : PybelProcessor
        A PybelProcessor object which contains INDRA Statements in
        ``bp.statements``.
    """
    processor = PybelProcessor(graph)
    processor.get_statements()
    failures = processor.annot_manager.failures
    if failures:
        missing = sum(len(pairs) for pairs in failures.values())
        logger.warning('missing %d annotation pairs', missing)
    return processor
def clean_slug(self):
source = self.cleaned_data.get('slug', '')
lang_choice = self.language_code
if not source:
source = slugify(self.cleaned_data.get('title', ''))
qs = Post._default_manager.active_translations(lang_choice).language(lang_choice)
used = list(qs.values_list('translations__slug', flat=True))
slug = source
i = 1
while slug in used:
slug = '%s-%s' % (source, i)
i += 1
return slug | Generate a valid slug, in case the given one is taken |
def prepare_request_params(self, _query_params, _json_params):
self._query_params = dictset(
_query_params or self.request.params.mixed())
self._json_params = dictset(_json_params)
ctype = self.request.content_type
if self.request.method in ['POST', 'PUT', 'PATCH']:
if ctype == 'application/json':
try:
self._json_params.update(self.request.json)
except simplejson.JSONDecodeError:
log.error(
"Expecting JSON. Received: '{}'. "
"Request: {} {}".format(
self.request.body, self.request.method,
self.request.url))
self._json_params = BaseView.convert_dotted(self._json_params)
self._query_params = BaseView.convert_dotted(self._query_params)
self._params = self._query_params.copy()
self._params.update(self._json_params) | Prepare query and update params. |
def open(self):
    """Open the database connection (no-op when already open)."""
    if self._connection is not None:
        return
    # Lazily create the SQLite connection to the configured db file.
    self._connection = sqlite3.connect(self._dbfile)
def create(self):
    """Called to create the work space (a directory named ``self.name``)."""
    # Log the exact filesystem action before performing it.
    self.logger.log(logging.DEBUG, 'os.mkdir %s', self.name)
    os.mkdir(self.name)
def update_by_function(self, extra_doc, index, doc_type, id, querystring_args=None,
update_func=None, attempts=2):
if querystring_args is None:
querystring_args = {}
if update_func is None:
update_func = dict.update
for attempt in range(attempts - 1, -1, -1):
current_doc = self.get(index, doc_type, id, **querystring_args)
new_doc = update_func(current_doc, extra_doc)
if new_doc is None:
new_doc = current_doc
try:
return self.index(new_doc, index, doc_type, id,
version=current_doc._meta.version, querystring_args=querystring_args)
except VersionConflictEngineException:
if attempt <= 0:
raise
self.refresh(index) | Update an already indexed typed JSON document.
The update happens client-side, i.e. the current document is retrieved,
updated locally and finally pushed to the server. This may repeat up to
``attempts`` times in case of version conflicts.
:param update_func: A callable ``update_func(current_doc, extra_doc)``
that computes and returns the updated doc. Alternatively it may
update ``current_doc`` in place and return None. The default
``update_func`` is ``dict.update``.
:param attempts: How many times to retry in case of version conflict. |
def visit_assert(self, node, parent):
    """Visit an Assert node by returning a fresh instance of it."""
    new_node = nodes.Assert(node.lineno, node.col_offset, parent)
    # The failure message is optional on assert statements.
    message = self.visit(node.msg, new_node) if node.msg else None
    new_node.postinit(self.visit(node.test, new_node), message)
    return new_node
def _has_y(self, kwargs):
return (('y' in kwargs) or (self._element_y in kwargs) or
(self._type == 3 and self._element_1my in kwargs)) | Returns True if y is explicitly defined in kwargs |
def register(self, bucket, name_or_func, func=None):
    """Add a function to the registry by name.

    :param bucket: existing bucket key to register under
    :param name_or_func: either a callable (its ``__name__`` is used) or
        an explicit name string when ``func`` is also given
    :param func: the function to register under ``name_or_func``
    :raises TypeError: when no usable name/function pair can be resolved
    :raises AlreadyRegistered: when the name is already taken in the bucket
    """
    assert bucket in self, 'Bucket %s is unknown' % bucket
    if func is None and hasattr(name_or_func, '__name__'):
        name = name_or_func.__name__
        func = name_or_func
    elif func is not None:
        # BUG FIX: was ``elif func:`` — a falsy func (or a nameless object
        # with func=None) left ``name`` unbound, raising UnboundLocalError.
        name = name_or_func
    else:
        raise TypeError('register() requires either a callable with a '
                        '__name__ or an explicit name and function')
    if name in self[bucket]:
        raise AlreadyRegistered('The function %s is already registered' % name)
    self[bucket][name] = func
def update(self, users=None, groups=None):
if users is not None and isinstance(users, string_types):
users = (users,)
if groups is not None and isinstance(groups, string_types):
groups = (groups,)
data = {
'id': self.id,
'categorisedActors': {
'atlassian-user-role-actor': users,
'atlassian-group-role-actor': groups}}
super(Role, self).update(**data) | Add the specified users or groups to this project role. One of ``users`` or ``groups`` must be specified.
:param users: a user or users to add to the role
:type users: string, list or tuple
:param groups: a group or groups to add to the role
:type groups: string, list or tuple |
def get_dropbox_folder_location():
    """Try to locate the Dropbox folder.

    Returns:
        (str) Full path to the current Dropbox folder, or None when the
        host.db file cannot be read.
    """
    host_db_path = os.path.join(os.environ['HOME'], '.dropbox/host.db')
    try:
        with open(host_db_path, 'r') as f_hostdb:
            data = f_hostdb.read().split()
    except IOError:
        error("Unable to find your Dropbox install =(")
        # BUG FIX: ``data`` was unbound past this point, so falling through
        # raised NameError. NOTE(review): assumes error() reports (or exits);
        # confirm callers tolerate a None return.
        return None
    # host.db's second field is the base64-encoded Dropbox home path.
    dropbox_home = base64.b64decode(data[1]).decode()
    return dropbox_home
def WaitUntilComplete(self, poll_freq=2, timeout=None):
    """Poll until all request objects have completed.

    If status is 'notStarted' or 'executing' continue polling.
    If status is 'succeeded' then success, else log as error.

    :param poll_freq: polling interval in seconds
    :param timeout: overall timeout in seconds (None waits forever)
    :raises clc.RequestTimeoutException: when the timeout expires with
        requests still pending
    :return: int — the number of unsuccessful requests

    >>> clc.v2.Server(alias='BTDI',id='WA1BTDIKRT02').PowerOn().WaitUntilComplete()
    0
    """
    start_time = time.time()
    while len(self.requests):
        still_running = []
        for request in self.requests:
            status = request.Status()
            if status in ('notStarted', 'executing', 'resumed', 'queued', 'running'):
                still_running.append(request)
            elif status == 'succeeded':
                self.success_requests.append(request)
            elif status in ("failed", "unknown"):
                self.error_requests.append(request)
        self.requests = still_running
        # BUG FIX: original tested ``self.requests > 0`` — a list-to-int
        # comparison (TypeError on Python 3); use len() instead.
        if len(self.requests) > 0 and clc.v2.time_utils.TimeoutExpired(start_time, timeout):
            raise clc.RequestTimeoutException(
                'Timeout waiting for Requests: {0}'.format(self.requests[0].id),
                self.requests[0].Status())
        time.sleep(poll_freq)
    return len(self.error_requests)
def label(self, node):
    """Return textual description of this node.

    Joins whichever of type/name-or-value/module/size/totsize/parent-count
    fields are present on *node* into a single space-separated string.
    """
    parts = []
    if node.get('type'):
        parts.append(node['type'])
    if node.get('name'):
        parts.append(node['name'])
    elif node.get('value') is not None:
        # PY3 FIX: ``unicode`` does not exist on Python 3; ``str`` is the
        # equivalent text type here. Value is truncated to 32 chars.
        parts.append(str(node['value'])[:32])
    if 'module' in node and not node['module'] in parts:
        parts.append(' in %s' % (node['module']))
    if node.get('size'):
        parts.append('%s' % (mb(node['size'])))
    if node.get('totsize'):
        parts.append('(%s)' % (mb(node['totsize'])))
    parent_count = len(node.get('parents', ()))
    if parent_count > 1:
        parts.append('/%s refs' % (parent_count))
    return " ".join(parts)
def save_model(self, request, obj, form, change):
    """Save model for every language so that field auto-population
    is done for each of them.
    """
    super(DisplayableAdmin, self).save_model(request, obj, form, change)
    if settings.USE_MODELTRANSLATION:
        lang = get_language()
        for code in OrderedDict(settings.LANGUAGES):
            if code != lang:
                try:
                    activate(code)
                except Exception:
                    # BUG FIX: was a bare ``except:`` which also swallowed
                    # SystemExit/KeyboardInterrupt; narrow to Exception.
                    pass
                else:
                    obj.save()
        # Restore the language that was active on entry.
        activate(lang)
def getPlugItObject(hproPk):
from hprojects.models import HostedProject
try:
hproject = HostedProject.objects.get(pk=hproPk)
except (HostedProject.DoesNotExist, ValueError):
try:
hproject = HostedProject.objects.get(plugItCustomUrlKey=hproPk)
except HostedProject.DoesNotExist:
raise Http404
if hproject.plugItURI == '' and not hproject.runURI:
raise Http404
plugIt = PlugIt(hproject.plugItURI)
if hasattr(hproject, 'plugItCustomUrlKey') and hproject.plugItCustomUrlKey:
baseURI = reverse('plugIt.views.main', args=(hproject.plugItCustomUrlKey, ''))
else:
baseURI = reverse('plugIt.views.main', args=(hproject.pk, ''))
return (plugIt, baseURI, hproject) | Return the plugit object and the baseURI to use if not in standalone mode |
def merge_config_files(fnames):
    """Merge configuration files, preferring definitions in latter files."""
    def _read_yaml(path):
        # yaml is expected to be imported at module level.
        with open(path) as handle:
            return yaml.safe_load(handle)

    merged = _read_yaml(fnames[0])
    for path in fnames[1:]:
        for key, value in _read_yaml(path).items():
            # Dict values merge one level deep; everything else overwrites.
            if key in merged and isinstance(merged[key], dict):
                merged[key].update(value)
            else:
                merged[key] = value
    return merged
def by_name(self, country, language="en"):
with override(language):
for code, name in self:
if name.lower() == country.lower():
return code
if code in self.OLD_NAMES:
for old_name in self.OLD_NAMES[code]:
if old_name.lower() == country.lower():
return code
return "" | Fetch a country's ISO3166-1 two letter country code from its name.
An optional language parameter is also available.
Warning: This depends on the quality of the available translations.
If no match is found, returns an empty string.
..warning:: Be cautious about relying on this returning a country code
(especially with any hard-coded string) since the ISO names of
countries may change over time. |
def require_login(func):
    """Function wrapper to signalize that a login is required."""
    @wraps(func)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth:
            return authenticate()
        user = session.query(User).filter(User.name == auth.username).first()
        # Reject unknown users and bad passwords with the same challenge.
        if not (user and user.check(auth.password)):
            return authenticate()
        g.user = user
        return func(*args, **kwargs)
    return decorated
def object_version_choices(obj):
choices = BLANK_CHOICE_DASH + [(PublishAction.UNPUBLISH_CHOICE, 'Unpublish current version')]
if obj is not None:
saved_versions = Version.objects.filter(
content_type=ContentType.objects.get_for_model(obj),
object_id=obj.pk,
).exclude(
version_number=None,
)
for version in saved_versions:
choices.append((version.version_number, version))
return choices | Return a list of form choices for versions of this object which can be published. |
async def nodes(self, *,
dc=None, near=None, watch=None, consistency=None):
params = {"dc": dc, "near": near}
response = await self._api.get("/v1/catalog/nodes",
params=params,
watch=watch,
consistency=consistency)
return consul(response) | Lists nodes in a given DC
Parameters:
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
near (str): Sort the node list in ascending order based on the
estimated round trip time from that node.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
CollectionMeta: where value is a list
It returns a body like this::
[
{
"Node": "baz",
"Address": "10.1.10.11",
"TaggedAddresses": {
"lan": "10.1.10.11",
"wan": "10.1.10.11"
}
},
{
"Node": "foobar",
"Address": "10.1.10.12",
"TaggedAddresses": {
"lan": "10.1.10.11",
"wan": "10.1.10.12"
}
}
] |
def _iter_enum_constant_ids(eid):
    """Iterate the constant IDs of all members in the given enum."""
    for mask in _iter_bitmasks(eid):
        for member_value in _iter_enum_member_values(eid, mask):
            for const_id, _serial in _iter_serial_enum_member(eid, member_value, mask):
                yield const_id
def body_content(self, election_day, body, division=None):
from electionnight.models import PageType
body_type = ContentType.objects.get_for_model(body)
page_type = PageType.objects.get(
model_type=body_type,
election_day=election_day,
body=body,
jurisdiction=body.jurisdiction,
division_level=body.jurisdiction.division.level,
)
page_type_content = self.get(
content_type=ContentType.objects.get_for_model(page_type),
object_id=page_type.pk,
election_day=election_day,
)
kwargs = {
"content_type__pk": body_type.pk,
"object_id": body.pk,
"election_day": election_day,
}
if division:
kwargs["division"] = division
content = self.get(**kwargs)
return {
"site": self.site_content(election_day)["site"],
"page_type": self.serialize_content_blocks(page_type_content),
"page": self.serialize_content_blocks(content),
"featured": [
e.meta.ap_election_id for e in content.featured.all()
],
} | Return serialized content for a body page. |
def sh(self, cmd, ignore_error=False, cwd=None, shell=False, **kwargs):
kwargs.update({
'shell': shell,
'cwd': cwd or self.fpath,
'stderr': subprocess.STDOUT,
'stdout': subprocess.PIPE,
'ignore_error': ignore_error})
log.debug((('cmd', cmd), ('kwargs', kwargs)))
return sh(cmd, **kwargs) | Run a command with the current working directory set to self.fpath
Args:
cmd (str or tuple): cmdstring or listlike
Keyword Arguments:
ignore_error (bool): if False, raise an Exception if p.returncode is
not 0
cwd (str): current working dir to run cmd with
shell (bool): subprocess.Popen ``shell`` kwarg
Returns:
str: stdout output of wrapped call to ``sh`` (``subprocess.Popen``) |
def run_queues(self):
    """Run all queues that have data queued.

    Re-raises any stored exception; otherwise returns True when at least
    one listener processed data.
    """
    if self.exception:
        raise self.exception
    return sum(listener.process() for listener in self.__listeners_for_thread) > 0
def index(self):
self._event_error = False
try:
compilable_files = self.finder.mirror_sources(
self.settings.SOURCES_PATH,
targetdir=self.settings.TARGET_PATH,
excludes=self.settings.EXCLUDES
)
self.compilable_files = dict(compilable_files)
self.source_files = self.compilable_files.keys()
self.inspector.reset()
self.inspector.inspect(
*self.source_files,
library_paths=self.settings.LIBRARY_PATHS
)
except BoussoleBaseException as e:
self._event_error = True
self.logger.error(six.text_type(e)) | Reset inspector buffers and index project sources dependencies.
This have to be executed each time an event occurs.
Note:
If a Boussole exception occurs during operation, it will be catched
and an error flag will be set to ``True`` so event operation will
be blocked without blocking or breaking watchdog observer. |
def get_news_aggregation(self):
    """Calling News Aggregation API.

    Return:
        json data
    """
    url = self.api_path + "news_aggregation" + "/"
    return self.get_response(url)
def abbrev(self, dev_suffix=""):
    """Abbreviated string representation, optionally declaring whether it is
    a development version.
    """
    base = '.'.join(str(part) for part in self.release)
    # Append the dev suffix for any unreleased (post-commit or dirty) state.
    if self.commit_count > 0 or self.dirty:
        return base + dev_suffix
    return base
def keypair():
fields = [
('User ID', 'user_id'),
('Access Key', 'access_key'),
('Secret Key', 'secret_key'),
('Active?', 'is_active'),
('Admin?', 'is_admin'),
('Created At', 'created_at'),
('Last Used', 'last_used'),
('Res.Policy', 'resource_policy'),
('Rate Limit', 'rate_limit'),
('Concur.Limit', 'concurrency_limit'),
('Concur.Used', 'concurrency_used'),
]
with Session() as session:
try:
kp = session.KeyPair(session.config.access_key)
info = kp.info(fields=(item[1] for item in fields))
except Exception as e:
print_error(e)
sys.exit(1)
rows = []
for name, key in fields:
rows.append((name, info[key]))
print(tabulate(rows, headers=('Field', 'Value'))) | Show the server-side information of the currently configured access key. |
async def close_interface(self, client_id, conn_string, interface):
    """Close a device interface on behalf of a client.

    See :meth:`AbstractDeviceAdapter.close_interface`.

    Args:
        client_id (str): The client we are working for.
        conn_string (str): Connection string passed to the underlying
            device adapter.
        interface (str): The name of the interface to close.

    Raises:
        DeviceServerError: Problem with the client_id (e.g. not connected).
        DeviceAdapterError: The adapter had an issue closing the interface.
    """
    conn_id = self._client_connection(client_id, conn_string)
    await self.adapter.close_interface(conn_id, interface)
    # Let subclasses observe the close after the adapter has finished.
    self._hook_close_interface(conn_string, interface, client_id)
def parse(self, filepath, content):
    """Parse opened settings content using YAML parser.

    Args:
        filepath (str): Settings object, depends from backend.
        content (str): Settings content from opened file, depends from
            backend.

    Raises:
        boussole.exceptions.SettingsBackendError: If parser can not decode
            a valid YAML object.

    Returns:
        dict: Dictionnary containing parsed setting elements.
    """
    try:
        # SECURITY/API FIX: yaml.load() without an explicit Loader can
        # construct arbitrary objects and requires a Loader in PyYAML>=6;
        # safe_load() only builds basic Python types.
        parsed = yaml.safe_load(content)
    except yaml.YAMLError as exc:
        msg = "No YAML object could be decoded from file: {}\n{}"
        raise SettingsBackendError(msg.format(filepath, exc))
    return parsed
def overlaps_range(self, begin, end):
    """Returns whether some interval in the tree overlaps the given range.

    Returns False for an empty tree or a null interval (begin >= end).
    Completes in O(r*log n) time, where r is the range length and n is
    the table size.

    :rtype: bool
    """
    if self.is_empty() or begin >= end:
        return False
    if self.overlaps_point(begin):
        return True
    # Any boundary strictly inside the range that is itself overlapped
    # means some interval crosses into [begin, end).
    for bound in self.boundary_table:
        if begin < bound < end and self.overlaps_point(bound):
            return True
    return False
def append_station(self, params, stationFile=''):
if self.new_format:
if stationFile:
st_file = stationFile
else:
st_file = self.stations_file
st_file, ret = self._get_playlist_abspath_from_data(st_file)
if ret < -1:
return ret
try:
with open(st_file, 'a') as cfgfile:
writter = csv.writer(cfgfile)
writter.writerow(params)
return 0
except:
return -5
else:
self.stations.append([ params[0], params[1], params[2] ])
self.dirty_playlist = True
st_file, ret = self._get_playlist_abspath_from_data(stationFile)
if ret < -1:
return ret
ret = self.save_playlist_file(st_file)
if ret < 0:
ret -= 4
return ret | Append a station to csv file
return 0: All ok
-2 - playlist not found
-3 - negative number specified
-4 - number not found
-5: Error writing file
-6: Error renaming file |
def update_fid_list(self, filename, N):
    """Update file indices attribute `altimetry.data.hydro_data.fileid`."""
    self.filelist_count[self.filelist.index(filename)] = N
    # Select fid entries whose paired filelist basename matches *filename*.
    matches = [pair[1][0] == os.path.basename(filename)
               for pair in enumerate(zip(*(self.filelist, self.fid_list)))]
    fid = self.fid_list.compress(matches)
    self.__dict__.update({'fileid': np.append(self.fileid, np.repeat(fid, N))})
def upsert(self, key, value, cas=0, ttl=0, format=None,
persist_to=0, replicate_to=0):
return _Base.upsert(self, key, value, cas=cas, ttl=ttl,
format=format, persist_to=persist_to,
replicate_to=replicate_to) | Unconditionally store the object in Couchbase.
:param key:
The key to set the value with. By default, the key must be
either a :class:`bytes` or :class:`str` object encodable as
UTF-8. If a custom `transcoder` class is used (see
:meth:`~__init__`), then the key object is passed directly
to the transcoder, which may serialize it how it wishes.
:type key: string or bytes
:param value: The value to set for the key.
This should be a native Python value which will be transparently
serialized to JSON by the library. Do not pass already-serialized
JSON as the value or it will be serialized again.
If you are using a different `format` setting (see `format`
parameter), and/or a custom transcoder then value for this
argument may need to conform to different criteria.
:param int cas: The _CAS_ value to use. If supplied, the value
will only be stored if it already exists with the supplied
CAS
:param int ttl: If specified, the key will expire after this
many seconds
:param int format: If specified, indicates the `format` to use
when encoding the value. If none is specified, it will use
the `default_format` For more info see
:attr:`~.default_format`
:param int persist_to:
Perform durability checking on this many nodes nodes for
persistence to disk. See :meth:`endure` for more information
:param int replicate_to: Perform durability checking on this
many replicas for presence in memory. See :meth:`endure` for
more information.
:raise: :exc:`.ArgumentError` if an argument is supplied that is
not applicable in this context. For example setting the CAS
as a string.
:raise: :exc`.CouchbaseNetworkError`
:raise: :exc:`.KeyExistsError` if the key already exists on the
server with a different CAS value.
:raise: :exc:`.ValueFormatError` if the value cannot be
serialized with chosen encoder, e.g. if you try to store a
dictionary in plain mode.
:return: :class:`~.Result`.
Simple set::
cb.upsert('key', 'value')
Force JSON document format for value::
cb.upsert('foo', {'bar': 'baz'}, format=couchbase.FMT_JSON)
Insert JSON from a string::
JSONstr = '{"key1": "value1", "key2": 123}'
JSONobj = json.loads(JSONstr)
cb.upsert("documentID", JSONobj, format=couchbase.FMT_JSON)
Force UTF8 document format for value::
cb.upsert('foo', "<xml></xml>", format=couchbase.FMT_UTF8)
Perform optimistic locking by specifying last known CAS version::
cb.upsert('foo', 'bar', cas=8835713818674332672)
Several sets at the same time (mutli-set)::
cb.upsert_multi({'foo': 'bar', 'baz': 'value'})
.. seealso:: :meth:`upsert_multi` |
def _fix_orig_vcf_refs(data):
    """Supply references to initial variantcalls if run in addition to batching."""
    variantcaller = tz.get_in(("config", "algorithm", "variantcaller"), data)
    if variantcaller:
        data["vrn_file_orig"] = data["vrn_file"]
    for idx, sample in enumerate(data.get("group_orig", [])):
        # Move each sample's vrn_file aside so the original call is preserved.
        original_vrn = sample.pop("vrn_file", None)
        if original_vrn:
            sample["vrn_file_orig"] = original_vrn
        data["group_orig"][idx] = sample
    return data
def equivalent_relshell_type(val):
    """Returns `val`'s relshell compatible type.

    :param val: value to check relshell equivalent type
    :raises: `NotImplementedError` if val's relshell compatible type is not
        implemented.
    """
    builtin_type = type(val)
    if builtin_type not in Type._typemap:
        raise NotImplementedError(
            "builtin type %s is not convertible to relshell type" % (builtin_type))
    return Type(Type._typemap[builtin_type])
def _wrap_attribute(self, attr):
    """Wrap the empty attributes of the Slice in a Const node."""
    if attr:
        return attr
    # Empty/falsy attrs become Const nodes parented to this Slice.
    wrapped = const_factory(attr)
    wrapped.parent = self
    return wrapped
def add_poly_index(self, i, j, k):
    """Add 3 ``SCALAR`` of ``uint`` to the ``idx_data`` buffer."""
    packed = b''.join(struct.pack(self.idx_fmt, value) for value in (i, j, k))
    self.idx_data.write(packed)
def tag_values(request):
data = defaultdict(lambda: {"values": {}})
for tag in Tag.objects.filter(lang=get_language(request)):
data[tag.type]["name"] = tag.type_name
data[tag.type]["values"][tag.value] = tag.value_name
return render_json(request, data, template='concepts_json.html', help_text=tag_values.__doc__) | Get tags types and values with localized names
language:
language of tags |
def _path_is_abs(path):
if path is None:
return True
try:
return os.path.isabs(path)
except AttributeError:
return False | Return a bool telling whether or ``path`` is absolute. If ``path`` is None,
return ``True``. This function is designed to validate variables which
optionally contain a file path. |
def bound_bboxes(bboxes):
    """Finds the minimal bbox that contains all given bboxes."""
    # x0/y0/x1/y1 are module-level index constants into each bbox sequence.
    min_x0 = min(box[x0] for box in bboxes)
    min_y0 = min(box[y0] for box in bboxes)
    max_x1 = max(box[x1] for box in bboxes)
    max_y1 = max(box[y1] for box in bboxes)
    return (min_x0, min_y0, max_x1, max_y1)
def _measure(self, weighted):
return (
self._measures.means
if self._measures.means is not None
else self._measures.weighted_counts
if weighted
else self._measures.unweighted_counts
) | _BaseMeasure subclass representing primary measure for this cube.
If the cube response includes a means measure, the return value is
means. Otherwise it is counts, with the choice between weighted or
unweighted determined by *weighted*.
Note that weighted counts are provided on an "as-available" basis.
When *weighted* is True and the cube response is not weighted,
unweighted counts are returned. |
def set_fd_value(tag, value):
    """Setter for double data that also works with implicit transfersyntax.

    :param tag: the tag to write to
    :param value: the value to set on the tag (packed to raw bytes when the
        tag's VR is OB or UN)
    """
    if tag.VR in ('OB', 'UN'):
        tag.value = struct.pack('d', value)
    else:
        tag.value = value
def printrdf(wflow, ctx, style):
rdf = gather(wflow, ctx).serialize(format=style, encoding='utf-8')
if not rdf:
return u""
return rdf.decode('utf-8') | Serialize the CWL document into a string, ready for printing. |
def composite(
background_image,
foreground_image,
foreground_width_ratio=0.25,
foreground_position=(0.0, 0.0),
):
if foreground_width_ratio <= 0:
return background_image
composite = background_image.copy()
width = int(foreground_width_ratio * background_image.shape[1])
foreground_resized = resize(foreground_image, width)
size = foreground_resized.shape
x = int(foreground_position[1] * (background_image.shape[1] - size[1]))
y = int(foreground_position[0] * (background_image.shape[0] - size[0]))
composite[y : y + size[0], x : x + size[1]] = foreground_resized
return composite | Takes two images and composites them. |
def _validate_instance(self, instance, errors, path_prefix=''):
if not isinstance(instance, dict):
errors[path_prefix] = "Expected instance of dict to validate against schema."
return
self._apply_validations(errors, path_prefix, self._validates, instance)
for field, spec in self.doc_spec.iteritems():
path = self._append_path(path_prefix, field)
if field in instance:
self._validate_value(instance[field], spec, path, errors)
else:
if spec.get('required', False):
errors[path] = "{} is required.".format(path)
if self._strict:
for field in instance:
if field not in self.doc_spec:
errors[self._append_path(path_prefix, field)] = "Unexpected document field not present in schema" | Validates that the given instance of a document conforms to the given schema's
structure and validations. Any validation errors are added to the given errors
collection. The caller should assume the instance is considered valid if the
errors collection is empty when this method returns. |
def get_visible_elements(self, locator, params=None, timeout=None):
    """Get elements both present AND visible in the DOM.

    If timeout is 0 (zero) return WebElement instance or None, else we wait
    and retry for timeout and raise TimeoutException should the element not
    be found.

    :param locator: locator tuple
    :param params: (optional) locator params
    :param timeout: (optional) time to wait for element (default: self._explicit_wait)
    :return: WebElement instance
    """
    # Delegate to the "present" lookup with the visibility flag set.
    return self.get_present_elements(locator, params, timeout, True)
def build(self, bug: Bug):
    """Instructs the server to build the Docker image associated with a given
    bug.
    """
    response = self.__api.post('bugs/{}/build'.format(bug.name))
    if response.status_code == 204:
        return
    if response.status_code == 200:
        raise Exception("bug already built: {}".format(bug.name))
    if response.status_code == 400:
        raise Exception("build failure")
    if response.status_code == 404:
        raise KeyError("no bug found with given name: {}".format(bug.name))
    # Anything else is an unexpected server response.
    self.__api.handle_erroneous_response(response)
def get_default(self):
    """Return the default VLAN from the set.

    None when empty, the sole member when there is exactly one, otherwise
    the member with the lowest ``id``.
    """
    if len(self) == 0:
        return None
    if len(self) == 1:
        return self[0]
    return min(self, key=attrgetter('id'))
def delete_subnet(self, subnet):
    """Deletes the specified subnet.

    Returns the connection's response when truthy, otherwise True.
    """
    subnet_id = self._find_subnet_id(subnet)
    result = self.network_conn.delete_subnet(subnet=subnet_id)
    return result or True
def install_database(name, owner, template='template0', encoding='UTF8', locale='en_US.UTF-8'):
    """Require a PostgreSQL database.

    ::

        from fabtools import require
        require.postgres.database('myapp', owner='dbuser')
    """
    create_database(name, owner, template=template, encoding=encoding,
                    locale=locale)
def select_cb(self, viewer, event, data_x, data_y):
if not (self._cmxoff <= data_x < self._cmwd):
return
i = int(data_y / (self._cmht + self._cmsep))
if 0 <= i < len(self.cm_names):
name = self.cm_names[i]
msg = "cmap => '%s'" % (name)
self.logger.info(msg)
channel = self.fv.get_channel_info()
if channel is not None:
viewer = channel.fitsimage
viewer.set_color_map(name) | Called when the user clicks on the color bar viewer.
Calculate the index of the color bar they clicked on and
set that color map in the current channel viewer. |
def is_enable(self, plugin_name=None):
    """Return True if the named plugin (default: this plugin) is enabled.

    A plugin counts as enabled when the parsed arguments have no
    ``disable_<name>`` attribute at all, or when that attribute is
    exactly ``False``.
    """
    if not plugin_name:
        plugin_name = self.plugin_name
    try:
        d = getattr(self.args, 'disable_' + plugin_name)
    except AttributeError:
        # No disable flag defined: treat the plugin as enabled.
        return True
    else:
        # NOTE(review): `d is False` yields False when d is None/0 —
        # presumably the flag is always a real boolean; confirm.
        return d is False
def iter_notifications(self, all=False, participating=False, number=-1,
                       etag=None):
    """Iterate over the user's notifications.

    :param bool all: (optional), iterate over all notifications
    :param bool participating: (optional), only iterate over notifications
        in which the user is participating
    :param int number: (optional), how many notifications to return
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of
        :class:`Thread <github3.notifications.Thread>`
    """
    params = None
    # `all` takes precedence over `participating`; at most one filter is sent.
    if all:
        params = {'all': all}
    elif participating:
        params = {'participating': participating}
    url = self._build_url('notifications')
    return self._iter(int(number), url, Thread, params, etag=etag)
def add(self, sid, token):
    """Add a new sensor to the database.

    Parameters
    ----------
    sid : str
        Sensor id.
    token : str
        Token associated with the sensor.
    """
    try:
        self.dbcur.execute(SQL_SENSOR_INS, (sid, token))
    except sqlite3.IntegrityError:
        # The sensor already exists (unique constraint); keep the
        # existing row and ignore the duplicate insert.
        pass
def data(self):
    """Return ``self.json`` loaded as a Python object, caching the result
    in ``self._data`` on first access."""
    try:
        return self._data
    except AttributeError:
        # First access: parse and memoize.
        self._data = json.loads(self.json)
        return self._data
def service(self, service):
    """Set the service of this TrustedCertificateInternalResp.

    Service name where the certificate is to be used.

    :param service: The service of this TrustedCertificateInternalResp.
    :type: str
    :raises ValueError: if service is None or not an allowed name.
    """
    if service is None:
        raise ValueError("Invalid value for `service`, must not be `None`")
    # Only these two service names are accepted by the API.
    allowed_values = ["lwm2m", "bootstrap"]
    if service not in allowed_values:
        raise ValueError(
            "Invalid value for `service` ({0}), must be one of {1}"
            .format(service, allowed_values)
        )
    self._service = service
def _set_initial(self, C_in, scale_in):
    r"""Set the initial values for parameters and Wilson coefficients at
    the scale `scale_in`.
    """
    self.C_in = C_in
    self.scale_in = scale_in
def make_key(observer):
    """Construct a unique, hashable, immutable key for an observer.

    Bound methods are keyed by ``(id(instance), method name)`` so that two
    bound-method objects for the same method compare equal; anything else
    is keyed by its own identity.
    """
    try:
        bound_instance = observer.__self__
    except AttributeError:
        return id(observer)
    return (id(bound_instance), observer.__name__)
def i2c_config(self, read_delay_time=0):
    """Configure Arduino i2c with an optional read delay time.

    :param read_delay_time: firmata i2c delay time
    :returns: No return value
    """
    # Schedule the core coroutine and block until configuration completes.
    task = asyncio.ensure_future(self.core.i2c_config(read_delay_time))
    self.loop.run_until_complete(task)
def make_pdb(self, alt_states=False, inc_ligands=True):
    """Generate a PDB string for the `Polymer`.

    Parameters
    ----------
    alt_states : bool, optional
        Include alternate conformations for `Monomers` in PDB.
    inc_ligands : bool, optional
        Includes `Ligands` in PDB.

    Returns
    -------
    pdb_str : str
        String of the pdb for the `Polymer`. Generated using information
        from the component `Monomers`.
    """
    # Ensure every monomer carries an id before writing the PDB.
    if any([False if x.id else True for x in self._monomers]):
        self.relabel_monomers()
    if self.ligands and inc_ligands:
        monomers = self._monomers + self.ligands._monomers
    else:
        monomers = self._monomers
    pdb_str = write_pdb(monomers, self.id, alt_states=alt_states)
    return pdb_str
def make_multipart(self, content_disposition=None, content_type=None,
                   content_location=None):
    """Make this request field into a multipart request field.

    This method overrides the "Content-Disposition", "Content-Type" and
    "Content-Location" headers of this field.

    :param content_disposition:
        The 'Content-Disposition' value; defaults to 'form-data'.
    :param content_type:
        The 'Content-Type' of the request body.
    :param content_location:
        The 'Content-Location' of the request body.
    """
    self.headers['Content-Disposition'] = content_disposition or 'form-data'
    # Append '; name="..."; filename="..."' rendered from this field's
    # name/filename parts (the leading '' yields the initial '; ').
    self.headers['Content-Disposition'] += '; '.join([
        '', self._render_parts(
            (('name', self._name), ('filename', self._filename))
        )
    ])
    self.headers['Content-Type'] = content_type
    self.headers['Content-Location'] = content_location
def LogClientError(self, client_id, log_message=None, backtrace=None):
    """Log an error for a client by registering it in the data store."""
    self.RegisterClientError(
        client_id, log_message=log_message, backtrace=backtrace)
def get_format_spec(self):
    """Return the str.format specification built from this object's
    `align` and `width` attributes (e.g. ``"{:>7}"``)."""
    return u"{{:{0}{1}}}".format(self.align, self.width)
def clean_column_names(df: DataFrame) -> DataFrame:
    """Return a copy of *df* whose column labels have surrounding
    whitespace stripped; the input frame is left untouched."""
    # rename() with a callable maps every column label and returns a copy.
    return df.rename(columns=lambda label: label.strip())
def eval(self):
    """Evaluate this input token and return the actual filenames it
    represents.

    If the token joins multiple independent files with 'or', return a list
    of `Input` objects, one per file; otherwise return the filenames joined
    into a single space-separated string.
    """
    if self.and_or == 'or':
        # Each file becomes its own independent 'and' input for this alias.
        return [Input(self.alias, file, self.cwd, 'and')
                for file in self.files]
    return ' '.join(self.files)
def send_response(self, transaction):
    """Finalize adding/refreshing the client in the list of observers.

    :type transaction: Transaction
    :param transaction: the transaction that owns the response
    :return: the transaction unmodified
    """
    host, port = transaction.request.source
    # Observe relations are keyed by client address plus request token.
    key_token = hash(str(host) + str(port) + str(transaction.request.token))
    if key_token in self._relations:
        if transaction.response.code == defines.Codes.CONTENT.number:
            if transaction.resource is not None and transaction.resource.observable:
                # Successful response on an observable resource: refresh
                # the relation with the latest transaction state.
                transaction.response.observe = transaction.resource.observe_count
                self._relations[key_token].allowed = True
                self._relations[key_token].transaction = transaction
                self._relations[key_token].timestamp = time.time()
            else:
                # Resource missing or not observable: drop the relation.
                del self._relations[key_token]
        elif transaction.response.code >= defines.Codes.ERROR_LOWER_BOUND:
            # Error responses cancel the observation.
            del self._relations[key_token]
    return transaction
def observable_copy(value, name, instance):
    """Return an observable container wrapping *value* for HasProperties
    notifications.

    A dynamic subclass of the value's container class is created (and
    cached in OBSERVABLE_REGISTRY) whose mutating methods notify the
    owning HasProperties *instance*. The returned object is a copy of
    *value* as that class and behaves identically to the original except
    that mutations are reported to *instance*.
    """
    container_class = value.__class__
    if container_class in OBSERVABLE_REGISTRY:
        # Known container type: reuse its cached observable counterpart.
        observable_class = OBSERVABLE_REGISTRY[container_class]
    elif container_class in OBSERVABLE_REGISTRY.values():
        # The value is already of an observable class: use it directly.
        observable_class = container_class
    else:
        # First encounter: build a subclass (via the container's own
        # metaclass) with mutation callbacks attached, then register it.
        observable_class = add_properties_callbacks(
            type(container_class)(
                str('Observable{}'.format(container_class.__name__)),
                (container_class,),
                MUTATOR_CATEGORIES,
            )
        )
        OBSERVABLE_REGISTRY[container_class] = observable_class
    value = observable_class(value)
    # The wrapper needs to know which property/instance to notify.
    value._name = name
    value._instance = instance
    return value
def CompareTo(self, other_hash_value):
    """Compare this object's hash value with *other_hash_value*.

    Values are compared element-wise starting from the LAST element
    (most significant) down to the first.

    :return: -1 if self's hash is smaller, 1 if larger, 0 if equal.
    :raises ValueError: if the two hash values differ in length.
    """
    if len(self.hash_value) != len(other_hash_value):
        raise ValueError("Length of hashes doesn't match.")
    # The original used `xrange`, which is Python 2 only (NameError on
    # Python 3), and recomputed `len(...) - i - 1` four times per step.
    # Iterating both sequences most-significant-first is equivalent.
    for own, other in zip(reversed(self.hash_value), reversed(other_hash_value)):
        if own < other:
            return -1
        if own > other:
            return 1
    return 0
def aggregate(self, val1, val2):
    """Aggregate two event values using the configured aggregator.

    Both values must be non-None.
    """
    assert val1 is not None
    assert val2 is not None
    combined = self._aggregator(val1, val2)
    return combined
def normalize_url(url):
    """Return *url* after stripping a trailing '.json' and then any
    trailing slash (each at most once, in that order)."""
    for suffix in ('.json', '/'):
        if url.endswith(suffix):
            url = url[:-len(suffix)]
    return url
def validate_group(images):
    """Validate that the combination of folder and name is unique for all
    images in a group.

    Parameters
    ----------
    images : List(GroupImage)
        List of images in group.

    Raises
    ------
    ValueError
        If two images share the same folder + name combination.
    """
    seen = set()
    for image in images:
        composite = image.folder + image.name
        if composite in seen:
            raise ValueError('Duplicate images in group: ' + composite)
        seen.add(composite)
def register_deregister(notifier, event_type, callback=None,
                        args=None, kwargs=None, details_filter=None,
                        weak=False):
    """Context manager that registers a callback, then deregisters on exit.

    NOTE(harlowja): if the callback is none, then this registers nothing,
    which is different from the behavior of the ``register`` method which
    will *not* accept none as it is not callable...
    """
    if callback is None:
        # Nothing to register; still yield so the with-block runs.
        yield
    else:
        notifier.register(event_type, callback,
                          args=args, kwargs=kwargs,
                          details_filter=details_filter,
                          weak=weak)
        try:
            yield
        finally:
            # Always undo the registration, even if the body raised.
            notifier.deregister(event_type, callback,
                                details_filter=details_filter)
def ReadAllFlowRequestsAndResponses(self, client_id, flow_id):
    """Read all requests and responses for a given flow from the database.

    Returns a list of (request, responses-dict) pairs ordered by request
    id, or an empty list if the flow is unknown.
    """
    flow_key = (client_id, flow_id)
    try:
        # Membership probe only; the value itself is not needed.
        self.flows[flow_key]
    except KeyError:
        return []
    request_dict = self.flow_requests.get(flow_key, {})
    response_dict = self.flow_responses.get(flow_key, {})
    res = []
    for request_id in sorted(request_dict):
        res.append((request_dict[request_id], response_dict.get(request_id, {})))
    return res
def remove_log_group(self, group_name):
    """Delete the log group with the given name via the logs client.

    Removal is best-effort: API errors are reported but not raised.
    (The previous docstring, "Filter all log groups...", described a
    different function.)
    """
    print("Removing log group: {}".format(group_name))
    try:
        self.logs_client.delete_log_group(logGroupName=group_name)
    except botocore.exceptions.ClientError as e:
        # Report and continue rather than aborting the cleanup run.
        print("Couldn't remove '{}' because of: {}".format(group_name, e))
def sudo_support(fn, command):
    """Remove a leading ``sudo `` from the command before calling *fn*,
    and add ``sudo`` back onto whatever *fn* returns.

    Commands that do not start with ``sudo `` are passed through unchanged.
    """
    if not command.script.startswith('sudo '):
        return fn(command)
    # Drop the 'sudo ' prefix (5 characters) for the wrapped function.
    result = fn(command.update(script=command.script[5:]))
    if result and isinstance(result, six.string_types):
        return u'sudo {}'.format(result)
    elif isinstance(result, list):
        # Prefix every suggested replacement command.
        return [u'sudo {}'.format(x) for x in result]
    else:
        return result
def permissions(self):
    """Dynamically generate a dictionary of privacy options.

    Fields on ``self.properties`` named ``self_<perm>`` or
    ``parent_<perm>`` are collected into
    ``{"self": {<perm>: value}, "parent": {<perm>: value}}``.
    """
    permissions_dict = {"self": {}, "parent": {}}
    for field in self.properties._meta.get_fields():
        split_field = field.name.split('_', 1)
        # str.split always returns at least one element, so the old
        # `len(split_field) <= 0` guard could never fire — and a field
        # named exactly 'self' or 'parent' (no underscore) would have
        # raised IndexError on split_field[1] below. Guard on having
        # fewer than two parts instead.
        if len(split_field) < 2 or split_field[0] not in ['self', 'parent']:
            continue
        permissions_dict[split_field[0]][split_field[1]] = getattr(self.properties, field.name)
    return permissions_dict
def compute_resids(xy, uv, fit):
    """Compute the residuals between *xy* and the fit applied to *uv*."""
    print('FIT coeffs: ', fit['coeffs'])
    xn, yn = apply_fit(uv, fit['coeffs'])
    # Residual = reference positions minus fitted positions, as an
    # (n, 2) array matching xy's layout.
    resids = xy - np.transpose([xn, yn])
    return resids
def _get_initial_name(self):
    """Determine the most suitable name for the function.

    :return: The initial function name.
    :rtype: string
    """
    name = None
    addr = self.addr
    # First preference: a label recorded in the knowledge base.
    if self._function_manager is not None:
        if addr in self._function_manager._kb.labels:
            name = self._function_manager._kb.labels[addr]
    # Second preference: the display name of the hook or syscall at addr.
    if name is None and self.project is not None:
        project = self.project
        if project.is_hooked(addr):
            hooker = project.hooked_by(addr)
            name = hooker.display_name
        elif project.simos.is_syscall_addr(addr):
            syscall_inst = project.simos.syscall_from_addr(addr)
            name = syscall_inst.display_name
    # Fallback: a generic name derived from the address.
    if name is None:
        name = 'sub_%x' % addr
    return name
def ucase(inchar, lenout=None):
    """Convert the characters in a string to uppercase (CSPICE ucase_c).

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ucase_c.html

    :param inchar: Input string.
    :type inchar: str
    :param lenout: Optional maximum length of output string.
    :type lenout: int
    :return: Output string, all uppercase.
    :rtype: str
    """
    if lenout is None:
        # +1 leaves room for the C string's terminating NUL.
        lenout = len(inchar) + 1
    inchar = stypes.stringToCharP(inchar)
    # Pre-allocate the output buffer for the C call.
    outchar = stypes.stringToCharP(" " * lenout)
    lenout = ctypes.c_int(lenout)
    libspice.ucase_c(inchar, lenout, outchar)
    return stypes.toPythonString(outchar)
def update_user_password(new_pwd_user_id, new_password, **kwargs):
    """Update a user's password, storing it bcrypt-hashed.

    :param new_pwd_user_id: id of the user to update.
    :param new_password: the new plaintext password (hashed before storage).
    :return: the updated User row.
    :raises ResourceNotFoundError: if no user with the given id exists.
    """
    try:
        user_i = db.DBSession.query(User).filter(User.id == new_pwd_user_id).one()
        # Never store the plaintext; hash with a fresh salt.
        user_i.password = bcrypt.hashpw(str(new_password).encode('utf-8'), bcrypt.gensalt())
        return user_i
    except NoResultFound:
        raise ResourceNotFoundError("User (id=%s) not found" % (new_pwd_user_id))
def form_field(self):
    """Return the appropriate form field for this filter.

    The field is built from this object's widget, label and any extra
    keyword overrides in ``self.extra``.
    """
    # NOTE(review): `unicode` is a Python 2 builtin — this code is py2-era.
    label = unicode(self)
    defaults = dict(required=False, label=label, widget=self.widget)
    # `extra` may override any of the defaults above.
    defaults.update(self.extra)
    return self.field_class(**defaults)
def get_commands(self, peer_jid):
    """Return the list of ad-hoc commands offered by the peer.

    :param peer_jid: JID of the peer to query
    :type peer_jid: :class:`~aioxmpp.JID`
    :rtype: :class:`list` of :class:`~.disco.xso.Item`
    :return: List of command items

    Each :class:`~.disco.xso.Item` in the returned list represents one
    command supported by the peer. Its :attr:`~.disco.xso.Item.node`
    attribute is the command identifier usable with
    :meth:`get_command_info` and :meth:`execute`.
    """
    disco = self.dependencies[aioxmpp.disco.DiscoClient]
    # Query the XEP-0050 commands node via service discovery.
    response = yield from disco.query_items(
        peer_jid,
        node=namespaces.xep0050_commands,
    )
    return response.items
def execute(self, lst):
    """Execute the series of filters, in order, on the provided list.

    @param lst <list / a QueryableList type> - The list to filter. If you
        already know the types of items within the list, pick a
        QueryableList implementing class for faster results; any other
        list type is wrapped in QueryableListMixed (supports both
        object-like and dict-like items).
    @return - QueryableList of results; the same type as the input if it
        was already a QueryableList, otherwise QueryableListMixed.
    """
    from . import QueryableListMixed
    if not issubclass(lst.__class__, QueryableListBase):
        lst = QueryableListMixed(lst)
    # Work on a copy so this Filter object can be executed again later.
    filters = copy.copy(self.filters)
    nextFilter = filters.popleft()
    while nextFilter:
        (filterMethod, filterArgs) = nextFilter
        lst = self._applyFilter(lst, filterMethod, filterArgs)
        if len(lst) == 0:
            # Nothing can match once the list is empty; short-circuit.
            return lst
        try:
            nextFilter = filters.popleft()
        except IndexError:
            # deque.popleft raises IndexError when exhausted. The original
            # bare `except:` would also have swallowed KeyboardInterrupt
            # and friends.
            break
    return lst
def read_raw(self, length, *, error=None):
    """Read raw packet data.

    :param length: number of bytes to read; None means the remainder.
    :param error: optional error info to attach to the result.
    :return: dict with the raw ``packet`` bytes and an ``error`` entry.
    """
    if length is None:
        length = len(self)
    raw = dict(
        packet=self._read_fileng(length),
        # Normalise falsy error values to None.
        error=error or None,
    )
    return raw
def search(self, searchstring):
    """Search the backend for users whose search attributes match.

    :param searchstring: the search string (used as a regular expression)
    :type searchstring: string
    :rtype: dict of dict ( {<user attr key>: {<attr>: <value>}} )
    """
    ret = {}
    for user in self.users:
        match = False
        for attr in self.search_attrs:
            if attr not in self.users[user]:
                # User has no such attribute; nothing to match against.
                pass
            # NOTE(review): the trailing '.*' is redundant (re.search
            # already matches anywhere) and searchstring is used as an
            # unescaped regex pattern — confirm that is intended.
            elif re.search(searchstring + '.*', self.users[user][attr]):
                match = True
        if match:
            ret[user] = self.users[user]
    return ret
def add_scan_host_detail(self, scan_id, host='', name='', value=''):
    """Add a host-detail result to the scan identified by *scan_id*."""
    self.scan_collection.add_result(scan_id, ResultType.HOST_DETAIL, host,
                                    name, value)
def _verify(self):
    """Verify the specified scenario was found; exit with a message if not.

    :return: None
    """
    scenario_names = [c.scenario.name for c in self._configs]
    if self._scenario_name not in scenario_names:
        msg = ("Scenario '{}' not found. "
               'Exiting.').format(self._scenario_name)
        # Terminates the process with the error message.
        util.sysexit_with_message(msg)
def _get_ssh_public_key(self):
    """Generate the SSH public key from the private key file and return it
    formatted as ``<user>:<key> <user>``."""
    key = ipa_utils.generate_public_ssh_key(self.ssh_private_key_file)
    # generate_public_ssh_key returns bytes; decode before formatting.
    return '{user}:{key} {user}'.format(
        user=self.ssh_user,
        key=key.decode()
    )
def column_correlations(self, X):
    """Return the correlations of each column of *X* with each principal
    component, as a DataFrame indexed by feature and keyed by component.
    """
    # Requires a fitted estimator (the `s_` attribute).
    utils.validation.check_is_fitted(self, 's_')
    if isinstance(X, np.ndarray):
        X = pd.DataFrame(X)
    row_pc = self.row_coordinates(X)
    # One Pearson correlation per (component, feature) pair.
    return pd.DataFrame({
        component: {
            feature: row_pc[component].corr(X[feature])
            for feature in X.columns
        }
        for component in row_pc.columns
    })
def iter_tags(self, number=-1, etag=None):
    """Iterate over tags on this repository.

    :param int number: (optional), return up to at most number tags.
        Default: -1 returns all available tags.
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`RepoTag <github3.repos.tag.RepoTag>`\\ s
    """
    url = self._build_url('tags', base_url=self._api)
    return self._iter(int(number), url, RepoTag, etag=etag)
def relative_abundance(biomf, sampleIDs=None):
    """Calculate the relative abundance of each OTUID in a Sample.

    :type biomf: A BIOM file.
    :param biomf: OTU table format.
    :type sampleIDs: list
    :param sampleIDs: A list of sample id's from BIOM format OTU table;
        defaults to all samples in the table.
    :rtype: dict
    :return: Dict keyed on SampleIDs whose values are dicts keyed on
        OTUID's, giving the relative abundance of that OTUID in that
        SampleID.
    :raises ValueError: if any requested sample id is absent from the table.
    """
    if sampleIDs is None:
        sampleIDs = biomf.ids()
    else:
        # Validate explicitly rather than with `assert` (the original
        # asserted then caught AssertionError; asserts are stripped under
        # `python -O`, which would let bad ids through silently).
        valid_ids = set(biomf.ids())
        if not all(sid in valid_ids for sid in sampleIDs):
            raise ValueError(
                "\nError while calculating relative abundances: The sampleIDs provided do"
                " not match the sampleIDs in biom file. Please double check the sampleIDs"
                " provided.\n")
    otuIDs = biomf.ids(axis="observation")
    # Normalize to per-sample relative abundances without mutating biomf.
    norm_biomf = biomf.norm(inplace=False)
    return {sample: {otuID: norm_biomf.get_value_by_ids(otuID, sample)
                     for otuID in otuIDs} for sample in sampleIDs}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.