text stringlengths 78 104k | score float64 0 0.18 |
|---|---|
def get_responses(self):
    """Gets list of the latest responses.

    Builds one response per entry of ``self._my_map['questions']``
    (order preserved) and wraps them in a ``ResponseList``.
    """
    response_list = []
    for question_map in self._my_map['questions']:
        response_list.append(self._get_response_from_question_map(question_map))
    return ResponseList(response_list) | 0.010638 |
def install_python_package(self, arch, name=None, env=None, is_dir=True):
    '''Automate the installation of a Python package (or a cython
    package where the cython components are pre-built).

    :param arch: target arch; selects build dir and default env.
    :param name: package name; defaults to ``self.name``.
    :param env: build environment; defaults to ``self.get_recipe_env(arch)``.
    :param is_dir: NOTE(review): appears unused in this body -- confirm
        callers before removing.
    '''
    # arch = self.filtered_archs[0] # old kivy-ios way
    if name is None:
        name = self.name
    if env is None:
        env = self.get_recipe_env(arch)
    info('Installing {} into site-packages'.format(self.name))
    with current_directory(self.get_build_dir(arch.arch)):
        hostpython = sh.Command(self.hostpython_location)
        if self.ctx.python_recipe.name != 'python2legacy':
            # Modern python recipe: install with modules directly at the
            # root of the python install dir ('--install-lib=.').
            hpenv = env.copy()
            shprint(hostpython, 'setup.py', 'install', '-O2',
                    '--root={}'.format(self.ctx.get_python_install_dir()),
                    '--install-lib=.',
                    _env=hpenv, *self.setup_extra_args)
        elif self.call_hostpython_via_targetpython:
            # hostpython dispatches through the target python: plain install.
            shprint(hostpython, 'setup.py', 'install', '-O2', _env=env,
                    *self.setup_extra_args)
        else: # python2legacy
            # Prepend hostpython's own site-packages so setup.py can import
            # its build-time dependencies.
            hppath = join(dirname(self.hostpython_location), 'Lib', 'site-packages')
            hpenv = env.copy()
            if 'PYTHONPATH' in hpenv:
                hpenv['PYTHONPATH'] = ':'.join([hppath] + hpenv['PYTHONPATH'].split(':'))
            else:
                hpenv['PYTHONPATH'] = hppath
            shprint(hostpython, 'setup.py', 'install', '-O2',
                    '--root={}'.format(self.ctx.get_python_install_dir()),
                    '--install-lib=lib/python2.7/site-packages',
                    _env=hpenv, *self.setup_extra_args)
        # If asked, also install in the hostpython build dir
        if self.install_in_hostpython:
            self.install_hostpython_package(arch) | 0.002092 |
def tags_published():
    """
    Return the published tags.

    Imports are kept local -- presumably to avoid circular imports at
    module load time; confirm against the app's import graph.
    """
    from tagging.models import Tag
    from zinnia.models.entry import Entry
    tags_entry_published = Tag.objects.usage_for_queryset(
        Entry.published.all())
    # Need to do that until the issue #44 of django-tagging is fixed:
    # re-query by name so real Tag instances are returned.
    return Tag.objects.filter(name__in=[t.name for t in tags_entry_published]) | 0.002611 |
def get_res(ds, t_srs=None, square=False):
    """Get GDAL Dataset raster resolution.

    ds : GDAL dataset
    t_srs : optional target SRS; if given and different from the dataset
        SRS, the resolution is re-expressed in the target SRS units.
    square : if True, use the mean of x/y resolution for both axes.

    Returns [xres, yres].
    """
    gt = ds.GetGeoTransform()
    ds_srs = get_ds_srs(ds)
    #This is Xres, Yres
    res = [gt[1], np.abs(gt[5])]
    if square:
        res = [np.mean(res), np.mean(res)]
    if t_srs is not None and not ds_srs.IsSame(t_srs):
        # NOTE(review): 'if True' is a hard-coded toggle between two
        # strategies; the 'else' (center-pixel) branch is currently dead.
        if True:
            #This diagonal approach is similar to the approach in gdaltransformer.cpp
            #Bad news for large extents near the poles
            #ullr = get_ullr(ds, t_srs)
            #diag = np.sqrt((ullr[0]-ullr[2])**2 + (ullr[1]-ullr[3])**2)
            extent = ds_extent(ds, t_srs)
            diag = np.sqrt((extent[2]-extent[0])**2 + (extent[3]-extent[1])**2)
            res = diag / np.sqrt(ds.RasterXSize**2 + ds.RasterYSize**2)
            res = [res, res]
        else:
            #Compute from center pixel
            ct = osr.CoordinateTransformation(ds_srs, t_srs)
            pt = get_center(ds)
            #Transform center coordinates
            pt_ct = ct.TransformPoint(*pt)
            #Transform center + single pixel offset coordinates
            pt_ct_plus = ct.TransformPoint(pt[0] + gt[1], pt[1] + gt[5])
            #Compute resolution in new units
            res = [pt_ct_plus[0] - pt_ct[0], np.abs(pt_ct_plus[1] - pt_ct[1])]
    return res | 0.009774 |
def search(cls, *, limit=100, page=1, properties=None, return_query=False):
    """Search for issues based on the provided filters
    Args:
        limit (`int`): Number of results to return. Default: 100
        page (`int`): Pagination offset for results. Default: 1
        properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
        of strings, in which case a boolean OR search is performed on the values
        return_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for
        sub-classes to amend the search feature with extra conditions. The calling function must handle pagination
        on its own
    Returns:
        `sqlalchemy.orm.Query` when ``return_query`` is True, otherwise a
        ``(total, list of Issue)`` tuple
    """
    qry = db.Issue.order_by(Issue.issue_id).filter(
        Issue.issue_type_id == IssueType.get(cls.issue_type).issue_type_id
    )
    if properties:
        for prop_name, value in properties.items():
            # One join per property so each (name, value) pair is matched
            # against its own IssueProperty row.
            alias = aliased(IssueProperty)
            qry = qry.join(alias, Issue.issue_id == alias.issue_id)
            # NOTE(review): isinstance(value, list) would be the idiomatic
            # check here.
            if type(value) == list:
                where_clause = []
                for item in value:
                    where_clause.append(alias.value == item)
                qry = qry.filter(
                    and_(
                        alias.name == prop_name,
                        or_(*where_clause)
                    ).self_group()
                )
            else:
                qry = qry.filter(
                    and_(
                        alias.name == prop_name,
                        alias.value == value
                    ).self_group()
                )
    if return_query:
        return qry
    # Count before applying limit/offset so `total` reflects all matches.
    total = qry.count()
    qry = qry.limit(limit)
    qry = qry.offset((page - 1) * limit if page > 1 else 0)
    return total, [cls(x) for x in qry.all()] | 0.002899 |
def xmoe2_dense(sz):
    """Series of architectural experiments on language modeling.
    Larger models than the ones above.
    All models are trained on sequences of 1024 tokens.
    We assume infinite training data, so no dropout necessary.
    We process 2^36 tokens in training = 524288 steps at batch size 128
    TODO(noam): find a large enough dataset for these experiments.
    You can use languagemodel_wiki_noref_v32k_l1k, but this is too small,
    (1 epoch = ~46000 steps) so training will cover about 11 epochs.
    Note: configurations and code are likely to change without notice.
    Run on TPU 4x4 for 524288 steps unless otherwise indicated.
    Args:
      sz: an integer
    Returns:
      a hparams
    """
    hparams = mtf_transformer.mtf_transformer_paper_lm(sz)
    # Infinite-data assumption: disable every dropout.
    hparams.attention_dropout = 0.0
    hparams.relu_dropout = 0.0
    hparams.layer_prepostprocess_dropout = 0.0
    hparams.max_length = 1024
    hparams.batch_size = 128
    hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
    hparams.learning_rate_decay_steps = 65536
    # Mesh-TensorFlow layout: model-parallel over vocab/d_ff/heads,
    # data-parallel over the batch dimension (32-way).
    hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
    hparams.mesh_shape = "batch:32"
    return hparams | 0.011285 |
def load_entry_points(self):
    """Load tasks from entry points.

    Groups entry-point modules by their last path component and registers
    each group with Celery's autodiscovery. Bare (non-package) modules are
    skipped with a warning.
    """
    if self.entry_point_group:
        task_packages = {}
        for item in pkg_resources.iter_entry_points(
                group=self.entry_point_group):
            # Celery 4.2 requires autodiscover to be called with
            # related_name for Python 2.7.
            try:
                pkg, related_name = item.module_name.rsplit('.', 1)
            except ValueError:
                # rsplit failed: module has no package part.
                warnings.warn(
                    'The celery task module "{}" was not loaded. '
                    'Defining modules in bare Python modules is no longer '
                    'supported due to Celery v4.2 constraints. Please '
                    'move the module into a Python package.'.format(
                        item.module_name
                    ),
                    RuntimeWarning
                )
                continue
            if related_name not in task_packages:
                task_packages[related_name] = []
            task_packages[related_name].append(pkg)
        if task_packages:
            for related_name, packages in task_packages.items():
                self.celery.autodiscover_tasks(
                    packages, related_name=related_name, force=True
                ) | 0.001433 |
def from_dict(raw_data):
    """Create Image from raw dictionary data.

    Raises ValueError if ``raw_data`` is a dict missing any of the
    'url'/'width'/'height' keys; a ``None`` input yields an all-None Image.
    """
    url = None
    width = None
    height = None
    try:
        url = raw_data['url']
        width = raw_data['width']
        height = raw_data['height']
    except KeyError:
        raise ValueError('Unexpected image json structure')
    except TypeError:
        # Happens when raw_data is None, i.e. when a term has no image:
        pass
    return Image(url, width, height) | 0.003899 |
def _getPageInfo(self, pno, what):
    """Show fonts or images used on a page.

    Returns the de-duplicated result list (first-seen order preserved).
    """
    if self.isClosed or self.isEncrypted:
        raise ValueError("operation illegal for closed / encrypted doc")
    val = _fitz.Document__getPageInfo(self, pno, what)
    # Drop duplicates while keeping the original order.
    x = []
    for v in val:
        if v not in x:
            x.append(v)
    val = x
    return val | 0.005038 |
def _create_djset(args, cls):
    """ Return a DjSecret object.

    Prefers ``--name`` over ``--settings``; with neither flag present the
    class default is used.
    """
    name = args.get('--name')
    settings = args.get('--settings')
    if name:
        return cls(name=name)
    elif settings:
        return cls(name=settings)
    else:
        return cls() | 0.003831 |
def shutdown(self):
    """Forcefully shutdown the entire pool, closing all non-executing
    connections.

    Raises before closing anything if any connection is still executing,
    so the pool is left untouched in that case.
    :raises: ConnectionBusyError
    """
    with self._lock:
        for cid in list(self.connections.keys()):
            if self.connections[cid].executing:
                raise ConnectionBusyError(cid)
            # Release any held lock before closing and dropping the entry.
            if self.connections[cid].locked:
                self.connections[cid].free()
            self.connections[cid].close()
            del self.connections[cid] | 0.003752 |
def dump_pdb(filename, molecule, atomnames=None, resnames=None, chain_ids=None, occupancies=None, betas=None):
    """Writes a single molecule to a pdb file.
    This function is based on the pdb file specification:
    http://www.wwpdb.org/documentation/format32/sect9.html
    For convenience, the relevant table is copied and the character indexes are
    transformed to C-style (starting from zero)
    ======= ============ ========== ==========================================
    COLUMNS DATA TYPE    FIELD      DEFINITION
    ======= ============ ========== ==========================================
     0 -  5 Record name  "ATOM  "
     6 - 10 Integer      serial     Atom serial number.
    12 - 15 Atom         name       Atom name.
    16      Character    altLoc     Alternate location indicator.
    17 - 19 Residue name resName    Residue name.
    21      Character    chainID    Chain identifier.
    22 - 25 Integer      resSeq     Residue sequence number.
    26      AChar        iCode      Code for insertion of residues.
    30 - 37 Real(8.3)    x          Orthogonal coordinates for X in Angstroms.
    38 - 45 Real(8.3)    y          Orthogonal coordinates for Y in Angstroms.
    46 - 53 Real(8.3)    z          Orthogonal coordinates for Z in Angstroms.
    54 - 59 Real(6.2)    occupancy  Occupancy.
    60 - 65 Real(6.2)    tempFactor Temperature factor.
    76 - 77 LString(2)   element    Element symbol, right-justified.
    78 - 79 LString(2)   charge     Charge on the atom.
    ======= ============ ========== ==========================================
    """
    with open(filename, "w") as f:
        res_id = 1
        old_resname = None
        # NOTE(review): old_resname is None on the first atom, so res_id is
        # bumped to 2 before the first record is written -- confirm intended.
        for i in range(molecule.size):
            symbol = periodic[molecule.numbers[i]].symbol
            # Fall back to defaults for each per-atom array that is absent.
            if atomnames is None:
                atomname = symbol
            else:
                atomname = atomnames[i]
            if resnames is None:
                resname = "OXO"
            else:
                resname = resnames[i]
            # New residue name starts a new residue id.
            if resname != old_resname:
                res_id += 1
            if chain_ids is None:
                chain_id = "A"
            else:
                chain_id = chain_ids[i]
            if occupancies is None:
                occupancy = 1.0
            else:
                occupancy = occupancies[i]
            if betas is None:
                beta = 1.0
            else:
                beta = betas[i]
            # Coordinates are stored internally in atomic units; convert to A.
            print("ATOM  %4i %3s %3s %1s%4i    %8.3f%8.3f%8.3f%6.2f%6.2f  %2s  " % (
                i+1, atomname.ljust(3), resname.ljust(3), chain_id, res_id,
                molecule.coordinates[i, 0]/angstrom,
                molecule.coordinates[i, 1]/angstrom,
                molecule.coordinates[i, 2]/angstrom,
                occupancy, beta, symbol.ljust(2)
            ), file=f)
            old_resname = resname | 0.003813 |
def filter(self, model=None, context=None):
    """
    Perform filtering on the model. Will change model in place.
    :param model: object or dict
    :param context: object, dict or None
    :return: None
    """
    if model is None:
        return
    # properties
    self.filter_properties(model, context=context)
    # entities
    self.filter_entities(model, context=context)
    # collections
    self.filter_collections(model, context=context) | 0.003899 |
def _should_trigger_abbreviation(self, buffer):
    """
    Checks whether, based on the settings for the abbreviation and the given input,
    the abbreviation should trigger.
    @param buffer Input buffer to be checked (as string)
    @return True if any configured abbreviation matches the buffer
    """
    # Short-circuits on the first matching abbreviation.
    return any(self.__checkInput(buffer, abbr) for abbr in self.abbreviations) | 0.011594 |
def add_properties(self, filename):
    """
    Add properties to config based on filename replacing previous values.

    A missing file is silently ignored (no error raised).
    :param filename: str path to YAML file to pull top level properties from
    """
    filename = os.path.expanduser(filename)
    if os.path.exists(filename):
        with open(filename, 'r') as yaml_file:
            self.update_properties(yaml.safe_load(yaml_file)) | 0.007143 |
def modify_account(self, account, attrs):
    """
    :param account: a zobjects.Account
    :param attrs : a dictionary of attributes to set ({key:value,...})
    """
    # Convert the dict into the list-of-{'n', '_content'} wire format.
    attrs = [{'n': k, '_content': v} for k, v in attrs.items()]
    self.request('ModifyAccount', {
        'id': self._get_or_fetch_id(account, self.get_account),
        'a': attrs
    }) | 0.005076 |
def new_screen(self):
    """Makes a new screen with a size of SCREEN_SIZE, and VIDEO_OPTION as flags. Sets the windows name to NAME.

    Fullscreen is emulated as a borderless window at desktop resolution,
    and the QUIT event is always allowed through.
    """
    os.environ['SDL_VIDEO_CENTERED'] = '1'
    pygame.display.set_caption(self.NAME)
    screen_s = self.SCREEN_SIZE
    video_options = self.VIDEO_OPTIONS
    if FULLSCREEN & self.VIDEO_OPTIONS:
        # Swap FULLSCREEN for NOFRAME; (0, 0) means "desktop resolution".
        video_options ^= FULLSCREEN
        video_options |= NOFRAME
        screen_s = (0, 0)
    screen = pygame.display.set_mode(screen_s, video_options)
    if FULLSCREEN & self.VIDEO_OPTIONS:
        # Record the actual size pygame picked for the borderless window.
        self.SCREEN_SIZE = screen.get_size()
    if not QUIT in self.EVENT_ALLOWED:
        self.EVENT_ALLOWED = list(self.EVENT_ALLOWED)
        self.EVENT_ALLOWED.append(QUIT)
    pygame.event.set_allowed(self.EVENT_ALLOWED)
    return screen | 0.004717 |
def to_cloudformation(self, **kwargs):
    """Returns the Lambda Permission resource allowing SNS to invoke the function this event source triggers.
    :param dict kwargs: no existing resources need to be modified
    :returns: a list of vanilla CloudFormation Resources, to which this SNS event expands
    :rtype: list
    :raises TypeError: if the required ``function`` keyword is missing
    """
    function = kwargs.get('function')
    if not function:
        raise TypeError("Missing required keyword argument: function")
    # Permission (SNS -> Lambda) plus the topic subscription itself.
    return [self._construct_permission(function, source_arn=self.Topic),
            self._inject_subscription(function, self.Topic, self.FilterPolicy)] | 0.007634 |
def export_obj(vertices, triangles, filename):
    """
    Exports a mesh in the (.obj) format.

    OBJ face indices are 1-based, hence the ``f + 1`` below; assumes each
    triangle supports elementwise ``+ 1`` (e.g. a numpy row) -- TODO confirm.
    """
    with open(filename, 'w') as fh:
        for v in vertices:
            fh.write("v {} {} {}\n".format(*v))
        for f in triangles:
            fh.write("f {} {} {}\n".format(*(f + 1))) | 0.012384 |
def insert_element(self, vector, value, idx, name=''):
    """
    Returns vector with vector[idx] replaced by value.
    The result is undefined if the idx is larger or equal the vector length.
    """
    # Build the IR instruction, append it to the current block, and return
    # it as the SSA value of the resulting vector.
    instr = instructions.InsertElement(self.block, vector, value, idx,
                                       name=name)
    self._insert(instr)
    return instr | 0.007576 |
def search(self, **kwargs):
    """
    Method to search object group permissions general based on extends search.
    :param search: Dict containing QuerySets to find object group permissions general.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: Determine if result will be detailed ('detail') or basic ('basic').
    :return: Dict containing object group permissions general
    """
    # Delegates to the base GET with kwargs encoded into the query string.
    return super(ApiObjectGroupPermissionGeneral, self).get(self.prepare_url('api/v3/object-group-perm-general/',
                                                                             kwargs)) | 0.008642 |
def retrieve_state_ids(self, activity, agent, registration=None, since=None):
    """Retrieve state id's from the LRS with the provided parameters
    :param activity: Activity object of desired states
    :type activity: :class:`tincan.activity.Activity`
    :param agent: Agent object of desired states
    :type agent: :class:`tincan.agent.Agent`
    :param registration: Registration UUID of desired states
    :type registration: str | unicode
    :param since: Retrieve state id's since this time
    :type since: str | unicode
    :return: LRS Response object with the retrieved state id's as content
    :rtype: :class:`tincan.lrs_response.LRSResponse`
    """
    # Coerce plain values into the model types the request encoder expects.
    if not isinstance(activity, Activity):
        activity = Activity(activity)
    if not isinstance(agent, Agent):
        agent = Agent(agent)
    request = HTTPRequest(
        method="GET",
        resource="activities/state"
    )
    request.query_params = {
        "activityId": activity.id,
        "agent": agent.to_json(self.version)
    }
    # Optional filters are only added when provided.
    if registration is not None:
        request.query_params["registration"] = registration
    if since is not None:
        request.query_params["since"] = since
    lrs_response = self._send_request(request)
    if lrs_response.success:
        # Replace the raw body with the decoded list of state ids.
        lrs_response.content = json.loads(lrs_response.data)
    return lrs_response | 0.00135 |
def render(self, name, value, attrs=None):
    """Render the widget as a <textarea> plus the TinyMCE init scripts.

    Requires an 'id' in the final widget attrs (TinyMCE 'exact' mode binds
    to it). Optionally emits the gzip-compressor bootstrap first.
    """
    if value is None:
        value = ''
    value = smart_unicode(value)
    final_attrs = self.build_attrs(attrs)
    final_attrs['name'] = name
    assert 'id' in final_attrs, \
        "TinyMCE widget attributes must contain 'id'"
    mce_config = cms.plugins.text.settings.TINYMCE_CONFIG.copy()
    mce_config.update(get_language_config(self.content_language))
    if tinymce.settings.USE_FILEBROWSER:
        mce_config['file_browser_callback'] = "djangoFileBrowser"
    # Per-instance attrs override the global config.
    mce_config.update(self.mce_attrs)
    mce_config['mode'] = 'exact'
    mce_config['elements'] = final_attrs['id']
    mce_config['strict_loading_mode'] = 1
    # NOTE(review): the triple-quoted block below is disabled cmsplugins
    # wiring kept as dead code -- consider deleting or re-enabling.
    """
    plugins = mce_config.get("plugins", "")
    if len(plugins):
        plugins += ","
    plugins += "-cmsplugins"
    mce_config['plugins'] = plugins
    adv2 = mce_config.get('theme_advanced_buttons1', "")
    if len(adv2):
        adv2 = "," + adv2
    adv2 = "cmsplugins,cmspluginsedit" + adv2
    mce_config['theme_advanced_buttons1'] = adv2
    """
    json = simplejson.dumps(mce_config)
    html = [u'<textarea%s>%s</textarea>' % (flatatt(final_attrs),
                                            escape(value))]
    if tinymce.settings.USE_COMPRESSOR:
        compressor_config = {
            'plugins': mce_config.get('plugins', ''),
            'themes': mce_config.get('theme', 'advanced'),
            'languages': mce_config.get('language', ''),
            'diskcache': True,
            'debug': False,
        }
        c_json = simplejson.dumps(compressor_config)
        html.append(
            (u'<script type="text/javascript">'
             'tinyMCE_GZ.init(%s);</script>') % (c_json))
    html.append(
        (u'<script type="text/javascript">%s;\ntinyMCE.init(%s);'
         '</script>') % (
            self.render_additions(
                name,
                value,
                attrs),
            json))
    return mark_safe(u'\n'.join(html)) | 0.000931 |
def nth_combination(iterable, r, index):
    """Equivalent to ``list(combinations(iterable, r))[index]``.
    The subsequences of *iterable* that are of length *r* can be ordered
    lexicographically. :func:`nth_combination` computes the subsequence at
    sort position *index* directly, without computing the previous
    subsequences.

    Raises ValueError when r is out of [0, n]; IndexError when index is out
    of range (negative indices count from the end, as with lists).
    """
    pool = tuple(iterable)
    n = len(pool)
    if (r < 0) or (r > n):
        raise ValueError
    # c = C(n, r), computed via the smaller of r and n-r.
    c = 1
    k = min(r, n - r)
    for i in range(1, k + 1):
        c = c * (n - k + i) // i
    if index < 0:
        index += c
    if (index < 0) or (index >= c):
        raise IndexError
    result = []
    # Walk the combinatorial number system: at each step decide how many
    # leading elements to skip before taking the next one.
    while r:
        c, n, r = c * r // n, n - 1, r - 1
        while index >= c:
            index -= c
            c, n = c * (n - r) // n, n - 1
        result.append(pool[-1 - n])
    return tuple(result) | 0.001152 |
def _check_item_type(item, field_name, allowed_types, expect_list=False,
                     required_channels='all'):
    """
    Check the item's type against a set of allowed types.
    Vary the print message regarding whether the item can be None.
    Helper to `BaseRecord.check_field`.
    Parameters
    ----------
    item : any
        The item to check.
    field_name : str
        The field name.
    allowed_types : iterable
        Iterable of types the item is allowed to be.
    expect_list : bool, optional
        Whether the item is expected to be a list.
    required_channels : list, optional
        List of integers specifying which channels of the item must be
        present. May be set to 'all' to indicate all channels. Only used
        if `expect_list` is True, ie. item is a list, and its
        subelements are to be checked.
    Notes
    -----
    This is called by `check_field`, which determines whether the item
    should be a list or not. This function should generally not be
    called by the user directly.
    """
    if expect_list:
        if not isinstance(item, list):
            raise TypeError('Field `%s` must be a list.' % field_name)
        # All channels of the field must be present.
        if required_channels == 'all':
            required_channels = list(range(len(item)))
        for ch in range(len(item)):
            # Check whether the field may be None
            if ch in required_channels:
                allowed_types_ch = allowed_types
            else:
                # Optional channels may also be None.
                allowed_types_ch = allowed_types + (type(None),)
            if not isinstance(item[ch], allowed_types_ch):
                raise TypeError('Channel %d of field `%s` must be one of the following types:' % (ch, field_name),
                                allowed_types_ch)
    else:
        if not isinstance(item, allowed_types):
            # NOTE(review): the '%s' here is never formatted -- the message
            # and field_name... types tuple are passed as separate args.
            raise TypeError('Field `%s` must be one of the following types:',
                            allowed_types) | 0.001499 |
def _expand(self, str, local_vars={}):
    """Expand $vars in a string.

    NOTE(review): the parameter shadows the builtin ``str`` and the mutable
    default ``{}`` is shared across calls (read-only here, presumably).
    """
    return ninja_syntax.expand(str, self.vars, local_vars) | 0.014184 |
def load_multiple(paths=None, first_data_line="auto", filters="*.*", text="Select some files, FACEHEAD.", default_directory="default_directory", quiet=True, header_only=False, transpose=False, **kwargs):
    """
    Loads a list of data files into a list of databox data objects.
    Returns said list (or None if the file dialog is cancelled).
    Parameters
    ----------
    path=None
        Supply a path to a data file; None means pop up a dialog.
    first_data_line="auto"
        Specify the index of the first data line, or have it figure this out
        automatically.
    filters="*.*"
        Specify file filters.
    text="Select some files, FACEHEAD."
        Window title text.
    default_directory="default_directory"
        Which directory to start in (by key). This lives in spinmob.settings.
    quiet=True
        Don't print stuff while loading.
    header_only=False
        Load only the header information.
    transpose = False
        Return databox.transpose().
    Optional keyword arguments are sent to spinmob.data.load(), so check there for more information.
    """
    # NOTE(review): 'paths is None' would be the idiomatic test here.
    if paths == None: paths = _s.dialogs.load_multiple(filters, text, default_directory)
    if paths is None : return
    datas = []
    # Non-file entries (e.g. directories) are silently skipped.
    for path in paths:
        if _os.path.isfile(path): datas.append(load(path=path, first_data_line=first_data_line,
                                                    filters=filters, text=text, default_directory=default_directory,
                                                    header_only=header_only, transpose=transpose, **kwargs))
    return datas | 0.01002 |
def _try_mask_first_value(value, row, all_close):
    '''
    mask first value in row

    Mutates ``row`` in place, masking only the first match.
    value1 : ~typing.Any
    row : 1d masked array
    all_close : bool
        compare with np.isclose instead of ==
    Return whether masked a value
    '''
    # Compare value to row
    for i, value2 in enumerate(row):
        if _value_equals(value, value2, all_close):
            row[i] = ma.masked
            return True
    return False | 0.002299 |
def start_element (self, tag, attrs):
    """Search for meta robots.txt "nofollow" and "noindex" flags.

    Raises StopParse to abort parsing as soon as the answer is known:
    either the robots meta tag was found or <body> was reached.
    """
    if tag == 'meta' and attrs.get('name') == 'robots':
        val = attrs.get_true('content', u'').lower().split(u',')
        self.follow = u'nofollow' not in val
        self.index = u'noindex' not in val
        raise StopParse("found <meta name=robots> tag")
    elif tag == 'body':
        # No robots meta can appear after <body>; stop early.
        raise StopParse("found <body> tag") | 0.006369 |
def write_fasta_file(seq_records, outname, outdir=None, outext='.faa', force_rerun=False):
    """Write a FASTA file for a SeqRecord or a list of SeqRecord objects.
    Args:
        seq_records (SeqRecord, list): SeqRecord or a list of SeqRecord objects
        outname: Name of the output file which will have outext appended to it
        outdir: Path to directory to output sequences to
        outext: Extension of FASTA file, default ".faa"
        force_rerun: If file should be overwritten if it exists
    Returns:
        str: Path to output FASTA file.
    """
    if not outdir:
        outdir = ''
    outfile = ssbio.utils.outfile_maker(inname='', outname=outname, outdir=outdir, outext=outext)
    # Only write when forced or when the file does not already exist.
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        SeqIO.write(seq_records, outfile, "fasta")
    return outfile | 0.003525 |
def filter_values(column, default=None):
    """ Gets a values for a particular filter as a list
    This is useful if:
     - you want to use a filter box to filter a query where the name of filter box
       column doesn't match the one in the select statement
     - you want to have the ability for filter inside the main query for speed purposes
    This searches for "filters" and "extra_filters" in form_data for a match
    Usage example:
        SELECT action, count(*) as times
        FROM logs
        WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} )
        GROUP BY 1
    :param column: column/filter name to lookup
    :type column: str
    :param default: default value to return if there's no matching columns
    :type default: str
    :return: returns a list of filter values
    :type: list
    """
    form_data = json.loads(request.form.get('form_data', '{}'))
    return_val = []
    # Collect matches from both the regular and the dashboard-level filters.
    for filter_type in ['filters', 'extra_filters']:
        if filter_type not in form_data:
            continue
        for f in form_data[filter_type]:
            if f['col'] == column:
                for v in f['val']:
                    return_val.append(v)
    if return_val:
        return return_val
    # No match: fall back to [default] or an empty list.
    if default:
        return [default]
    else:
        return [] | 0.00301 |
def do_refresh(self,args):
    """Re-fetch and display the IAM role backing this stack resource.

    (Python 2 code: print statements.)
    """
    print "stackResource: {}".format(self.stackResource)
    self.roleDetails = AwsConnectionFactory.getIamClient().get_role(RoleName=self.stackResource.physical_resource_id)
    print "== role details =="
    pprint(self.roleDetails)
    self.rolePolicies = self.loadRolePolicies()
    print "== attached policies =="
    pprint(self.rolePolicies) | 0.010846 |
def get_account_history_page(address, page, hostport=None, proxy=None):
    """
    Get a page of the account's history
    Returns the list of account operations on success
    Returns {'error': ...} on error
    """
    assert proxy or hostport, 'Need proxy or hostport'
    if proxy is None:
        proxy = connect_hostport(hostport)
    # Schema for the server's reply: a {'history': [account entries]} object.
    page_schema = {
        'type': 'object',
        'properties': {
            'history': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': ACCOUNT_SCHEMA_PROPERTIES,
                    'required': ACCOUNT_SCHEMA_REQUIRED,
                },
            },
        },
        'required': [
            'history'
        ],
    }
    schema = json_response_schema(page_schema)
    try:
        resp = proxy.get_account_history(address, page)
        resp = json_validate(schema, resp)
        if json_is_error(resp):
            return resp
    except ValidationError as e:
        # Reply did not match the schema: likely an outdated server.
        if BLOCKSTACK_DEBUG:
            log.exception(e)
        resp = {'error': 'Server response did not match expected schema.  You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp
    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp
    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp
    except Exception as ee:
        # Catch-all boundary: report a generic failure rather than crash.
        if BLOCKSTACK_DEBUG:
            log.exception(ee)
        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node.  Try again with `--debug`.', 'http_status': 500}
        return resp
    return resp['history'] | 0.003083 |
def bytes_cast(maybe_str, encoding='utf-8'):
    """
    Converts any string-like input to a bytes-like output, with respect to
    python version

    Non-text input is returned unchanged.
    Parameters
    ----------
    maybe_str : if this is a string-like object, it will be converted to bytes
    encoding : str, default='utf-8'
        encoding to be used when encoding string
    """
    if isinstance(maybe_str, unicode_):
        return maybe_str.encode(encoding)
    else:
        return maybe_str | 0.002141 |
def resize_cover(image, size, resample=Image.LANCZOS):
    """
    Resize image according to size ("cover" semantics: scale so the image
    fully covers the target box, then crop the overflow).
    image: a Pillow image instance
    size: a list of two integers [width, height]
    """
    img_format = image.format
    img = image.copy()
    img_size = img.size
    # max() of the two axis ratios guarantees both dimensions are covered.
    ratio = max(size[0] / img_size[0], size[1] / img_size[1])
    new_size = [
        int(math.ceil(img_size[0] * ratio)),
        int(math.ceil(img_size[1] * ratio))
    ]
    img = img.resize((new_size[0], new_size[1]), resample)
    img = resize_crop(img, size)
    # Preserve the original format tag (lost by copy/resize).
    img.format = img_format
    return img | 0.001704 |
def restore(self, state):
    """Restore this state from the output of a previous call to dump().
    Only those properties in this object and listed in state will be
    updated.  Other properties will not be modified and state may contain
    keys that do not correspond with properties in this object.
    Args:
        state (dict): A serialized representation of this object.
    """
    # Restore only the intersection of our properties and the state's keys.
    own_properties = set(self.get_properties())
    state_properties = set(state)
    to_restore = own_properties.intersection(state_properties)
    for name in to_restore:
        value = state.get(name)
        # Complex properties carry a (dump, load) pair; apply the loader.
        if name in self._complex_properties:
            value = self._complex_properties[name][1](value)
        setattr(self, name, value) | 0.002497 |
def get_dashboard_version(self, id, version, **kwargs):  # noqa: E501
    """Get a specific version of a specific dashboard  # noqa: E501
      # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_dashboard_version(id, version, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param str id: (required)
    :param int version: (required)
    :return: ResponseContainerDashboard
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the _with_http_info variant for just the data payload.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_dashboard_version_with_http_info(id, version, **kwargs)  # noqa: E501
    else:
        (data) = self.get_dashboard_version_with_http_info(id, version, **kwargs)  # noqa: E501
        return data | 0.00201 |
def make_pkey(key_type=crypto.TYPE_RSA, key_bits=4096):
    """Make a public/private key pair.
    :param int key_type: The key type. For example,
        :class:`OpenSSL.crypto.TYPE_RSA`.
    :param int key_bits: The size of the key in bits.
    :return: A private key.
    :rtype: :class:`OpenSSL.crypto.PKey`
    """
    key = crypto.PKey()
    key.generate_key(key_type, key_bits)
    return key | 0.013193 |
def graph_multiresolution(G, levels, sparsify=True, sparsify_eps=None,
                          downsampling_method='largest_eigenvector',
                          reduction_method='kron', compute_full_eigen=False,
                          reg_eps=0.005):
    r"""Compute a pyramid of graphs (by Kron reduction).
    'graph_multiresolution(G,levels)' computes a multiresolution of
    graph by repeatedly downsampling and performing graph reduction. The
    default downsampling method is the largest eigenvector method based on
    the polarity of the components of the eigenvector associated with the
    largest graph Laplacian eigenvalue. The default graph reduction method
    is Kron reduction followed by a graph sparsification step.
    *param* is a structure of optional parameters.
    Parameters
    ----------
    G : Graph structure
        The graph to reduce.
    levels : int
        Number of level of decomposition
    lambd : float
        Stability parameter. It adds self loop to the graph to give the
        algorithm some stability (default = 0.025). [UNUSED?!]
    sparsify : bool
        To perform a spectral sparsification step immediately after
        the graph reduction (default is True).
    sparsify_eps : float
        Parameter epsilon used in the spectral sparsification
        (default is min(10/sqrt(G.N),.3)).
    downsampling_method: string
        The graph downsampling method (default is 'largest_eigenvector').
    reduction_method : string
        The graph reduction method (default is 'kron')
    compute_full_eigen : bool
        To also compute the graph Laplacian eigenvalues and eigenvectors
        for every graph in the multiresolution sequence (default is False).
    reg_eps : float
        The regularized graph Laplacian is :math:`\bar{L}=L+\epsilon I`.
        A smaller epsilon may lead to better regularization, but will also
        require a higher order Chebyshev approximation. (default is 0.005)
    Returns
    -------
    Gs : list
        A list of graph layers.
    Examples
    --------
    >>> from pygsp import reduction
    >>> levels = 5
    >>> G = graphs.Sensor(N=512)
    >>> G.compute_fourier_basis()
    >>> Gs = reduction.graph_multiresolution(G, levels, sparsify=False)
    >>> for idx in range(levels):
    ...     fig, ax = Gs[idx].plot(title='Reduction level: {}'.format(idx))
    """
    if sparsify_eps is None:
        sparsify_eps = min(10. / np.sqrt(G.N), 0.3)
    if compute_full_eigen:
        G.compute_fourier_basis()
    else:
        G.estimate_lmax()
    Gs = [G]
    # 'mr' bookkeeping: kept vertex indices at this level and in G's indexing.
    Gs[0].mr = {'idx': np.arange(G.N), 'orig_idx': np.arange(G.N)}
    for i in range(levels):
        if downsampling_method == 'largest_eigenvector':
            # Keep the vertices where the top Laplacian eigenvector is >= 0.
            if Gs[i]._U is not None:
                V = Gs[i].U[:, -1]
            else:
                V = linalg.eigs(Gs[i].L, 1)[1][:, 0]
            # Fix the eigenvector's sign so the split is deterministic.
            V *= np.sign(V[0])
            ind = np.nonzero(V >= 0)[0]
        else:
            raise NotImplementedError('Unknown graph downsampling method.')
        if reduction_method == 'kron':
            Gs.append(kron_reduction(Gs[i], ind))
        else:
            raise NotImplementedError('Unknown graph reduction method.')
        if sparsify and Gs[i+1].N > 2:
            # Clamp epsilon to [2/sqrt(N), 1] for the sparsifier.
            Gs[i+1] = graph_sparsify(Gs[i+1], min(max(sparsify_eps, 2. / np.sqrt(Gs[i+1].N)), 1.))
            # TODO : Make in place modifications instead!
        if compute_full_eigen:
            Gs[i+1].compute_fourier_basis()
        else:
            Gs[i+1].estimate_lmax()
        Gs[i+1].mr = {'idx': ind, 'orig_idx': Gs[i].mr['orig_idx'][ind], 'level': i}
        # Store the reduced regularized Laplacian and its Green's kernel,
        # used later for interpolation back up the pyramid.
        L_reg = Gs[i].L + reg_eps * sparse.eye(Gs[i].N)
        Gs[i].mr['K_reg'] = kron_reduction(L_reg, ind)
        Gs[i].mr['green_kernel'] = filters.Filter(Gs[i], lambda x: 1./(reg_eps + x))
    return Gs | 0.001044 |
def send(self, data):
    """
    Send data to the child process through its stdin, flushing so the
    child sees it immediately.
    """
    self.stdin.write(data)
    self.stdin.flush() | 0.013245 |
def get_reactions(self):
    """
    :calls: `GET /repos/:owner/:repo/issues/:number/reactions <https://developer.github.com/v3/reactions/#list-reactions-for-an-issue>`_
    :return: :class: :class:`github.PaginatedList.PaginatedList` of :class:`github.Reaction.Reaction`
    """
    # Reactions API is in preview: requires the squirrel-girl Accept header.
    return github.PaginatedList.PaginatedList(
        github.Reaction.Reaction,
        self._requester,
        self.url + "/reactions",
        None,
        headers={'Accept': Consts.mediaTypeReactionsPreview}
    ) | 0.007366 |
def _variant_vc(checkpoints):
    """Add variant calling to workflow, if configured.

    Returns a tuple ``(steps, outputs)``: the CWL workflow steps for
    small-variant calling and the final output keys, driven by the
    ``checkpoints`` flags (vc, jointvc, ensemble, umi, rnaseq).
    """
    if not checkpoints.get("vc"):
        return [], []
    # Per-batch sub-workflow: split into region blocks, call variants per
    # block in parallel, then concatenate into a single per-batch VCF.
    vc_wf = [s("get_parallel_regions", "batch-split",
               [["batch_rec"]],
               [cwlout(["region_block"], {"type": "array", "items": "string"})],
               "bcbio-vc",
               disk={"files": 2.0}, cores=1),
             s("variantcall_batch_region", "batch-parallel",
               [["batch_rec"], ["region_block"]],
               [cwlout(["vrn_file_region"], ["File", "null"], [".tbi"]),
                cwlout(["region_block"], {"type": "array", "items": "string"})],
               "bcbio-vc", ["bcftools", "bedtools", "freebayes=1.1.0.46",
                            "gatk4", "vqsr_cnn", "deepvariant;env=dv", "sentieon;env=python2",
                            "htslib", "octopus", "picard", "platypus-variant;env=python2", "pythonpy",
                            "samtools", "pysam>=0.13.0", "strelka;env=python2", "vardict", "vardict-java",
                            "varscan", "moreutils", "vcfanno", "vcflib", "vt", "r=3.5.1", "r-base",
                            "perl"],
               disk={"files": 2.0}),
             s("concat_batch_variantcalls", "batch-merge",
               [["batch_rec"], ["region_block"], ["vrn_file_region"]],
               [cwlout(["vrn_file"], "File", [".tbi"])],
               "bcbio-vc", ["bcftools", "htslib", "gatk4"],
               disk={"files": 1.5}, cores=1)]
    # With joint calling, effects annotation happens later; otherwise run it
    # right after the per-batch calls.
    if not checkpoints.get("jointvc"):
        vc_wf += [s("postprocess_variants", "batch-single",
                    [["batch_rec"], ["vrn_file"]],
                    [cwlout(["vrn_file"], "File", [".tbi"])],
                    "bcbio-vc", ["snpeff=4.3.1t"], disk={"files": 0.5})]
    # Keys excluded from the inherited record to keep vc_rec small.
    vc_rec_exclude = [["align_bam"]]
    if not checkpoints.get("jointvc"):
        vc_rec_exclude.append(["genome_resources", "variation"])
    vc_wf += [s("compare_to_rm", "batch-single",
                [["batch_rec"], ["vrn_file"]],
                [cwlout("vc_rec", "record",
                        fields=[cwlout(["batch_samples"], ["null", {"type": "array", "items": "string"}]),
                                cwlout(["validate", "summary"], ["File", "null"]),
                                cwlout(["validate", "tp"], ["File", "null"], [".tbi"]),
                                cwlout(["validate", "fp"], ["File", "null"], [".tbi"]),
                                cwlout(["validate", "fn"], ["File", "null"], [".tbi"]),
                                cwlout("inherit", exclude=vc_rec_exclude)])],
                "bcbio-vc", ["bcftools", "bedtools", "pythonpy", "gvcf-regions;env=python2",
                             "htslib", "rtg-tools", "vcfanno"],
                disk={"files": 1.5})]
    # Sample-level inputs collected into each batch record.
    batch_in = [["analysis"], ["genome_build"], ["align_bam"], ["vrn_file"],
                ["metadata", "batch"], ["metadata", "phenotype"],
                ["config", "algorithm", "callable_regions"], ["regions", "sample_callable"],
                ["config", "algorithm", "variantcaller"],
                ["config", "algorithm", "ensemble"],
                ["config", "algorithm", "vcfanno"],
                ["config", "algorithm", "coverage_interval"],
                ["config", "algorithm", "effects"],
                ["config", "algorithm", "min_allele_fraction"],
                ["config", "algorithm", "exclude_regions"],
                ["config", "algorithm", "variant_regions"],
                ["config", "algorithm", "variant_regions_merged"],
                ["config", "algorithm", "validate"], ["config", "algorithm", "validate_regions"],
                ["config", "algorithm", "tools_on"],
                ["config", "algorithm", "tools_off"],
                ["reference", "fasta", "base"],
                ["reference", "rtg"], ["reference", "genome_context"],
                ["genome_resources", "variation", "clinvar"],
                ["genome_resources", "variation", "cosmic"], ["genome_resources", "variation", "dbsnp"],
                ["genome_resources", "variation", "esp"], ["genome_resources", "variation", "exac"],
                ["genome_resources", "variation", "gnomad_exome"],
                ["genome_resources", "variation", "1000g"],
                ["genome_resources", "variation", "lcr"], ["genome_resources", "variation", "polyx"],
                ["genome_resources", "variation", "encode_blacklist"],
                ["genome_resources", "aliases", "ensembl"], ["genome_resources", "aliases", "human"],
                ["genome_resources", "aliases", "snpeff"], ["reference", "snpeff", "genome_build"]]
    if checkpoints.get("umi"):
        batch_in.append(["config", "algorithm", "umi_type"])
    # RNA-seq needs editing sites; DNA pipelines need the VQSR training sets.
    if checkpoints.get("rnaseq"):
        batch_in += [["genome_resources", "variation", "editing"]]
    else:
        batch_in += [["genome_resources", "variation", "train_hapmap"],
                     ["genome_resources", "variation", "train_indels"]]
    # Top-level steps: build batch records, run the per-batch sub-workflow,
    # then (optionally) joint call / ensemble, and summarize.
    vc = [s("batch_for_variantcall", "multi-batch", batch_in,
            [cwlout("batch_rec", "record",
                    fields=[cwlout(["config", "algorithm", "variantcaller_order"], "int"),
                            cwlout("inherit")])],
            "bcbio-vc",
            disk={"files": 2.0}, cores=1,
            unlist=[["config", "algorithm", "variantcaller"]], no_files=True),
          w("variantcall", "multi-parallel", vc_wf,
            [["region"], ["region_block"], ["vrn_file_region"], ["vrn_file"], ["validate", "summary"]])]
    if checkpoints.get("jointvc"):
        vc += _variant_jointvc()
    if checkpoints.get("ensemble"):
        vc += _variant_ensemble(checkpoints)
    summarize_in = [["jointvc_rec" if checkpoints.get("jointvc") else "vc_rec"]]
    if checkpoints.get("ensemble"):
        summarize_in += [["ensemble_rec"]]
    vc += [s("summarize_vc", "multi-combined", summarize_in,
             [cwlout(["variants", "calls"], {"type": "array", "items": ["File", "null"]}),
              cwlout(["variants", "gvcf"], ["null", {"type": "array", "items": ["File", "null"]}]),
              cwlout(["variants", "samples"], {"type": "array", "items": {"type": "array",
                                                                          "items": ["File", "null"]}}),
              cwlout(["validate", "grading_summary"], ["File", "null"]),
              cwlout(["validate", "grading_plots"], {"type": "array", "items": ["File", "null"]})],
             "bcbio-vc",
             disk={"files": 2.0}, cores=1)]
    return vc, [["validate", "grading_summary"], ["variants", "calls"], ["variants", "gvcf"]]
def ellipticity2phi_q(e1, e2):
    """Convert ellipticity components into position angle and axis ratio.

    :param e1: first ellipticity component
    :param e2: second ellipticity component
    :return: tuple ``(phi, q)`` — position angle phi (radians) and axis
        ratio q in (0, 1]
    """
    phi = np.arctan2(e2, e1)/2
    c = np.sqrt(e1**2+e2**2)
    # Cap the ellipticity modulus so q stays strictly positive; np.minimum
    # (instead of the previous scalar `if c > 0.999`) also works
    # element-wise when e1/e2 are arrays, with identical scalar behavior.
    c = np.minimum(c, 0.999)
    q = (1-c)/(1+c)
    return phi, q
def contains(x):
    """Return true if this string or integer tuple appears in tables"""
    if isinstance(x, str):
        key = canonical_name(x)
        return key in _TO_COLOR_USER or key in _TO_COLOR
    key = tuple(x)
    return key in _TO_NAME_USER or key in _TO_NAME
def get_t_periastron(self, params):
    """
    Return the time of periastron passage (calculated using `params.t0`).
    """
    primary_phase = self._get_phase(params, "primary")
    return params.t0 - primary_phase * params.per
def uniprot_ec(uniprot_id):
    """Retrieve the EC number annotation for a UniProt ID.

    Args:
        uniprot_id: Valid UniProt ID

    Returns:
        str or None: The EC number line from the tab-separated response,
        or None when no annotation is present.
    """
    r = requests.post('http://www.uniprot.org/uniprot/?query=%s&columns=ec&format=tab' % uniprot_id)
    lines = r.content.decode('utf-8').splitlines()
    # First line is the column header; previously indexing [1] directly
    # raised IndexError when the service returned an empty/short body.
    if len(lines) < 2:
        return None
    ec = lines[1]
    if len(ec) == 0:
        ec = None
    return ec
def initiate(self, transport, to = None):
    """Initiate an XMPP connection over the `transport`.

    :Parameters:
        - `transport`: an XMPP transport instance
        - `to`: peer name (defaults to own jid domain part)
    """
    destination = JID(self.me.domain) if to is None else to
    return StreamBase.initiate(self, transport, destination)
def run(align_bams, items, ref_file, assoc_files, region=None, out_file=None):
    """Run tumor only smCounter2 calling.

    Writes a bgzipped, indexed VCF at `out_file` (.vcf.gz) and returns its path.
    """
    # smCounter2 is tumor-only: fail loudly if a paired normal is present.
    paired = vcfutils.get_paired_bams(align_bams, items)
    assert paired and not paired.normal_bam, ("smCounter2 supports tumor-only variant calling: %s" %
                                              (",".join([dd.get_sample_name(d) for d in items])))
    # Restrict calling to the configured variant regions for this region chunk.
    vrs = bedutils.population_variant_regions(items)
    target = shared.subset_variant_regions(vrs, region,
                                           out_file, items=items, do_merge=True)
    out_file = out_file.replace(".vcf.gz", ".vcf")
    out_prefix = utils.splitext_plus(os.path.basename(out_file))[0]
    if not utils.file_exists(out_file) and not utils.file_exists(out_file + ".gz"):
        with file_transaction(paired.tumor_data, out_file) as tx_out_file:
            cmd = ["smCounter2", "--runPath", os.path.dirname(tx_out_file),
                   "--outPrefix", out_prefix,
                   "--bedTarget", target, "--refGenome", ref_file,
                   "--bamFile", paired.tumor_bam, "--bamType", "consensus",
                   "--nCPU", dd.get_num_cores(paired.tumor_data)]
            do.run(cmd, "smcounter2 variant calling")
            # smCounter2 writes several output files into the run path; move
            # them next to the final output before the transaction finishes.
            for fname in glob.glob(os.path.join(os.path.dirname(tx_out_file), "*.smCounter*")):
                shutil.move(fname, os.path.join(os.path.dirname(out_file), os.path.basename(fname)))
    # The filtered ("cut") VCF is the canonical output; expose it as out_file.
    utils.symlink_plus(os.path.join(os.path.dirname(out_file),
                                    "%s.smCounter.cut.vcf" % out_prefix),
                       out_file)
    # Rename the FORMAT sample column from the file prefix to the real sample
    # name and add contig headers while bgzipping/indexing.
    return vcfutils.bgzip_and_index(out_file, paired.tumor_data["config"], remove_orig=False,
                                    prep_cmd="sed 's#FORMAT\t%s#FORMAT\t%s#' | %s" %
                                    (out_prefix, dd.get_sample_name(paired.tumor_data),
                                     vcfutils.add_contig_to_header_cl(dd.get_ref_file(paired.tumor_data), out_file)))
def logging_file_install(path):
    """
    Install a rotating file handler on the root logger. If this function has
    already installed a handler, replace it.

    :param path: path to the log file; use None for the default file location.
    """
    if path is None:
        path = configuration_get_default_folder() / LOGGING_DEFAULTNAME
    if not path.parent.exists():
        log.error('File logger installation FAILED!')
        log.error('The directory of the log file does not exist.')
        return
    root_logger = logging.getLogger()
    # Drop the handler from any previous install before adding a new one.
    root_logger.removeHandler(LOGGING_HANDLERS['file'])
    file_handler = logging.handlers.RotatingFileHandler(filename=str(path),
                                                        mode='a',
                                                        maxBytes=LOGGING_MAXBYTES,
                                                        backupCount=LOGGING_BACKUPCOUNT)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(LOGGING_FORMAT))
    LOGGING_HANDLERS['file'] = file_handler
    root_logger.addHandler(file_handler)
def find_clusters(struct, connected_list):
    """
    Finds bonded clusters of atoms in the structure with periodic boundary conditions.
    If there are atoms that are not bonded to anything, returns [0,1,0].(For faster computation time in FindDimension())
    Args:
        struct (Structure): Input structure
        connected_list: Must be made from the same structure with FindConnected() function.
            An array of shape (number of bonded pairs, 2); each row of is of the form [atomi, atomj].
    Returns:
        max_cluster: the size of the largest cluster in the crystal structure
        min_cluster: the size of the smallest cluster in the crystal structure
        clusters: list of bonded clusters found here, clusters are formatted as sets of indices of atoms
    """
    n_atoms = len(struct.species)
    # An atom missing from connected_list is bonded to nothing: short-circuit
    # with the sentinel [0, 1, 0] documented above.
    if len(np.unique(connected_list)) != n_atoms:
        return [0, 1, 0]
    if n_atoms == 0:
        return [0, 0, 0]
    cluster_sizes = []
    clusters = []
    for atom in range(n_atoms):
        # All atoms appearing in a bonded pair with `atom` (includes `atom`).
        connected_inds = np.where(connected_list == atom)[0]
        atom_cluster = np.unique(connected_list[connected_inds])
        atom_cluster = set(atom_cluster)
        if len(clusters) == 0:
            new_clusters = [atom_cluster]
            new_cluster_sizes = [len(atom_cluster)]
        else:
            # Merge every existing cluster overlapping this atom's
            # neighborhood into one; keep disjoint clusters unchanged.
            clusters_w_atom = [atom_cluster]
            clusters_noatom = []
            clusters_noatom_sizes = []
            for cluster in clusters:
                if len(cluster.intersection(atom_cluster)) > 0:
                    clusters_w_atom.append(cluster)
                else:
                    clusters_noatom.append(cluster)
                    clusters_noatom_sizes.append(len(cluster))
            if len(clusters_w_atom) > 1:
                clusters_w_atom = [set.union(*clusters_w_atom)]
            new_clusters = clusters_noatom + clusters_w_atom
            new_cluster_sizes = clusters_noatom_sizes + [len(clusters_w_atom[0])]
        clusters = list(new_clusters)
        cluster_sizes = list(new_cluster_sizes)
        # Stop early once a single cluster spans the whole structure.
        if n_atoms in cluster_sizes:
            break
    max_cluster = max(cluster_sizes)
    min_cluster = min(cluster_sizes)
    return [max_cluster, min_cluster, clusters]
def username(self, value=None):
    """
    Return or set the username

    :param string value: the new username to use
    :returns: string or new :class:`URL` instance
    """
    if value is None:
        return unicode_unquote(self._tuple.username)
    return URL._mutate(self, username=value)
def nutation(date, eop_correction=True, terms=106):  # pragma: no cover
    """Nutation as a rotation matrix"""
    epsilon_bar, delta_psi, delta_eps = np.deg2rad(_nutation(date, eop_correction, terms))
    # True obliquity is the mean obliquity plus the nutation in obliquity.
    true_obliquity = epsilon_bar + delta_eps
    return rot1(-epsilon_bar) @ rot3(delta_psi) @ rot1(true_obliquity)
def use_federated_family_view(self):
    """Pass through to provider RelationshipLookupSession.use_federated_family_view"""
    self._family_view = FEDERATED
    # Propagate the view preference to every provider session that supports it.
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_federated_family_view()
        except AttributeError:
            pass
def create_ssl_context():
    """Create and return an SSL context with certificate verification disabled."""
    context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
    # check_hostname must be turned off before verify_mode can be relaxed.
    context.check_hostname = False
    context.verify_mode = ssl.CERT_NONE
    return context
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'text') and self.text is not None:
_dict['text'] = self.text
if hasattr(self, '_additionalProperties'):
for _key in self._additionalProperties:
_value = getattr(self, _key, None)
if _value is not None:
_dict[_key] = _value
return _dict | 0.004396 |
def _CheckDatabaseEncoding(cursor):
  """Enforces a sane UTF-8 encoding for the database."""
  actual_charset = _ReadVariable("character_set_database", cursor)
  if actual_charset == CHARACTER_SET:
    return
  raise EncodingEnforcementError(
      "Require MySQL character_set_database of {}, got {}."
      " To create your database, use: {}".format(CHARACTER_SET,
                                                 actual_charset,
                                                 CREATE_DATABASE_QUERY))
def start(self, blocking=False):
    """
    Start the interface and launch the packet-processing thread.

    :param blocking: Should the call block until stop() is called
        (default: False)
    :type blocking: bool
    :rtype: None
    :raises SensorStartException: Failed to start
    """
    self.debug("()")
    # Start the non-blocking machinery first; blocking is handled at the end.
    super(SensorClient, self).start(blocking=False)
    try:
        a_thread = threading.Thread(
            target=self._thread_wrapper,
            args=(self._packet_loop,)
        )
        a_thread.daemon = True
        a_thread.start()
    except Exception:
        # Was a bare `except:` — that also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps those propagating.
        self.exception("Failed to run packet loop")
        raise SensorStartException("Packet loop failed")
    self.info("Started")
    # Blocking - call StartStopable.start
    super(Sensor, self).start(blocking)
def outfiles(fmt, nfiles, progress, leave=True):
    """Get output file paths.
    Parameters
    ----------
    fmt : `str`
        File path format string.
    nfiles : `int`
        Number of files.
    progress : `bool`
        Show progress bars.
    leave : `bool`, optional
        Leave progress bar (default: True).
    Raises
    ------
    ValueError
        If nfiles <= 0.
    Returns
    -------
    `generator` of `str`
        Output file paths.
    """
    if nfiles > 1:
        fnames = (fmt % i for i in range(nfiles))
    elif nfiles == 1:
        # Single file: fmt is used verbatim, no %-substitution.
        fnames = (fmt,)
    else:
        raise ValueError('output file count <= 0')
    if progress:
        fnames = tqdm(fnames, total=nfiles,
                      desc='Generating', unit='file',
                      bar_format=BAR_FORMAT,
                      leave=leave, dynamic_ncols=True)
    # NOTE(review): this yields the `fnames` iterable itself as a single
    # item (so callers obtain it via e.g. `next(outfiles(...))`), which
    # lets the tqdm bar be closed here after the caller finishes with it —
    # confirm against call sites before restructuring.
    yield fnames
    if progress:
        fnames.close()
def raw_corpus_length_ratio(hypotheses: Iterable[str], references: Iterable[str]) -> float:
    """
    Simple wrapper around length ratio implementation.

    :param hypotheses: Hypotheses stream.
    :param references: Reference stream.
    :return: Length ratio score as float.
    """
    ratios = []
    for hypothesis, reference in zip(hypotheses, references):
        ratios.append(len(hypothesis.split()) / len(reference.split()))
    # Average token-length ratio; empty input yields 0.0.
    return sum(ratios) / len(ratios) if ratios else 0.0
def select_layout(self, layout_type):
    """
    Select one of the predefined layouts.

    Rebuilds `self.root` from `self.panes` according to `layout_type` and
    records the choice in `self.previous_selected_layout`.
    """
    assert layout_type in LayoutTypes._ALL
    # When there is only one pane, always choose EVEN_HORIZONTAL,
    # Otherwise, we create VSplit/HSplit instances with an empty list of
    # children.
    if len(self.panes) == 1:
        layout_type = LayoutTypes.EVEN_HORIZONTAL
    # even-horizontal.
    if layout_type == LayoutTypes.EVEN_HORIZONTAL:
        self.root = HSplit(self.panes)
    # even-vertical.
    elif layout_type == LayoutTypes.EVEN_VERTICAL:
        self.root = VSplit(self.panes)
    # main-horizontal.
    elif layout_type == LayoutTypes.MAIN_HORIZONTAL:
        self.root = HSplit([
            self.active_pane,
            VSplit([p for p in self.panes if p != self.active_pane])
        ])
    # main-vertical.
    elif layout_type == LayoutTypes.MAIN_VERTICAL:
        self.root = VSplit([
            self.active_pane,
            HSplit([p for p in self.panes if p != self.active_pane])
        ])
    # tiled.
    elif layout_type == LayoutTypes.TILED:
        panes = self.panes
        # Arrange panes in a near-square grid: ceil(sqrt(n)) columns.
        column_count = math.ceil(len(panes) ** .5)
        rows = HSplit()
        current_row = VSplit()
        for p in panes:
            current_row.append(p)
            if len(current_row) >= column_count:
                rows.append(current_row)
                current_row = VSplit()
        # Flush the final, possibly short, row.
        if current_row:
            rows.append(current_row)
        self.root = rows
    self.previous_selected_layout = layout_type
def form_to_params(fn=None, return_json=True):
    """
    Convert bottle forms request to parameters for the wrapped function.

    Args:
        return_json (bool, default True): Should the decorator automatically
            convert returned value to JSON?
    """
    def decorator(func):
        @handle_type_error
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Merge the submitted form fields into the keyword arguments.
            kwargs.update(dict(request.forms))
            result = func(*args, **kwargs)
            return encode_json_body(result) if return_json else result
        return wrapper
    # Support both the bare @form_to_params and the parametrized
    # @form_to_params(return_json=...) usage.
    return decorator(fn) if fn else decorator
def start(self):
    """Spin up the pool's worker threads and wait until each reports ready."""
    self._threads.extend(WorkerThread(self.server) for _ in range(self.min))
    for worker in self._threads:
        worker.setName("CP Server " + worker.getName())
        worker.start()
    # Poll until every worker has finished its startup.
    for worker in self._threads:
        while not worker.ready:
            time.sleep(.1)
def _configure_key_pair(config):
    """Configure SSH access, using an existing key pair if possible.
    Creates a project-wide ssh key that can be used to access all the instances
    unless explicitly prohibited by instance config.
    The ssh-keys created by ray are of format:
      [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]
    where:
      [USERNAME] is the user for the SSH key, specified in the config.
      [KEY_VALUE] is the public SSH key value.

    Returns the config with ``auth.ssh_private_key`` filled in.
    """
    # Respect an explicitly configured private key.
    if "ssh_private_key" in config["auth"]:
        return config
    ssh_user = config["auth"]["ssh_user"]
    project = compute.projects().get(
        project=config["provider"]["project_id"]).execute()
    # Key pairs associated with project meta data. The key pairs are general,
    # and not just ssh keys.
    ssh_keys_str = next(
        (item for item in project["commonInstanceMetadata"].get("items", [])
         if item["key"] == "ssh-keys"), {}).get("value", "")
    ssh_keys = ssh_keys_str.split("\n") if ssh_keys_str else []
    # Try a few times to get or create a good key pair.
    key_found = False
    for i in range(10):
        key_name = key_pair_name(i, config["provider"]["region"],
                                 config["provider"]["project_id"], ssh_user)
        public_key_path, private_key_path = key_pair_paths(key_name)
        # Reuse a project key only when the matching private key exists locally.
        for ssh_key in ssh_keys:
            key_parts = ssh_key.split(" ")
            if len(key_parts) != 3:
                continue
            if key_parts[2] == ssh_user and os.path.exists(private_key_path):
                # Found a key
                key_found = True
                break
        # Create a key since it doesn't exist locally or in GCP
        if not key_found and not os.path.exists(private_key_path):
            logger.info("_configure_key_pair: "
                        "Creating new key pair {}".format(key_name))
            public_key, private_key = generate_rsa_key_pair()
            _create_project_ssh_key_pair(project, public_key, ssh_user)
            with open(private_key_path, "w") as f:
                f.write(private_key)
            # Private key must be user-readable only for ssh to accept it.
            os.chmod(private_key_path, 0o600)
            with open(public_key_path, "w") as f:
                f.write(public_key)
            key_found = True
            break
        if key_found:
            break
    assert key_found, "SSH keypair for user {} not found for {}".format(
        ssh_user, private_key_path)
    assert os.path.exists(private_key_path), (
        "Private key file {} not found for user {}"
        "".format(private_key_path, ssh_user))
    logger.info("_configure_key_pair: "
                "Private key not specified in config, using"
                "{}".format(private_key_path))
    config["auth"]["ssh_private_key"] = private_key_path
    return config
def add_ruleclause_name(self, ns_name, rid) -> bool:
    """Create a tree.Rule from the value of `rid` and attach it to `ns_name`."""
    ns_name.parser_tree = parsing.Rule(self.value(rid))
    # Always reports success to the calling grammar hook.
    return True
def _to_rfc822(date):
"""_to_rfc822(datetime.datetime) -> str
The datetime `strftime` method is subject to locale-specific
day and month names, so this function hardcodes the conversion."""
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
days = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
fmt = '{day}, {d:02} {month} {y:04} {h:02}:{m:02}:{s:02} GMT'
return fmt.format(
day=days[date.weekday()],
d=date.day,
month=months[date.month - 1],
y=date.year,
h=date.hour,
m=date.minute,
s=date.second,
) | 0.001541 |
def on_pbnBack_released(self):
    """Handle the Back button release.

    Pops the previous step from the appropriate step stack and navigates
    to it, restoring widget focus and button state.

    .. note:: This is an automatic Qt slot
        executed when the Back button is released.
    """
    current_step = self.get_current_step()
    # Choose which stack to pop from based on the current step's type:
    # impact-function steps and keyword steps live on separate stacks.
    if current_step.step_type == STEP_FC:
        new_step = self.impact_function_steps.pop()
    elif current_step.step_type == STEP_KW:
        try:
            new_step = self.keyword_steps.pop()
        except IndexError:
            # Keyword stack exhausted: fall back to the impact-function stack.
            new_step = self.impact_function_steps.pop()
    else:
        raise InvalidWizardStep
    # set focus to table widgets, as the inactive selection style is gray
    if new_step == self.step_fc_functions1:
        self.step_fc_functions1.tblFunctions1.setFocus()
    if new_step == self.step_fc_functions2:
        self.step_fc_functions2.tblFunctions2.setFocus()
    # Re-connect disconnected signals when coming back to the Extent step
    if new_step == self.step_fc_extent:
        self.step_fc_extent.set_widgets()
    # Set Next button label
    self.pbnNext.setText(tr('Next'))
    self.pbnNext.setEnabled(True)
    self.go_to_step(new_step)
def get_context_data(self, **kwargs):
    """
    Add error and pattern in context.
    """
    context = super(BaseEntrySearch, self).get_context_data(**kwargs)
    context['error'] = self.error
    context['pattern'] = self.pattern
    return context
def _escape_string(text, _map={}):
    """
    Escape the given bytestring for safe use as a LLVM array constant.
    """
    # NOTE: `_map={}` is an intentional mutable default used as a
    # per-function memo cache for the byte -> escaped-string table.
    if isinstance(text, str):
        text = text.encode('ascii')
    assert isinstance(text, (bytes, bytearray))
    # Build the translation table lazily on the first call.
    if not _map:
        for ch in range(256):
            if ch in _VALID_CHARS:
                _map[ch] = chr(ch)
            else:
                # Non-printable/invalid bytes become \xx hex escapes.
                _map[ch] = '\\%02x' % ch
            if six.PY2:
                # On Python 2, iterating bytes yields 1-char strings,
                # so mirror each entry under the character key too.
                _map[chr(ch)] = _map[ch]
    buf = [_map[ch] for ch in text]
    return ''.join(buf)
def challenge():
    """Creates an enum-like class of contest types."""
    members = {
        "ACTIVE": "active",
        "UPCOMING": "upcoming",
        "HIRING": "hiring",
        "ALL": "all",
        "SHORT": "short",
    }
    # Build the class dynamically so members behave as class attributes.
    return type('Enum', (), members)
def to_python(self, value):
    """
    Convert the input JSON value into python structures, raises
    django.core.exceptions.ValidationError if the data can't be converted.
    """
    if self.blank and not value:
        return {}
    value = value or '{}'
    # Decode raw bytes before attempting to parse.
    if isinstance(value, six.binary_type):
        value = six.text_type(value, 'utf-8')
    if isinstance(value, six.string_types):
        try:
            # with django 1.6 i have '"{}"' as default value here
            # (a JSON-encoded string of a dict); strip the outer quotes
            # so the inner document parses.
            if value[0] == value[-1] == '"':
                value = value[1:-1]
            return json.loads(value)
        except Exception as err:
            raise ValidationError(str(err))
    else:
        # Already a python structure (e.g. re-saving a parsed value).
        return value
def filter_genes_dispersion(data,
                            flavor='seurat',
                            min_disp=None, max_disp=None,
                            min_mean=None, max_mean=None,
                            n_bins=20,
                            n_top_genes=None,
                            log=True,
                            subset=True,
                            copy=False):
    """Extract highly variable genes [Satija15]_ [Zheng17]_.
    .. warning::
        .. deprecated:: 1.3.6
            Use :func:`~scanpy.api.pp.highly_variable_genes`
            instead. The new function is equivalent to the present
            function, except that
            * the new function always expects logarithmized data
            * `subset=False` in the new function, it suffices to
              merely annotate the genes, tools like `pp.pca` will
              detect the annotation
            * you can now call: `sc.pl.highly_variable_genes(adata)`
            * `copy` is replaced by `inplace`
    If trying out parameters, pass the data matrix instead of AnnData.
    Depending on `flavor`, this reproduces the R-implementations of Seurat
    [Satija15]_ and Cell Ranger [Zheng17]_.
    The normalized dispersion is obtained by scaling with the mean and standard
    deviation of the dispersions for genes falling into a given bin for mean
    expression of genes. This means that for each bin of mean expression, highly
    variable genes are selected.
    Use `flavor='cell_ranger'` with care and in the same way as in
    :func:`~scanpy.api.pp.recipe_zheng17`.
    Parameters
    ----------
    data : :class:`~anndata.AnnData`, `np.ndarray`, `sp.sparse`
        The (annotated) data matrix of shape `n_obs` × `n_vars`. Rows correspond
        to cells and columns to genes.
    flavor : {'seurat', 'cell_ranger'}, optional (default: 'seurat')
        Choose the flavor for computing normalized dispersion. If choosing
        'seurat', this expects non-logarithmized data - the logarithm of mean
        and dispersion is taken internally when `log` is at its default value
        `True`. For 'cell_ranger', this is usually called for logarithmized data
        - in this case you should set `log` to `False`. In their default
        workflows, Seurat passes the cutoffs whereas Cell Ranger passes
        `n_top_genes`.
    min_mean=0.0125, max_mean=3, min_disp=0.5, max_disp=`None` : `float`, optional
        If `n_top_genes` unequals `None`, these cutoffs for the means and the
        normalized dispersions are ignored.
    n_bins : `int` (default: 20)
        Number of bins for binning the mean gene expression. Normalization is
        done with respect to each bin. If just a single gene falls into a bin,
        the normalized dispersion is artificially set to 1. You'll be informed
        about this if you set `settings.verbosity = 4`.
    n_top_genes : `int` or `None` (default: `None`)
        Number of highly-variable genes to keep.
    log : `bool`, optional (default: `True`)
        Use the logarithm of the mean to variance ratio.
    subset : `bool`, optional (default: `True`)
        Keep highly-variable genes only (if True) else write a bool array for h
        ighly-variable genes while keeping all genes
    copy : `bool`, optional (default: `False`)
        If an :class:`~anndata.AnnData` is passed, determines whether a copy
        is returned.
    Returns
    -------
    If an AnnData `adata` is passed, returns or updates `adata` depending on
    `copy`. It filters the `adata` and adds the annotations
    **means** : adata.var
        Means per gene. Logarithmized when `log` is `True`.
    **dispersions** : adata.var
        Dispersions per gene. Logarithmized when `log` is `True`.
    **dispersions_norm** : adata.var
        Normalized dispersions per gene. Logarithmized when `log` is `True`.
    If a data matrix `X` is passed, the annotation is returned as `np.recarray`
    with the same information stored in fields: `gene_subset`, `means`, `dispersions`, `dispersion_norm`.
    """
    # `n_top_genes` takes precedence over the manual cutoffs; warn when both
    # were supplied, then fill in the Seurat default cutoffs.
    if n_top_genes is not None and not all([
            min_disp is None, max_disp is None, min_mean is None, max_mean is None]):
        logg.info('If you pass `n_top_genes`, all cutoffs are ignored.')
    if min_disp is None: min_disp = 0.5
    if min_mean is None: min_mean = 0.0125
    if max_mean is None: max_mean = 3
    # AnnData input: recurse on the raw matrix, then annotate/subset in place.
    if isinstance(data, AnnData):
        adata = data.copy() if copy else data
        result = filter_genes_dispersion(adata.X, log=log,
                                         min_disp=min_disp, max_disp=max_disp,
                                         min_mean=min_mean, max_mean=max_mean,
                                         n_top_genes=n_top_genes,
                                         flavor=flavor)
        adata.var['means'] = result['means']
        adata.var['dispersions'] = result['dispersions']
        adata.var['dispersions_norm'] = result['dispersions_norm']
        if subset:
            adata._inplace_subset_var(result['gene_subset'])
        else:
            adata.var['highly_variable'] = result['gene_subset']
        return adata if copy else None
    logg.msg('extracting highly variable genes',
             r=True, v=4)
    X = data  # no copy necessary, X remains unchanged in the following
    mean, var = materialize_as_ndarray(_get_mean_var(X))
    # now actually compute the dispersion
    mean[mean == 0] = 1e-12  # set entries equal to zero to small value
    dispersion = var / mean
    if log:  # logarithmized mean as in Seurat
        dispersion[dispersion == 0] = np.nan
        dispersion = np.log(dispersion)
        mean = np.log1p(mean)
    # all of the following quantities are "per-gene" here
    import pandas as pd
    df = pd.DataFrame()
    df['mean'] = mean
    df['dispersion'] = dispersion
    # Seurat flavor: z-score dispersions within equal-width bins of mean
    # expression.
    if flavor == 'seurat':
        df['mean_bin'] = pd.cut(df['mean'], bins=n_bins)
        disp_grouped = df.groupby('mean_bin')['dispersion']
        disp_mean_bin = disp_grouped.mean()
        disp_std_bin = disp_grouped.std(ddof=1)
        # retrieve those genes that have nan std, these are the ones where
        # only a single gene fell in the bin and implicitly set them to have
        # a normalized disperion of 1
        one_gene_per_bin = disp_std_bin.isnull()
        gen_indices = np.where(one_gene_per_bin[df['mean_bin'].values])[0].tolist()
        if len(gen_indices) > 0:
            logg.msg(
                'Gene indices {} fell into a single bin: their '
                'normalized dispersion was set to 1.\n    '
                'Decreasing `n_bins` will likely avoid this effect.'
                .format(gen_indices), v=4)
        # Circumvent pandas 0.23 bug. Both sides of the assignment have dtype==float32,
        # but there’s still a dtype error without “.value”.
        disp_std_bin[one_gene_per_bin] = disp_mean_bin[one_gene_per_bin.values].values
        disp_mean_bin[one_gene_per_bin] = 0
        # actually do the normalization
        df['dispersion_norm'] = (df['dispersion'].values # use values here as index differs
                                 - disp_mean_bin[df['mean_bin'].values].values) \
                                / disp_std_bin[df['mean_bin'].values].values
    # Cell Ranger flavor: robust normalization by median/MAD within
    # percentile bins of the mean.
    elif flavor == 'cell_ranger':
        from statsmodels import robust
        df['mean_bin'] = pd.cut(df['mean'], np.r_[-np.inf,
            np.percentile(df['mean'], np.arange(10, 105, 5)), np.inf])
        disp_grouped = df.groupby('mean_bin')['dispersion']
        disp_median_bin = disp_grouped.median()
        # the next line raises the warning: "Mean of empty slice"
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            disp_mad_bin = disp_grouped.apply(robust.mad)
        df['dispersion_norm'] = np.abs((df['dispersion'].values
                                        - disp_median_bin[df['mean_bin'].values].values)) \
                                / disp_mad_bin[df['mean_bin'].values].values
    else:
        raise ValueError('`flavor` needs to be "seurat" or "cell_ranger"')
    dispersion_norm = df['dispersion_norm'].values.astype('float32')
    if n_top_genes is not None:
        # Select genes whose normalized dispersion clears the top-N cutoff.
        dispersion_norm = dispersion_norm[~np.isnan(dispersion_norm)]
        dispersion_norm[::-1].sort()  # interestingly, np.argpartition is slightly slower
        disp_cut_off = dispersion_norm[n_top_genes-1]
        gene_subset = df['dispersion_norm'].values >= disp_cut_off
        # NOTE(review): the format string has a single placeholder but two
        # arguments — `disp_cut_off` is never actually printed.
        logg.msg('the {} top genes correspond to a normalized dispersion cutoff of'
                 .format(n_top_genes, disp_cut_off), v=5)
    else:
        # Apply the explicit mean/dispersion cutoffs.
        max_disp = np.inf if max_disp is None else max_disp
        dispersion_norm[np.isnan(dispersion_norm)] = 0  # similar to Seurat
        gene_subset = np.logical_and.reduce((mean > min_mean, mean < max_mean,
                                             dispersion_norm > min_disp,
                                             dispersion_norm < max_disp))
    logg.msg('    finished', time=True, v=4)
    return np.rec.fromarrays((gene_subset,
                              df['mean'].values,
                              df['dispersion'].values,
                              df['dispersion_norm'].values.astype('float32', copy=False)),
                              dtype=[('gene_subset', bool),
                                     ('means', 'float32'),
                                     ('dispersions', 'float32'),
                                     ('dispersions_norm', 'float32')])
def tsort(self):
    """Given a partial ordering, return a totally ordered list.

    Each task in ``self.tasks`` carries a ``dependencies`` set of the tasks
    it depends on.

    The return value is a list of sets, each of which has only
    dependencies on items in previous entries in the list.

    raise ValueError if ordering is not possible (check for circular or missing dependencies)
    """
    # Use .items()/.values() instead of the Python-2-only iteritems() so the
    # sort works on both Python 2 and 3.
    parts = {task: task.dependencies for task in self.tasks.values()}
    result = []
    while True:
        # Tasks with no remaining dependencies are ready at this level.
        level = set(name for name, deps in parts.items() if not deps)
        if not level:
            break
        result.append(level)
        # Remove the completed level from the remaining dependency sets.
        parts = dict((name, deps - level) for name, deps in parts.items()
                     if name not in level)
    if parts:
        raise ValueError('total ordering not possible (check for circular or missing dependencies)')
    return result
def build_stop_ids(shape_id):
    """
    Create a pair of stop IDs based on the given shape ID.
    """
    return [cs.SEP.join(('stp', shape_id, str(index))) for index in (0, 1)]
def get_data_home(data_home=None):
    """
    Return the path of the revrand data dir.
    This folder is used by some large dataset loaders to avoid
    downloading the data several times.
    By default the data dir is set to a folder named 'revrand_data'
    in the user home folder.
    Alternatively, it can be set by the 'REVRAND_DATA' environment
    variable or programmatically by giving an explicit folder path. The
    '~' symbol is expanded to the user home folder.
    If the folder does not already exist, it is automatically created.
    """
    default_home = Path(__file__).ancestor(3).child('demos',
                                                    '_revrand_data')
    if data_home is None:
        data_home = os.environ.get('REVRAND_DATA', default_home)
    # Create the directory on first use.
    if not os.path.exists(data_home):
        os.makedirs(data_home)
    return data_home
def encrypt_file(file_path, sender, recipients):
    "Returns encrypted binary file content if successful"
    # Validate all key types up front so failures surface before any file I/O.
    for recipient_key in recipients:
        crypto.assert_type_and_length('recipient_key', recipient_key, (str, crypto.UserLock))
    crypto.assert_type_and_length("sender_key", sender, crypto.UserLock)
    if (not os.path.exists(file_path)) or (not os.path.isfile(file_path)):
        raise OSError("Specified path does not point to a valid file: {}".format(file_path))
    # Only the basename is embedded in the encrypted container, not the path.
    _, filename = os.path.split(file_path)
    with open(file_path, "rb") as I:
        crypted = crypto.MiniLockFile.new(filename, I.read(), sender, recipients)
    return crypted.contents
def viterbi_tags(self,
                 logits: torch.Tensor,
                 mask: torch.Tensor) -> List[Tuple[List[int], float]]:
    """
    Uses viterbi algorithm to find most likely tags for the given inputs.
    If constraints are applied, disallows all other transitions.

    Parameters
    ----------
    logits : ``torch.Tensor``
        Per-token tag scores, shape ``(batch, max_seq_length, num_tags)``.
    mask : ``torch.Tensor``
        Binary mask of shape ``(batch, max_seq_length)`` marking real tokens.

    Returns
    -------
    ``List[Tuple[List[int], float]]``
        For each sequence in the batch, the best tag index sequence
        (sentinels removed) and its viterbi score.
    """
    _, max_seq_length, num_tags = logits.size()
    # Get the tensors out of the variables
    logits, mask = logits.data, mask.data
    # Augment transitions matrix with start and end transitions
    start_tag = num_tags
    end_tag = num_tags + 1
    transitions = torch.Tensor(num_tags + 2, num_tags + 2).fill_(-10000.)
    # Apply transition constraints: allowed transitions keep their learned
    # score; disallowed ones get an effectively -inf score of -10000.
    constrained_transitions = (
        self.transitions * self._constraint_mask[:num_tags, :num_tags] +
        -10000.0 * (1 - self._constraint_mask[:num_tags, :num_tags])
    )
    transitions[:num_tags, :num_tags] = constrained_transitions.data
    if self.include_start_end_transitions:
        transitions[start_tag, :num_tags] = (
            self.start_transitions.detach() * self._constraint_mask[start_tag, :num_tags].data +
            -10000.0 * (1 - self._constraint_mask[start_tag, :num_tags].detach())
        )
        transitions[:num_tags, end_tag] = (
            self.end_transitions.detach() * self._constraint_mask[:num_tags, end_tag].data +
            -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
        )
    else:
        # Without learned start/end transitions, only the constraint mask
        # decides which start/end moves are permitted.
        transitions[start_tag, :num_tags] = (-10000.0 *
                                             (1 - self._constraint_mask[start_tag, :num_tags].detach()))
        transitions[:num_tags, end_tag] = -10000.0 * (1 - self._constraint_mask[:num_tags, end_tag].detach())
    best_paths = []
    # Pad the max sequence length by 2 to account for start_tag + end_tag.
    tag_sequence = torch.Tensor(max_seq_length + 2, num_tags + 2)
    # The tag_sequence buffer is reused across the batch (refilled each turn).
    for prediction, prediction_mask in zip(logits, mask):
        sequence_length = torch.sum(prediction_mask)
        # Start with everything totally unlikely
        tag_sequence.fill_(-10000.)
        # At timestep 0 we must have the START_TAG
        tag_sequence[0, start_tag] = 0.
        # At steps 1, ..., sequence_length we just use the incoming prediction
        tag_sequence[1:(sequence_length + 1), :num_tags] = prediction[:sequence_length]
        # And at the last timestep we must have the END_TAG
        tag_sequence[sequence_length + 1, end_tag] = 0.
        # We pass the tags and the transitions to ``viterbi_decode``.
        viterbi_path, viterbi_score = util.viterbi_decode(tag_sequence[:(sequence_length + 2)], transitions)
        # Get rid of START and END sentinels and append.
        viterbi_path = viterbi_path[1:-1]
        best_paths.append((viterbi_path, viterbi_score.item()))
    return best_paths | 0.004648 |
def get_managers(self):
    """Return the managers for the slave environments (cached).

    Returns None in a single-environment setup, which has no slaves.
    """
    if self._single_env:
        return None
    try:
        # Reuse the cached value from a previous call.
        return self._managers
    except AttributeError:
        self._managers = self.env.get_slave_managers()
        return self._managers
def __EncodedAttribute_generic_encode_rgb24(self, rgb24, width=0, height=0, quality=0, format=_ImageFormat.RawImage):
    """Internal usage only.

    Validates an RGB24 image given as a str, numpy array or nested
    sequence, infers width/height when possible, and forwards the data to
    the raw or JPEG encoder.
    """
    if not is_seq(rgb24):
        raise TypeError("Expected sequence (str, numpy.ndarray, list, tuple "
                        "or bytearray) as first argument")
    is_str = is_pure_str(rgb24)
    if is_str:
        # A flat string carries no shape information of its own.
        if not width or not height:
            raise ValueError("When giving a string as data, you must also "
                             "supply width and height")
    if np and isinstance(rgb24, np.ndarray):
        if rgb24.ndim != 3:
            if not width or not height:
                raise ValueError("When giving a non 2D numpy array, width and "
                                 "height must be supplied")
            if rgb24.nbytes / 3 != width * height:
                raise ValueError("numpy array size mismatch")
        else:
            if rgb24.itemsize != 1:
                raise TypeError("Expected numpy array with itemsize == 1")
        if not rgb24.flags.c_contiguous:
            raise TypeError("Currently, only contiguous, aligned numpy arrays "
                            "are supported")
        if not rgb24.flags.aligned:
            raise TypeError("Currently, only contiguous, aligned numpy arrays "
                            "are supported")
    if not is_str and (not width or not height):
        # Infer the shape from the nested-sequence layout.
        height = len(rgb24)
        if height < 1:
            raise IndexError("Expected sequence with at least one row")
        row0 = rgb24[0]
        if not is_seq(row0):
            raise IndexError("Expected sequence (str, numpy.ndarray, list, tuple or "
                             "bytearray) inside a sequence")
        width = len(row0)
        if is_pure_str(row0) or type(row0) == bytearray:
            # Packed rows hold 3 bytes per pixel.
            # NOTE(review): under Python 3 '/' is true division, so width
            # becomes a float here -- confirm the encoders accept that.
            width /= 3
    if format == _ImageFormat.RawImage:
        self._encode_rgb24(rgb24, width, height)
    elif format == _ImageFormat.JpegImage:
        self._encode_jpeg_rgb24(rgb24, width, height, quality) | 0.001485 |
def percentile(data, n):
    """Return the n-th percentile of the given data.

    Assume that the data are already sorted.

    :param data: sorted sequence of values.
    :param n: percentile to compute (0 < n <= 100).
    :raises StatisticsError: if the sample is too small (or ``n`` too
        extreme) for the requested percentile to exist in ``data``.
    """
    size = len(data)
    idx = (n / 100.0) * size - 0.5
    # Reject idx == size as well: the previous `idx > size` guard let that
    # case fall through and raise IndexError on data[int(idx)].
    if idx < 0 or idx >= size:
        raise StatisticsError("Too few data points ({}) for {}th percentile".format(size, n))
    return data[int(idx)]
def render_widgets(kb_app: kb,
                   sphinx_app: Sphinx,
                   doctree: doctree,
                   fromdocname: str,
                   ):
    """ Walk the doctree and swap each widget directive for its rendering """
    builder: StandaloneHTMLBuilder = sphinx_app.builder
    for node in doctree.traverse(widget):
        widget_obj = sphinx_app.env.widgets.get(node.name)
        # Render against a copy of the builder's global context, extended
        # with the app-wide resources and references.
        render_context = builder.globalcontext.copy()
        render_context['resources'] = sphinx_app.env.resources
        render_context['references'] = sphinx_app.env.references
        rendered = widget_obj.render(sphinx_app, render_context)
        # Replace the placeholder node with the raw HTML output.
        node.replace_self([nodes.raw('', rendered, format='html')])
def letter():
    '''Parse a letter in alphabet.'''
    @Parser
    def letter_parser(text, index=0):
        # Fail when out of input or the next character is not alphabetic.
        if index >= len(text) or not text[index].isalpha():
            return Value.failure(index, 'a letter')
        return Value.success(index + 1, text[index])
    return letter_parser
def load_into_collection_from_stream(collection, stream, content_type):
    """
    Loads resources from the given resource data stream (of the specified MIME
    content type) into the given collection resource.
    """
    representer = as_representer(collection, content_type)
    # The stream is closed once the data has been read and converted.
    with stream:
        parsed = representer.data_from_stream(stream)
        representer.resource_from_data(parsed, resource=collection)
def on_chat(self, data):
    ''' Transfert a message to everybody '''
    # XXX: we cannot use on_message as it's the 'official' name already used
    # by sockjsroom to create the on_* handlers (like on_chat), so we use
    # on_chat instead of on_message.
    if self.roomId == '-1':
        # Not in a room: nothing to broadcast.
        return
    self.publishToRoom(self.roomId, 'chat', {
        'username': self.username,
        'time': datetime.now(),
        'message': str(data['message'])
    })
def disassemble_hex(bytecode, pc=0, fork=DEFAULT_FORK):
    """ Disassemble an EVM bytecode

    :param bytecode: canonical representation of an evm bytecode (hexadecimal)
    :type bytecode: str
    :param pc: program counter of the first instruction (optional)
    :type pc: int
    :param fork: fork name (optional)
    :type fork: str
    :return: the text representation of the assembler code
    :rtype: str

    Example use::

        >>> disassemble_hex("0x6060604052600261010")
        ...
        PUSH1 0x60
        BLOCKHASH
        MSTORE
        PUSH1 0x2
        PUSH2 0x100
    """
    # Accept both "0x"-prefixed and bare hexadecimal strings.
    raw = bytecode[2:] if bytecode.startswith('0x') else bytecode
    return disassemble(unhexlify(raw), pc=pc, fork=fork)
def _update_plotting_params(self, **kwargs):
"""Some plotting parameters can be changed through the tool; this
updataes those plotting parameters.
"""
scalars = kwargs.get('scalars', None)
if scalars is not None:
old = self.display_params['scalars']
self.display_params['scalars'] = scalars
if old != scalars:
self.plotter.subplot(*self.loc)
self.plotter.remove_actor(self._data_to_update, reset_camera=False)
self._need_to_update = True
self.valid_range = self.input_dataset.get_data_range(scalars)
# self.display_params['rng'] = self.valid_range
cmap = kwargs.get('cmap', None)
if cmap is not None:
self.display_params['cmap'] = cmap | 0.003663 |
def ilumin(method, target, et, fixref, abcorr, obsrvr, spoint):
    """
    Find the illumination angles (phase, solar incidence, and
    emission) at a specified surface point of a target body.
    This routine supersedes illum.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ilumin_c.html
    :param method: Computation method.
    :type method: str
    :param target: Name of target body.
    :type target: str
    :param et: Epoch in ephemeris seconds past J2000.
    :type et: float
    :param fixref: Body-fixed, body-centered target body frame.
    :type fixref: str
    :param abcorr: Desired aberration correction.
    :type abcorr: str
    :param obsrvr: Name of observing body.
    :type obsrvr: str
    :param spoint: Body-fixed coordinates of a target surface point.
    :type spoint: 3-Element Array of floats
    :return: Target surface point epoch, Vector from observer to target
            surface point, Phase angle, Solar incidence angle, and Emission
            angle at the surface point.
    :rtype: tuple
    """
    # Convert the Python inputs to the ctypes representations CSPICE expects.
    method = stypes.stringToCharP(method)
    target = stypes.stringToCharP(target)
    et = ctypes.c_double(et)
    fixref = stypes.stringToCharP(fixref)
    abcorr = stypes.stringToCharP(abcorr)
    obsrvr = stypes.stringToCharP(obsrvr)
    spoint = stypes.toDoubleVector(spoint)
    # Output parameters, populated in place by ilumin_c.
    trgepc = ctypes.c_double(0)
    srfvec = stypes.emptyDoubleVector(3)
    phase = ctypes.c_double(0)
    solar = ctypes.c_double(0)
    emissn = ctypes.c_double(0)
    libspice.ilumin_c(method, target, et, fixref, abcorr, obsrvr, spoint,
                      ctypes.byref(trgepc),
                      srfvec, ctypes.byref(phase), ctypes.byref(solar),
                      ctypes.byref(emissn))
    # Unwrap the C out-parameters back into plain Python values.
    return trgepc.value, stypes.cVectorToPython(
        srfvec), phase.value, solar.value, emissn.value | 0.000546 |
def get_texts_and_labels(sentence_chunk):
    """Given a sentence chunk, extract original texts and labels.

    Each non-empty line is tab-separated; the first field is the text
    and the last field is the label.
    """
    texts, labels = [], []
    for line in sentence_chunk.split('\n'):
        line = line.strip()
        if not line:
            continue
        fields = line.split('\t')
        texts.append(fields[0].strip())
        labels.append(fields[-1].strip())
    return texts, labels
def parse_log_entry(text):
    """This function does all real job on log line parsing.

    It sets up two restarts for recovering when a line
    with wrong format was found.

    Restarts:

    - use_value: just retuns an object it was passed. This can
      be any value.
    - reparse: calls `parse_log_entry` again with other text value.
      Beware, this call can lead to infinite recursion.
    """
    text = text.strip()
    if well_formed_log_entry_p(text):
        return LogEntry(text)
    else:
        def use_value(obj):
            return obj
        def reparse(text):
            return parse_log_entry(text)
        # Signal the malformed-entry condition; a handler further up the
        # stack chooses which of the two registered restarts to invoke.
        with restarts(use_value,
                      reparse) as call:
            return call(signal, MalformedLogEntryError(text)) | 0.002632 |
def rotate_du_by_yaw(self, du, heading):
    """Rotate all DOMs on DU by a given (yaw) heading."""
    on_du = self.pmts.du == du
    # Rotate each distinct DOM on this DU exactly once.
    for dom_id in np.unique(self.pmts.dom_id[on_du]):
        self.rotate_dom_by_yaw(dom_id, heading)
    self.reset_caches()
def apply(script, value=None, vars={}, url=None, opener=default_opener, library_paths=[]):
    """
    Transform value by script, returning all results as list.
    """
    # NOTE(review): `all` here is this module's own helper (the builtin
    # takes a single argument) -- it shadows the builtin for readers.
    # NOTE(review): the mutable defaults (vars={}, library_paths=[]) are
    # shared across calls; safe only if callees never mutate them -- verify.
    return all(script, value, vars, url, opener, library_paths) | 0.008621 |
def _build_pub_key_auth(self, context, nonce, auth_token, public_key):
        """
        [MS-CSSP] 3.1.5 Processing Events and Sequencing Rules - Step 3
        https://msdn.microsoft.com/en-us/library/cc226791.aspx
        This step sends the final SPNEGO token to the server if required and
        computes the value for the pubKeyAuth field for the protocol version
        negotiated.
        The format of the pubKeyAuth field depends on the version that the
        server supports.
        For version 2 to 4:
        The pubKeyAuth field is just wrapped using the authenticated context
        For versions 5 to 6:
        The pubKeyAuth is a sha256 hash of the server's public key plus a nonce
        and a magic string value. This hash is wrapped using the authenticated
        context and the nonce is added to the TSRequest alongside the nonce
        used in the hash calcs.
        :param context: The authenticated context
        :param nonce: If versions 5+, the nonce to use in the hash
        :param auth_token: If NTLM, this is the last msg (authenticate msg) to
            send in the same request
        :param public_key: The server's public key
        :return: The TSRequest as a byte string to send to the server
        """
        ts_request = TSRequest()
        if auth_token is not None:
            # Attach the final SPNEGO/NTLM token in the same request.
            nego_token = NegoToken()
            nego_token['negoToken'] = auth_token
            ts_request['negoTokens'].append(nego_token)
        if nonce is not None:
            # Versions 5-6: hash magic string + nonce + server public key;
            # the wrapped hash becomes pubKeyAuth and the nonce travels
            # alongside it in the TSRequest.
            ts_request['clientNonce'] = nonce
            hash_input = b"CredSSP Client-To-Server Binding Hash\x00" + \
                nonce + public_key
            pub_value = hashlib.sha256(hash_input).digest()
        else:
            # Versions 2-4: wrap the raw public key directly.
            pub_value = public_key
        enc_public_key = context.wrap(pub_value)
        ts_request['pubKeyAuth'] = enc_public_key
        return encoder.encode(ts_request) | 0.001041 |
def yn(self, prompt, default=None):
    """Prompts the user for yes/no confirmation, with optional default"""
    # Capitalise the option that will be taken on a bare Enter.
    if default is True:
        suffix = " [Y/n]: "
    elif default is False:
        suffix = " [y/N]: "
    else:
        suffix = " [y/n]: "
    return self.input(curry(filter_yn, default=default), prompt + suffix)
def collect(self):
        """
        Collect s3 bucket stats

        For every configured S3 instance and bucket, publish the bucket's
        total size converted into each configured byte unit.
        """
        if boto is None:
            self.log.error("Unable to import boto python module")
            # NOTE(review): this early exit returns {} while the normal path
            # implicitly returns None -- confirm the framework ignores it.
            return {}
        for s3instance in self.config['s3']:
            self.log.info("S3: byte_unit: %s" % self.config['byte_unit'])
            aws_access = self.config['s3'][s3instance]['aws_access_key']
            aws_secret = self.config['s3'][s3instance]['aws_secret_key']
            for bucket_name in self.config['s3'][s3instance]['buckets']:
                bucket = self.getBucket(aws_access, aws_secret, bucket_name)
                # collect bucket size
                total_size = self.getBucketSize(bucket)
                # Publish the size once per configured unit.
                for byte_unit in self.config['byte_unit']:
                    new_size = diamond.convertor.binary.convert(
                        value=total_size,
                        oldUnit='byte',
                        newUnit=byte_unit
                    )
                    self.publish("%s.size.%s" % (bucket_name, byte_unit),
                                 new_size) | 0.001845 |
def _referer(self, extension):
    """
    Return the referer for the given extension.

    :param extension: A valid domain extension.
    :type extension: str

    :return: The whois server to use to get the WHOIS record,
        or None if no server could be determined.
    :rtype: str
    """
    # Ask IANA's whois server about a dummy host under the extension.
    iana_record = self.lookup.whois(
        PyFunceble.CONFIGURATION["iana_whois_server"], "hello.%s" % extension
    )
    if iana_record and "refer" in iana_record:
        # Extract the referer from the non-empty record.
        referer = Regex(
            iana_record, r"(?s)refer\:\s+([a-zA-Z0-9._-]+)\n", return_data=True, group=1
        ).match()
        if referer:
            return referer
    # Fall back to the manually maintained server map.
    if extension in self.manual_server:
        return self.manual_server[extension]
    # No server could be determined for this extension.
    return None
def _parse_singlefile(self, desired_type: Type[T], file_path: str, encoding: str, logger: Logger,
                          options: Dict[str, Dict[str, Any]]) -> T:
        """
        Relies on the inner parsing function to parse the file.
        If _streaming_mode is True, the file will be opened and closed by this method. Otherwise the parsing function
        will be responsible to open and close.
        :param desired_type: the type of object the parsing should produce.
        :param file_path: path of the file to parse.
        :param encoding: text encoding used to open the file (streaming mode only).
        :param logger: logger forwarded to the parsing function.
        :param options: per-parser option dicts; only this parser's id is used.
        :return: the parsed object of type desired_type.
        """
        opts = get_options_for_id(options, self.get_id_for_options())
        if self._streaming_mode:
            # We open the stream, and let the function parse from it
            file_stream = None
            try:
                # Open the file with the appropriate encoding
                file_stream = open(file_path, 'r', encoding=encoding)
                # Apply the parsing function
                if self.function_args is None:
                    return self._parser_func(desired_type, file_stream, logger, **opts)
                else:
                    # Configured kwargs first, then per-call options.
                    return self._parser_func(desired_type, file_stream, logger, **self.function_args, **opts)
            except TypeError as e:
                raise CaughtTypeError.create(self._parser_func, e)
            finally:
                if file_stream is not None:
                    # Close the File in any case
                    file_stream.close()
        else:
            # the parsing function will open the file itself
            if self.function_args is None:
                return self._parser_func(desired_type, file_path, encoding, logger, **opts)
            else:
                return self._parser_func(desired_type, file_path, encoding, logger, **self.function_args, **opts) | 0.004978 |
def column(self):
    """Get the column this article belongs to.

    :return: the column containing this article, or None when the
        article is not part of a column
    :rtype: Column
    """
    from .column import Column
    if 'column' not in self.soup:
        return None
    info = self.soup['column']
    return Column(Column_Url + '/' + info['slug'], info['name'],
                  session=self._session)
def cmd(send, msg, _):
    """Finds a random quote from tjbash.org given search criteria.

    Syntax: {command} [searchstring]
    """
    if not msg:
        # No criteria: pick a random quote.
        url = 'http://tjbash.org/random1.html'
        params = {}
    else:
        targs = msg.split()
        if len(targs) == 1 and targs[0].isnumeric():
            # A single number looks up that exact quote id.
            url = 'http://tjbash.org/%s' % targs[0]
            params = {}
        else:
            # Otherwise search by tags.
            url = 'http://tjbash.org/search.html'
            params = {'query': 'tag:%s' % '+'.join(targs)}
    req = get(url, params=params)
    doc = fromstring(req.text)
    quotes = doc.find_class('quote-body')
    if not quotes:
        send("There were no results.")
        return
    quote = choice(quotes)
    lines = [x.strip() for x in map(operator.methodcaller('strip'), quote.itertext())]
    # Only send up to four lines.
    # NOTE(review): the slice sends lines[:4] but the "continued" marker
    # below fires for len(lines) > 3, so a quote of exactly four lines is
    # fully sent yet still labelled "continued" -- confirm intent.
    for line in lines[:4]:
        send(line)
    tags = quote.getparent().find_class('quote-tags')
    postid = quote.getparent().getparent().get('id').replace('quote-', '')
    if tags:
        tags = [x.text for x in tags[0].findall('.//a')]
        send(" -- {} -- {}http://tjbash.org/{}".format(', '.join(tags), "continued: " if (len(lines) > 3) else "", postid))
    else:
        send(" -- http://tjbash.org/{}".format(postid)) | 0.002357 |
def popenCatch(command, stdinString=None):
    """Run *command* through the shell and return its standard output.

    :param command: shell command line to execute.
    :param stdinString: optional data fed to the command's standard input.
    :return: the captured standard output of the command.
    :raises RuntimeError: if the command exits with a non-zero status.
    """
    logger.debug("Running the command: %s" % command)
    # NOTE(review): the command runs with shell=True -- callers must never
    # pass untrusted input here.
    if stdinString is not None:
        # NOTE(review): under Python 3, communicate() on a binary pipe
        # expects bytes -- confirm callers pass bytes, not str.
        process = subprocess.Popen(command, shell=True,
                                   stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=-1)
        output, nothing = process.communicate(stdinString)
    else:
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=sys.stderr, bufsize=-1)
        output, nothing = process.communicate()
    sts = process.wait()
    if sts != 0:
        raise RuntimeError("Command: %s with stdin string '%s' exited with non-zero status %i" % (command, stdinString, sts))
    return output
def set_writer(self, writer):
    """
    Changes the writer function to handle writing to the text edit.

    A writer function must have the following prototype:

    .. code-block:: python

        def write(text_edit, text, color)

    :param writer: write function as described above; a falsy value
        clears the current writer.
    """
    # Drop the previous writer when it differs from the new one.
    if self._writer and self._writer != writer:
        self._writer = None
    if writer:
        self._writer = writer
def normalizeFeatureText(value):
    """
    Normalizes feature text.

    * **value** must be a :ref:`type-string`.
    * Returned value will be an unencoded ``unicode`` string.
    """
    # Python 2 code: basestring/unicode are the py2 builtins.
    if isinstance(value, basestring):
        return unicode(value)
    raise TypeError("Feature text must be a string, not %s."
                    % type(value).__name__)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.