| code (string, length 75–104k) | docstring (string, length 1–46.9k) |
|---|---|
def get_my_contributions(self, *args, **kwargs):
    """Return a get_content generator of subreddits.

    The Subreddits generated are those where the session's user is a
    contributor.

    The additional parameters are passed directly into
    :meth:`.get_content`. Note: the `url` parameter cannot be altered.
    """
    # The endpoint URL is fixed by configuration and cannot be overridden.
    url = self.config['my_con_subreddits']
    return self.get_content(url, *args, **kwargs)
def encode(self, word, version=2):
    """Return the Caverphone code for a word.

    Parameters
    ----------
    word : str
        The word to transform
    version : int
        The version of Caverphone to employ for encoding (defaults to 2)

    Returns
    -------
    str
        The Caverphone value

    Examples
    --------
    >>> pe = Caverphone()
    >>> pe.encode('Christopher')
    'KRSTFA1111'
    >>> pe.encode('Niall')
    'NA11111111'
    >>> pe.encode('Smith')
    'SMT1111111'
    >>> pe.encode('Schmidt')
    'SKMT111111'

    >>> pe.encode('Christopher', 1)
    'KRSTF1'
    >>> pe.encode('Niall', 1)
    'N11111'
    >>> pe.encode('Smith', 1)
    'SMT111'
    >>> pe.encode('Schmidt', 1)
    'SKMT11'
    """
    # normalize: lowercase and drop everything but letters
    word = word.lower()
    word = ''.join(c for c in word if c in self._lc_set)

    def _collapse_runs(text, char):
        """Collapse runs of char in text down to one uppercase instance."""
        doubled = char * 2
        while doubled in text:
            text = text.replace(doubled, char)
        return text.replace(char, char.upper())

    # the main replacement algorithm
    if version != 1 and word[-1:] == 'e':
        word = word[:-1]
    if word:
        # special-cased word beginnings
        for prefix, replacement in (
            ('cough', 'cou2f'),
            ('rough', 'rou2f'),
            ('tough', 'tou2f'),
            ('enough', 'enou2f'),
        ):
            if word.startswith(prefix):
                word = replacement + word[len(prefix):]
        if version != 1 and word.startswith('trough'):
            word = 'trou2f' + word[6:]
        if word.startswith('gn'):
            word = '2n' + word[2:]
        if word.endswith('mb'):
            word = word[:-1] + '2'
        # general consonant substitutions; order matters (e.g. 'ci'->'si'
        # must run before the bare 'c'->'k' rule)
        for src, tar in (
            ('cq', '2q'),
            ('ci', 'si'),
            ('ce', 'se'),
            ('cy', 'sy'),
            ('tch', '2ch'),
            ('c', 'k'),
            ('q', 'k'),
            ('x', 'k'),
            ('v', 'f'),
            ('dg', '2g'),
            ('tio', 'sio'),
            ('tia', 'sia'),
            ('d', 't'),
            ('ph', 'fh'),
            ('b', 'p'),
            ('sh', 's2'),
            ('z', 's'),
        ):
            word = word.replace(src, tar)
        # vowels: leading vowel -> 'A', the rest -> placeholder '3'
        if word[0] in self._lc_v_set:
            word = 'A' + word[1:]
        for vowel in 'aeiou':
            word = word.replace(vowel, '3')
        # version 2 handles y early (version 1 handles it at the end)
        if version != 1:
            word = word.replace('j', 'y')
            if word[:2] == 'y3':
                word = 'Y3' + word[2:]
            if word[:1] == 'y':
                word = 'A' + word[1:]
            word = word.replace('y', '3')
        for src, tar in (('3gh3', '3kh3'), ('gh', '22'), ('g', 'k')):
            word = word.replace(src, tar)
        for char in 'stpkfmn':
            word = _collapse_runs(word, char)
        # w / h / r / l handling, with version-specific trailing rules
        word = word.replace('w3', 'W3')
        if version == 1:
            word = word.replace('wy', 'Wy')
        word = word.replace('wh3', 'Wh3')
        if version == 1:
            word = word.replace('why', 'Why')
        if version != 1 and word[-1:] == 'w':
            word = word[:-1] + '3'
        word = word.replace('w', '2')
        if word[:1] == 'h':
            word = 'A' + word[1:]
        word = word.replace('h', '2')
        word = word.replace('r3', 'R3')
        if version == 1:
            word = word.replace('ry', 'Ry')
        if version != 1 and word[-1:] == 'r':
            word = word[:-1] + '3'
        word = word.replace('r', '2')
        word = word.replace('l3', 'L3')
        if version == 1:
            word = word.replace('ly', 'Ly')
        if version != 1 and word[-1:] == 'l':
            word = word[:-1] + '3'
        word = word.replace('l', '2')
        # version 1's late y handling (no 'y' remains here for version 2)
        if version == 1:
            word = word.replace('j', 'y')
        word = word.replace('y3', 'Y3')
        word = word.replace('y', '2')
        # strip the placeholders
        word = word.replace('2', '')
        if version != 1 and word[-1:] == '3':
            word = word[:-1] + 'A'
        word = word.replace('3', '')

    # pad with 1s, then extract the necessary length of code
    word += '1' * 10
    return word[:10] if version != 1 else word[:6]
def register_plugin(self):
    """Register plugin in Spyder's main window.

    Wires the file-explorer tree widget's signals into the main window's
    editor, IPython console and working-directory plugins. NOTE(review):
    connection order follows the original; assumed not to matter — confirm.
    """
    ipyconsole = self.main.ipyconsole
    treewidget = self.fileexplorer.treewidget
    self.main.add_dockwidget(self)
    self.fileexplorer.sig_open_file.connect(self.main.open_file)
    self.register_widget_shortcuts(treewidget)
    # keep the editor in sync with file operations performed in the explorer
    treewidget.sig_edit.connect(self.main.editor.load)
    treewidget.sig_removed.connect(self.main.editor.removed)
    treewidget.sig_removed_tree.connect(self.main.editor.removed_tree)
    treewidget.sig_renamed.connect(self.main.editor.renamed)
    treewidget.sig_renamed_tree.connect(self.main.editor.renamed_tree)
    treewidget.sig_create_module.connect(self.main.editor.new)
    treewidget.sig_new_file.connect(lambda t: self.main.editor.new(text=t))
    # open an interpreter / redirect stdio through the main window
    treewidget.sig_open_interpreter.connect(
        ipyconsole.create_client_from_path)
    treewidget.redirect_stdio.connect(
        self.main.redirect_internalshell_stdio)
    # run a script from the explorer in the IPython console
    treewidget.sig_run.connect(
        lambda fname:
        ipyconsole.run_script(fname, osp.dirname(fname), '', False, False,
                              False, True))
    # opening a directory changes the working directory (without
    # refreshing the explorer itself, which triggered the change)
    treewidget.sig_open_dir.connect(
        lambda dirname:
        self.main.workingdirectory.chdir(dirname,
                                         refresh_explorer=False,
                                         refresh_console=True))
    self.main.editor.open_dir.connect(self.chdir)
    # Signal "set_explorer_cwd(QString)" will refresh only the
    # contents of path passed by the signal in explorer:
    self.main.workingdirectory.set_explorer_cwd.connect(
        lambda directory: self.refresh_plugin(new_path=directory,
                                              force_current=True))
def bootstrap_falsealarmprob(lspinfo,
                             times,
                             mags,
                             errs,
                             nbootstrap=250,
                             magsarefluxes=False,
                             sigclip=10.0,
                             npeaks=None):
    '''Calculates the false alarm probabilities of periodogram peaks using
    bootstrap resampling of the magnitude time series.

    The false alarm probability here is defined as::

        (1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)

    for each best periodogram peak j. The index i is for each bootstrap
    trial. This effectively gives us a significance for the peak. Smaller FAP
    means a better chance that the peak is real.

    The basic idea is to get the number of trial best peaks that are larger
    than the current best peak and divide this by the total number of
    trials. The distribution of these trial best peaks is obtained after
    scrambling the mag values and rerunning the specified periodogram method
    for a bunch of trials.

    `lspinfo` is the output dict from a periodbase periodogram function and
    MUST contain a 'method' key that corresponds to one of the keys in the
    LSPMETHODS dict above. This will let this function know which periodogram
    function to run to generate the bootstrap samples. The lspinfo SHOULD
    also have a 'kwargs' key that corresponds to the input keyword arguments
    for the periodogram function as it was run originally, to keep everything
    the same during the bootstrap runs. If this is missing, default values
    will be used.

    FIXME: this may not be strictly correct; must look more into bootstrap
    significance testing. Also look into if we're doing resampling correctly
    for time series because the samples are not iid. Look into moving block
    bootstrap.

    Parameters
    ----------

    lspinfo : dict
        A dict of period-finder results from one of the period-finders in
        periodbase, or your own functions, provided it's of the form and
        contains at least the keys listed below::

            {'periods': np.array of all periods searched by the period-finder,
             'lspvals': np.array of periodogram power value for each period,
             'bestperiod': a float value that is the period with the highest
                           peak in the periodogram, i.e. the most-likely
                           actual period,
             'method': a three-letter code naming the period-finder used;
                       must be one of the keys in the
                       `astrobase.periodbase.METHODLABELS` dict,
             'nbestperiods': a list of the periods corresponding to
                             periodogram peaks (`nbestlspvals` below) to
                             annotate on the periodogram plot so they can be
                             called out visually,
             'nbestlspvals': a list of the power values associated with
                             periodogram peaks to annotate on the periodogram
                             plot so they can be called out visually; should
                             be the same length as `nbestperiods` above,
             'kwargs': dict of kwargs passed to your own period-finder
                       function}

        If you provide your own function's period-finder results, you should
        add a corresponding key for it to the LSPMETHODS dict above so the
        bootstrap function can use it correctly. Your period-finder function
        should take `times`, `mags`, errs and any extra parameters as kwargs
        and return a dict of the form described above. A small worked
        example::

            from your_module import your_periodfinder_func
            from astrobase import periodbase

            periodbase.LSPMETHODS['your-finder'] = your_periodfinder_func

            # run a period-finder session
            your_pfresults = your_periodfinder_func(times, mags, errs,
                                                    **extra_kwargs)

            # run bootstrap to find FAP
            falsealarm_info = periodbase.bootstrap_falsealarmprob(
                your_pfresults,
                times, mags, errs,
                nbootstrap=250,
                magsarefluxes=False,
            )

    times,mags,errs : np.arrays
        The magnitude/flux time-series to process along with their associated
        measurement errors.

    nbootstrap : int
        The total number of bootstrap trials to run. This is set to 250 by
        default, but should probably be around 1000 for realistic results.

    magsarefluxes : bool
        If True, indicates the input time-series is fluxes and not mags.

    sigclip : float or int or sequence of two floats/ints or None
        If a single float or int, a symmetric sigma-clip will be performed
        using the number provided as the sigma-multiplier to cut out from the
        input time-series.

        If a list of two ints/floats is provided, the function will perform
        an 'asymmetric' sigma-clip. The first element in this list is the
        sigma value to use for fainter flux/mag values; the second element in
        this list is the sigma value to use for brighter flux/mag values. For
        example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
        dimmings and greater than 3-sigma brightenings. Here the meaning of
        "dimming" and "brightening" is set by *physics* (not the magnitude
        system), which is why the `magsarefluxes` kwarg must be correctly
        set.

        If `sigclip` is None, no sigma-clipping will be performed, and the
        time-series (with non-finite elems removed) will be passed through to
        the output.

    npeaks : int or None
        The number of peaks from the list of 'nbestlspvals' in the
        period-finder result dict to run the bootstrap for. If None, all of
        the peaks in this list will have their FAP calculated.

    Returns
    -------

    dict
        Returns a dict of the form::

            {'peaks':allpeaks,
             'periods':allperiods,
             'probabilities':allfaps,
             'alltrialbestpeaks':alltrialbestpeaks}

    '''

    # figure out how many periods to work on
    if (npeaks and (0 < npeaks < len(lspinfo['nbestperiods']))):
        nperiods = npeaks
    else:
        LOGWARNING('npeaks not specified or invalid, '
                   'getting FAP for all %s periodogram peaks' %
                   len(lspinfo['nbestperiods']))
        nperiods = len(lspinfo['nbestperiods'])

    nbestperiods = lspinfo['nbestperiods'][:nperiods]
    nbestpeaks = lspinfo['nbestlspvals'][:nperiods]

    # get rid of nans first and sigclip
    stimes, smags, serrs = sigclip_magseries(times,
                                             mags,
                                             errs,
                                             magsarefluxes=magsarefluxes,
                                             sigclip=sigclip)

    # build the periodogram kwargs once (they are identical for every trial).
    # BUG FIX: copy lspinfo['kwargs'] before updating it, so we don't mutate
    # the caller's period-finder result dict as a side effect.
    if 'kwargs' in lspinfo:
        kwargs = dict(lspinfo['kwargs'])
    else:
        kwargs = {}
    kwargs.update({'magsarefluxes': magsarefluxes,
                   'sigclip': sigclip,
                   'verbose': False})

    allpeaks = []
    allperiods = []
    allfaps = []
    alltrialbestpeaks = []

    # make sure there are enough points to calculate a spectrum
    if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:

        for ind, period, peak in zip(range(len(nbestperiods)),
                                     nbestperiods,
                                     nbestpeaks):

            LOGINFO('peak %s: running %s trials...' % (ind+1, nbootstrap))

            trialbestpeaks = []

            for _trial in range(nbootstrap):

                # get a scrambled index
                tindex = np.random.randint(0,
                                           high=mags.size,
                                           size=mags.size)

                # run the periodogram with scrambled mags and errs
                # and the appropriate keyword arguments
                lspres = LSPMETHODS[lspinfo['method']](
                    times, mags[tindex], errs[tindex],
                    **kwargs
                )
                trialbestpeaks.append(lspres['bestlspval'])

            trialbestpeaks = np.array(trialbestpeaks)
            alltrialbestpeaks.append(trialbestpeaks)

            # calculate the FAP for a trial peak j = FAP[j] =
            # (1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)
            if lspinfo['method'] != 'pdm':
                falsealarmprob = (
                    (1.0 + trialbestpeaks[trialbestpeaks > peak].size) /
                    (trialbestpeaks.size + 1.0)
                )
            # for PDM, we're looking for a peak smaller than the best peak
            # because values closer to 0.0 are more significant
            else:
                falsealarmprob = (
                    (1.0 + trialbestpeaks[trialbestpeaks < peak].size) /
                    (trialbestpeaks.size + 1.0)
                )

            LOGINFO('FAP for peak %s, period: %.6f = %.3g' % (ind+1,
                                                              period,
                                                              falsealarmprob))

            allpeaks.append(peak)
            allperiods.append(period)
            allfaps.append(falsealarmprob)

        return {'peaks':allpeaks,
                'periods':allperiods,
                'probabilities':allfaps,
                'alltrialbestpeaks':alltrialbestpeaks}

    else:
        LOGERROR('not enough mag series points to calculate periodogram')
        return None
def _harvest_lost_resources(self):
"""Return lost resources to pool."""
with self._lock:
for i in self._unavailable_range():
rtracker = self._reference_queue[i]
if rtracker is not None and rtracker.available():
self.put_resource(rtracker.resource) | Return lost resources to pool. |
def add_dataset_to_collection(dataset_id, collection_id, **kwargs):
    """
    Add a single dataset to a dataset collection.

    Raises:
        HydraError: if the collection already contains the dataset.
    """
    collection_i = _get_collection(collection_id)
    collection_item = _get_collection_item(collection_id, dataset_id)
    if collection_item is not None:
        # BUG FIX: the message must be interpolated here — exception
        # constructors do not apply logging-style '%s' arguments, so the
        # original raise produced an unformatted message.
        raise HydraError("Dataset Collection %s already contains dataset %s"
                         % (collection_id, dataset_id))
    new_item = DatasetCollectionItem()
    new_item.dataset_id = dataset_id
    new_item.collection_id = collection_id
    collection_i.items.append(new_item)
    db.DBSession.flush()
    return 'OK'
async def create_table(**data):
    """
    RPC method for creating table with custom name and fields.

    :return: a status message string describing the outcome
    :raises ServerException: re-raised for server errors other than
        "table exists" (57) or invalid definition (50)
    """
    table = data.get('table')
    try:
        clickhouse_queries.create_table(table, data)
        return 'Table was successfully created'
    except ServerException as e:
        # the numeric error code sits at a fixed offset in str(e)
        # (format presumably 'Code: NNN ...' — TODO confirm)
        exception_code = int(str(e)[5:8].strip())
        if exception_code == 57:
            return 'Table already exists'
        if exception_code == 50:
            return 'Invalid params'
        # BUG FIX: previously any other server error was silently swallowed
        # (implicit None return); surface it to the caller instead
        raise
def serializer(metadata_prefix):
    """Return etree_dumper instances.

    :param metadata_prefix: One of the metadata identifiers configured in
        ``OAISERVER_METADATA_FORMATS``.
    """
    formats = current_app.config['OAISERVER_METADATA_FORMATS']
    spec = formats[metadata_prefix]['serializer']
    # a tuple spec is (import path, kwargs) — bind the kwargs via partial
    if isinstance(spec, tuple):
        dumper = import_string(spec[0])
        return partial(dumper, **spec[1])
    return import_string(spec)
def add(self, media_type, media_file, title=None, introduction=None):
    """
    Upload a permanent media asset of another type.

    Details:
    http://mp.weixin.qq.com/wiki/14/7e6c03263063f4813141c3e17dd4350a.html

    :param media_type: Media file type: image, voice, video or thumb.
    :param media_file: The file to upload, a file-like object.
    :param title: Title of the video asset; required for video uploads only.
    :param introduction: Description of the video asset; required for video
        uploads only.
    :return: Decoded JSON response data.
    """
    params = {
        'access_token': self.access_token,
        'type': media_type,
    }
    if media_type == 'video':
        # video uploads must carry a description payload
        assert title, 'Video title must be set'
        assert introduction, 'Video introduction must be set'
        params['description'] = json.dumps({
            'title': title,
            'introduction': introduction,
        })
    return self._post(
        'material/add_material',
        params=params,
        files={'media': media_file},
    )
def generate_s3_url(files):
    """Takes files from React side, creates
    SolveBio Object containing signed S3 URL.

    :param files: JSON-encoded list of dicts with ``filename``,
        ``mimetype`` and ``size`` keys
    :return: JSON string describing the created objects, or ``None``
        when ``files`` is falsy (preserves the original implicit return)
    """
    if not files:
        return None
    vault = g.client.Vault.get_personal_vault()
    objects = []
    # BUG FIX: iterate the parsed list directly instead of indexing via the
    # Python-2-only ``xrange`` (NameError on Python 3)
    for entry in json.loads(files):
        obj = g.client.Object.create(
            vault_id=vault.id,
            object_type='file',
            filename=entry.get('filename'),
            mimetype=entry.get('mimetype'),
            size=entry.get('size'),
        )
        objects.append({
            'id': obj.id,
            'filename': obj.filename,
            'upload_url': obj.upload_url,
        })
    return json.dumps(objects)
def generate_url(self, name: str, **kwargs) -> str:
    """Generate the url registered under *name* via the urldispatch
    url generator."""
    mapper = self.urlmapper
    return mapper.generate(name, **kwargs)
def process(self, metric):
    """
    Run every configured rule against a single metric.

    @type metric: diamond.metric.Metric
    @param metric: metric to process
    @rtype None
    """
    for handler_rule in self.rules:
        # each rule receives the metric plus this handler as context
        handler_rule.process(metric, self)
def background_color(self, node, depth):
    """Create a (unique-ish) background color for each node"""
    if self.color_mapping is None:
        self.color_mapping = {}
    cached = self.color_mapping.get(node.key)
    if cached is not None:
        return cached
    # NOTE(review): the ``depth`` argument is shadowed here by the number of
    # distinct nodes colored so far — looks deliberate (keeps colors
    # unique-ish per node), but confirm against callers.
    depth = len(self.color_mapping)
    color = wx.Colour(
        (depth * 10) % 255,
        200 - ((depth * 5) % 200),
        (depth * 25) % 200,
    )
    self.color_mapping[node.key] = color
    return color
def name(self):
    """Global name."""
    raw = lib.EnvGetDefglobalName(self._env, self._glb)
    return ffi.string(raw).decode()
def form_invalid(self, form, prefix=None):
    """ If form invalid return error list in JSON response """
    response = super(FormAjaxMixin, self).form_invalid(form)
    # non-AJAX requests fall back to the normal (HTML) invalid response
    if not self.request.is_ajax():
        return response
    payload = {"errors_list": self.add_prefix(form.errors, prefix)}
    return self.json_to_response(status=400, json_data=payload,
                                 json_status=AjaxResponseStatus.ERROR)
def scaleBy(self, value, origin=None):
    """
    Scale the object.

    >>> obj.scaleBy(2.0)
    >>> obj.scaleBy((0.5, 2.0), origin=(500, 500))

    **value** must be an iterable containing two
    :ref:`type-int-float` values defining the x and y
    values to scale the object by. **origin** defines the
    point at which the scale should originate. It must be
    a :ref:`type-coordinate` or ``None``. The default is
    ``(0, 0)``.
    """
    # DOC FIX: the examples previously called ``obj.transformBy(...)``
    # although this method is ``scaleBy``; also fixed "at with" typo.
    value = normalizers.normalizeTransformationScale(value)
    if origin is None:
        origin = (0, 0)
    origin = normalizers.normalizeCoordinateTuple(origin)
    self._scaleBy(value, origin=origin)
def make_request_fn():
    """Returns a request function."""
    # prefer the Cloud ML Engine backend when a model name is configured;
    # otherwise fall back to a direct gRPC connection
    if FLAGS.cloud_mlengine_model_name:
        return serving_utils.make_cloud_mlengine_request_fn(
            credentials=GoogleCredentials.get_application_default(),
            model_name=FLAGS.cloud_mlengine_model_name,
            version=FLAGS.cloud_mlengine_model_version)
    return serving_utils.make_grpc_request_fn(
        servable_name=FLAGS.servable_name,
        server=FLAGS.server,
        timeout_secs=FLAGS.timeout_secs)
def rpc_get_subdomains_owned_by_address(self, address, **con_info):
    """
    Get the list of subdomains owned by an address.
    Return {'status': True, 'subdomains': ...} on success
    Return {'error': ...} on error
    """
    # reject malformed addresses before touching the subdomain db
    if not check_address(address):
        return {'error': 'Invalid address', 'http_status': 400}
    subdomains = get_subdomains_owned_by_address(address)
    return self.success_response({'subdomains': subdomains})
def get_oauth_data(self, code, client_id, client_secret, state):
    ''' Get Oauth data from HelloSign

    Args:
        code (str): Code returned by HelloSign for our callback url
        client_id (str): Client id of the associated app
        client_secret (str): Secret token of the associated app
        state (str): Opaque state value passed through the OAuth flow
            (DOC FIX: this parameter was previously undocumented)

    Returns:
        A HSAccessTokenAuth object
    '''
    request = self._get_request()
    response = request.post(self.OAUTH_TOKEN_URL, {
        "state": state,
        "code": code,
        "grant_type": "authorization_code",
        "client_id": client_id,
        "client_secret": client_secret
    })
    return HSAccessTokenAuth.from_response(response)
def next_call(self, for_method=None):
    """Start expecting or providing multiple calls.

    .. note:: next_call() cannot be used in combination with :func:`fudge.Fake.times_called`

    Up until calling this method, calls are infinite.

    For example, before next_call() ... ::

        >>> from fudge import Fake
        >>> f = Fake().provides('status').returns('Awake!')
        >>> f.status()
        'Awake!'
        >>> f.status()
        'Awake!'

    After next_call() ... ::

        >>> from fudge import Fake
        >>> f = Fake().provides('status').returns('Awake!')
        >>> f = f.next_call().returns('Asleep')
        >>> f = f.next_call().returns('Dreaming')
        >>> f.status()
        'Awake!'
        >>> f.status()
        'Asleep'
        >>> f.status()
        'Dreaming'
        >>> f.status()
        Traceback (most recent call last):
        ...
        AssertionError: This attribute of fake:unnamed can only be called 3 time(s). Call reset() if necessary or fudge.clear_calls().

    If you need to affect the next call of something other than the last declared call,
    use ``next_call(for_method="other_call")``. Here is an example using getters and setters
    on a session object ::

        >>> from fudge import Fake
        >>> sess = Fake('session').provides('get_count').returns(1)
        >>> sess = sess.provides('set_count').with_args(5)

    Now go back and adjust return values for get_count() ::

        >>> sess = sess.next_call(for_method='get_count').returns(5)

    This allows these calls to be made ::

        >>> sess.get_count()
        1
        >>> sess.set_count(5)
        >>> sess.get_count()
        5

    When using :func:`fudge.Fake.remember_order` in combination with :func:`fudge.Fake.expects` and :func:`fudge.Fake.next_call` each new call will be part of the expected order.
    """
    # figure out which declared call the new call should be appended to;
    # by default it is the most recently declared one
    last_call_name = self._last_declared_call_name
    if for_method:
        if for_method not in self._declared_calls:
            raise FakeDeclarationError(
                "next_call(for_method=%r) is not possible; "
                "declare expects(%r) or provides(%r) first" % (
                    for_method, for_method, for_method))
        else:
            # set this for the local function:
            last_call_name = for_method
            # reset this for subsequent methods:
            self._last_declared_call_name = last_call_name
    # resolve the existing call object: a named declared call, or the
    # fake's own callable when it was declared with is_callable()
    if last_call_name:
        exp = self._declared_calls[last_call_name]
    elif self._callable:
        exp = self._callable
    else:
        raise FakeDeclarationError('next_call() must follow provides(), '
                                   'expects() or is_callable()')
    # times_called() fixes the call count, which conflicts with stacking
    if getattr(exp, 'expected_times_called', None) is not None:
        raise FakeDeclarationError("Cannot use next_call() in combination with times_called()")
    if not isinstance(exp, CallStack):
        # lazily create a stack with the last defined
        # expected call as the first on the stack:
        stack = CallStack(self, initial_calls=[exp],
                          expected=isinstance(exp, ExpectedCall),
                          call_name=exp.call_name)
        # replace the old call obj using the same name:
        if last_call_name:
            self._declare_call(last_call_name, stack)
        elif self._callable:
            self._callable = stack
    else:
        stack = exp
    # hmm, we need a copy here so that the last call
    # falls off the stack.
    if stack.expected:
        next_call = ExpectedCall(self, call_name=exp.call_name, call_order=self._expected_call_order)
    else:
        next_call = Call(self, call_name=exp.call_name)
    stack.add_call(next_call)
    return self
def subgraph(self, nodelist):
    """Return a CouplingMap object for a subgraph of self.

    nodelist (list): list of integer node labels
    """
    sub_map = CouplingMap()
    sub_map.graph = self.graph.subgraph(nodelist)
    # register any requested node the subgraph doesn't already track
    for label in nodelist:
        if label in sub_map.physical_qubits:
            continue
        sub_map.add_physical_qubit(label)
    return sub_map
def _opt_soft(eigvectors, rot_matrix, n_clusters):
    """
    Optimizes the PCCA+ rotation matrix such that the memberships are
    exclusively nonnegative.

    Parameters
    ----------
    eigvectors : ndarray
        A matrix with the sorted eigenvectors in the columns. The stationary
        eigenvector should be first, then the one to the slowest relaxation
        process, etc.
    rot_matrix : ndarray (m x m)
        nonoptimized rotation matrix
    n_clusters : int
        Number of clusters to group to.

    Returns
    -------
    rot_matrix : ndarray (m x m)
        Optimized rotation matrix that rotates the dominant eigenvectors to
        yield the PCCA memberships, i.e.: chi = np.dot(evec, rot_matrix)

    References
    ----------
    [1] S. Roeblitz and M. Weber, Fuzzy spectral clustering by PCCA+:
        application to Markov state models and data classification.
        Adv Data Anal Classif 7, 147-179 (2013).
    """
    from scipy.optimize import fmin

    # only consider first n_clusters eigenvectors
    eigvectors = eigvectors[:, :n_clusters]

    # the first row/column of the rotation matrix is determined by the
    # constraints, so only the cropped lower-right block is optimized
    rot_crop_matrix = rot_matrix[1:][:, 1:]
    n_rows, n_cols = rot_crop_matrix.shape

    # flatten the free block into the optimizer's parameter vector
    rot_crop_vec = np.reshape(rot_crop_matrix, n_rows * n_cols)

    def _objective(crop_vec, evecs):
        """Susanna Roeblitz' target function for optimization (negated so
        that fmin's minimization maximizes the original criterion)."""
        full_rot = _fill_matrix(np.reshape(crop_vec, (n_rows, n_cols)), evecs)
        total = 0
        for i in range(0, n_clusters):
            for j in range(0, n_clusters):
                total += np.power(full_rot[j, i], 2) / full_rot[0, i]
        return -total

    optimized_vec = fmin(_objective, rot_crop_vec, args=(eigvectors,),
                         disp=False)
    rot_crop_matrix = np.reshape(optimized_vec, (n_rows, n_cols))
    return _fill_matrix(rot_crop_matrix, eigvectors)
def create_fresh_child_cgroup(self, *subsystems):
    """
    Create child cgroups of the current cgroup for at least the given subsystems.
    @return: A Cgroup instance representing the new child cgroup(s).
    """
    # Every requested subsystem must already be present in this cgroup.
    assert set(subsystems).issubset(self.per_subsystem.keys())
    createdCgroupsPerSubsystem = {}
    createdCgroupsPerParent = {}
    for subsystem in subsystems:
        parentCgroup = self.per_subsystem[subsystem]
        if parentCgroup in createdCgroupsPerParent:
            # reuse already created cgroup
            createdCgroupsPerSubsystem[subsystem] = createdCgroupsPerParent[parentCgroup]
            continue
        # Creating a directory inside a cgroup hierarchy makes the kernel
        # treat it as a new child cgroup; mkdtemp gives it a unique name.
        cgroup = tempfile.mkdtemp(prefix=CGROUP_NAME_PREFIX, dir=parentCgroup)
        createdCgroupsPerSubsystem[subsystem] = cgroup
        createdCgroupsPerParent[parentCgroup] = cgroup
        # add allowed cpus and memory to cgroup if necessary
        # (otherwise we can't add any tasks)
        def copy_parent_to_child(name):
            # Copy one cpuset configuration file from parent to the new child.
            shutil.copyfile(os.path.join(parentCgroup, name), os.path.join(cgroup, name))
        try:
            copy_parent_to_child('cpuset.cpus')
            copy_parent_to_child('cpuset.mems')
        except IOError:
            # expected to fail if cpuset subsystem is not enabled in this hierarchy
            pass
    return Cgroup(createdCgroupsPerSubsystem)
@return: A Cgroup instance representing the new child cgroup(s). |
def closure(self, relation, depth=float('inf')):
    """Finds all the ancestors of the synset using provided relation.

    Parameters
    ----------
    relation : str
        Name of the relation which is recursively used to fetch the ancestors.
    depth : int or float, optional
        Maximum distance (in relation hops) of returned ancestors.
        Defaults to infinity, i.e. no limit.

    Returns
    -------
    list of Synsets
        Returns the ancestors of the synset via given relations.
    """
    ancestors = set()
    # Best (smallest) depth at which each synset has already been expanded.
    # This guards against infinite loops when the relation graph contains a
    # cycle, and avoids re-expanding synsets reachable via multiple paths.
    expanded_at = {}
    pending = [(synset, 1) for synset in self.get_related_synsets(relation)]
    while pending:
        synset, level = pending.pop()
        if level > depth:
            continue
        if expanded_at.get(synset, float('inf')) <= level:
            # Already expanded at this depth or shallower; nothing new here.
            continue
        expanded_at[synset] = level
        ancestors.add(synset)
        pending.extend(
            (related, level + 1)
            for related in synset.get_related_synsets(relation))
    return list(ancestors)
Parameters
----------
relation : str
Name of the relation which is recursively used to fetch the ancestors.
Returns
-------
list of Synsets
Returns the ancestors of the synset via given relations. |
def PyParseIntCast(string, location, tokens):
    """Return an integer from a string.

    This is a pyparsing callback method that converts the matched
    string into an integer.

    The method modifies the content of the tokens list and converts
    them all to an integer value.

    Args:
      string (str): original string.
      location (int): location in the string where the match was made.
      tokens (list[str]): extracted tokens, where the string to be converted
          is stored.
    """
    # Cast the regular (positional) tokens.
    for index, token in enumerate(tokens):
        try:
            tokens[index] = int(token)
        except ValueError:
            logger.error('Unable to cast [{0:s}] to an int, setting to 0'.format(
                token))
            tokens[index] = 0

    # We also need to cast the dictionary built (named) tokens.
    for key in tokens.keys():
        try:
            tokens[key] = int(tokens[key], 10)
        except ValueError:
            # The value is still a string here (the cast just failed), so it
            # must be formatted with {1:s}; the previous {1:d} spec raised a
            # ValueError inside this error handler.
            logger.error(
                'Unable to cast [{0:s} = {1:s}] to an int, setting to 0'.format(
                    key, tokens[key]))
            tokens[key] = 0
This is a pyparsing callback method that converts the matched
string into an integer.
The method modifies the content of the tokens list and converts
them all to an integer value.
Args:
string (str): original string.
location (int): location in the string where the match was made.
tokens (list[str]): extracted tokens, where the string to be converted
is stored. |
def get_backward_star(self, node):
    """Given a node, get a copy of that node's backward star.

    :param node: node to retrieve the backward-star of.
    :returns: set -- set of hyperedge_ids for the hyperedges
            in the node's backward star.
    :raises: ValueError -- No such node exists.
    """
    # Membership is checked against the node-attribute table, which is the
    # authoritative record of which nodes exist in the hypergraph.
    if node in self._node_attributes:
        return self._backward_star[node].copy()
    raise ValueError("No such node exists.")
:param node: node to retrieve the backward-star of.
:returns: set -- set of hyperedge_ids for the hyperedges
in the node's backward star.
:raises: ValueError -- No such node exists. |
def t_B_SEQUENCE_COMPACT_START(self, t):
    r"""
    \-\ + (?= -\ )
    # ^ ^ sequence indicator
    | \-\ + (?= [\{\[]\ | [^:\n]*:\s )
    # ^ ^ ^^^ map indicator
    # ^ ^ flow indicator
    """
    # NOTE: the docstring above doubles as this token's regular expression
    # (PLY lexer convention) -- it is functional, not documentation.
    # A compact sequence start ('- - x', '- {', '- key: val') is only legal
    # where it increases the indentation level; anything else is an error.
    indent_status, curr_depth, next_depth = self.get_indent_status(t)
    if indent_status == 'INDENT':
        self.indent_stack.append(next_depth)
        return t
    # Build a readable diagnostic from the local state, then fail.
    msg = dedent("""
        expected 'INDENT', got {indent_status!r}
        current_depth: {curr_depth}
        next_depth: {next_depth}
        token: {t}
        """).format(**vars())
    raise YAMLUnknownSyntaxError(msg)
\-\ + (?= -\ )
# ^ ^ sequence indicator
| \-\ + (?= [\{\[]\ | [^:\n]*:\s )
# ^ ^ ^^^ map indicator
# ^ ^ flow indicator |
def GetCellValueNoFail(self, column, row=None):
    """Get a cell value, raising an error if the cell does not exist.

    Note that column and row START AT 1, same as excel.

    Args:
      column: column number, or a combined cell spec (parsed by
          ParseCellSpec) when row is None.
      row: row number, or None when column carries a combined cell spec.

    Returns:
      The cell value.

    Raises:
      ValueError: if the cell does not exist.
    """
    if row is None:  # PEP 8: compare to None with `is`, not `==`
        # A single argument is a combined spec; split it into row/column.
        (row, column) = ParseCellSpec(column)
    cell = GetCellValue(self, column, row)
    if cell is None:
        raise ValueError("cell %d:%d does not exist" % (column, row))
    return cell
note that column at row START AT 1 same as excel |
def ParseOptions(cls, options, output_module):
    """Parses and validates options.

    Args:
      options (argparse.Namespace): parser options.
      output_module (OutputModule): output module to configure.

    Raises:
      BadConfigObject: when the output module object does not have the
          SetServerInformation method.
    """
    set_server_information = getattr(output_module, 'SetServerInformation', None)
    if set_server_information is None:
        raise errors.BadConfigObject('Unable to set server information.')

    server = cls._ParseStringOption(
        options, 'server', default_value=cls._DEFAULT_SERVER)
    port = cls._ParseNumericOption(
        options, 'port', default_value=cls._DEFAULT_PORT)
    set_server_information(server, port)
Args:
options (argparse.Namespace): parser options.
output_module (OutputModule): output module to configure.
Raises:
BadConfigObject: when the output module object does not have the
SetServerInformation method. |
def stop(self):
    """Stop logging with this logger."""
    if not self.active:
        return
    # Remove the most recently added handler -- presumably the one this
    # logger installed when it started; TODO confirm against start().
    last_handler = self.handlers[-1]
    self.removeHandler(last_handler)
    self.active = False
def process_files(self):
    """
    Processes all the files associated with this Node. Files are downloaded if not present in the local storage.
    Creates and processes a NodeFile containing this Node's metadata.
    :return: A list of names of all the processed files.
    """
    processed = [node_file.process_file() for node_file in self.files]
    # Generate a thumbnail only when one is missing AND thumbnail generation
    # is enabled (has_thumbnail is checked first, preserving short-circuit).
    if not self.has_thumbnail() and config.THUMBNAILS:
        processed.append(self.derive_thumbnail())
    return processed
Creates and processes a NodeFile containing this Node's metadata.
:return: A list of names of all the processed files. |
def GetColorfulSearchPropertiesStr(self, keyColor='DarkGreen', valueColor='DarkCyan') -> str:
    """keyColor, valueColor: str, color name in class ConsoleColor"""
    parts = []
    for key, value in self.searchProperties.items():
        # Unknown search keys are highlighted in DarkYellow instead of keyColor.
        shown_key_color = keyColor if key in Control.ValidKeys else 'DarkYellow'
        # ControlType values are shown by name; everything else via repr.
        shown_value = ControlTypeNames[value] if key == 'ControlType' else repr(value)
        parts.append('<Color={}>{}</Color>: <Color={}>{}</Color>'.format(
            shown_key_color, key, valueColor, shown_value))
    return '{' + ', '.join(parts) + '}'
def cudnn_compatible_lstm(units, n_hidden, n_layers=1, trainable_initial_states=None, seq_lengths=None, initial_h=None,
                          initial_c=None, name='cudnn_lstm', reuse=False):
    """ CuDNN Compatible LSTM implementation.
    It should be used to load models saved with CudnnLSTMCell to run on CPU.

    Args:
        units: tf.Tensor with dimensions [B x T x F], where
            B - batch size
            T - number of tokens
            F - features
        n_hidden: dimensionality of hidden state
        n_layers: number of layers
        trainable_initial_states: whether to create a special trainable variable
            to initialize the hidden states of the network or use just zeros
        seq_lengths: tensor of sequence lengths with dimension [B]
        initial_h: optional initial hidden state, masks trainable_initial_states
            if provided
        initial_c: optional initial cell state, masks trainable_initial_states
            if provided
        name: name of the variable scope to use
        reuse: whether to reuse already initialized variable

    Returns:
        h - all hidden states along T dimension,
            tf.Tensor with dimensionality [B x T x F]
        h_last - last hidden state, tf.Tensor with dimensionality [B x H]
            where H - number of hidden units
        c_last - last cell state, tf.Tensor with dimensionality [B x H]
            where H - number of hidden units
    """
    with tf.variable_scope(name, reuse=reuse):
        if trainable_initial_states:
            init_h = tf.get_variable('init_h', [n_layers, 1, n_hidden])
            init_h = tf.tile(init_h, (1, tf.shape(units)[0], 1))
            init_c = tf.get_variable('init_c', [n_layers, 1, n_hidden])
            init_c = tf.tile(init_c, (1, tf.shape(units)[0], 1))
        else:
            init_h = init_c = tf.zeros([n_layers, tf.shape(units)[0], n_hidden])

        # BUGFIX: `initial_h or init_h` applied Python truthiness to a
        # tf.Tensor, which raises a TypeError (tensor truth value is
        # ambiguous); explicitly test for None instead.
        if initial_h is None:
            initial_h = init_h
        if initial_c is None:
            initial_c = init_c

        with tf.variable_scope('cudnn_lstm', reuse=reuse):
            def single_cell(): return tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(n_hidden)
            cell = tf.nn.rnn_cell.MultiRNNCell([single_cell() for _ in range(n_layers)])
            # dynamic_rnn is run time-major, so move the batch axis inward.
            units = tf.transpose(units, (1, 0, 2))
            # LSTMStateTuple is (c, h); zip yields (h_i, c_i) per layer.
            init = tuple([tf.nn.rnn_cell.LSTMStateTuple(ic, ih) for ih, ic in
                          zip(tf.unstack(initial_h, axis=0), tf.unstack(initial_c, axis=0))])
            h, state = tf.nn.dynamic_rnn(cell=cell, inputs=units, time_major=True, initial_state=init)
            h = tf.transpose(h, (1, 0, 2))
            h_last = state[-1].h
            c_last = state[-1].c

            # Extract last valid states when per-sequence lengths are provided.
            if seq_lengths is not None:
                indices = tf.stack([tf.range(tf.shape(h)[0]), seq_lengths-1], axis=1)
                h_last = tf.gather_nd(h, indices)
            return h, (h_last, c_last)
It should be used to load models saved with CudnnLSTMCell to run on CPU.
Args:
units: tf.Tensor with dimensions [B x T x F], where
B - batch size
T - number of tokens
F - features
n_hidden: dimensionality of hidden state
n_layers: number of layers
trainable_initial_states: whether to create a special trainable variable
to initialize the hidden states of the network or use just zeros
seq_lengths: tensor of sequence lengths with dimension [B]
initial_h: optional initial hidden state, masks trainable_initial_states
if provided
initial_c: optional initial cell state, masks trainable_initial_states
if provided
name: name of the variable scope to use
reuse:whether to reuse already initialized variable
Returns:
h - all hidden states along T dimension,
tf.Tensor with dimensionality [B x T x F]
h_last - last hidden state, tf.Tensor with dimensionality [B x H]
where H - number of hidden units
c_last - last cell state, tf.Tensor with dimensionality [B x H]
where H - number of hidden units |
def _reversedict(d):
"""
Internal helper for generating reverse mappings; given a
dictionary, returns a new dictionary with keys and values swapped.
"""
return dict(list(zip(list(d.values()), list(d.keys())))) | Internal helper for generating reverse mappings; given a
dictionary, returns a new dictionary with keys and values swapped. |
def date_range(data):
    """Returns the minimum activity start time and the maximum activity end time
    from the active entities response. These dates are modified in the following
    way. The hours (and minutes and so on) are removed from the start and end
    times and a *day* is added to the end time. These are the dates that should
    be used in the subsequent analytics request.
    """
    earliest = min(parse(entry['activity_start_time']) for entry in data)
    latest = max(parse(entry['activity_end_time']) for entry in data)
    # Truncate to day boundaries; pad the end by one day so it is inclusive.
    start = remove_hours(earliest)
    end = remove_hours(latest) + timedelta(days=1)
    return start, end
from the active entities response. These dates are modified in the following
way. The hours (and minutes and so on) are removed from the start and end
times and a *day* is added to the end time. These are the dates that should
be used in the subsequent analytics request. |
def deleteMultipleByPks(self, pks):
    '''
    deleteMultipleByPks - Delete multiple objects given their primary keys

    @param pks - List of primary keys
    @return - Number of objects deleted
    '''
    # Sets (and frozensets) are not subscriptable; normalise them to a list
    # so the single-element fast path below can index. isinstance replaces
    # the fragile `type(pks) == set` check, which missed frozenset and any
    # set subclass (the old code then crashed on pks[0]).
    if isinstance(pks, (set, frozenset)):
        pks = list(pks)

    if len(pks) == 1:
        # Fast path: avoid the multi-object fetch for a single key.
        return self.deleteByPk(pks[0])

    objs = self.mdl.objects.getMultipleOnlyIndexedFields(pks)
    return self.deleteMultiple(objs)
@param pks - List of primary keys
@return - Number of objects deleted |
def imp_print(self, text, end):
    """Print text, degrading gracefully on UnicodeEncodeError.

    Falls back to character-by-character output, substituting '?' for any
    character the console encoding cannot represent.
    """
    try:
        PRINT(text, end=end)
    except UnicodeEncodeError:
        # Retry one character at a time so only the offending ones are lost.
        for character in text:
            try:
                PRINT(character, end="")
            except UnicodeEncodeError:
                PRINT("?", end="")
        PRINT("", end=end)
def gauss_fltr_astropy(dem, size=None, sigma=None, origmask=False, fill_interior=False):
    """Astropy gaussian filter properly handles convolution with NaN

    http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
    width1 = 3; sigma1 = (width1-1) / 6;
    Specify width for smallest feature of interest and determine sigma appropriately
    sigma is width of 1 std in pixels (not multiplier)
    scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
    3*sigma on either side of kernel - 99.7%
    If sigma is specified, filter width will be a multiple of 8 times sigma
    Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
    If size is < the required width for 6-8 sigma, need to use different mode to create kernel
    mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
    The sigma/size calculations below should work for non-integer sigma

    Parameters
    ----------
    dem : array-like
        Input raster; converted to a masked array via malib.checkma.
    size : int, optional
        Kernel size in pixels (forced odd, minimum 3). Derived from sigma
        if omitted.
    sigma : float, optional
        Gaussian std dev in pixels. Derived from size if omitted; both
        omitted defaults to sigma=1.
    origmask : bool, optional
        Re-apply the input mask after filtering (filtering fills holes).
    fill_interior : bool, optional
        With origmask, keep filled interior holes and mask only the
        original outer edge.
    """
    #import astropy.nddata
    import astropy.convolution
    dem = malib.checkma(dem)
    #Generate 2D gaussian kernel for input sigma and size
    #Default size is 8*sigma in x and y directions
    #kernel = astropy.nddata.make_kernel([size, size], sigma, 'gaussian')
    #Size must be odd
    if size is not None:
        size = int(np.floor(size/2)*2 + 1)
        size = max(size, 3)
    #Truncate the filter at this many standard deviations. Default is 4.0
    truncate = 3.0
    if size is not None and sigma is None:
        sigma = (size - 1) / (2*truncate)
    elif size is None and sigma is not None:
        #Round up to nearest odd int
        size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
    elif size is None and sigma is None:
        #Use default parameters
        sigma = 1
        size = int(np.ceil((sigma * (2*truncate) + 1)/2)*2 - 1)
        size = max(size, 3)
    kernel = astropy.convolution.Gaussian2DKernel(sigma, x_size=size, y_size=size, mode='oversample')
    print("Applying gaussian smoothing filter with size %i and sigma %0.3f (sum %0.3f)" % \
            (size, sigma, kernel.array.sum()))
    #This will fill holes
    #np.nan is float
    #dem_filt_gauss = astropy.nddata.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
    #dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan)
    #Added normalization to ensure filtered values are not brightened/darkened if kernelsum != 1
    dem_filt_gauss = astropy.convolution.convolve(dem.astype(float).filled(np.nan), kernel, boundary='fill', fill_value=np.nan, normalize_kernel=True)
    #This will preserve original ndv pixels, applying original mask after filtering
    if origmask:
        print("Applying original mask")
        #Allow filling of interior holes, but use original outer edge
        if fill_interior:
            mask = malib.maskfill(dem)
        else:
            mask = dem.mask
        dem_filt_gauss = np.ma.array(dem_filt_gauss, mask=mask, fill_value=dem.fill_value)
    out = np.ma.fix_invalid(dem_filt_gauss, copy=False, fill_value=dem.fill_value)
    out.set_fill_value(dem.fill_value.astype(dem.dtype))
    return out.astype(dem.dtype)
http://stackoverflow.com/questions/23832852/by-which-measures-should-i-set-the-size-of-my-gaussian-filter-in-matlab
width1 = 3; sigma1 = (width1-1) / 6;
Specify width for smallest feature of interest and determine sigma appropriately
sigma is width of 1 std in pixels (not multiplier)
scipy and astropy both use cutoff of 4*sigma on either side of kernel - 99.994%
3*sigma on either side of kernel - 99.7%
If sigma is specified, filter width will be a multiple of 8 times sigma
Alternatively, specify filter size, then compute sigma: sigma = (size - 1) / 8.
If size is < the required width for 6-8 sigma, need to use different mode to create kernel
mode 'oversample' and 'center' are essentially identical for sigma 1, but very different for sigma 0.3
The sigma/size calculations below should work for non-integer sigma |
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    # Resolve the importer for the path, then dispatch to the finder
    # registered for that importer type.
    imp = get_importer(path_item)
    dist_finder = _find_adapter(_distribution_finders, imp)
    return dist_finder(imp, path_item, only)
def cancel_order(self, order_id: str) -> str:
    """Cancel an order by ID."""
    self.log.debug(f'Canceling order id={order_id} on {self.name}')
    if self.dry_run:
        # Dry-run mode: log as if cancelled without touching the exchange.
        self.log.warning(f'DRY RUN: Order cancelled on {self.name}: id={order_id}')
        return order_id
    try:
        self._cancel_order(order_id)
    except Exception as err:
        # Wrap whatever the backend raised in the library's own exception.
        raise self.exception(OrderNotFound, f'Failed to cancel order: id={order_id}', err) from err
    self.log.info(f'Order cancelled on {self.name}: id={order_id}')
    return order_id
def format_section(stream, section, options, doc=None):
    """format an options section using the INI format"""
    if doc:
        # Emit the section documentation as a comment block first.
        print(_comment(doc), file=stream)
    print("[{0}]".format(section), file=stream)
    _ini_format(stream, options)
def sanitize_config_loglevel(level):
    '''
    Kinda sorta backport of loglevel sanitization for Python 2.6.
    '''
    # Pass through untouched on every interpreter except 2.6, and for
    # numeric levels. NOTE: `long`/`basestring` exist only on Python 2 and
    # are never evaluated elsewhere thanks to this short-circuit.
    if sys.version_info[:2] != (2, 6) or isinstance(level, (int, long)):
        return level
    resolved = None
    if isinstance(level, basestring):
        resolved = logging._levelNames.get(level)
    if not resolved:
        raise ValueError('Invalid log level, %s' % level)
    return resolved
def recompute_if_necessary(self, ui):
    """Recompute the data on a thread, if necessary.

    If the data has recently been computed, this call will be rescheduled for the future.
    If the data is currently being computed, it does nothing."""
    self.__initialize_cache()
    if self.__cached_value_dirty:
        # Atomic test-and-set of the recomputing flag under the lock, so at
        # most one recompute thread is ever launched concurrently.
        with self.__is_recomputing_lock:
            is_recomputing = self.__is_recomputing
            self.__is_recomputing = True
        if is_recomputing:
            pass
        else:
            # the only way to get here is if we're not currently computing
            # this has the side effect of limiting the number of threads that
            # are sleeping.
            def recompute():
                try:
                    if self.__recompute_thread_cancel.wait(0.01):  # helps tests run faster
                        return
                    # Throttle: if the last computation was less than
                    # minimum_time ago, sleep until that window has passed
                    # (or until cancelled).
                    minimum_time = 0.5
                    current_time = time.time()
                    if current_time < self.__cached_value_time + minimum_time:
                        if self.__recompute_thread_cancel.wait(self.__cached_value_time + minimum_time - current_time):
                            return
                    self.recompute_data(ui)
                finally:
                    # Always clear the in-progress markers, even if
                    # recompute_data raised.
                    self.__is_recomputing = False
                    self.__recompute_thread = None
            with self.__is_recomputing_lock:
                self.__recompute_thread = threading.Thread(target=recompute)
                self.__recompute_thread.start()
If the data has recently been computed, this call will be rescheduled for the future.
If the data is currently being computed, it do nothing. |
def predict(self, y, t=None, return_cov=True, return_var=False):
    """
    Compute the conditional predictive distribution of the model

    You must call :func:`GP.compute` before this method.

    Args:
        y (array[n]): The observations at coordinates ``x`` from
            :func:`GP.compute`.
        t (Optional[array[ntest]]): The independent coordinates where the
            prediction should be made. If this is omitted the coordinates
            will be assumed to be ``x`` from :func:`GP.compute` and an
            efficient method will be used to compute the prediction.
        return_cov (Optional[bool]): If ``True``, the full covariance
            matrix is computed and returned. Otherwise, only the mean
            prediction is computed. (default: ``True``)
        return_var (Optional[bool]): If ``True``, only return the diagonal
            of the predictive covariance; this will be faster to compute
            than the full covariance matrix. This overrides ``return_cov``
            so, if both are set to ``True``, only the diagonal is computed.
            (default: ``False``)

    Returns:
        ``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of
        ``return_cov`` and ``return_var``. These output values are:
        (a) **mu** ``(ntest,)``: mean of the predictive distribution,
        (b) **cov** ``(ntest, ntest)``: the predictive covariance matrix,
        and
        (c) **var** ``(ntest,)``: the diagonal elements of ``cov``.

    Raises:
        ValueError: For mismatched dimensions.
    """
    y = self._process_input(y)
    if len(y.shape) > 1:
        raise ValueError("dimension mismatch")

    # Prediction coordinates default to the training coordinates.
    if t is None:
        xs = self._t
    else:
        xs = np.ascontiguousarray(t, dtype=float)
        if len(xs.shape) > 1:
            raise ValueError("dimension mismatch")

    # Make sure that the model is computed
    self._recompute()

    # Compute the predictive mean.
    resid = y - self.mean.get_value(self._t)
    if t is None:
        # Training coordinates: fast path using the factorized solver.
        alpha = self.solver.solve(resid).flatten()
        alpha = resid - (self._yerr**2 + self.kernel.jitter) * alpha
    elif not len(self._A):
        alpha = self.solver.predict(resid, xs)
    else:
        Kxs = self.get_matrix(xs, self._t)
        # NOTE(review): `alpha` is referenced here before being assigned in
        # this branch -- only the two branches above define it, so this path
        # (t provided and self._A non-empty) raises NameError. Upstream
        # likely intended something like np.dot(Kxs, apply_inverse(resid));
        # confirm against the library's reference implementation.
        alpha = np.dot(Kxs, alpha)
    mu = self.mean.get_value(xs) + alpha
    if not (return_var or return_cov):
        return mu

    # Predictive variance.
    Kxs = self.get_matrix(xs, self._t)
    KxsT = np.ascontiguousarray(Kxs.T, dtype=np.float64)
    if return_var:
        var = -np.sum(KxsT*self.apply_inverse(KxsT), axis=0)
        var += self.kernel.get_value(0.0)
        return mu, var

    # Predictive covariance
    cov = self.kernel.get_value(xs[:, None] - xs[None, :])
    cov -= np.dot(Kxs, self.apply_inverse(KxsT))
    return mu, cov
You must call :func:`GP.compute` before this method.
Args:
y (array[n]): The observations at coordinates ``x`` from
:func:`GP.compute`.
t (Optional[array[ntest]]): The independent coordinates where the
prediction should be made. If this is omitted the coordinates
will be assumed to be ``x`` from :func:`GP.compute` and an
efficient method will be used to compute the prediction.
return_cov (Optional[bool]): If ``True``, the full covariance
matrix is computed and returned. Otherwise, only the mean
prediction is computed. (default: ``True``)
return_var (Optional[bool]): If ``True``, only return the diagonal
of the predictive covariance; this will be faster to compute
than the full covariance matrix. This overrides ``return_cov``
so, if both are set to ``True``, only the diagonal is computed.
(default: ``False``)
Returns:
``mu``, ``(mu, cov)``, or ``(mu, var)`` depending on the values of
``return_cov`` and ``return_var``. These output values are:
(a) **mu** ``(ntest,)``: mean of the predictive distribution,
(b) **cov** ``(ntest, ntest)``: the predictive covariance matrix,
and
(c) **var** ``(ntest,)``: the diagonal elements of ``cov``.
Raises:
ValueError: For mismatched dimensions. |
def from_tuple(self, t):
    """
    Set this person from tuple

    :param t: Tuple representing a person (sitting[, id])
    :type t: (bool) | (bool, None | str | unicode | int)
    :rtype: Person
    """
    # NOTE(review): the docstring advertises (sitting[, id]) but two-element
    # tuples are unpacked as (id, sitting) here -- confirm against callers.
    if len(t) > 1:
        self.id, self.sitting = t[0], t[1]
    else:
        self.sitting, self.id = t[0], None
    return self
:param t: Tuple representing a person (sitting[, id])
:type t: (bool) | (bool, None | str | unicode | int)
:rtype: Person |
def parse_path(path):
    """Parse a rfc 6901 path."""
    if not path:
        raise ValueError("Invalid path")
    # Already-tokenized paths pass straight through.
    if isinstance(path, (tuple, list)):
        return path
    if isinstance(path, str):
        # A string path must start with '/' and contain at least one token.
        if path == "/" or not path.startswith("/"):
            raise ValueError("Invalid path")
        return path.split(_PATH_SEP)[1:]
    raise ValueError("A path must be a string, tuple or list")
def repair(self, verbose=False, joincomp=False,
           remove_smallest_components=True):
    """Performs mesh repair using MeshFix's default repair process.

    Parameters
    ----------
    verbose : bool, optional
        Enables or disables debug printing. Disabled by default.
    joincomp : bool, optional
        Attempts to join nearby open components.
    remove_smallest_components : bool, optional
        Remove all but the largest isolated component from the
        mesh before beginning the repair process. Default True

    Notes
    -----
    Vertex and face arrays are updated inplace. Access them with:
    meshfix.v
    meshfix.f
    """
    # Check dimensionality FIRST: indexing shape[1] on a 1-D array raises
    # IndexError before the intended assertion message could be produced.
    assert self.f.ndim == 2, 'Face array must be 2D'
    assert self.f.shape[1] == 3, 'Face array must contain three columns'
    self.v, self.f = _meshfix.clean_from_arrays(self.v, self.f,
                                                verbose, joincomp,
                                                remove_smallest_components)
process.
Parameters
----------
verbose : bool, optional
Enables or disables debug printing. Disabled by default.
joincomp : bool, optional
Attempts to join nearby open components.
remove_smallest_components : bool, optional
Remove all but the largest isolated component from the
mesh before beginning the repair process. Default True
Notes
-----
Vertex and face arrays are updated inplace. Access them with:
meshfix.v
meshfix.f |
def setup(cls, configuration=None, **kwargs):
# type: (Optional['Configuration'], Any) -> None
"""
Set up the HDX configuration
Args:
configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments.
**kwargs: See below
user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not.
user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied.
hdx_url (str): HDX url to use. Overrides hdx_site.
hdx_site (str): HDX site to use eg. prod, test.
hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False.
hdx_key (str): Your HDX key. Ignored if hdx_read_only = True.
hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR
hdx_config_json (str): Path to JSON HDX configuration OR
hdx_config_yaml (str): Path to YAML HDX configuration
project_config_dict (dict): Project configuration dictionary OR
project_config_json (str): Path to JSON Project configuration OR
project_config_yaml (str): Path to YAML Project configuration
hdx_base_config_dict (dict): HDX base configuration dictionary OR
hdx_base_config_json (str): Path to JSON HDX base configuration OR
hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml.
Returns:
None
"""
if configuration is None:
cls._configuration = Configuration(**kwargs)
else:
cls._configuration = configuration | Set up the HDX configuration
Args:
configuration (Optional[Configuration]): Configuration instance. Defaults to setting one up from passed arguments.
**kwargs: See below
user_agent (str): User agent string. HDXPythonLibrary/X.X.X- is prefixed. Must be supplied if remoteckan is not.
user_agent_config_yaml (str): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
user_agent_lookup (str): Lookup key for YAML. Ignored if user_agent supplied.
hdx_url (str): HDX url to use. Overrides hdx_site.
hdx_site (str): HDX site to use eg. prod, test.
hdx_read_only (bool): Whether to access HDX in read only mode. Defaults to False.
hdx_key (str): Your HDX key. Ignored if hdx_read_only = True.
hdx_config_dict (dict): HDX configuration dictionary to use instead of above 3 parameters OR
hdx_config_json (str): Path to JSON HDX configuration OR
hdx_config_yaml (str): Path to YAML HDX configuration
project_config_dict (dict): Project configuration dictionary OR
project_config_json (str): Path to JSON Project configuration OR
project_config_yaml (str): Path to YAML Project configuration
hdx_base_config_dict (dict): HDX base configuration dictionary OR
hdx_base_config_json (str): Path to JSON HDX base configuration OR
hdx_base_config_yaml (str): Path to YAML HDX base configuration. Defaults to library's internal hdx_base_configuration.yml.
Returns:
None |
def cloud_cover_to_ghi_linear(self, cloud_cover, ghi_clear, offset=35,
                              **kwargs):
    """
    Convert cloud cover to GHI using a linear relationship.

    0% cloud cover returns ghi_clear.
    100% cloud cover returns offset*ghi_clear.

    Parameters
    ----------
    cloud_cover: numeric
        Cloud cover in %.
    ghi_clear: numeric
        GHI under clear sky conditions.
    offset: numeric, default 35
        Determines the minimum GHI.
    kwargs
        Not used.

    Returns
    -------
    ghi: numeric
        Estimated GHI.

    References
    ----------
    Larson et. al. "Day-ahead forecasting of solar power output from
    photovoltaic plants in the American Southwest" Renewable Energy
    91, 11-20 (2016).
    """
    # Rescale both percentages to fractions before interpolating.
    offset = offset / 100.
    cloud_cover = cloud_cover / 100.
    # Linear interpolation between offset*ghi_clear (overcast) and
    # ghi_clear (clear sky).
    fraction = offset + (1 - offset) * (1 - cloud_cover)
    return fraction * ghi_clear
0% cloud cover returns ghi_clear.
100% cloud cover returns offset*ghi_clear.
Parameters
----------
cloud_cover: numeric
Cloud cover in %.
ghi_clear: numeric
GHI under clear sky conditions.
offset: numeric, default 35
Determines the minimum GHI.
kwargs
Not used.
Returns
-------
ghi: numeric
Estimated GHI.
References
----------
Larson et. al. "Day-ahead forecasting of solar power output from
photovoltaic plants in the American Southwest" Renewable Energy
91, 11-20 (2016). |
def insert_child(self, child_pid):
    """Add the given PID to the list of children PIDs.

    :param child_pid: child PID; a PersistentIdentifier, or a value that
        ``resolve_pid`` can resolve to one.
    :returns: the created PIDRelation.
    :raises PIDRelationConsistencyError: if the relation already exists.
    """
    # Enforce this relation type's maximum-children constraint up front.
    self._check_child_limits(child_pid)
    try:
        # TODO: Here add the check for the max parents and the max children
        # Nested transaction so a failed insert is rolled back cleanly.
        with db.session.begin_nested():
            if not isinstance(child_pid, PersistentIdentifier):
                child_pid = resolve_pid(child_pid)
            return PIDRelation.create(
                self._resolved_pid, child_pid, self.relation_type.id, None
            )
    except IntegrityError:
        # Unique constraint violation -> this exact relation already exists.
        raise PIDRelationConsistencyError("PID Relation already exists.")
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'interval') and self.interval is not None:
_dict['interval'] = self.interval
if hasattr(self, 'event_type') and self.event_type is not None:
_dict['event_type'] = self.event_type
if hasattr(self, 'results') and self.results is not None:
_dict['results'] = [x._to_dict() for x in self.results]
return _dict | Return a json dictionary representing this model. |
def create_instances(self, config_list):
    """Creates multiple virtual server instances.

    This takes a list of dictionaries using the same arguments as
    create_instance().

    .. warning::

        This will add charges to your account

    Example::

        new_vsi = {
            'domain': u'test01.labs.sftlyr.ws',
            'hostname': u'minion05',
            'datacenter': u'hkg02',
            'flavor': 'BL1_1X2X100',
            'os_code': u'UBUNTU_LATEST',
            'hourly': True,
        }
        instances = [new_vsi.copy(), new_vsi.copy()]
        instances[0]['hostname'] = "multi-test01"
        instances[1]['hostname'] = "multi-test02"
        vsi = mgr.create_instances(config_list=instances)

    :param list config_list: configuration dictionaries; an optional
        'tags' entry per dictionary is applied after creation.
    :returns: list of dictionaries describing the new virtual servers.
    """
    # Pull tags out first: createObjects() does not understand them.
    tags = [conf.pop('tags', None) for conf in config_list]
    create_dicts = [self._generate_create_dict(**conf) for conf in config_list]
    resp = self.guest.createObjects(create_dicts)
    # Apply each guest's tags once its id is known.
    for instance, tag in zip(resp, tags):
        if tag is not None:
            self.set_tags(tag, guest_id=instance['id'])
    return resp
This takes a list of dictionaries using the same arguments as
create_instance().
.. warning::
This will add charges to your account
Example::
# Define the instance we want to create.
new_vsi = {
'domain': u'test01.labs.sftlyr.ws',
'hostname': u'minion05',
'datacenter': u'hkg02',
'flavor': 'BL1_1X2X100'
'dedicated': False,
'private': False,
'os_code' : u'UBUNTU_LATEST',
'hourly': True,
'ssh_keys': [1234],
'disks': ('100','25'),
'local_disk': True,
'tags': 'test, pleaseCancel',
'public_security_groups': [12, 15]
}
# using .copy() so we can make changes to individual nodes
instances = [new_vsi.copy(), new_vsi.copy(), new_vsi.copy()]
# give each its own hostname, not required.
instances[0]['hostname'] = "multi-test01"
instances[1]['hostname'] = "multi-test02"
instances[2]['hostname'] = "multi-test03"
vsi = mgr.create_instances(config_list=instances)
#vsi will be a dictionary of all the new virtual servers
print vsi |
def min(self, axis=None, skipna=True, *args, **kwargs):
    """
    Return the minimum value of the Array or minimum along
    an axis.

    See Also
    --------
    numpy.ndarray.min
    Index.min : Return the minimum value in an Index.
    Series.min : Return the minimum value in a Series.
    """
    nv.validate_min(args, kwargs)
    nv.validate_minmax_axis(axis)
    result = nanops.nanmin(self.asi8, skipna=skipna, mask=self.isna())
    if not isna(result):
        return self._box_func(result)
    # Period._from_ordinal does not handle np.nan gracefully
    return NaT
an axis.
See Also
--------
numpy.ndarray.min
Index.min : Return the minimum value in an Index.
Series.min : Return the minimum value in a Series. |
def _findLocation(self, reference_name, start, end):
"""
return a location key form the locationMap
"""
try:
# TODO - sequence_annotations does not have build?
return self._locationMap['hg19'][reference_name][start][end]
except:
return None | return a location key form the locationMap |
def columnInfo(self):
    """
    Display metadata about the table: size, number of rows, columns and their data type.

    Runs PROC CONTENTS. Returns a pandas DataFrame when ``self.results``
    is 'PANDAS'; otherwise renders the SAS output (HTML or plain text),
    or returns the raw submit result in batch mode.
    """
    code = "proc contents data=" + self.libref + '.' + self.table + ' ' + self._dsopts() + ";ods select Variables;run;"
    if self.sas.nosub:
        # "nosub" mode: show the generated SAS code instead of running it.
        print(code)
        return
    if self.results.upper() == 'PANDAS':
        # Re-generate the code with an ODS OUTPUT so results land in a dataset.
        code = "proc contents data=%s.%s %s ;ods output Variables=work._variables ;run;" % (self.libref, self.table, self._dsopts())
        pd = self._returnPD(code, '_variables')
        # SAS pads the Type column with trailing blanks; strip them.
        pd['Type'] = pd['Type'].str.rstrip()
        return pd
    else:
        ll = self._is_valid()
        if self.HTML:
            if not ll:
                ll = self.sas._io.submit(code)
            if not self.sas.batch:
                self.sas.DISPLAY(self.sas.HTML(ll['LST']))
            else:
                return ll
        else:
            if not ll:
                ll = self.sas._io.submit(code, "text")
            if not self.sas.batch:
                print(ll['LST'])
            else:
                return ll
def GetSortedEvents(self, time_range=None):
    """Retrieves the events in increasing chronological order.

    Args:
      time_range (Optional[TimeRange]): time range used to filter events
          that fall in a specific period.

    Yields:
      EventObject: event.
    """
    filter_expression = None
    if time_range:
        conditions = []
        if time_range.start_timestamp:
            conditions.append(
                '_timestamp >= {0:d}'.format(time_range.start_timestamp))
        if time_range.end_timestamp:
            conditions.append(
                '_timestamp <= {0:d}'.format(time_range.end_timestamp))
        filter_expression = ' AND '.join(conditions)

    event_generator = self._GetAttributeContainers(
        self._CONTAINER_TYPE_EVENT, filter_expression=filter_expression,
        order_by='_timestamp')

    for event in event_generator:
        # Rewrite the storage row reference into an identifier object.
        if hasattr(event, 'event_data_row_identifier'):
            event.SetEventDataIdentifier(identifiers.SQLTableIdentifier(
                'event_data', event.event_data_row_identifier))
            del event.event_data_row_identifier
        yield event
Args:
time_range (Optional[TimeRange]): time range used to filter events
that fall in a specific period.
Yield:
EventObject: event. |
def empty_line_count_at_the_end(self):
    """
    Return number of empty lines at the end of the document.
    """
    count = 0
    index = len(self.lines) - 1
    # Walk backwards until the first non-blank line.
    while index >= 0:
        line = self.lines[index]
        if line and not line.isspace():
            break
        count += 1
        index -= 1
    return count
def _add_params_docstring(params):
""" Add params to doc string
"""
p_string = "\nAccepts the following paramters: \n"
for param in params:
p_string += "name: %s, required: %s, description: %s \n" % (param['name'], param['required'], param['description'])
return p_string | Add params to doc string |
def sc_zoom_coarse(self, viewer, event, msg=True):
    """Interactively zoom the image by scrolling motion.

    This zooms by adjusting the scale in x and y coarsely.
    """
    if not self.canzoom:
        return True
    accel = self.settings.get('scroll_zoom_acceleration', 1.0)
    # One scroll step changes the scale by 20%.
    step = self._scale_adjust(1.2, event.amount, accel, max_limit=4.0)
    self._scale_image(viewer, event.direction, step, msg=msg)
    return True
This zooms by adjusting the scale in x and y coarsely. |
def export_configuration_generator(self, sql, sql_args):
    """
    Build :class:`meteorpi_model.ExportConfiguration` instances from a query.

    :param sql:
        A SQL statement which must return rows describing export configurations
    :param sql_args:
        Any variables required to populate the query provided in 'sql'
    :return:
        A list of :class:`meteorpi_model.ExportConfiguration` instances.
        NOTE(review): despite the name this is not a generator; it fetches
        all rows eagerly and returns a list.
    """
    self.con.execute(sql, sql_args)
    results = self.con.fetchall()
    output = []
    for result in results:
        # Choose the search class by export type; anything that is not
        # "observation" or "file" is treated as observatory metadata.
        if result['exportType'] == "observation":
            search = mp.ObservationSearch.from_dict(json.loads(result['searchString']))
        elif result['exportType'] == "file":
            search = mp.FileRecordSearch.from_dict(json.loads(result['searchString']))
        else:
            search = mp.ObservatoryMetadataSearch.from_dict(json.loads(result['searchString']))
        conf = mp.ExportConfiguration(target_url=result['targetURL'], user_id=result['targetUser'],
                                      password=result['targetPassword'], search=search,
                                      name=result['exportName'], description=result['description'],
                                      enabled=result['active'], config_id=result['exportConfigId'])
        output.append(conf)
return output | Generator for :class:`meteorpi_model.ExportConfiguration`
:param sql:
A SQL statement which must return rows describing export configurations
:param sql_args:
Any variables required to populate the query provided in 'sql'
:return:
A generator which produces :class:`meteorpi_model.ExportConfiguration` instances from the supplied SQL,
closing any opened cursors on completion. |
def _mutect2_filter(broad_runner, in_file, out_file, ref_file):
"""Filter of MuTect2 calls, a separate step in GATK4.
"""
params = ["-T", "FilterMutectCalls", "--reference", ref_file, "--variant", in_file, "--output", out_file]
return broad_runner.cl_gatk(params, os.path.dirname(out_file)) | Filter of MuTect2 calls, a separate step in GATK4. |
def decode_list_offset_response(cls, response):
    """
    Decode OffsetResponse_v2 into ListOffsetResponsePayloads

    Arguments:
        response: OffsetResponse_v2

    Returns: list of ListOffsetResponsePayloads
    """
    payloads = []
    for topic, partitions in response.topics:
        for partition, error, timestamp, offset in partitions:
            payloads.append(kafka.structs.ListOffsetResponsePayload(
                topic, partition, error, timestamp, offset))
    return payloads
Arguments:
response: OffsetResponse_v2
Returns: list of ListOffsetResponsePayloads |
def gatk_rnaseq_calling(data):
    """Use GATK to perform gVCF variant calling on RNA-seq data.

    Splits the callable regions, runs HaplotypeCaller per region, then
    concatenates the per-region VCFs into one gVCF output file.
    """
    from bcbio.bam import callable
    # Work on a copy so the caller's sample dictionary is not mutated.
    data = utils.deepish_copy(data)
    tools_on = dd.get_tools_on(data)
    if not tools_on:
        tools_on = []
    tools_on.append("gvcf")
    data = dd.set_tools_on(data, tools_on)
    data = dd.set_jointcaller(data, ["%s-joint" % v for v in dd.get_variantcaller(data)])
    out_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data),
                                              "variation", "rnaseq", "gatk-haplotype"))
    data = _setup_variant_regions(data, out_dir)
    out_file = os.path.join(out_dir, "%s-gatk-haplotype.vcf.gz" % dd.get_sample_name(data))
    if not utils.file_exists(out_file):
        region_files = []
        regions = []
        # Call each split region independently, then merge below.
        for cur_region in callable.get_split_regions(dd.get_variant_regions(data), data):
            str_region = "_".join([str(x) for x in cur_region])
            region_file = os.path.join(utils.safe_makedir(os.path.join(dd.get_work_dir(data),
                                                                       "variation", "rnaseq", "gatk-haplotype",
                                                                       "regions")),
                                       "%s-%s-gatk-haplotype.vcf.gz" % (dd.get_sample_name(data), str_region))
            region_file = gatk.haplotype_caller([dd.get_split_bam(data)], [data], dd.get_ref_file(data), {},
                                                region=cur_region, out_file=region_file)
            region_files.append(region_file)
            regions.append(cur_region)
        out_file = vcfutils.concat_variant_files(region_files, out_file, regions,
                                                 dd.get_ref_file(data), data["config"])
return dd.set_vrn_file(data, out_file) | Use GATK to perform gVCF variant calling on RNA-seq data |
def doFindAny(self, WHAT={}, SORT=[], SKIP=None, MAX=None, LOP='AND', **params):
    """Perform the FileMaker ``-findany`` command."""
    self._preFind(WHAT, SORT, SKIP, MAX, LOP)
    # Forward any extra keyword arguments as database parameters.
    for key, value in params.items():
        self._addDBParam(key, value)
    return self._doAction('-findany')
def find_by_ids(ids, _connection=None, page_size=100, page_number=0,
                sort_by=enums.DEFAULT_SORT_BY, sort_order=enums.DEFAULT_SORT_ORDER):
    """
    List all videos identified by a list of Brightcove video ids
    """
    # Only real collections are accepted; a bare string would be split
    # character-by-character below.
    if not isinstance(ids, (list, tuple)):
        raise exceptions.PyBrightcoveError(
            "Video.find_by_ids expects an iterable argument")
    joined_ids = ','.join(str(i) for i in ids)
    return connection.ItemResultSet(
        'find_videos_by_ids', Video, _connection, page_size, page_number,
        sort_by, sort_order, video_ids=joined_ids)
def _try_switches(self, lines, index):
"""
For each switch in the Collector object, pass a list of string,
representing lines of text in a file, and an index to the current
line to try to flip the switch. A switch will only flip on if the line
passes its 'test_on' method, and will only flip off if the line
passes its 'test_off' method.
:param lines: List of strings, usually the lines in a text file
:param index: Number index pointing to the current line
"""
for s in self._switches:
s.switch(lines, index) | For each switch in the Collector object, pass a list of string,
representing lines of text in a file, and an index to the current
line to try to flip the switch. A switch will only flip on if the line
passes its 'test_on' method, and will only flip off if the line
passes its 'test_off' method.
:param lines: List of strings, usually the lines in a text file
:param index: Number index pointing to the current line |
def get_least_orbits(atom_index, cell, site_symmetry, symprec=1e-5):
    """Find least orbits for a centering atom"""
    orbits = _get_orbits(atom_index, cell, site_symmetry, symprec)
    mapping = np.arange(cell.get_number_of_atoms())
    # Collapse every orbit member onto the smallest representative seen so far.
    for i, orbit in enumerate(orbits):
        for member in np.unique(orbit):
            mapping[member] = min(mapping[member], mapping[i])
    return np.unique(mapping)
def _set_preferences(self, node):
    '''
    Set preferences.

    Appends a <preferences> element (package manager, image type, disk
    filesystem, bootloader, timezone/hwclock) under the given node.

    :param node: parent XML element to attach the preferences to.
    :return:
    '''
    pref = etree.SubElement(node, 'preferences')
    pacman = etree.SubElement(pref, 'packagemanager')
    pacman.text = self._get_package_manager()
    p_version = etree.SubElement(pref, 'version')
    p_version.text = '0.0.1'
    p_type = etree.SubElement(pref, 'type')
    p_type.set('image', 'vmx')
    # Use the filesystem of the first /dev-backed disk, defaulting to ext3.
    for disk_id, disk_data in self._data.system.get('disks', {}).items():
        if disk_id.startswith('/dev'):
            p_type.set('filesystem', disk_data.get('type') or 'ext3')
            break
    p_type.set('installiso', 'true')
    p_type.set('boot', "vmxboot/suse-leap42.1")
    p_type.set('format', self.format)
    p_type.set('bootloader', 'grub2')
    # Mirror the host's timezone and hardware-clock settings.
    p_type.set('timezone', __salt__['timezone.get_zone']())
    p_type.set('hwclock', __salt__['timezone.get_hwclock']())
return pref | Set preferences.
:return: |
def get_tms_layers(self,
                   catid,
                   bands='4,2,1',
                   gamma=1.3,
                   highcutoff=0.98,
                   lowcutoff=0.02,
                   brightness=1.0,
                   contrast=1.0):
    """Get list of urls and bounding boxes corresponding to idaho images for a given catalog id.

    Args:
        catid (str): Catalog id
        bands (str): Bands to display, separated by commas (0-7).
        gamma (float): gamma coefficient. This is for on-the-fly pansharpening.
        highcutoff (float): High cut off coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
        lowcutoff (float): Low cut off coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
        brightness (float): Brightness coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
        contrast (float): Contrast coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.

    Returns:
        urls (list): TMS urls.
        bboxes (list of tuples): Each tuple is (W, S, E, N) where (W,S,E,N) are the bounds of the corresponding idaho part.
    """
    description = self.describe_images(self.get_images_by_catid(catid))
    service_url = 'http://idaho.geobigdata.io/v1/tile/'

    urls, bboxes = [], []
    for _catid, images in description.items():
        for partnum, part in images['parts'].items():
            # Fix: reset per part. The original carried pan_id/ms_id over
            # from the previous part and raised NameError when the very
            # first part had neither a PAN nor a multispectral entry.
            pan_id = None
            ms_id = None
            ms_partname = None
            if 'PAN' in part:
                pan_id = part['PAN']['id']
            if 'WORLDVIEW_8_BAND' in part:
                ms_partname = 'WORLDVIEW_8_BAND'
            elif 'RGBN' in part:
                ms_partname = 'RGBN'
            if ms_partname is not None:
                ms_id = part[ms_partname]['id']

            if ms_id:
                if pan_id:
                    band_str = ms_id + '/{z}/{x}/{y}?bands=' + bands + '&panId=' + pan_id
                else:
                    band_str = ms_id + '/{z}/{x}/{y}?bands=' + bands
                bbox = from_wkt(part[ms_partname]['boundstr']).bounds
            elif pan_id:
                band_str = pan_id + '/{z}/{x}/{y}?bands=0'
                bbox = from_wkt(part['PAN']['boundstr']).bounds
            else:
                # Part has no usable imagery entries; skip it.
                continue

            bboxes.append(bbox)
            # The bucket is the same for all entries in the part.
            bucket = part[list(part.keys())[0]]['bucket']
            token = self.gbdx_connection.access_token
            # Fix: the original assembled the query string from a
            # triple-quoted literal, embedding newlines and indentation
            # in the URL, which made the URL malformed.
            query = ('&gamma={}&highCutoff={}&lowCutoff={}'
                     '&brightness={}&contrast={}&token={}').format(
                         gamma, highcutoff, lowcutoff,
                         brightness, contrast, token)
            urls.append(service_url + bucket + '/' + band_str + query)

    return urls, bboxes
Args:
catid (str): Catalog id
bands (str): Bands to display, separated by commas (0-7).
gamma (float): gamma coefficient. This is for on-the-fly pansharpening.
highcutoff (float): High cut off coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
lowcutoff (float): Low cut off coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
brightness (float): Brightness coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
contrast (float): Contrast coefficient (0.0 to 1.0). This is for on-the-fly pansharpening.
Returns:
urls (list): TMS urls.
bboxes (list of tuples): Each tuple is (W, S, E, N) where (W,S,E,N) are the bounds of the corresponding idaho part. |
def getAltitudeFromLatLon(self, lat, lon):
    """Get the altitude of a lat lon pair, using the four neighbouring
    pixels for interpolation.
    """
    # Work in coordinates relative to the tile origin.
    rel_lat = lat - self.lat
    rel_lon = lon - self.lon
    if rel_lat < 0.0 or rel_lat >= 1.0 or rel_lon < 0.0 or rel_lon >= 1.0:
        raise WrongTileError(self.lat, self.lon,
                             self.lat + rel_lat, self.lon + rel_lon)
    x = rel_lon * (self.size - 1)
    y = rel_lat * (self.size - 1)
    x_int, y_int = int(x), int(y)
    x_frac, y_frac = x - x_int, y - y_int
    # Bilinear interpolation: blend along x on both rows, then along y.
    top = self._avg(self.getPixelValue(x_int, y_int),
                    self.getPixelValue(x_int + 1, y_int), x_frac)
    bottom = self._avg(self.getPixelValue(x_int, y_int + 1),
                       self.getPixelValue(x_int + 1, y_int + 1), x_frac)
    return self._avg(top, bottom, y_frac)
pixels for interpolation. |
def _process_outgoing_msg(self, sink_iter):
    """For every message we construct a corresponding RPC message to be
    sent over the given socket inside given RPC session.

    This function should be launched in a new green thread as
    it loops forever.

    :param sink_iter: blocking iterator (a Sink) yielding outgoing messages.
    """
    LOG.debug('NetworkController processing outgoing request list.')
    # TODO(PH): We should try not to sent routes from bgp peer that is not
    # in established state.
    # Imported here (not at module level) — presumably to avoid a circular
    # import; TODO confirm.
    from ryu.services.protocols.bgp.model import (
        FlexinetOutgoingRoute)
    while self.is_connected:
        # sink iter is Sink instance and next is blocking so this isn't
        # active wait.
        for outgoing_msg in sink_iter:
            # Re-check connectivity per message: the session may drop
            # while we are blocked on the sink.
            if not self.is_connected:
                self._socket.close()
                return
            if isinstance(outgoing_msg, FlexinetOutgoingRoute):
                rpc_msg = _create_prefix_notification(outgoing_msg, self)
            else:
                raise NotImplementedError(
                    'Do not handle out going message of type %s' %
                    outgoing_msg.__class__)
            if rpc_msg:
                self._sendall(rpc_msg)
        # Yield to other green threads between sink drains.
        self.pause(0)
    # Stop incoming connection.
    if self.green_in:
self.green_in.kill() | For every message we construct a corresponding RPC message to be
sent over the given socket inside given RPC session.
This function should be launched in a new green thread as
it loops forever. |
def format_label(sl, fmt=None):
    """
    Combine a list of strings into a single string.

    Single strings pass through unchanged. When a (truthy) *fmt* template
    is given it is applied to the items; otherwise they are joined with
    spaces.

    :param sl: a string, or an iterable of items
    :param fmt: optional str.format template applied to the items
    :return: the combined string
    """
    if isinstance(sl, str):
        # Already is a string.
        return sl
    if fmt:
        return fmt.format(*sl)
    return ' '.join(map(str, sl))
Passes through single strings.
:param sl:
:return: |
def get_confidence_interval(self, node, interval=(0.05, 0.95)):
    '''
    If temporal reconstruction was done using the marginal ML mode, the entire
    distribution of times is available. This function determines the confidence
    interval, defined as the range where the given tail probabilities lie below
    and above. Note that this does not necessarily contain the highest
    probability position.

    In absence of marginal reconstruction, it will return uncertainty based on
    rate variation. If both are present, the wider interval will be returned.

    Parameters
    ----------
    node : PhyloTree.Clade
        The node for which the confidence interval is to be calculated
    interval : tuple, list
        Array of length two, or tuple, defining the bounds of the confidence interval

    Returns
    -------
    confidence_interval : numpy array
        Array with two numerical dates delineating the confidence interval
    '''
    rate_contribution = self.date_uncertainty_due_to_rate(node, interval)
    # Fix: mutation_contribution was previously unbound on the branch
    # without marginal reconstruction, raising NameError at the final call.
    mutation_contribution = None
    if hasattr(node, "marginal_inverse_cdf"):
        min_date, max_date = [self.date2dist.to_numdate(x) for x in
                              (node.marginal_pos_LH.xmax, node.marginal_pos_LH.xmin)]
        if node.marginal_inverse_cdf == "delta":
            # Delta distribution: the date is exact.
            return np.array([node.numdate, node.numdate])
        mutation_contribution = self.date2dist.to_numdate(
            node.marginal_inverse_cdf(np.array(interval))[::-1])
    else:
        min_date, max_date = -np.inf, np.inf
    # NOTE(review): assumes combine_confidence accepts c2=None when no
    # marginal reconstruction is available — TODO confirm.
    return self.combine_confidence(node.numdate, (min_date, max_date),
                                   c1=rate_contribution, c2=mutation_contribution)
times is available. This function determines the 90% (or other) confidence interval, defined as the
range where 5% of probability is below and above. Note that this does not necessarily contain
the highest probability position.
In absense of marginal reconstruction, it will return uncertainty based on rate
variation. If both are present, the wider interval will be returned.
Parameters
----------
node : PhyloTree.Clade
The node for which the confidence interval is to be calculated
interval : tuple, list
Array of length two, or tuple, defining the bounds of the confidence interval
Returns
-------
confidence_interval : numpy array
Array with two numerical dates delineating the confidence interval |
def store_tmp(self, tmp, content, reg_deps=None, tmp_deps=None, deps=None):
    """
    Stores a Claripy expression in a VEX temp value.
    If in symbolic mode, this involves adding a constraint for the tmp's symbolic variable.

    :param tmp:      the number of the tmp
    :param content:  a Claripy expression of the content
    :param reg_deps: the register dependencies of the content
    :param tmp_deps: the temporary value dependencies of the content
    :param deps:     other dependencies of the content
    """
    # Let inspection breakpoints observe — and possibly rewrite — the
    # tmp number and expression before the write happens.
    self.state._inspect('tmp_write', BP_BEFORE, tmp_write_num=tmp, tmp_write_expr=content)
    tmp = self.state._inspect_getattr('tmp_write_num', tmp)
    content = self.state._inspect_getattr('tmp_write_expr', content)
    if o.SYMBOLIC_TEMPS not in self.state.options:
        # Non-symbolic
        self.temps[tmp] = content
    else:
        # Symbolic
        self.state.add_constraints(self.temps[tmp] == content)
    # get the size, and record the write
    if o.TRACK_TMP_ACTIONS in self.state.options:
        data_ao = SimActionObject(content, reg_deps=reg_deps, tmp_deps=tmp_deps, deps=deps, state=self.state)
        r = SimActionData(self.state, SimActionData.TMP, SimActionData.WRITE, tmp=tmp, data=data_ao, size=content.length)
        self.state.history.add_action(r)
self.state._inspect('tmp_write', BP_AFTER) | Stores a Claripy expression in a VEX temp value.
If in symbolic mode, this involves adding a constraint for the tmp's symbolic variable.
:param tmp: the number of the tmp
:param content: a Claripy expression of the content
:param reg_deps: the register dependencies of the content
:param tmp_deps: the temporary value dependencies of the content |
def create_group(groupname, gid, system=True):
    """
    Creates a new user group with a specific id.

    :param groupname: Group name.
    :type groupname: unicode
    :param gid: Group id.
    :type gid: int or unicode
    :param system: Creates a system group.
    """
    # Build the addgroup command line, then run it with sudo privileges.
    command = addgroup(groupname, gid, system)
    sudo(command)
:param groupname: Group name.
:type groupname: unicode
:param gid: Group id.
:type gid: int or unicode
:param system: Creates a system group. |
def chimera_elimination_order(m, n=None, t=None):
    """Provides a variable elimination order for a Chimera graph.

    A graph defined by chimera_graph(m,n,t) has treewidth max(m,n)*t.
    This function outputs a variable elimination order inducing a tree
    decomposition of that width.

    Parameters
    ----------
    m : int
        Number of rows in the Chimera lattice.
    n : int (optional, default m)
        Number of columns in the Chimera lattice.
    t : int (optional, default 4)
        Size of the shore within each Chimera tile.

    Returns
    -------
    order : list
        An elimination order that induces the treewidth of chimera_graph(m,n,t).

    Examples
    --------
    >>> G = dnx.chimera_elimination_order(1, 1, 4) # a single Chimera tile
    """
    n = m if n is None else n
    t = 4 if t is None else t

    # Always eliminate along the shorter lattice dimension.
    index_flip = m > n
    if index_flip:
        m, n = n, m

    def linear_index(row, col, shore, unit):
        """Map a (row, col, shore, unit) Chimera coordinate to its linear index."""
        if index_flip:
            return m * 2 * t * col + 2 * t * row + t * (1 - shore) + unit
        return n * 2 * t * row + 2 * t * col + t * shore + unit

    order = [linear_index(row, col, 0, unit)
             for col in range(n)
             for unit in range(t)
             for row in range(m)]
    order.extend(linear_index(row, col, 1, unit)
                 for col in range(n)
                 for row in range(m)
                 for unit in range(t))
    return order
A graph defined by chimera_graph(m,n,t) has treewidth max(m,n)*t.
This function outputs a variable elimination order inducing a tree
decomposition of that width.
Parameters
----------
m : int
Number of rows in the Chimera lattice.
n : int (optional, default m)
Number of columns in the Chimera lattice.
t : int (optional, default 4)
Size of the shore within each Chimera tile.
Returns
-------
order : list
An elimination order that induces the treewidth of chimera_graph(m,n,t).
Examples
--------
>>> G = dnx.chimera_elimination_order(1, 1, 4) # a single Chimera tile |
def create_cli(create_app=None):
    """Create CLI for ``inveniomanage`` command.

    :param create_app: Flask application factory.
    :returns: Click command group.

    .. versionadded: 1.0.0
    """
    def create_cli_app(info):
        """Application factory for CLI app.

        Internal function for creating the CLI. When invoked via
        ``inveniomanage`` FLASK_APP must be set.
        """
        if create_app is None:
            # Fallback to normal Flask behavior
            info.create_app = None
            app = info.load_app()
        else:
            app = create_app(debug=get_debug_flag())
        return app

    # The group resolves the application lazily, calling create_cli_app
    # on each invocation rather than at definition time.
    @click.group(cls=FlaskGroup, create_app=create_cli_app)
    def cli(**params):
        """Command Line Interface for Invenio."""
        pass
return cli | Create CLI for ``inveniomanage`` command.
:param create_app: Flask application factory.
:returns: Click command group.
.. versionadded: 1.0.0 |
def _startProductionCrewNode(self, name, attrs):
"""Process the start of a node under xtvd/productionCrew"""
if name == 'crew':
self._programId = attrs.get('program')
elif name == 'member':
self._role = None
self._givenname = None
self._surname = None | Process the start of a node under xtvd/productionCrew |
def validate_jsonschema_from_file(self, json_string, path_to_schema):
    """
    Validate JSON according to schema, loaded from a file.

    *Args:*\n
        _json_string_ - JSON string;\n
        _path_to_schema_ - path to file with JSON schema;

    *Raises:*\n
        JsonValidatorError

    *Example:*\n
        | *Settings* | *Value* |
        | Library    | JsonValidator |
        | *Test Cases* | *Action* | *Argument* | *Argument* |
        | Simple | Validate jsonschema from file  |  {"foo":bar}  |  ${CURDIR}${/}schema.json |
    """
    # Fix: use a context manager so the schema file handle is closed
    # (the original open() was never closed).
    with open(path_to_schema) as schema_file:
        schema = schema_file.read()
    load_input_json = self.string_to_json(json_string)
    try:
        load_schema = json.loads(schema)
    except ValueError as e:
        raise JsonValidatorError('Error in schema: {}'.format(e))
    self._validate_json(load_input_json, load_schema)
*Args:*\n
_json_string_ - JSON string;\n
_path_to_schema_ - path to file with JSON schema;
*Raises:*\n
JsonValidatorError
*Example:*\n
| *Settings* | *Value* |
| Library | JsonValidator |
| *Test Cases* | *Action* | *Argument* | *Argument* |
| Simple | Validate jsonschema from file | {"foo":bar} | ${CURDIR}${/}schema.json | |
def update_target(self, name, current, total):
    """Updates progress bar for a specified target."""
    rendered = self._bar(name, current, total)
    self.refresh(rendered)
def calc_gs_kappa(b, ne, delta, sinth, nu):
    """Calculate the gyrosynchrotron absorption coefficient κ_ν.

    This is Dulk (1985) equation 36, a fitting function assuming a
    power-law electron population. Arguments:

    b
      Magnetic field strength in Gauss.
    ne
      Density of electrons per cubic centimeter with energies above 10 keV.
    delta
      Power-law index of the electron energy distribution,
      ``n(E) ~ E^(-delta)``; valid for ``2 <~ delta <~ 7``.
    sinth
      Sine of the angle between line of sight and magnetic field;
      valid for θ > 20° (``sinth > 0.34`` or so).
    nu
      Frequency in Hz; valid for ``10 <~ nu/nu_b <~ 100``.

    Returns the absorption coefficient in ``cm^-1``. No complaints are
    raised if you use the equation outside its range of validity.
    """
    s = nu / calc_nu_b(b)
    amplitude = 1.4e-9 * 10**(-0.22 * delta)
    angular = sinth**(-0.09 + 0.72 * delta)
    spectral = s**(-1.30 - 0.98 * delta)
    return (ne / b) * amplitude * angular * spectral
This is Dulk (1985) equation 36, which is a fitting function assuming a
power-law electron population. Arguments are:
b
Magnetic field strength in Gauss
ne
The density of electrons per cubic centimeter with energies greater than 10 keV.
delta
The power-law index defining the energy distribution of the electron population,
with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 7``.
sinth
The sine of the angle between the line of sight and the magnetic field direction.
The equation is valid for θ > 20° or ``sinth > 0.34`` or so.
nu
The frequency at which to calculate η, in Hz. The equation is valid for
``10 <~ nu/nu_b <~ 100``, which sets a limit on the ratio of ``nu`` and ``b``.
The return value is the absorption coefficient, in units of ``cm^-1``.
No complaints are raised if you attempt to use the equation outside of its
range of validity. |
def __search_ca_path(self):
    """
    Get CA Path to check the validity of the server host certificate on the client side
    """
    # The explicit environment override takes priority over the
    # conventional grid-security location.
    if "X509_CERT_DIR" in os.environ:
        self._ca_path = os.environ['X509_CERT_DIR']
        return
    grid_path = '/etc/grid-security/certificates'
    if os.path.exists(grid_path):
        self._ca_path = grid_path
        return
    raise ClientAuthException("Could not find a valid CA path")
def apply_thresholds(input, thresholds, choices):
    """
    Return one of the choices depending on the input position compared to thresholds, for each input.

    >>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20])
    array([10])
    >>> apply_thresholds(np.array([5]), [5, 7], [10, 15, 20])
    array([10])
    >>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20])
    array([15])
    >>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20])
    array([20])
    >>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20])
    array([0])
    """
    condlist = [input <= threshold for threshold in thresholds]
    if len(choices) == len(condlist) + 1:
        # The extra choice covers inputs above the highest threshold.
        condlist.append(True)
    assert len(condlist) == len(choices), \
        "apply_thresholds must be called with the same number of thresholds than choices, or one more choice"
    return np.select(condlist, choices)
>>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([5]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20])
array([15])
>>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20])
array([20])
>>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20])
array([0]) |
def RgbToYiq(r, g, b):
    '''Convert the color from RGB to YIQ.

    Parameters:
      :r:
        The Red component value [0...1]
      :g:
        The Green component value [0...1]
      :b:
        The Blue component value [0...1]

    Returns:
      The color as an (y, i, q) tuple.

    >>> '(%g, %g, %g)' % Color.RgbToYiq(1, 0.5, 0)
    '(0.592263, 0.458874, -0.0499818)'
    '''
    rgb = (r, g, b)
    # Rows of the RGB -> YIQ transform matrix (Y, I, Q).
    matrix = (
        (0.29895808, 0.58660979, 0.11443213),
        (0.59590296, -0.27405705, -0.32184591),
        (0.21133576, -0.52263517, 0.31129940),
    )
    return tuple(sum(channel * weight for channel, weight in zip(rgb, row))
                 for row in matrix)
Parameters:
:r:
The Red component value [0...1]
:g:
The Green component value [0...1]
:b:
The Blue component value [0...1]
Returns:
The color as an (y, i, q) tuple in the range:
y[0...1],
i[0...1],
q[0...1]
>>> '(%g, %g, %g)' % Color.RgbToYiq(1, 0.5, 0)
'(0.592263, 0.458874, -0.0499818)' |
def logprob(self, action_sample, pd_params):
    """Log-likelihood of *action_sample* under a diagonal Gaussian.

    ``pd_params[..., 0]`` holds the per-dimension means and
    ``pd_params[..., 1]`` the log standard deviations; the result is
    summed over the last action dimension.
    """
    mean = pd_params[:, :, 0]
    log_sigma = pd_params[:, :, 1]
    # Normalized residual of the sample under each Gaussian component.
    normalized = (action_sample - mean) / torch.exp(log_sigma)
    quadratic = (normalized ** 2 + self.LOG2PI).sum(dim=-1)
    return -(0.5 * quadratic + log_sigma.sum(dim=-1))
def refresh_content(self, order=None, name=None):
    """
    Re-download all submissions and reset the page index
    """
    order = order or self.content.order
    # Keep the current query only when staying on the current page; a
    # newly named page starts from a clean query.
    query = self.content.query if name is None else None
    name = name or self.content.name
    # Hack: prompt_subreddit() may embed an order in the name and pass
    # order='ignore' so the embedded order overrides the current default.
    if order == 'ignore':
        order = None
    loader = self.term.loader
    with loader('Refreshing page'):
        self.content = SubredditContent.from_name(
            self.reddit, name, loader, order=order, query=query)
    if not loader.exception:
        self.nav = Navigator(self.content.get)
def exec_context(self, **kwargs):
    """Build the base environment (a dict of local names) shared by all evals.

    Primarily used in the Caster pipe. The environment layers, in order of
    increasing precedence: date/time parsing helpers, caller-supplied
    ``kwargs``, the ``dateutil.parser``/``datetime``/``random`` namespaces,
    the ``ambry.valuetype`` module namespaces, bundle-defined functions and
    bound methods, and finally functions from the ``ambry.build`` module.
    """
    import inspect
    import dateutil.parser
    import datetime
    import random
    from functools import partial
    from ambry.valuetype.types import parse_date, parse_time, parse_datetime
    import ambry.valuetype.types
    import ambry.valuetype.exceptions
    import ambry.valuetype.test
    import ambry.valuetype

    def set_from(f, frm):
        # Tag each callable with its origin ('env', 'bundle' or 'module') so
        # errors during eval can report where a name came from.
        try:
            try:
                f.ambry_from = frm
            except AttributeError: # for instance methods
                f.im_func.ambry_from = frm
        except (TypeError, AttributeError): # Builtins, non python code
            pass
        return f
    test_env = dict(
        parse_date=parse_date,
        parse_time=parse_time,
        parse_datetime=parse_datetime,
        partial=partial,
        bundle=self
    )
    test_env.update(kwargs)
    test_env.update(dateutil.parser.__dict__)
    test_env.update(datetime.__dict__)
    test_env.update(random.__dict__)
    # NOTE(review): ambry.valuetype.core is read here but never explicitly
    # imported above — presumably it is loaded as a side effect of
    # 'import ambry.valuetype'; confirm, otherwise this raises AttributeError.
    test_env.update(ambry.valuetype.core.__dict__)
    test_env.update(ambry.valuetype.types.__dict__)
    test_env.update(ambry.valuetype.exceptions.__dict__)
    test_env.update(ambry.valuetype.test.__dict__)
    test_env.update(ambry.valuetype.__dict__)
    localvars = {}
    # Strings and tuples from the merged namespaces are plain data and are
    # intentionally excluded from the eval namespace.
    for f_name, func in test_env.items():
        if not isinstance(func, (str, tuple)):
            localvars[f_name] = set_from(func, 'env')
    # The 'b' parameter of randint is assumed to be a bundle, but
    # replacing it with a lambda prevents the param assignment
    localvars['randint'] = lambda a, b: random.randint(a, b)
    if self != Bundle:
        # Functions from the bundle
        base = set(inspect.getmembers(Bundle, predicate=inspect.isfunction))
        mine = set(inspect.getmembers(self.__class__, predicate=inspect.isfunction))
        localvars.update({f_name: set_from(func, 'bundle') for f_name, func in mine - base})
        # Bound methods. In python 2, these must be called referenced from the bundle, since
        # there is a difference between bound and unbound methods. In Python 3, there is no difference,
        # so the lambda functions may not be necessary.
        base = set(inspect.getmembers(Bundle, predicate=inspect.ismethod))
        mine = set(inspect.getmembers(self.__class__, predicate=inspect.ismethod))
        # Functions are descriptors, and the __get__ call binds the function to its object to make a bound method
        localvars.update({f_name: set_from(func.__get__(self), 'bundle') for f_name, func in (mine - base)})
    # Bundle module functions
    # NOTE(review): relies on 'ambry.build' already being present in
    # sys.modules and 'sys' being imported at module level — confirm.
    module_entries = inspect.getmembers(sys.modules['ambry.build'], predicate=inspect.isfunction)
    localvars.update({f_name: set_from(func, 'module') for f_name, func in module_entries})
    return localvars
Caster pipe |
def genslices_ndim(ndim, shape):
    """Generate all possible slice tuples for 'shape'."""
    # Cartesian product of the per-axis slice generators.
    per_axis = (genslices(shape[axis]) for axis in range(ndim))
    yield from product(*per_axis)
def _update_dates(self, **update_props):
    """
    Update operation for ArcGIS Dates metadata
    :see: gis_metadata.utils._complex_definitions[DATES]
    """
    tree_to_update = update_props['tree_to_update']
    xpath_root = self._data_map['_dates_root']

    if self.dates:
        # Wipe all existing date info from the common root before rewriting.
        remove_element(tree_to_update, xpath_root)

        # Multiple/range date types live under a nested temporal element.
        date_type = self.dates[DATE_TYPE]
        if date_type == DATE_TYPE_MULTIPLE:
            xpath_root += '/TempExtent/TM_Instant'
        elif date_type == DATE_TYPE_RANGE:
            xpath_root += '/TempExtent/TM_Period'

    return super(ArcGISParser, self)._update_dates(xpath_root, **update_props)
:see: gis_metadata.utils._complex_definitions[DATES] |
def start_update(self, draw=None, queues=None):
    """
    Conduct the formerly registered updates
    This method conducts the updates that have been registered via the
    :meth:`update` method. You can call this method if the
    :attr:`auto_update` attribute of this instance is True and the
    `auto_update` parameter in the :meth:`update` method has been set to
    False
    Parameters
    ----------
    %(InteractiveBase.start_update.parameters)s
    Returns
    -------
    %(InteractiveBase.start_update.returns)s
    See Also
    --------
    :attr:`no_auto_update`, update
    """
    sync_queue = queues[0] if queues is not None else None
    if sync_queue is not None:
        sync_queue.get()
    try:
        # Update each member array without drawing, then notify listeners.
        for data_arr in self:
            data_arr.psy.start_update(draw=False)
        self.onupdate.emit()
    except Exception:
        # Release the queues before propagating so waiters are not stuck.
        self._finish_all(queues)
        raise
    if sync_queue is not None:
        sync_queue.task_done()
    return InteractiveBase.start_update(self, draw=draw, queues=queues)
This method conducts the updates that have been registered via the
:meth:`update` method. You can call this method if the
:attr:`auto_update` attribute of this instance is True and the
`auto_update` parameter in the :meth:`update` method has been set to
False
Parameters
----------
%(InteractiveBase.start_update.parameters)s
Returns
-------
%(InteractiveBase.start_update.returns)s
See Also
--------
:attr:`no_auto_update`, update |
def calculate_subgraph_edge_overlap(
    graph: BELGraph,
    annotation: str = 'Subgraph'
) -> Tuple[
    Mapping[str, EdgeSet],
    Mapping[str, Mapping[str, EdgeSet]],
    Mapping[str, Mapping[str, EdgeSet]],
    Mapping[str, Mapping[str, float]],
]:
    """Compute pairwise edge overlap between the sub-graphs of a BEL graph.

    Sub-graphs are defined by grouping edges on the given annotation. For
    every ordered pair of sub-graphs the intersection and union of their
    edge sets are collected, along with the tanimoto similarity
    (|intersection| / |union|).

    :param graph: A BEL graph
    :param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
    :return: {subgraph: set of edges}, {(subgraph 1, subgraph2): set of intersecting edges},
             {(subgraph 1, subgraph2): set of unioned edges}, {(subgraph 1, subgraph2): tanimoto similarity},
    """
    # Bucket edges by their annotation value; unannotated edges are skipped.
    sg2edge = defaultdict(set)
    for source, target, data in graph.edges(data=True):
        if edge_has_annotation(data, annotation):
            sg2edge[data[ANNOTATIONS][annotation]].add((source, target))

    subgraph_intersection = defaultdict(dict)
    subgraph_union = defaultdict(dict)
    result = defaultdict(dict)
    for left, right in itt.product(sg2edge, repeat=2):
        common = sg2edge[left] & sg2edge[right]
        combined = sg2edge[left] | sg2edge[right]
        subgraph_intersection[left][right] = common
        subgraph_union[left][right] = combined
        # Tanimoto similarity; every bucket is non-empty so the union is too.
        result[left][right] = len(common) / len(combined)
    return sg2edge, subgraph_intersection, subgraph_union, result
Options:
1. Total number of edges overlap (intersection)
2. Percentage overlap (tanimoto similarity)
:param graph: A BEL graph
:param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
:return: {subgraph: set of edges}, {(subgraph 1, subgraph2): set of intersecting edges},
{(subgraph 1, subgraph2): set of unioned edges}, {(subgraph 1, subgraph2): tanimoto similarity}, |
def prox_gradf_lim(xy, step, boundary=None):
    """Forward-backward step: a gradient step followed by projection onto
    the feasible region."""
    stepped = prox_gradf(xy, step)
    return prox_lim(stepped, step, boundary=boundary)
def signature(self, name=None):
    """Return our function signature as a string.

    By default this function uses the annotated name of the function
    however if you need to override that with a custom name you can
    pass name=<custom name>

    Args:
        name (str): Optional name to override the default name given
            in the function signature.

    Returns:
        str: The formatted function signature
    """
    self._ensure_loaded()

    if name is None:
        name = self.name

    num_defaults = 0
    if self.arg_defaults is not None:
        num_defaults = len(self.arg_defaults)
    # Index of the first argument that carries a default value.
    first_default = len(self.arg_names) - num_defaults

    args = []
    for i, arg_name in enumerate(self.arg_names):
        typestr = ""
        if arg_name in self.annotated_params:
            typestr = "{} ".format(self.annotated_params[arg_name].type_name)

        if i >= first_default:
            default = str(self.arg_defaults[i - first_default])
            # Make an empty-string default visible in the rendered signature.
            if len(default) == 0:
                default = "''"

            args.append("{}{}={}".format(typestr, str(arg_name), default))
        else:
            args.append(typestr + str(arg_name))

    return "{}({})".format(name, ", ".join(args))
By default this function uses the annotated name of the function
however if you need to override that with a custom name you can
pass name=<custom name>
Args:
name (str): Optional name to override the default name given
in the function signature.
Returns:
str: The formatted function signature |
def getstate(self):
    """
    Return the workflow state and exit code as a tuple.

    Possible results:
        QUEUED, -1
        INITIALIZING, -1
        RUNNING, -1
        COMPLETE, 0
        EXECUTOR_ERROR, 255
    """
    # No jobstore: the workflow has never been started.
    if not os.path.exists(self.jobstorefile):
        logging.info('Workflow ' + self.run_id + ': QUEUED')
        return "QUEUED", -1
    # A previous poll already recorded completion.
    if os.path.exists(self.statcompletefile):
        logging.info('Workflow ' + self.run_id + ': COMPLETE')
        return "COMPLETE", 0
    # A previous poll already recorded a failure.
    if os.path.exists(self.staterrorfile):
        logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
        return "EXECUTOR_ERROR", 255
    # Staged but not yet running: no stderr log exists yet.
    if not os.path.exists(self.errfile):
        logging.info('Workflow ' + self.run_id + ': INITIALIZING')
        return "INITIALIZING", -1

    # TODO: Query with "toil status"
    finished = False
    with open(self.errfile, 'r') as err_log:
        for line in err_log:
            if 'Traceback (most recent call last)' in line:
                logging.info('Workflow ' + self.run_id + ': EXECUTOR_ERROR')
                # Persist the error state for future polls.
                open(self.staterrorfile, 'a').close()
                return "EXECUTOR_ERROR", 255
            # A run can complete successfully but still fail afterwards
            # (e.g. uploading outputs to cloud buckets), so remember the
            # success marker and keep scanning for a traceback.
            if 'Finished toil run successfully.' in line:
                finished = True
    if finished:
        logging.info('Workflow ' + self.run_id + ': COMPLETE')
        open(self.statcompletefile, 'a').close()
        return "COMPLETE", 0

    logging.info('Workflow ' + self.run_id + ': RUNNING')
    return "RUNNING", -1
INITIALIZING, -1
RUNNING, -1
COMPLETE, 0
or
EXECUTOR_ERROR, 255 |
def list_objects(self, path='', relative=False, first_level=False,
                 max_request_entries=None):
    """
    List objects.

    Yields at most ``max_request_entries`` entries in total (locators and
    their contents combined) when that limit is given.

    Args:
        path (str): Path or URL.
        relative (bool): Path is relative to current root.
        first_level (bool): If True, returns only first level objects.
            Else, returns full tree.
        max_request_entries (int): If specified, maximum entries returned
            by request.

    Returns:
        generator of tuple: object name str, object header dict
    """
    # Running count of yielded entries, checked against the global budget.
    entries = 0
    max_request_entries_arg = None
    if not relative:
        path = self.relpath(path)

    # From root
    if not path:
        locators = self._list_locators()

        # Yields locators
        if first_level:
            for locator in locators:
                entries += 1
                yield locator
                if entries == max_request_entries:
                    return
            return

        # Yields each locator objects
        for loc_path, loc_header in locators:

            # Yields locator itself
            loc_path = loc_path.strip('/')
            entries += 1
            yield loc_path, loc_header
            if entries == max_request_entries:
                return

            # Yields locator content if read access to it
            if max_request_entries is not None:
                # Remaining budget after what has been yielded so far.
                max_request_entries_arg = max_request_entries - entries
            try:
                for obj_path, obj_header in self._list_objects(
                        self.get_client_kwargs(loc_path), '',
                        max_request_entries_arg):
                    entries += 1
                    yield ('/'.join((loc_path, obj_path.lstrip('/'))),
                           obj_header)
                    if entries == max_request_entries:
                        return
            except ObjectPermissionError:
                # No read access to locator
                continue
        return

    # From locator or sub directory
    locator, path = self.split_locator(path)
    if first_level:
        # Tracks already-yielded first-level names to avoid duplicates.
        seen = set()

    if max_request_entries is not None:
        max_request_entries_arg = max_request_entries - entries

    for obj_path, header in self._list_objects(
            self.get_client_kwargs(locator), path, max_request_entries_arg):

        if path:
            try:
                obj_path = obj_path.split(path, 1)[1]
            except IndexError:
                # Not sub path of path
                continue

        obj_path = obj_path.lstrip('/')

        # Skips parent directory
        if not obj_path:
            continue

        # Yields first level locator objects only
        if first_level:
            # Directory
            try:
                obj_path, _ = obj_path.strip('/').split('/', 1)
                obj_path += '/'

                # Avoids to use the header of the object instead of the
                # non existing header of the directory that only exists
                # virtually in object path.
                header = dict()

            # File
            except ValueError:
                pass

            if obj_path not in seen:
                entries += 1
                yield obj_path, header
                if entries == max_request_entries:
                    return
                seen.add(obj_path)

        # Yields locator objects
        else:
            entries += 1
            yield obj_path, header
            if entries == max_request_entries:
                return
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
first_level (bool): It True, returns only first level objects.
Else, returns full tree.
max_request_entries (int): If specified, maximum entries returned
by request.
Returns:
generator of tuple: object name str, object header dict |
def prepare_cookies(self, cookies):
    """Prepare the given HTTP cookie data.

    Stores *cookies* (a ``CookieJar`` or a plain dict) on ``self._cookies``
    and, when the jar produces one, sets the ``Cookie`` header. Due to
    cookielib's design the header is not regenerated if it already exists,
    so this method is effectively one-shot for the life of the
    :class:`PreparedRequest <PreparedRequest>` object unless the "Cookie"
    header is removed beforehand.
    """
    if isinstance(cookies, cookielib.CookieJar):
        jar = cookies
    else:
        jar = cookiejar_from_dict(cookies)
    self._cookies = jar

    header = get_cookie_header(jar, self)
    if header is not None:
        self.headers['Cookie'] = header
This function eventually generates a ``Cookie`` header from the
given cookies using cookielib. Due to cookielib's design, the header
will not be regenerated if it already exists, meaning this function
can only be called once for the life of the
:class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
header is removed beforehand. |
def combine_sj_out(
    fns,
    external_db,
    total_jxn_cov_cutoff=20,
    define_sample_name=None,
    verbose=False,
):
    """Combine SJ.out.tab files from STAR by filtering based on coverage and
    comparing to an external annotation to discover novel junctions.

    Parameters
    ----------
    fns : list of strings
        Filenames of SJ.out.tab files to combine.
    external_db : str
        Filename of splice junction information from external database. The file
        should have a header and contained the following columns 'gene',
        'chrom', 'start', 'end', 'strand', 'chrom:start', 'chrom:end', 'donor',
        'acceptor', 'intron'.
    total_jxn_cov_cutoff : int
        Discard junctions with less than this many reads summed over all
        samples.
    define_sample_name : function
        A function mapping the SJ.out.tab filenames to sample names.
    verbose : bool
        If True, write progress messages to stderr after each stage.

    Returns
    -------
    countDF : pandas.DataFrame
        Number of unique junction spanning reads for each junction that passed
        filtering criteria.
    annotDF : pandas.DataFrame
        Annotation information for junctions that passed filtering criteria.
    stats : list of strings
        Human readable statistics.
    """
    if verbose:
        # 'sys' is only needed for the verbose progress messages below.
        import sys
    # I'll start by figuring out which junctions we will keep.
    counts = _total_jxn_counts(fns)
    jxns = set(counts[counts >= total_jxn_cov_cutoff].index)
    if verbose:
        sys.stderr.write('Counting done\n')
    stats = []
    sj_outD = _make_sj_out_dict(fns, jxns=jxns,
                                define_sample_name=define_sample_name)
    stats.append('Number of junctions in SJ.out file per sample')
    for k in sj_outD.keys():
        stats.append('{0}\t{1:,}'.format(k, sj_outD[k].shape[0]))
    stats.append('')
    if verbose:
        sys.stderr.write('Dict done\n')
    sj_outP, annotDF = _make_sj_out_panel(sj_outD, total_jxn_cov_cutoff)
    stats.append('SJ.out panel size\t{0}'.format(sj_outP.shape))
    stats.append('')
    if verbose:
        sys.stderr.write('Panel done\n')
    extDF, ext_stats = read_external_annotation(external_db)
    stats += ext_stats
    stats.append('')
    if verbose:
        sys.stderr.write('DB read done\n')
    # Keep only junctions whose donor/acceptor sites match the external
    # annotation, then annotate novel sites with their distances.
    countsDF, annotDF, filter_stats = _filter_jxns_donor_acceptor(sj_outP,
                                                                  annotDF,
                                                                  extDF)
    if verbose:
        sys.stderr.write('Filter done\n')
    annotDF = _find_novel_donor_acceptor_dist(annotDF, extDF)
    if verbose:
        sys.stderr.write('Dist done\n')
    stats += filter_stats
    return countsDF, annotDF, stats
comparing to an external annotation to discover novel junctions.
Parameters
----------
fns : list of strings
Filenames of SJ.out.tab files to combine.
external_db : str
Filename of splice junction information from external database. The file
should have a header and contained the following columns 'gene',
'chrom', 'start', 'end', 'strand', 'chrom:start', 'chrom:end', 'donor',
'acceptor', 'intron'.
total_jxn_cov_cutoff : int
Discard junctions with less than this many reads summed over all
samples.
define_sample_name : function
A function mapping the SJ.out.tab filenames to sample names.
Returns
-------
countDF : pandas.DataFrame
Number of unique junction spanning reads for each junction that passed
filtering criteria.
annotDF : pandas.DataFrame
Annotation information for junctions that passed filtering criteria.
stats : list of strings
Human readable statistics. |
def delete_branch(self, project, repository, name, end_point):
    """
    Delete a branch from the given repository.

    :param self:
    :param project: Key of the project owning the repository.
    :param repository: Slug of the repository holding the branch.
    :param name: Name of the branch to delete.
    :param end_point: Commit the branch currently points at.
    :return: Result of the DELETE request.
    """
    url = ('rest/branch-utils/1.0/projects/{project}'
           '/repos/{repository}/branches').format(project=project,
                                                  repository=repository)
    payload = {"name": str(name), "endPoint": str(end_point)}
    return self.delete(url, data=payload)
:param self:
:param project:
:param repository:
:param name:
:param end_point:
:return: |
def rerun(version="3.7.0"):
    """
    Rerun last example code block with specified version of python.
    """
    from commandlib import Command
    state_dir = DIR.gen.joinpath("state")
    interpreter = DIR.gen.joinpath("py{0}".format(version), "bin", "python")
    # Run the saved example with the requested interpreter, from state_dir.
    Command(interpreter)(
        state_dir.joinpath("examplepythoncode.py")
    ).in_dir(state_dir).run()
def config_default(dest):
    """ Create a default configuration file.
    \b
    DEST: Path or file name for the configuration file.
    """
    target = Path(dest).resolve()
    # A directory target gets the standard config file name appended.
    if target.is_dir():
        target = target / LIGHTFLOW_CONFIG_NAME

    target.write_text(Config.default())
    click.echo('Configuration written to {}'.format(target))
\b
DEST: Path or file name for the configuration file. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.