def _prepare_outputs(self, data_out, outputs):
""" Open a ROOT file with option 'RECREATE' to create a new file (the file will
be overwritten if it already exists), and using the ZLIB compression algorithm
(with compression level 1) for better compatibility with older ROOT versions
(see https://root.cern.ch/doc/v614/release-notes.html#important-notice ).
:param data_out:
:param outputs:
:return:
"""
compress = ROOTModule.ROOT.CompressionSettings(ROOTModule.ROOT.kZLIB, 1)
if isinstance(data_out, (str, unicode)):
self.file_emulation = True
outputs.append(ROOTModule.TFile.Open(data_out, 'RECREATE', '', compress))
# multiple tables - require directory
elif isinstance(data_out, ROOTModule.TFile):
outputs.append(data_out)
else: # assume it's a file like object
self.file_emulation = True
filename = os.path.join(tempfile.mkdtemp(),'tmp.root')
outputs.append(ROOTModule.TFile.Open(filename, 'RECREATE', '', compress))
def _get_trendline(self,date0=None,date1=None,on=None,kind='trend',to_strfmt='%Y-%m-%d',from_strfmt='%d%b%y',**kwargs):
"""
Returns a trendline (line), support or resistance
Parameters:
date0 : string
Trendline starting date
date1 : string
Trendline end date
on : string
Indicates the data series on which the
trendline should be based.
'close'
'high'
'low'
'open'
kind : string
Defines the kind of trendline
'trend'
'resistance'
'support'
mode : string
Defines how the support/resistance will
be drawn
'starttoend' : (x0,x1)
'fromstart' : (x0,date0)
'toend' : (date0,x1)
text : string
If passed, then an annotation will be added
to the trendline (at mid point)
from_strfmt : string
Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
"""
ann_values=copy.deepcopy(get_annotation_kwargs())
ann_values.extend(['x','y'])
ann_kwargs=utils.check_kwargs(kwargs,ann_values,{},clean_origin=True)
def position(d0,d1):
return d0+(d1-d0)/2
date0=kwargs.pop('date',date0)
date0=date_tools.stringToString(date0,from_strfmt,to_strfmt) if '-' not in date0 else date0
if kind=='trend':
date1=date_tools.stringToString(date1,from_strfmt,to_strfmt) if '-' not in date1 else date1
on='close' if not on else on
df=pd.DataFrame(self.df[self._d[on]])
y0=kwargs.get('y0',df.loc[date0].values[0])
y1=kwargs.get('y1',df.loc[date1].values[0])
if kind in ('support','resistance'):
mode=kwargs.pop('mode','starttoend')
if not on:
on='low' if kind=='support' else 'high'
df=pd.DataFrame(self.df[self._d[on]])
y0=kwargs.get('y0',df.loc[date0].values[0])
y1=kwargs.get('y1',y0)
if mode=='starttoend':
date0=df.index[0]
date1=df.index[-1]
elif mode=='toend':
date1=df.index[-1]
elif mode=='fromstart':
date1=date0
date0=df.index[0]
if isinstance(date0,pd.Timestamp):
date0=date_tools.dateToString(date0,to_strfmt)
if isinstance(date1,pd.Timestamp):
date1=date_tools.dateToString(date1,to_strfmt)
d={'x0':date0,'x1':date1,'y0':y0,'y1':y1}
d.update(**kwargs)
shape=tools.get_shape(**d)
if ann_kwargs.get('text',False):
ann_kwargs['x']=ann_kwargs.get('x',date_tools.dateToString(position(date_tools.stringToDate(date0,to_strfmt),date_tools.stringToDate(date1,to_strfmt)),to_strfmt))
ann_kwargs['y']=ann_kwargs.get('y',position(shape['y0'],shape['y1']))
else:
ann_kwargs={}
return {'shape':shape,'annotation':ann_kwargs}
def remove_known_host(host, application_name, user=None):
"""Remove the entry in known_hosts for host.
:param host: hostname to lookup in file.
:type host: str
:param application_name: Name of application, e.g. nova-compute-something
:type application_name: str
:param user: The user that the ssh asserts are for.
:type user: str
"""
log('Removing SSH known host entry for compute host at %s' % host)
cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host]
subprocess.check_call(cmd)
def iter_elements(element_function, parent_to_parse, **kwargs):
"""
Applies element_function to each of the sub-elements in parent_to_parse.
The passed in function must take at least one element, and an optional
list of kwargs which are relevant to each of the elements in the list:
def elem_func(each_elem, **kwargs)
"""
parent = get_element(parent_to_parse)
if not hasattr(element_function, '__call__'):
return parent
for child in ([] if parent is None else parent):
element_function(child, **kwargs)
return parent
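A usage sketch (not part of the original source), assuming the module's get_element helper accepts an XML string; strip_text is a hypothetical element function and suffix an illustrative kwarg.
def strip_text(elem, suffix='', **kwargs):
    # Hypothetical element function: trim and tag the text of each child element.
    if elem.text:
        elem.text = elem.text.strip() + suffix

root = iter_elements(strip_text, '<root><a> x </a><b> y </b></root>', suffix='!')
# root is the parsed parent; each child's text is now 'x!' / 'y!'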
def top(self):
"""
list of processes in a running container
:return: list of dicts (empty list on error)
"""
# let's get resources from .stats()
ps_args = "-eo pid,ppid,wchan,args"
# returns {"Processes": [values], "Titles": [values]}
# it's easier to play with list of dicts: [{"pid": 1, "ppid": 0}]
try:
response = self.d.top(self.container_id, ps_args=ps_args)
except docker.errors.APIError as ex:
logger.warning("error getting processes: %r", ex)
return []
# TODO: sort?
logger.debug(json.dumps(response, indent=2))
return [dict(zip(response["Titles"], process))
for process in response["Processes"] or []]
def after_reinstate(analysis_request):
"""Method triggered after a 'reinstate' transition for the Analysis Request
passed in is performed. Sets its status to the last status before it was
cancelled. Reinstates the descendant partitions and all the analyses
associated to the analysis request as well.
"""
do_action_to_descendants(analysis_request, "reinstate")
do_action_to_analyses(analysis_request, "reinstate")
# Force the transition to previous state before the request was cancelled
prev_status = get_prev_status_from_history(analysis_request, "cancelled")
changeWorkflowState(analysis_request, AR_WORKFLOW_ID, prev_status,
action="reinstate")
analysis_request.reindexObject()
def _additive_estimate(events, timeline, _additive_f, _additive_var, reverse):
"""
Called to compute the Kaplan Meier and Nelson-Aalen estimates.
"""
if reverse:
events = events.sort_index(ascending=False)
at_risk = events["entrance"].sum() - events["removed"].cumsum().shift(1).fillna(0)
deaths = events["observed"]
estimate_ = np.cumsum(_additive_f(at_risk, deaths)).sort_index().shift(-1).fillna(0)
var_ = np.cumsum(_additive_var(at_risk, deaths)).sort_index().shift(-1).fillna(0)
else:
deaths = events["observed"]
# Why subtract entrants like this? see https://github.com/CamDavidsonPilon/lifelines/issues/497
# specifically, we kill people, compute the ratio, and then "add" the entrants. This means that
# the population should not have the late entrants. The only exception to this rule
# is the first period, where entrants happen _prior_ to deaths.
entrances = events["entrance"].copy()
entrances.iloc[0] = 0
population = events["at_risk"] - entrances
estimate_ = np.cumsum(_additive_f(population, deaths))
var_ = np.cumsum(_additive_var(population, deaths))
timeline = sorted(timeline)
estimate_ = estimate_.reindex(timeline, method="pad").fillna(0)
var_ = var_.reindex(timeline, method="pad")
var_.index.name = "timeline"
estimate_.index.name = "timeline"
return estimate_, var_
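As a point of reference for the _additive_f and _additive_var callables this helper expects, here is a hedged sketch of the increments the Nelson-Aalen and Kaplan-Meier estimators accumulate; the actual callables (with small-sample corrections) live elsewhere in lifelines and are only approximated here.
import numpy as np

def nelson_aalen_f(population, deaths):
    # Hazard increment: d_i / n_i (illustrative only).
    return deaths / population

def kaplan_meier_f(population, deaths):
    # Log-survival increment: log(1 - d_i / n_i); the cumulative sum is exponentiated afterwards.
    return np.log(1.0 - deaths / population)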
def adjust(self):
""" If one of the transformations is not defined it is expected to
be the mirror image of the other.
"""
if self._fro is None and self._to is not None:
self._fro = dict(
[(value.lower(), key) for key, value in self._to.items()])
if self._to is None and self._fro is not None:
self._to = dict(
[(value.lower(), key) for key, value in self._fro.items()])
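A small illustration of the mirroring behaviour, assuming _to maps codes to string labels and _fro is left undefined; adjust() then derives the reverse map with lowercased keys.
_to = {1: 'Monday', 2: 'Tuesday'}
# Equivalent of what adjust() builds for the missing direction:
_fro = {value.lower(): key for key, value in _to.items()}
# {'monday': 1, 'tuesday': 2}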
def get(self, callback):
'''
Gets an item based on the path.
'''
derived_path = self.context.request.url
logger.debug('[{log_prefix}]: get.derived_path: {path}'.format(
log_prefix=LOG_PREFIX, path=derived_path))
callback(self.storage.get(self.result_key_for(derived_path)))
def get_ordering(self):
"""!
@brief Returns clustering ordering information about the input data set.
@details Clustering ordering of data-set contains the information about the internal clustering structure in line with connectivity radius.
@return (ordering_analyser) Analyser of clustering ordering.
@see process()
@see get_clusters()
@see get_noise()
@see get_radius()
@see get_optics_objects()
"""
if self.__ordering is None:
self.__ordering = []
for cluster in self.__clusters:
for index_object in cluster:
optics_object = self.__optics_objects[index_object]
if optics_object.reachability_distance is not None:
self.__ordering.append(optics_object.reachability_distance)
return self.__ordering
def process_remove_action(processors, action, argument):
"""Process action removals."""
for processor in processors:
processor(action, argument)
db.session.commit()
def layer_uri(self, layer_name):
"""Get layer URI.
For a vector layer :
/path/to/the/geopackage.gpkg|layername=my_vector_layer
For a raster :
GPKG:/path/to/the/geopackage.gpkg:my_raster_layer
:param layer_name: The name of the layer to fetch.
:type layer_name: str
:return: The URI to the layer.
:rtype: str
.. versionadded:: 4.0
"""
for layer in self._vector_layers():
if layer == layer_name:
uri = '{}|layername={}'.format(
self.uri.absoluteFilePath(), layer_name)
return uri
else:
for layer in self._raster_layers():
if layer == layer_name:
uri = 'GPKG:{}:{}'.format(
self.uri.absoluteFilePath(), layer_name)
return uri
else:
return None
def _get_collection(self, collection_uri, request_headers=None):
"""Generator function that returns collection members."""
# get the collection
status, headers, thecollection = self._rest_get(collection_uri)
if status != 200:
msg = self._get_extended_error(thecollection)
raise exception.IloError(msg)
while status < 300:
# verify expected type
# Don't limit to version 0 here as we will rev to 1.0 at some
# point hopefully with minimal changes
ctype = self._get_type(thecollection)
if (ctype not in ['Collection.0', 'Collection.1']):
raise exception.IloError("collection not found")
# if this collection has inline items, return those
# NOTE: Collections are very flexible in how they represent
# members. They can be inline in the collection as members
# of the 'Items' array, or they may be href links in the
# links/Members array. They could actually be both. Typically,
# iLO implements the inline (Items) form only when the collection
# is read-only. We have to render it with the href links when an
# array contains PATCHable items because it is complex to PATCH
# inline collection members.
if 'Items' in thecollection:
# iterate items
for item in thecollection['Items']:
# if the item has a self uri pointer,
# supply that for convenience.
memberuri = None
if 'links' in item and 'self' in item['links']:
memberuri = item['links']['self']['href']
yield 200, None, item, memberuri
# else walk the member links
elif ('links' in thecollection and
'Member' in thecollection['links']):
# iterate members
for memberuri in thecollection['links']['Member']:
# for each member return the resource indicated by the
# member link
status, headers, member = self._rest_get(memberuri['href'])
yield status, headers, member, memberuri['href']
# page forward if there are more pages in the collection
if ('links' in thecollection and
'NextPage' in thecollection['links']):
next_link_uri = (collection_uri + '?page=' + str(
thecollection['links']['NextPage']['page']))
status, headers, thecollection = self._rest_get(next_link_uri)
# else we are finished iterating the collection
else:
break
def execute_sql_statement(sql_statement, query, user_name, session, cursor):
"""Executes a single SQL statement"""
database = query.database
db_engine_spec = database.db_engine_spec
parsed_query = ParsedQuery(sql_statement)
sql = parsed_query.stripped()
SQL_MAX_ROWS = app.config.get('SQL_MAX_ROW')
if not parsed_query.is_readonly() and not database.allow_dml:
raise SqlLabSecurityException(
_('Only `SELECT` statements are allowed against this database'))
if query.select_as_cta:
if not parsed_query.is_select():
raise SqlLabException(_(
'Only `SELECT` statements can be used with the CREATE TABLE '
'feature.'))
if not query.tmp_table_name:
start_dttm = datetime.fromtimestamp(query.start_time)
query.tmp_table_name = 'tmp_{}_table_{}'.format(
query.user_id, start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
sql = parsed_query.as_create_table(query.tmp_table_name)
query.select_as_cta_used = True
if parsed_query.is_select():
if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
query.limit = SQL_MAX_ROWS
if query.limit:
sql = database.apply_limit_to_sql(sql, query.limit)
# Hook to allow environment-specific mutation (usually comments) to the SQL
SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
if SQL_QUERY_MUTATOR:
sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)
try:
if log_query:
log_query(
query.database.sqlalchemy_uri,
query.executed_sql,
query.schema,
user_name,
__name__,
security_manager,
)
query.executed_sql = sql
with stats_timing('sqllab.query.time_executing_query', stats_logger):
logging.info('Running query: \n{}'.format(sql))
db_engine_spec.execute(cursor, sql, async_=True)
logging.info('Handling cursor')
db_engine_spec.handle_cursor(cursor, query, session)
with stats_timing('sqllab.query.time_fetching_results', stats_logger):
logging.debug('Fetching data for query object: {}'.format(query.to_dict()))
data = db_engine_spec.fetch_data(cursor, query.limit)
except SoftTimeLimitExceeded as e:
logging.exception(e)
raise SqlLabTimeoutException(
"SQL Lab timeout. This environment's policy is to kill queries "
'after {} seconds.'.format(SQLLAB_TIMEOUT))
except Exception as e:
logging.exception(e)
raise SqlLabException(db_engine_spec.extract_error_message(e))
logging.debug('Fetching cursor description')
cursor_description = cursor.description
return dataframe.SupersetDataFrame(data, cursor_description, db_engine_spec)
def parse_library(lib_files):
"""
Analyzes the files given in the lib_files list.
Returns a MusicLibrary instance.
"""
tracks, playlists = lib_files
lib = MusicLibrary()
lib_length = len(tracks)
i = 0
writer = lib.ix.writer()
previous_procent_done_str = ""
for f in tracks:
track_info = TrackInfo(f)
lib.add_track_internal(track_info, writer)
current_percent_done_str = "%d%%" % (i / lib_length * 100)
if current_percent_done_str != previous_procent_done_str:
logs.print_info("Analizowanie biblioteki muzycznej... " + current_percent_done_str)
previous_procent_done_str = current_percent_done_str
i += 1.0
logs.print_info("Analizowanie playlist...")
for f in playlists:
with open(f, 'r') as fo:
playlist_dict = loads(fo.read())
playlist = Playlist(lib, f, playlist_dict['title'], playlist_dict['tracks'])
lib.add_playlist(playlist)
writer.commit()
logs.print_info("Optymalizacja index-u...")
lib.ix.optimize()
return lib
def retrieve(self, filter_expression=None,
order_expression=None, slice_key=None):
"""
Retrieve entities from this cache, possibly after filtering, ordering
and slicing.
"""
ents = iter(self.__entities)
if filter_expression is not None:
ents = filter_expression(ents)
if order_expression is not None:
# Ordering always involves a copy and conversion to a list, so
# we have to wrap in an iterator.
ents = iter(order_expression(ents))
if slice_key is not None:
ents = islice(ents, slice_key.start, slice_key.stop)
return ents
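A rough sketch of how the three optional arguments compose, using plain callables and a built-in slice as hypothetical stand-ins for the repository's own expression objects.
from itertools import islice

entities = [3, 1, 2, 5, 4]
filter_expression = lambda ents: (e for e in ents if e > 1)   # keep entities > 1
order_expression = lambda ents: sorted(ents)                   # ordering copies to a list
slice_key = slice(0, 2)

ents = iter(entities)
ents = filter_expression(ents)
ents = iter(order_expression(ents))
ents = islice(ents, slice_key.start, slice_key.stop)
print(list(ents))   # [2, 3]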
def setup(self,
hunt_id,
reason, grr_server_url, grr_username, grr_password, approvers=None,
verify=True):
"""Initializes a GRR Hunt file collector.
Args:
hunt_id: Hunt ID to download results from.
reason: justification for GRR access.
grr_server_url: GRR server URL.
grr_username: GRR username.
grr_password: GRR password.
approvers: comma-separated list of GRR approval recipients.
verify: boolean, whether to verify the GRR server's x509 certificate.
"""
super(GRRHuntDownloader, self).setup(
reason, grr_server_url, grr_username, grr_password,
approvers=approvers, verify=verify)
self.hunt_id = hunt_id
self.output_path = tempfile.mkdtemp()
def get_feature(self, croplayer_id, cropfeature_id):
"""
Gets a crop feature
:param int croplayer_id: ID of a cropping layer
:param int cropfeature_id: ID of a cropping feature
:rtype: CropFeature
"""
target_url = self.client.get_url('CROPFEATURE', 'GET', 'single', {'croplayer_id': croplayer_id, 'cropfeature_id': cropfeature_id})
return self.client.get_manager(CropFeature)._get(target_url)
def Network_emulateNetworkConditions(self, offline, latency,
downloadThroughput, uploadThroughput, **kwargs):
"""
Function path: Network.emulateNetworkConditions
Domain: Network
Method name: emulateNetworkConditions
Parameters:
Required arguments:
'offline' (type: boolean) -> True to emulate internet disconnection.
'latency' (type: number) -> Minimum latency from request sent to response headers received (ms).
'downloadThroughput' (type: number) -> Maximal aggregated download throughput (bytes/sec). -1 disables download throttling.
'uploadThroughput' (type: number) -> Maximal aggregated upload throughput (bytes/sec). -1 disables upload throttling.
Optional arguments:
'connectionType' (type: ConnectionType) -> Connection type if known.
No return value.
Description: Activates emulation of network conditions.
"""
assert isinstance(offline, (bool,)
), "Argument 'offline' must be of type '['bool']'. Received type: '%s'" % type(
offline)
assert isinstance(latency, (float, int)
), "Argument 'latency' must be of type '['float', 'int']'. Received type: '%s'" % type(
latency)
assert isinstance(downloadThroughput, (float, int)
), "Argument 'downloadThroughput' must be of type '['float', 'int']'. Received type: '%s'" % type(
downloadThroughput)
assert isinstance(uploadThroughput, (float, int)
), "Argument 'uploadThroughput' must be of type '['float', 'int']'. Received type: '%s'" % type(
uploadThroughput)
expected = ['connectionType']
passed_keys = list(kwargs.keys())
assert all([(key in expected) for key in passed_keys]
), "Allowed kwargs are ['connectionType']. Passed kwargs: %s" % passed_keys
subdom_funcs = self.synchronous_command('Network.emulateNetworkConditions',
offline=offline, latency=latency, downloadThroughput=
downloadThroughput, uploadThroughput=uploadThroughput, **kwargs)
return subdom_funcs
def set_relay_on(self):
"""Turn the relay on."""
if not self.get_relay_state():
try:
request = requests.get(
'{}/relay'.format(self.resource), params={'state': '1'},
timeout=self.timeout)
if request.status_code == 200:
self.data['relay'] = True
except requests.exceptions.ConnectionError:
raise exceptions.MyStromConnectionError()
def dispatch_shell(self, stream, msg):
"""dispatch shell requests"""
# flush control requests first
if self.control_stream:
self.control_stream.flush()
idents,msg = self.session.feed_identities(msg, copy=False)
try:
msg = self.session.unserialize(msg, content=True, copy=False)
except:
self.log.error("Invalid Message", exc_info=True)
return
header = msg['header']
msg_id = header['msg_id']
msg_type = msg['header']['msg_type']
# Print some info about this message and leave a '--->' marker, so it's
# easier to trace visually the message chain when debugging. Each
# handler prints its message at the end.
self.log.debug('\n*** MESSAGE TYPE:%s***', msg_type)
self.log.debug(' Content: %s\n --->\n ', msg['content'])
if msg_id in self.aborted:
self.aborted.remove(msg_id)
# is it safe to assume a msg_id will not be resubmitted?
reply_type = msg_type.split('_')[0] + '_reply'
status = {'status' : 'aborted'}
sub = {'engine' : self.ident}
sub.update(status)
reply_msg = self.session.send(stream, reply_type, subheader=sub,
content=status, parent=msg, ident=idents)
return
handler = self.shell_handlers.get(msg_type, None)
if handler is None:
self.log.error("UNKNOWN MESSAGE TYPE: %r", msg_type)
else:
# ensure default_int_handler during handler call
sig = signal(SIGINT, default_int_handler)
try:
handler(stream, idents, msg)
except Exception:
self.log.error("Exception in message handler:", exc_info=True)
finally:
signal(SIGINT, sig)
def analysis_question_extractor(impact_report, component_metadata):
"""Extracting analysis question from the impact layer.
:param impact_report: the impact report that acts as a proxy to fetch
all the data that extractor needed
:type impact_report: safe.report.impact_report.ImpactReport
:param component_metadata: the component metadata. Used to obtain
information about the component we want to render
:type component_metadata: safe.report.report_metadata.
ReportComponentsMetadata
:return: context for rendering phase
:rtype: dict
.. versionadded:: 4.0
"""
multi_exposure = impact_report.multi_exposure_impact_function
if multi_exposure:
return multi_exposure_analysis_question_extractor(
impact_report, component_metadata)
context = {}
extra_args = component_metadata.extra_args
provenance = impact_report.impact_function.provenance
header = resolve_from_dictionary(extra_args, 'header')
analysis_question = provenance['analysis_question']
context['component_key'] = component_metadata.key
context['header'] = header
context['analysis_questions'] = [analysis_question]
return context
def searchForMessages(self, query, offset=0, limit=5, thread_id=None):
"""
Find and get :class:`models.Message` objects by query
.. warning::
This method sends request for every found message ID.
:param query: Text to search for
:param offset: Number of messages to skip
:param limit: Max. number of messages to retrieve
:param thread_id: User/Group ID to search in. See :ref:`intro_threads`
:type offset: int
:type limit: int
:return: Found :class:`models.Message` objects
:rtype: generator
:raises: FBchatException if request failed
"""
message_ids = self.searchForMessageIDs(
query, offset=offset, limit=limit, thread_id=thread_id
)
for mid in message_ids:
yield self.fetchMessageInfo(mid, thread_id)
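A hedged usage sketch; the client construction is assumed to have happened elsewhere and the query, limit and thread_id values are placeholders.
# client = Client('<email>', '<password>')           # hypothetical, authenticated elsewhere
messages = client.searchForMessages('meeting', limit=3, thread_id='<thread id>')
for message in messages:            # generator: one extra request per found message ID
    print(message.text)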
def ws050(self, value=None):
""" Corresponds to IDD Field `ws050`
Wind speed corresponding to the 5.0% annual cumulative frequency of occurrence
Args:
value (float): value for IDD Field `ws050`
Unit: m/s
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value)
except ValueError:
raise ValueError('value {} need to be of type float '
'for field `ws050`'.format(value))
self._ws050 = value
def get_role_id(self, role_name, mount_point='approle'):
"""GET /auth/<mount_point>/role/<role name>/role-id
:param role_name:
:type role_name:
:param mount_point:
:type mount_point:
:return:
:rtype:
"""
url = '/v1/auth/{0}/role/{1}/role-id'.format(mount_point, role_name)
return self._adapter.get(url).json()['data']['role_id']
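A minimal usage sketch, assuming an hvac-style client object exposing this method; the server URL, token and role name are placeholders.
# client = hvac.Client(url='https://vault.example.com:8200', token='<token>')   # assumed setup
role_id = client.get_role_id('my-role')    # GET /v1/auth/approle/role/my-role/role-id
print(role_id)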
def interface_type(self, ift):
"""
Set the CoRE Link Format if attribute of the resource.
:param ift: the CoRE Link Format if attribute
"""
if not isinstance(ift, str):
ift = str(ift)
self._attributes["if"] = ift
def remove_lines(lines, remove=('[[back to top]', '<a class="mk-toclify"')):
"""Removes existing [back to top] links and <a id> tags."""
if not remove:
return lines[:]
out = []
for l in lines:
if l.startswith(remove):
continue
out.append(l)
return out
def _help():
""" Display both SQLAlchemy and Python help statements """
statement = '%s%s' % (shelp, phelp % ', '.join(cntx_.keys()))
print(statement.strip())
def define_property(obj, name, fget=None, fset=None, fdel=None, doc=None):
"""Defines a @property dynamically for an instance rather than a class."""
if hasattr(fget, '__get__'): # can pass a property declaration too
prop = fget
else:
prop = property(fget, fset, fdel, doc)
cls = obj.__class__
obj.__class__ = type(cls.__name__, (cls, ), {
'__doc__': cls.__doc__,
name: prop
})
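A short example of attaching a property to a single instance while leaving other instances of the same class untouched; the Point class and norm name are illustrative.
class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

p = Point(3, 4)
define_property(p, 'norm', fget=lambda self: (self.x ** 2 + self.y ** 2) ** 0.5)
print(p.norm)                          # 5.0
print(hasattr(Point(0, 0), 'norm'))    # False - only p's class was swapped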
def sign(pkey, data, digest):
"""
Sign a data string using the given key and message digest.
:param pkey: PKey to sign with
:param data: data to be signed
:param digest: message digest to use
:return: signature
.. versionadded:: 0.11
"""
data = _text_to_bytes_and_warn("data", data)
digest_obj = _lib.EVP_get_digestbyname(_byte_string(digest))
if digest_obj == _ffi.NULL:
raise ValueError("No such digest method")
md_ctx = _lib.Cryptography_EVP_MD_CTX_new()
md_ctx = _ffi.gc(md_ctx, _lib.Cryptography_EVP_MD_CTX_free)
_lib.EVP_SignInit(md_ctx, digest_obj)
_lib.EVP_SignUpdate(md_ctx, data, len(data))
length = _lib.EVP_PKEY_size(pkey._pkey)
_openssl_assert(length > 0)
signature_buffer = _ffi.new("unsigned char[]", length)
signature_length = _ffi.new("unsigned int *")
final_result = _lib.EVP_SignFinal(
md_ctx, signature_buffer, signature_length, pkey._pkey)
_openssl_assert(final_result == 1)
return _ffi.buffer(signature_buffer, signature_length[0])[:]
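A hedged usage sketch with pyOpenSSL-style objects; the key size and message are illustrative.
from OpenSSL import crypto

pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
signature = crypto.sign(pkey, b"message to sign", "sha256")
# The matching crypto.verify(cert, signature, data, digest) checks it against an X509 certificate.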
def analyse(self, traj, network, current_subrun, subrun_list, network_dict):
"""Calculates average Fano Factor of a network.
:param traj:
Trajectory container
Expects:
`results.monitors.spikes_e`: Data from SpikeMonitor for excitatory neurons
Adds:
`results.statistics.mean_fano_factor`: Average Fano Factor
:param network:
The BRIAN network
:param current_subrun:
BrianParameter
:param subrun_list:
Upcoming subruns, analysis is only performed if subruns is empty,
aka the final subrun has finished.
:param network_dict:
Dictionary of items shared among components
"""
#Check if we finished all subruns
if len(subrun_list)==0:
spikes_e = traj.results.monitors.spikes_e
time_window = traj.parameters.analysis.statistics.time_window
start_time = traj.parameters.simulation.durations.initial_run
end_time = start_time+traj.parameters.simulation.durations.measurement_run
neuron_ids = traj.parameters.analysis.statistics.neuron_ids
mean_ff = self._compute_mean_fano_factor(
neuron_ids, spikes_e, time_window, start_time, end_time)
traj.f_add_result('statistics.mean_fano_factor', mean_ff, comment='Average Fano '
'Factor over all '
'exc neurons')
print('R_ee: %f, Mean FF: %f' % (traj.R_ee, mean_ff))
def add_constraints(self):
"""
Set the base constraints on the relation query.
:rtype: None
"""
if self._constraints:
foreign_key = getattr(self._parent, self._foreign_key, None)
if foreign_key is None:
self._query = None
else:
table = self._related.get_table()
self._query.where(
"{}.{}".format(table, self._other_key), "=", foreign_key
)
def multiplySeries(requestContext, *seriesLists):
"""
Takes two or more series and multiplies their points. A constant may not be
used. To multiply by a constant, use the scale() function.
Example::
&target=multiplySeries(Series.dividends,Series.divisors)
"""
if not seriesLists or not any(seriesLists):
return []
seriesList, start, end, step = normalize(seriesLists)
if len(seriesList) == 1:
return seriesList
name = "multiplySeries(%s)" % ','.join([s.name for s in seriesList])
product = map(lambda x: safeMul(*x), zip_longest(*seriesList))
resultSeries = TimeSeries(name, start, end, step, product)
resultSeries.pathExpression = name
return [resultSeries]
def update_cluster(cluster_dict, datacenter=None, cluster=None,
service_instance=None):
'''
Updates a cluster.
cluster_dict
Dictionary with the config values to apply to the cluster.
datacenter
Name of datacenter containing the cluster.
Ignored if already contained by proxy details.
Default value is None.
cluster
Name of cluster.
Ignored if already contained by proxy details.
Default value is None.
service_instance
Service instance (vim.ServiceInstance) of the vCenter.
Default is None.
.. code-block:: bash
# esxdatacenter proxy
salt '*' vsphere.update_cluster cluster_dict=$cluster_dict cluster=cl1
# esxcluster proxy
salt '*' vsphere.update_cluster cluster_dict=$cluster_dict
'''
# Validate cluster dictionary
schema = ESXClusterConfigSchema.serialize()
try:
jsonschema.validate(cluster_dict, schema)
except jsonschema.exceptions.ValidationError as exc:
raise InvalidConfigError(exc)
# Get required details from the proxy
proxy_type = get_proxy_type()
if proxy_type == 'esxdatacenter':
datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
dc_ref = _get_proxy_target(service_instance)
if not cluster:
raise ArgumentValueError('\'cluster\' needs to be specified')
elif proxy_type == 'esxcluster':
datacenter = __salt__['esxcluster.get_details']()['datacenter']
dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
cluster = __salt__['esxcluster.get_details']()['cluster']
if cluster_dict.get('vsan') and not \
salt.utils.vsan.vsan_supported(service_instance):
raise VMwareApiError('VSAN operations are not supported')
cluster_ref = salt.utils.vmware.get_cluster(dc_ref, cluster)
cluster_spec = vim.ClusterConfigSpecEx()
props = salt.utils.vmware.get_properties_of_managed_object(
cluster_ref, properties=['configurationEx'])
# Copy elements we want to update to spec
for p in ['dasConfig', 'drsConfig']:
setattr(cluster_spec, p, getattr(props['configurationEx'], p))
if props['configurationEx'].vsanConfigInfo:
cluster_spec.vsanConfig = props['configurationEx'].vsanConfigInfo
vsan_spec = None
vsan_61 = None
if cluster_dict.get('vsan'):
# XXX The correct way of retrieving the VSAN data (on the if branch)
# is not supported before 60u2 vcenter
vcenter_info = salt.utils.vmware.get_service_info(service_instance)
if float(vcenter_info.apiVersion) >= 6.0 and \
int(vcenter_info.build) >= 3634794: # 60u2
vsan_61 = False
vsan_info = salt.utils.vsan.get_cluster_vsan_info(cluster_ref)
vsan_spec = vim.vsan.ReconfigSpec(modify=True)
# Only interested in the vsanClusterConfig and the
# dataEfficiencyConfig
# vsan_spec.vsanClusterConfig = vsan_info
vsan_spec.dataEfficiencyConfig = vsan_info.dataEfficiencyConfig
vsan_info.dataEfficiencyConfig = None
else:
vsan_61 = True
_apply_cluster_dict(cluster_spec, cluster_dict, vsan_spec, vsan_61)
# We try to reconfigure vsan first as it fails if HA is enabled so the
# command will abort not having any side-effects
# also if HA was previously disabled it can be enabled automatically if
# desired
if vsan_spec:
log.trace('vsan_spec = %s', vsan_spec)
salt.utils.vsan.reconfigure_cluster_vsan(cluster_ref, vsan_spec)
# We need to retrieve again the properties and reapply them
# As the VSAN configuration has changed
cluster_spec = vim.ClusterConfigSpecEx()
props = salt.utils.vmware.get_properties_of_managed_object(
cluster_ref, properties=['configurationEx'])
# Copy elements we want to update to spec
for p in ['dasConfig', 'drsConfig']:
setattr(cluster_spec, p, getattr(props['configurationEx'], p))
if props['configurationEx'].vsanConfigInfo:
cluster_spec.vsanConfig = props['configurationEx'].vsanConfigInfo
# We only need to configure the cluster_spec, as if it were a vsan_61
# cluster
_apply_cluster_dict(cluster_spec, cluster_dict)
salt.utils.vmware.update_cluster(cluster_ref, cluster_spec)
return {'update_cluster': True}
def from_payload(self, payload):
"""Init frame from binary data."""
self.session_id = payload[0]*256 + payload[1]
self.status = CommandSendConfirmationStatus(payload[2])
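A small worked example of the payload layout assumed here: the first two bytes form a big-endian session id and the third byte is the status code.
payload = bytes([0x01, 0x2C, 0x00])
session_id = payload[0] * 256 + payload[1]   # 0x012C -> 300
status_code = payload[2]                     # 0, mapped through CommandSendConfirmationStatus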
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timer.stop()
# 等待事件处理线程退出
self.__thread.join()
def shell(cmd, output=None, mode='w', cwd=None, shell=False):
"""Execute a shell command.
You can add a shell command::
server.watch(
'style.less', shell('lessc style.less', output='style.css')
)
:param cmd: a shell command, string or list
:param output: output stdout to the given file
:param mode: only works with output, mode ``w`` means write,
mode ``a`` means append
:param cwd: set working directory before command is executed.
:param shell: if true, on Unix the executable argument specifies a
replacement shell for the default ``/bin/sh``.
"""
if not output:
output = os.devnull
else:
folder = os.path.dirname(output)
if folder and not os.path.isdir(folder):
os.makedirs(folder)
if not isinstance(cmd, (list, tuple)) and not shell:
cmd = shlex.split(cmd)
def run_shell():
try:
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd,
shell=shell)
except OSError as e:
logger.error(e)
if e.errno == errno.ENOENT: # file (command) not found; uses the stdlib errno module (os.errno is gone in Python 3)
logger.error("maybe you haven't installed %s", cmd[0])
return e
stdout, stderr = p.communicate()
if stderr:
logger.error(stderr)
return stderr
#: stdout is bytes, decode for python3
if PY3:
stdout = stdout.decode()
with open(output, mode) as f:
f.write(stdout)
return run_shell
def _do_setup_step(self, play):
''' get facts from the remote system '''
host_list = self._list_available_hosts(play.hosts)
if play.gather_facts is False:
return {}
elif play.gather_facts is None:
host_list = [h for h in host_list if h not in self.SETUP_CACHE or 'module_setup' not in self.SETUP_CACHE[h]]
if len(host_list) == 0:
return {}
self.callbacks.on_setup()
self.inventory.restrict_to(host_list)
# push any variables down to the system
setup_results = cirruscluster.ext.ansible.runner.Runner(
pattern=play.hosts, module_name='setup', module_args={}, inventory=self.inventory,
forks=self.forks, module_path=self.module_path, timeout=self.timeout, remote_user=play.remote_user,
remote_pass=self.remote_pass, remote_port=play.remote_port, private_key_file=self.private_key_file,
private_key=self.private_key,
setup_cache=self.SETUP_CACHE, callbacks=self.runner_callbacks, sudo=play.sudo, sudo_user=play.sudo_user,
transport=play.transport, sudo_pass=self.sudo_pass, is_playbook=True, module_vars=play.vars,
).run()
self.stats.compute(setup_results, setup=True)
self.inventory.lift_restriction()
# now for each result, load into the setup cache so we can
# let runner template out future commands
setup_ok = setup_results.get('contacted', {})
for (host, result) in setup_ok.iteritems():
self.SETUP_CACHE[host].update({'module_setup': True})
self.SETUP_CACHE[host].update(result.get('ansible_facts', {}))
return setup_results
def bilateral2(data, fSize, sigma_p, sigma_x = 10.):
"""bilateral filter """
dtype = data.dtype.type
dtypes_kernels = {np.float32:"bilat2_float",
np.uint16:"bilat2_short"}
if not dtype in dtypes_kernels:
logger.info("data type %s not supported yet (%s), casting to float:"%(dtype,list(dtypes_kernels.keys())))
data = data.astype(np.float32)
dtype = data.dtype.type
img = OCLImage.from_array(data)
res = OCLArray.empty_like(data)
prog = OCLProgram(abspath("kernels/bilateral2.cl"))
prog.run_kernel(dtypes_kernels[dtype],
img.shape,None,
img,res.data,
np.int32(img.shape[0]),np.int32(img.shape[1]),
np.int32(fSize),np.float32(sigma_x),np.float32(sigma_p))
return res.get()
def tunnel(self, local_port, remote_port):
"""
Creates an SSH tunnel.
"""
r = self.local_renderer
r.env.tunnel_local_port = local_port
r.env.tunnel_remote_port = remote_port
r.local(' ssh -i {key_filename} -L {tunnel_local_port}:localhost:{tunnel_remote_port} {user}@{host_string} -N')
def _encrypt_private(self, ret, dictkey, target):
'''
The server equivalent of ReqChannel.crypted_transfer_decode_dictentry
'''
# encrypt with a specific AES key
pubfn = os.path.join(self.opts['pki_dir'],
'minions',
target)
key = salt.crypt.Crypticle.generate_key_string()
pcrypt = salt.crypt.Crypticle(
self.opts,
key)
try:
pub = salt.crypt.get_rsa_pub_key(pubfn)
except (ValueError, IndexError, TypeError):
return self.crypticle.dumps({})
except IOError:
log.error('AES key not found')
return {'error': 'AES key not found'}
pret = {}
if not six.PY2:
key = salt.utils.stringutils.to_bytes(key)
if HAS_M2:
pret['key'] = pub.public_encrypt(key, RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(pub)
pret['key'] = cipher.encrypt(key)
pret[dictkey] = pcrypt.dumps(
ret if ret is not False else {}
)
return pret
def fit_two_gaussian(spectra, f_ppm, lb=3.6, ub=3.9):
"""
Fit a gaussian function to the difference spectra
This is useful for estimation of the Glx peak, which tends to have two
peaks.
Parameters
----------
spectra : array of shape (n_transients, n_points)
Typically the difference of the on/off spectra in each transient.
f_ppm : array
lb, ub : floats
In ppm, the range over which optimization is bounded
"""
idx = ut.make_idx(f_ppm, lb, ub)
# We are only going to look at the interval between lb and ub
n_points = idx.stop - idx.start
n_params = 8
fit_func = ut.two_gaussian
# Set the bounds for the optimization
bounds = [(lb,ub), # peak 1 location
(lb,ub), # peak 2 location
(0,None), # sigma 1
(0,None), # sigma 2
(0,None), # amp 1
(0,None), # amp 2
(None, None), # offset
(None, None), # drift
]
model = np.empty((spectra.shape[0], n_points))
signal = np.empty((spectra.shape[0], n_points))
params = np.empty((spectra.shape[0], n_params))
for ii, xx in enumerate(spectra):
# We fit to the real spectrum:
signal[ii] = np.real(xx[idx])
params[ii] = _do_two_gaussian_fit(f_ppm[idx], np.real(signal[ii]),
bounds=bounds)
model[ii] = fit_func(f_ppm[idx], *params[ii])
return model, signal, params
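For orientation, a hedged sketch of what a two-Gaussian model like ut.two_gaussian plausibly computes: two peaks plus a linear baseline, matching the eight bounded parameters above. The real implementation lives in the library's utils module; this is an illustrative approximation only.
import numpy as np

def two_gaussian_sketch(f, mu1, mu2, sigma1, sigma2, amp1, amp2, offset, drift):
    # Two Gaussian peaks on a linear baseline: amp * exp(-(f - mu)^2 / (2 sigma^2)).
    g1 = amp1 * np.exp(-(f - mu1) ** 2 / (2 * sigma1 ** 2))
    g2 = amp2 * np.exp(-(f - mu2) ** 2 / (2 * sigma2 ** 2))
    return g1 + g2 + offset + drift * f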
def set_keyspace(self, keyspace):
""" switch all connections to another keyspace """
self.keyspace = keyspace
dfrds = []
for p in self._protos:
dfrds.append(p.submitRequest(ManagedThriftRequest(
'set_keyspace', keyspace)))
return defer.gatherResults(dfrds)
def search_all_payments(payment_status=None, page_size=20, start_cursor=None, offset=0, use_cache=True,
cache_begin=True, relations=None):
"""
Returns a command to search all payments ordered by creation desc
@param payment_status: The payment status. If None is going to return results independent from status
@param page_size: number of payments per page
@param start_cursor: cursor to continue the search
@param offset: offset number of payment on search
@param use_cache: indicates whether it should use cache or not for results
@param cache_begin: indicates whether it should use cache at the beginning or not for results
@param relations: list of relations to bring with payment objects. possible values on list: logs, pay_items, owner
@return: Returns a command to search all payments ordered by creation desc
"""
if payment_status:
return PaymentsByStatusSearch(payment_status, page_size, start_cursor, offset, use_cache,
cache_begin, relations)
return AllPaymentsSearch(page_size, start_cursor, offset, use_cache, cache_begin, relations)
def return_ok(self, cookie, request):
"""
If you override .return_ok(), be sure to call this method. If it
returns false, so should your subclass (assuming your subclass wants to
be more strict about which cookies to return).
"""
# Path has already been checked by .path_return_ok(), and domain
# blocking done by .domain_return_ok().
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
for n in "version", "verifiability", "secure", "expires", "port", "domain":
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
R2deriv
PURPOSE:
evaluate R2 derivative
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
-d K_R (R,z) d R
HISTORY:
2012-12-27 - Written - Bovy (IAS)
"""
if True:
if isinstance(R,nu.ndarray):
if not isinstance(z,nu.ndarray): z= nu.ones_like(R)*z
out= nu.array([self._R2deriv(rr,zz) for rr,zz in zip(R,z)])
return out
if R > 16.*self._hr or R > 6.: return self._kp.R2deriv(R,z)
if R < 1.: R4max= 1.
else: R4max= R
kmax= 2.*self._kmaxFac*self._beta
maxj0zeroIndx= nu.argmin((self._j0zeros-kmax*R4max)**2.) #close enough
maxj2zeroIndx= nu.argmin((self._j2zeros-kmax*R4max)**2.) #close enough
ks0= nu.array([0.5*(self._glx+1.)*self._dj0zeros[ii+1] + self._j0zeros[ii] for ii in range(maxj0zeroIndx)]).flatten()
weights0= nu.array([self._glw*self._dj0zeros[ii+1] for ii in range(maxj0zeroIndx)]).flatten()
ks2= nu.array([0.5*(self._glx+1.)*self._dj2zeros[ii+1] + self._j2zeros[ii] for ii in range(maxj2zeroIndx)]).flatten()
weights2= nu.array([self._glw*self._dj2zeros[ii+1] for ii in range(maxj2zeroIndx)]).flatten()
evalInt0= ks0**2.*special.jn(0,ks0*R)*(self._alpha**2.+ks0**2.)**-1.5*(self._beta*nu.exp(-ks0*nu.fabs(z))-ks0*nu.exp(-self._beta*nu.fabs(z)))/(self._beta**2.-ks0**2.)
evalInt2= ks2**2.*special.jn(2,ks2*R)*(self._alpha**2.+ks2**2.)**-1.5*(self._beta*nu.exp(-ks2*nu.fabs(z))-ks2*nu.exp(-self._beta*nu.fabs(z)))/(self._beta**2.-ks2**2.)
return nu.pi*self._alpha*(nu.sum(weights0*evalInt0)
-nu.sum(weights2*evalInt2))
def write(self, image, dest_fobj, quality=95, format=None):
"""
Wrapper for ``_write``
:param Image image: This is your engine's ``Image`` object. For
PIL it's PIL.Image.
:keyword int quality: A quality level as a percent. The lower, the
higher the compression, the worse the artifacts.
:keyword str format: The format to save to. If omitted, guess based
on the extension. We recommend specifying this. Typical values
are 'JPEG', 'GIF', 'PNG'. Other formats largely depend on your
choice of Engine.
"""
if isinstance(format, basestring) and format.lower() == 'jpg':
# This mistake is made all the time. Let's just effectively alias
# this, since it's commonly used.
format = 'JPEG'
raw_data = self._get_raw_data(image, format, quality)
dest_fobj.write(raw_data)
def get_client(self):
"""Returns the Client
"""
context = self.context
parent = api.get_parent(context)
if context.portal_type == "Client":
return context
elif parent.portal_type == "Client":
return parent
elif context.portal_type == "Batch":
return context.getClient()
elif parent.portal_type == "Batch":
return context.getClient()
return None
def bloquear_sat(retorno):
"""Constrói uma :class:`RespostaSAT` para o retorno (unicode) da função
:meth:`~satcfe.base.FuncoesSAT.bloquear_sat`.
"""
resposta = analisar_retorno(forcar_unicode(retorno),
funcao='BloquearSAT')
if resposta.EEEEE not in ('16000',):
raise ExcecaoRespostaSAT(resposta)
return resposta
def get_relative_to_remote(self):
"""Return the number of commits we are relative to the remote. Negative
is behind, positive in front, zero means we are matched to remote.
"""
s = self.git("status", "--short", "-b")[0]
r = re.compile("\[([^\]]+)\]")
toks = r.findall(s)
if toks:
try:
s2 = toks[-1]
adj, n = s2.split()
assert(adj in ("ahead", "behind"))
n = int(n)
return -n if adj == "behind" else n
except Exception as e:
raise ReleaseVCSError(
("Problem parsing first line of result of 'git status "
"--short -b' (%s):\n%s") % (s, str(e)))
else:
return 0
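For reference, the kind of branch line produced by `git status --short -b` that the regular expression above parses; the bracketed ahead/behind token is what gets extracted (illustrative examples, not captured output).
# "## master...origin/master [ahead 2]"    -> returns  2
# "## master...origin/master [behind 3]"   -> returns -3
# "## master...origin/master"              -> returns  0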
def potential_from_grid(self, grid):
"""
Calculate the potential at a given set of arc-second gridded coordinates.
Parameters
----------
grid : grids.RegularGrid
The grid of (y,x) arc-second coordinates the potential is computed on.
"""
potential_grid = quad_grid(self.potential_func, 0.0, 1.0, grid,
args=(self.axis_ratio, self.slope, self.core_radius))[0]
return self.einstein_radius_rescaled * self.axis_ratio * potential_grid
def load_atomic(self, ptr, ordering, align, name=''):
"""
Load value from pointer, with optional guaranteed alignment:
name = *ptr
"""
if not isinstance(ptr.type, types.PointerType):
raise TypeError("cannot load from value of type %s (%r): not a pointer"
% (ptr.type, str(ptr)))
ld = instructions.LoadAtomicInstr(self.block, ptr, ordering, align, name)
self._insert(ld)
return ld
def add(self, varname, result, pointer=None):
"""Adds the specified python-typed result and an optional Ftype pointer
to use when cleaning up this object.
:arg result: a python-typed representation of the result.
:arg pointer: an instance of Ftype with pointer information for deallocating
the c-pointer.
"""
self.result[varname] = result
setattr(self, varname, result)
if pointer is not None:
self._finalizers[varname] = pointer
def logout(self):
""" logout page
"""
sess = cherrypy.session
username = sess.get(SESSION_KEY, None)
sess[SESSION_KEY] = None
if username:
cherrypy.request.login = None
cherrypy.log.error(
msg="user '%(user)s' logout" % {'user': username},
severity=logging.INFO
)
raise cherrypy.HTTPRedirect("/signin")
def geom_reflect(g, nv):
""" Reflection symmetry operation.
nv is normal vector to reflection plane
g is assumed already translated to center of mass @ origin
.. todo:: Complete geom_reflect docstring
"""
# Imports
import numpy as np
# Force g to n-vector
g = make_nd_vec(g, nd=None, t=np.float64, norm=False)
# Transform the geometry and return
refl_g = np.dot(mtx_refl(nv, reps=(g.shape[0] // 3)), g) \
.reshape((g.shape[0],1))
return refl_g
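The reflection matrix referenced by mtx_refl is presumably built from the Householder formula; here is a self-contained sketch for a single 3-vector, assuming a plane through the origin with normal nv.
import numpy as np

def reflect_point(point, nv):
    # Householder reflection across the plane with normal nv:
    # R = I - 2 * (n n^T) / (n . n)
    nv = np.asarray(nv, dtype=np.float64)
    R = np.eye(3) - 2.0 * np.outer(nv, nv) / np.dot(nv, nv)
    return R @ np.asarray(point, dtype=np.float64)

print(reflect_point([1.0, 2.0, 3.0], [0.0, 0.0, 1.0]))   # [ 1.  2. -3.]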
def unpack_layer(plane):
"""Return a correctly shaped numpy array given the feature layer bytes."""
size = point.Point.build(plane.size)
if size == (0, 0):
# New layer that isn't implemented in this SC2 version.
return None
data = np.frombuffer(plane.data, dtype=Feature.dtypes[plane.bits_per_pixel])
if plane.bits_per_pixel == 1:
data = np.unpackbits(data)
if data.shape[0] != size.x * size.y:
# This could happen if the correct length isn't a multiple of 8, leading
# to some padding bits at the end of the string which are incorrectly
# interpreted as data.
data = data[:size.x * size.y]
return data.reshape(size.y, size.x)
def do_cat(self, path):
"""display the contents of a file"""
path = path[0]
tmp_file_path = self.TMP_PATH + 'tmp'
if not os.path.exists(self.TMP_PATH):
os.makedirs(self.TMP_PATH)
f = self.n.downloadFile(self.current_path + path, tmp_file_path)
f = open(tmp_file_path, 'r')
self.stdout.write(f.read())
self.stdout.write("\n")
def cancel_job(self, job_resource_name: str):
"""Cancels the given job.
See also the cancel method on EngineJob.
Params:
job_resource_name: A string of the form
`projects/project_id/programs/program_id/jobs/job_id`.
"""
self.service.projects().programs().jobs().cancel(
name=job_resource_name, body={}).execute()
def compile_string(self, mof, ns, filename=None):
"""
Compile a string of MOF statements into a namespace of the associated
CIM repository.
Parameters:
mof (:term:`string`):
The string of MOF statements to be compiled.
ns (:term:`string`):
The name of the CIM namespace in the associated CIM repository
that is used for lookup of any dependent CIM elements, and that
is also the target of the compilation.
filename (:term:`string`):
The path name of the file that the MOF statements were read from.
This information is used only in compiler messages.
Raises:
IOError: MOF file not found.
MOFParseError: Syntax error in the MOF.
: Any exceptions that are raised by the repository connection class.
"""
lexer = self.lexer.clone()
lexer.parser = self.parser
try:
oldfile = self.parser.file
except AttributeError:
oldfile = None
self.parser.file = filename
try:
oldmof = self.parser.mof
except AttributeError:
oldmof = None
self.parser.mof = mof
self.parser.handle.default_namespace = ns
if ns not in self.parser.qualcache:
self.parser.qualcache[ns] = NocaseDict()
if ns not in self.parser.classnames:
self.parser.classnames[ns] = []
try:
# Call the parser. To generate detailed output of states
# add debug=... to following line where debug may be a
            # constant (e.g. 1) or a log definition, e.g.:
# log = logging.getLogger()
# logging.basicConfig(level=logging.DEBUG)
rv = self.parser.parse(mof, lexer=lexer)
self.parser.file = oldfile
self.parser.mof = oldmof
return rv
except MOFParseError as pe:
# Generate the error message into log and reraise error
self.parser.log(pe.get_err_msg())
raise
except CIMError as ce:
if hasattr(ce, 'file_line'):
self.parser.log(
_format("Fatal Error: {0}:{1}",
ce.file_line[0], ce.file_line[1]))
else:
self.parser.log("Fatal Error:")
description = _format(":{0}", ce.status_description) if \
ce.status_description else ""
self.parser.log(
_format("{0}{1}",
_statuscode2string(ce.status_code), description))
raise
|
Compile a string of MOF statements into a namespace of the associated
CIM repository.
Parameters:
mof (:term:`string`):
The string of MOF statements to be compiled.
ns (:term:`string`):
The name of the CIM namespace in the associated CIM repository
that is used for lookup of any dependent CIM elements, and that
is also the target of the compilation.
filename (:term:`string`):
The path name of the file that the MOF statements were read from.
This information is used only in compiler messages.
Raises:
IOError: MOF file not found.
MOFParseError: Syntax error in the MOF.
: Any exceptions that are raised by the repository connection class.
|
def p_initial(self, p):
'initial : INITIAL initial_statement'
p[0] = Initial(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1))
|
initial : INITIAL initial_statement
|
def create_exception_by_name(
name,
detailCode='0',
description='',
traceInformation=None,
identifier=None,
nodeId=None,
):
"""Create a DataONEException based object by name.
Args:
name: str
The type name of a DataONE Exception. E.g. NotFound.
If an unknown type name is used, it is automatically set to ServiceFailure. As
the XML Schema for DataONE Exceptions does not restrict the type names, this
may occur when deserializing an exception not defined by DataONE.
detailCode: int
Optional index into a table of predefined error conditions.
See Also:
For remaining args, see: ``DataONEException()``
"""
try:
dataone_exception = globals()[name]
except LookupError:
dataone_exception = ServiceFailure
return dataone_exception(
detailCode, description, traceInformation, identifier, nodeId
)
|
Create a DataONEException based object by name.
Args:
name: str
The type name of a DataONE Exception. E.g. NotFound.
If an unknown type name is used, it is automatically set to ServiceFailure. As
the XML Schema for DataONE Exceptions does not restrict the type names, this
may occur when deserializing an exception not defined by DataONE.
detailCode: int
Optional index into a table of predefined error conditions.
See Also:
For remaining args, see: ``DataONEException()``
|
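The globals() lookup with a LookupError fallback is the core of the function. A standalone sketch of the same pattern, using hypothetical stand-in exception classes rather than the real DataONE types:

class ServiceFailure(Exception):
    """Hypothetical stand-in for the DataONE ServiceFailure type."""

class NotFound(Exception):
    """Hypothetical stand-in for the DataONE NotFound type."""

def exception_by_name(name, *args):
    # globals()[name] raises KeyError (a LookupError) for unknown names,
    # so unknown type names silently fall back to ServiceFailure.
    try:
        cls = globals()[name]
    except LookupError:
        cls = ServiceFailure
    return cls(*args)

print(type(exception_by_name('NotFound', 'missing pid')).__name__)   # NotFound
print(type(exception_by_name('NoSuchType', 'oops')).__name__)        # ServiceFailure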
def tremolo(self, freq, depth=40):
"""tremolo takes two parameters: frequency and depth (max 100)"""
self.command.append("tremolo")
self.command.append(freq)
self.command.append(depth)
return self
|
tremolo takes two parameters: frequency and depth (max 100)
|
def apply_color_scheme(self, color_scheme):
"""
Apply a pygments color scheme to the console.
As there is not a 1 to 1 mapping between color scheme formats and
console formats, we decided to make the following mapping (it usually
looks good for most of the available pygments styles):
- stdout_color = normal color
- stderr_color = red (lighter if background is dark)
- stdin_color = numbers color
- app_msg_color = string color
            - background_color = background
:param color_scheme: pyqode.core.api.ColorScheme to apply
"""
self.stdout_color = color_scheme.formats['normal'].foreground().color()
self.stdin_color = color_scheme.formats['number'].foreground().color()
self.app_msg_color = color_scheme.formats[
'string'].foreground().color()
self.background_color = color_scheme.background
if self.background_color.lightness() < 128:
self.stderr_color = QColor('#FF8080')
else:
self.stderr_color = QColor('red')
|
Apply a pygments color scheme to the console.
As there is not a 1 to 1 mapping between color scheme formats and
console formats, we decided to make the following mapping (it usually
looks good for most of the available pygments styles):
- stdout_color = normal color
- stderr_color = red (lighter if background is dark)
- stdin_color = numbers color
- app_msg_color = string color
- background_color = background
:param color_scheme: pyqode.core.api.ColorScheme to apply
|
def orient_averaged_adaptive(tm):
"""Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
"""
S = np.zeros((2,2), dtype=complex)
Z = np.zeros((4,4))
def Sfunc(beta, alpha, i, j, real):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
s = S_ang[i,j].real if real else S_ang[i,j].imag
return s * tm.or_pdf(beta)
ind = range(2)
for i in ind:
for j in ind:
S.real[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0
S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0
def Zfunc(beta, alpha, i, j):
        (S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
return Z_ang[i,j] * tm.or_pdf(beta)
ind = range(4)
for i in ind:
for j in ind:
Z[i,j] = dblquad(Zfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0
return (S, Z)
|
Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
|
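scipy.integrate.dblquad drives the averaging, and its argument order (inner variable first, outer variable second) is easy to get wrong. A self-contained sketch of the same call pattern with a toy integrand and a uniform orientation PDF, both assumed here purely for illustration:

import numpy as np
from scipy.integrate import dblquad

def or_pdf(beta):
    # toy uniform-in-beta orientation PDF, normalized over 0..180 degrees
    return 1.0 / 180.0

def integrand(beta, alpha):
    # first argument is the inner variable (beta), second the outer (alpha),
    # matching the calling convention used in orient_averaged_adaptive
    return np.cos(np.radians(beta)) ** 2 * or_pdf(beta)

value, err = dblquad(integrand, 0.0, 360.0, lambda a: 0.0, lambda a: 180.0)
print(value / 360.0)   # orientation-averaged <cos^2 beta>, roughly 0.5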
def datetime2literal_rnc(d: datetime.datetime, c: Optional[Dict]) -> str:
"""Format a DateTime object as something MySQL will actually accept."""
# dt = d.strftime("%Y-%m-%d %H:%M:%S")
# ... can fail with e.g.
# ValueError: year=1850 is before 1900; the datetime strftime() methods
# require year >= 1900
# http://stackoverflow.com/questions/10263956
dt = d.isoformat(" ")
# noinspection PyArgumentList
return _mysql.string_literal(dt, c)
|
Format a DateTime object as something MySQL will actually accept.
|
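The isoformat(' ') workaround matters mainly on older interpreters where strftime() rejected years before 1900; the formatting step itself, without the MySQL escaping, can be checked in isolation:

import datetime

d = datetime.datetime(1850, 6, 1, 12, 30, 45)
# isoformat with a space separator yields the "YYYY-MM-DD HH:MM:SS" form
# MySQL accepts, and it works for any year.
print(d.isoformat(" "))   # 1850-06-01 12:30:45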
def _findlinestarts(code):
"""Find the offsets in a byte code which are start of lines in the source
Generate pairs offset,lineno as described in Python/compile.c
    This is a modified version of dis.findlinestarts, which allows multiple line starts
with the same line number"""
lineno = code.co_firstlineno
addr = 0
for byte_incr, line_incr in zip(code.co_lnotab[0::2], code.co_lnotab[1::2]):
if byte_incr:
yield addr, lineno
addr += byte_incr
lineno += line_incr
yield addr, lineno
|
Find the offsets in a byte code which are start of lines in the source
Generate pairs offset,lineno as described in Python/compile.c
This is a modified version of dis.findlinestarts, which allows multiple line starts
with the same line number
|
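The unmodified version of this generator ships in the standard library as dis.findlinestarts, which makes a convenient reference point; note also that the co_lnotab layout iterated above is the pre-3.10 one, and newer interpreters expose co_lines() instead. A quick check on a compiled snippet:

import dis

src = "x = 1\ny = 2\nz = x + y\n"
code = compile(src, "<demo>", "exec")
# Each pair is (bytecode offset, source line number).
for offset, lineno in dis.findlinestarts(code):
    print(offset, lineno)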
def fit(self, X, y=None):
"""Perform robust single linkage clustering from features or
distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
Returns
-------
self : object
Returns self
"""
X = check_array(X, accept_sparse='csr')
kwargs = self.get_params()
del kwargs['metric_params']
kwargs.update(self.metric_params)
self.labels_, self._cluster_hierarchy = robust_single_linkage(
X, **kwargs)
return self
|
Perform robust single linkage clustering from features or
distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
Returns
-------
self : object
Returns self
|
def bootstrap(
self, controller_name, region=None, agent_version=None,
auto_upgrade=False, bootstrap_constraints=None,
bootstrap_series=None, config=None, constraints=None,
credential=None, default_model=None, keep_broken=False,
metadata_source=None, no_gui=False, to=None,
upload_tools=False):
"""Initialize a cloud environment.
:param str controller_name: Name of controller to create
:param str region: Cloud region in which to bootstrap
:param str agent_version: Version of tools to use for Juju agents
        :param bool auto_upgrade: Upgrade to latest patch release tools on first
bootstrap
:param bootstrap_constraints: Constraints for the bootstrap machine
:type bootstrap_constraints: :class:`juju.Constraints`
:param str bootstrap_series: Series of the bootstrap machine
:param dict config: Controller configuration
:param constraints: Default constraints for all future workload
machines
:type constraints: :class:`juju.Constraints`
:param credential: Credential to use when bootstrapping
:type credential: :class:`juju.Credential`
:param str default_model: Name to give the default model
:param bool keep_broken: Don't destroy model if bootstrap fails
:param str metadata_source: Local path to use as tools and/or metadata
source
:param bool no_gui: Don't install the Juju GUI in the controller when
bootstrapping
:param str to: Placement directive for bootstrap node (typically used
with MAAS)
:param bool upload_tools: Upload local version of tools before
bootstrapping
"""
raise NotImplementedError()
|
Initialize a cloud environment.
:param str controller_name: Name of controller to create
:param str region: Cloud region in which to bootstrap
:param str agent_version: Version of tools to use for Juju agents
:param bool auto_upgrade: Upgrade to latest patch release tools on first
bootstrap
:param bootstrap_constraints: Constraints for the bootstrap machine
:type bootstrap_constraints: :class:`juju.Constraints`
:param str bootstrap_series: Series of the bootstrap machine
:param dict config: Controller configuration
:param constraints: Default constraints for all future workload
machines
:type constraints: :class:`juju.Constraints`
:param credential: Credential to use when bootstrapping
:type credential: :class:`juju.Credential`
:param str default_model: Name to give the default model
:param bool keep_broken: Don't destroy model if bootstrap fails
:param str metadata_source: Local path to use as tools and/or metadata
source
:param bool no_gui: Don't install the Juju GUI in the controller when
bootstrapping
:param str to: Placement directive for bootstrap node (typically used
with MAAS)
:param bool upload_tools: Upload local version of tools before
bootstrapping
|
def add_variant(self, variant):
"""Add a variant to the variant collection
If the variant exists we update the count else we insert a new variant object.
Args:
variant (dict): A variant dictionary
"""
LOG.debug("Upserting variant: {0}".format(variant.get('_id')))
update = self._get_update(variant)
message = self.db.variant.update_one(
{'_id': variant['_id']},
update,
upsert=True
)
if message.modified_count == 1:
LOG.debug("Variant %s was updated", variant.get('_id'))
else:
LOG.debug("Variant was added to database for first time")
return
|
Add a variant to the variant collection
If the variant exists we update the count else we insert a new variant object.
Args:
variant (dict): A variant dictionary
|
def get(self, query_path=None, return_type=list, preceding_depth=None, throw_null_return_error=False):
""" Traverses the list of query paths to find the data requested
:param query_path: (list(str), str), list of query path branches or query string
Default behavior: returns list(str) of possible config headers
:param return_type: (list, str, dict, OrderedDict), desired return type for the data
:param preceding_depth: int, returns a dictionary containing the data that traces back up the path for x depth
-1: for the full traversal back up the path
None: is default for no traversal
:param throw_null_return_error: bool, whether or not to throw an error if we get an empty result but no error
:return: (list, str, dict, OrderedDict), the type specified from return_type
:raises: exceptions.ResourceNotFoundError: if the query path is invalid
"""
function_type_lookup = {str: self._get_path_entry_from_string,
list: self._get_path_entry_from_list}
if query_path is None:
return self._default_config(return_type)
try:
config_entry = function_type_lookup.get(type(query_path), str)(query_path)
query_result = self.config_entry_handler.format_query_result(config_entry,
query_path,
return_type=return_type,
preceding_depth=preceding_depth)
return query_result
except IndexError:
return return_type()
|
Traverses the list of query paths to find the data requested
:param query_path: (list(str), str), list of query path branches or query string
Default behavior: returns list(str) of possible config headers
:param return_type: (list, str, dict, OrderedDict), desired return type for the data
:param preceding_depth: int, returns a dictionary containing the data that traces back up the path for x depth
-1: for the full traversal back up the path
None: is default for no traversal
:param throw_null_return_error: bool, whether or not to throw an error if we get an empty result but no error
:return: (list, str, dict, OrderedDict), the type specified from return_type
:raises: exceptions.ResourceNotFoundError: if the query path is invalid
|
def is_not_empty(value, **kwargs):
"""Indicate whether ``value`` is empty.
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is empty, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
value = validators.not_empty(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True
|
Indicate whether ``value`` is not empty.
:param value: The value to evaluate.
:returns: ``True`` if ``value`` is not empty, ``False`` if it is empty.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
|
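The wrapper is an instance of the usual "raising validator to boolean predicate" pattern: run the validator, map success and validation failure to True/False, and let programmer errors (here SyntaxError) propagate. A standalone sketch with a hypothetical not_empty validator:

def not_empty(value):
    """Hypothetical raising validator: returns value or raises ValueError."""
    if value is None or (hasattr(value, "__len__") and len(value) == 0):
        raise ValueError("value is empty")
    return value

def is_not_empty(value):
    # Map validator success to True and validation failure to False.
    try:
        not_empty(value)
    except Exception:
        return False
    return True

print(is_not_empty("hello"), is_not_empty(""), is_not_empty(None))  # True False False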
def get_vault_form_for_create(self, vault_record_types):
"""Gets the vault form for creating new vaults.
A new form should be requested for each create transaction.
arg: vault_record_types (osid.type.Type[]): array of vault
record types
return: (osid.authorization.VaultForm) - the vault form
raise: NullArgument - ``vault_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
        raise: Unsupported - unable to get form with requested record
types
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinAdminSession.get_bin_form_for_create_template
if self._catalog_session is not None:
return self._catalog_session.get_catalog_form_for_create(catalog_record_types=vault_record_types)
for arg in vault_record_types:
if not isinstance(arg, ABCType):
raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
if vault_record_types == []:
result = objects.VaultForm(
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy) # Probably don't need effective agent id now that we have proxy in form.
else:
result = objects.VaultForm(
record_types=vault_record_types,
runtime=self._runtime,
effective_agent_id=self.get_effective_agent_id(),
proxy=self._proxy) # Probably don't need effective agent id now that we have proxy in form.
self._forms[result.get_id().get_identifier()] = not CREATED
return result
|
Gets the vault form for creating new vaults.
A new form should be requested for each create transaction.
arg: vault_record_types (osid.type.Type[]): array of vault
record types
return: (osid.authorization.VaultForm) - the vault form
raise: NullArgument - ``vault_record_types`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - unable to get form with requested record
types
*compliance: mandatory -- This method must be implemented.*
|
def mean_by_panel(self, length):
"""
Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide.
"""
self._check_panel(length)
func = lambda v: v.reshape(-1, length).mean(axis=0)
newindex = arange(length)
return self.map(func, index=newindex)
|
Compute the mean across fixed sized panels of each record.
Splits each record into panels of size `length`,
and then computes the mean across panels.
Panel length must subdivide record exactly.
Parameters
----------
length : int
Fixed length with which to subdivide.
|
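The per-record transform is just a reshape followed by a mean over panels; a minimal numpy sketch of that step for a single record (the map/index machinery of the series object is omitted):

import numpy as np

record = np.arange(12, dtype=float)   # length 12, panel length 4 -> 3 panels
length = 4
# Each row of the reshaped array is one panel; averaging over axis 0
# gives the mean value at each within-panel position.
panel_mean = record.reshape(-1, length).mean(axis=0)
print(panel_mean)                     # [4. 5. 6. 7.]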
def __collect_fields(self):
""" Use field values from config.json and collect from request """
form = FormData()
form.add_field(self.__username_field, required=True,
error=self.__username_error)
form.add_field(self.__password_field, required=True,
error=self.__password_error)
form.parse()
self.username = form.values[self.__username_field]
self.password = form.values[self.__password_field]
return
|
Use field values from config.json and collect from request
|
def delete(self, robj, rw=None, r=None, w=None, dw=None, pr=None, pw=None,
timeout=None):
"""
Delete an object.
"""
# We could detect quorum_controls here but HTTP ignores
# unknown flags/params.
params = {'rw': rw, 'r': r, 'w': w, 'dw': dw, 'pr': pr, 'pw': pw,
'timeout': timeout}
headers = {}
bucket_type = self._get_bucket_type(robj.bucket.bucket_type)
url = self.object_path(robj.bucket.name, robj.key,
bucket_type=bucket_type, **params)
use_vclocks = (self.tombstone_vclocks() and hasattr(robj, 'vclock') and
robj.vclock is not None)
if use_vclocks:
headers['X-Riak-Vclock'] = robj.vclock.encode('base64')
response = self._request('DELETE', url, headers)
self.check_http_code(response[0], [204, 404])
return self
|
Delete an object.
|
def _compute_childtab(self, lcptab):
"""Computes the child 'up' and 'down' arrays in O(n) based on the LCP table.
Abouelhoda et al. (2004).
"""
last_index = -1
stack = [0]
n = len(lcptab)
childtab_up = np.zeros(n, dtype=np.int) # Zeros / -1 ?
childtab_down = np.zeros(n, dtype=np.int)
for i in xrange(n):
while lcptab[i] < lcptab[stack[-1]]:
last_index = stack.pop()
if lcptab[i] <= lcptab[stack[-1]] and lcptab[stack[-1]] != lcptab[last_index]:
childtab_down[stack[-1]] = last_index
if last_index != -1:
childtab_up[i] = last_index
last_index = -1
stack.append(i)
return childtab_up, childtab_down
|
Computes the child 'up' and 'down' arrays in O(n) based on the LCP table.
Abouelhoda et al. (2004).
|
def index_bams(job, config):
"""
Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
"""
job.fileStore.logToMaster('Indexed sample BAMS: ' + config.uuid)
disk = '1G' if config.ci_test else '20G'
config.normal_bai = job.addChildJobFn(run_samtools_index, config.normal_bam, cores=1, disk=disk).rv()
config.tumor_bai = job.addChildJobFn(run_samtools_index, config.tumor_bam, cores=1, disk=disk).rv()
job.addFollowOnJobFn(preprocessing_declaration, config)
|
Convenience job for handling bam indexing to make the workflow declaration cleaner
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
|
def _waiting_expect(self):
'''``True`` when the client is waiting for 100 Continue.
'''
if self._expect_sent is None:
if self.environ.get('HTTP_EXPECT', '').lower() == '100-continue':
return True
self._expect_sent = ''
return False
|
``True`` when the client is waiting for 100 Continue.
|
def _stdlib_paths():
"""Return a set of paths from which Python imports the standard library.
"""
attr_candidates = [
'prefix',
'real_prefix', # virtualenv: only set inside a virtual environment.
'base_prefix', # venv: always set, equal to prefix if outside.
]
prefixes = (getattr(sys, a) for a in attr_candidates if hasattr(sys, a))
version = 'python%s.%s' % sys.version_info[0:2]
return set(os.path.abspath(os.path.join(p, 'lib', version))
for p in prefixes)
|
Return a set of paths from which Python imports the standard library.
|
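A quick sketch of the same computation on the running interpreter; note that the 'lib/pythonX.Y' layout assumed here (and by the original function) is POSIX-specific:

import os
import sys

version = "python%s.%s" % sys.version_info[0:2]
# base_prefix exists on Python 3.3+, prefix always; real_prefix is virtualenv-only.
candidates = [getattr(sys, a) for a in ("prefix", "base_prefix") if hasattr(sys, a)]
paths = {os.path.abspath(os.path.join(p, "lib", version)) for p in candidates}
print(paths)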
def stack(args):
"""
%prog stack fastafile
Create landscape plots that show the amounts of genic sequences, and repetitive
sequences along the chromosomes.
"""
p = OptionParser(stack.__doc__)
p.add_option("--top", default=10, type="int",
help="Draw the first N chromosomes [default: %default]")
p.add_option("--stacks",
default="Exons,Introns,DNA_transposons,Retrotransposons",
help="Features to plot in stackplot [default: %default]")
p.add_option("--switch",
help="Change chr names based on two-column file [default: %default]")
add_window_options(p)
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 1:
sys.exit(not p.print_help())
fastafile, = args
top = opts.top
window, shift, subtract, merge = check_window_options(opts)
switch = opts.switch
if switch:
switch = DictFile(opts.switch)
stacks = opts.stacks.split(",")
bedfiles = get_beds(stacks)
binfiles = get_binfiles(bedfiles, fastafile, shift,
subtract=subtract, merge=merge)
sizes = Sizes(fastafile)
s = list(sizes.iter_sizes())[:top]
maxl = max(x[1] for x in s)
margin = .08
inner = .02 # y distance between tracks
pf = fastafile.rsplit(".", 1)[0]
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
# Gauge
ratio = draw_gauge(root, margin, maxl)
# Per chromosome
yinterval = (1 - 2 * margin) / (top + 1)
xx = margin
yy = 1 - margin
for chr, clen in s:
yy -= yinterval
xlen = clen / ratio
cc = chr
if "_" in chr:
ca, cb = chr.split("_")
cc = ca[0].upper() + cb
if switch and cc in switch:
cc = "\n".join((cc, "({0})".format(switch[cc])))
root.add_patch(Rectangle((xx, yy), xlen, yinterval - inner, color=gray))
ax = fig.add_axes([xx, yy, xlen, yinterval - inner])
nbins = clen / shift
if clen % shift:
nbins += 1
stackplot(ax, binfiles, nbins, palette, chr, window, shift)
root.text(xx - .04, yy + .5 * (yinterval - inner), cc, ha="center", va="center")
ax.set_xlim(0, nbins)
ax.set_ylim(0, 1)
ax.set_axis_off()
# Legends
yy -= yinterval
xx = margin
for b, p in zip(bedfiles, palette):
b = b.rsplit(".", 1)[0].replace("_", " ")
b = Registration.get(b, b)
root.add_patch(Rectangle((xx, yy), inner, inner, color=p, lw=0))
xx += 2 * inner
root.text(xx, yy, b, size=13)
xx += len(b) * .012 + inner
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = pf + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
|
%prog stack fastafile
Create landscape plots that show the amounts of genic sequences, and repetitive
sequences along the chromosomes.
|
def show_item_v3(h):
"""Show any RAR3 record.
"""
st = rar3_type(h.type)
xprint("%s: hdrlen=%d datlen=%d", st, h.header_size, h.add_size)
if h.type in (rf.RAR_BLOCK_FILE, rf.RAR_BLOCK_SUB):
if h.host_os == rf.RAR_OS_UNIX:
s_mode = "0%o" % h.mode
else:
s_mode = "0x%x" % h.mode
xprint(" flags=0x%04x:%s", h.flags, get_file_flags(h.flags))
if h.host_os >= 0 and h.host_os < len(os_list):
s_os = os_list[h.host_os]
else:
s_os = "?"
xprint(" os=%d:%s ver=%d mode=%s meth=%c cmp=%d dec=%d vol=%d",
h.host_os, s_os,
h.extract_version, s_mode, h.compress_type,
h.compress_size, h.file_size, h.volume)
ucrc = (h.CRC + (1 << 32)) & ((1 << 32) - 1)
xprint(" crc=0x%08x (%d) date_time=%s", ucrc, h.CRC, fmt_time(h.date_time))
xprint(" name=%s", h.filename)
if h.mtime:
xprint(" mtime=%s", fmt_time(h.mtime))
if h.ctime:
xprint(" ctime=%s", fmt_time(h.ctime))
if h.atime:
xprint(" atime=%s", fmt_time(h.atime))
if h.arctime:
xprint(" arctime=%s", fmt_time(h.arctime))
elif h.type == rf.RAR_BLOCK_MAIN:
xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, main_bits))
elif h.type == rf.RAR_BLOCK_ENDARC:
xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, endarc_bits))
elif h.type == rf.RAR_BLOCK_MARK:
xprint(" flags=0x%04x:", h.flags)
else:
xprint(" flags=0x%04x:%s", h.flags, render_flags(h.flags, generic_bits))
if h.comment is not None:
cm = repr(h.comment)
if cm[0] == 'u':
cm = cm[1:]
xprint(" comment=%s", cm)
|
Show any RAR3 record.
|
def pluralize(self, measure, singular, plural):
""" Returns a string that contains the measure (amount) and its plural
or singular form depending on the amount.
Parameters:
:param measure: Amount, value, always a numerical value
:param singular: The singular form of the chosen word
:param plural: The plural form of the chosen word
Returns:
String
"""
if measure == 1:
return "{} {}".format(measure, singular)
else:
return "{} {}".format(measure, plural)
|
Returns a string that contains the measure (amount) and its plural
or singular form depending on the amount.
Parameters:
:param measure: Amount, value, always a numerical value
:param singular: The singular form of the chosen word
:param plural: The plural form of the chosen word
Returns:
String
|
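The same measure/singular/plural rule as a free function, with a short usage check:

def pluralize(measure, singular, plural):
    # Same formatting rule as the method above: only exactly 1 gets the singular form.
    return "{} {}".format(measure, singular if measure == 1 else plural)

print(pluralize(1, "file", "files"))   # 1 file
print(pluralize(3, "file", "files"))   # 3 files
print(pluralize(0, "file", "files"))   # 0 files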
def _contains_blinded_text(stats_xml):
""" Heuristic to determine whether the treebank has blinded texts or not """
tree = ET.parse(stats_xml)
root = tree.getroot()
total_tokens = int(root.find('size/total/tokens').text)
unique_lemmas = int(root.find('lemmas').get('unique'))
    # assume the corpus is largely blinded when unique lemmas make up less than 1% of the tokens
return (unique_lemmas / total_tokens) < 0.01
|
Heuristic to determine whether the treebank has blinded texts or not
|
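A self-contained sketch with an inline stats document, assuming the same element layout (a size/total/tokens element and a lemmas element carrying a unique attribute):

import xml.etree.ElementTree as ET

stats = """
<treebank>
  <size><total><tokens>100000</tokens></total></size>
  <lemmas unique="500"/>
</treebank>
"""
root = ET.fromstring(stats)
total_tokens = int(root.find("size/total/tokens").text)
unique_lemmas = int(root.find("lemmas").get("unique"))
# Flag the treebank as blinded when unique lemmas are under 1% of all tokens.
print((unique_lemmas / total_tokens) < 0.01)   # True for 500 / 100000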
def make_proxy_method(cls, name):
"""Creates a proxy function that can be used by Flasks routing. The
proxy instantiates the Mocha subclass and calls the appropriate
method.
:param name: the name of the method to create a proxy for
"""
i = cls()
view = getattr(i, name)
for decorator in cls.decorators:
view = decorator(view)
@functools.wraps(view)
def proxy(**forgettable_view_args):
# Always use the global request object's view_args, because they
# can be modified by intervening function before an endpoint or
# wrapper gets called. This matches Flask's behavior.
del forgettable_view_args
if hasattr(i, "before_request"):
response = i.before_request(name, **request.view_args)
if response is not None:
return response
before_view_name = "before_" + name
if hasattr(i, before_view_name):
before_view = getattr(i, before_view_name)
response = before_view(**request.view_args)
if response is not None:
return response
response = view(**request.view_args)
# You can also return a dict or None, it will pass it to render
if isinstance(response, dict) or response is None:
response = response or {}
if hasattr(i, "_renderer"):
response = i._renderer(response)
else:
_template = build_endpoint_route_name(cls, view.__name__)
_template = utils.list_replace([".", ":"], "/", _template)
_template = "%s.%s" % (_template, cls.template_markup)
# Set the title from the nav title, if not set
_meta_title = getattr(g, "__META__", {}).get("title")
if (not _meta_title or _meta_title == "") and get_view_attr(view, "title"):
page_attr(title=get_view_attr(view, "title"))
response.setdefault("_template", _template)
response = i.render(**response)
if not isinstance(response, Response):
response = make_response(response)
for ext in cls._ext:
response = ext(response)
after_view_name = "after_" + name
if hasattr(i, after_view_name):
after_view = getattr(i, after_view_name)
response = after_view(response)
if hasattr(i, "after_request"):
response = i.after_request(name, response)
return response
return proxy
|
Creates a proxy function that can be used by Flasks routing. The
proxy instantiates the Mocha subclass and calls the appropriate
method.
:param name: the name of the method to create a proxy for
|
def delete_namespaced_service(self, name, namespace, **kwargs): # noqa: E501
"""delete_namespaced_service # noqa: E501
delete a Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_service(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_namespaced_service_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.delete_namespaced_service_with_http_info(name, namespace, **kwargs) # noqa: E501
return data
|
delete_namespaced_service # noqa: E501
delete a Service # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_namespaced_service(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Service (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:param V1DeleteOptions body:
:return: V1Status
If the method is called asynchronously,
returns the request thread.
|
def make_spindles(events, power_peaks, powers, dat_det, dat_orig, time,
s_freq):
"""Create dict for each spindle, based on events of time points.
Parameters
----------
events : ndarray (dtype='int')
        N x 3 matrix with start, peak, end samples
power_peaks : ndarray (dtype='float')
peak in power spectrum for each event
powers : ndarray (dtype='float')
average power in power spectrum for each event
dat_det : ndarray (dtype='float')
vector with the data after detection-transformation (to compute peak)
dat_orig : ndarray (dtype='float')
vector with the raw data on which detection was performed
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
Returns
-------
list of dict
list of all the spindles, with information about start_time, peak_time,
end_time (s), peak_val (signal units), area_under_curve
(signal units * s), peak_freq (Hz)
"""
i, events = _remove_duplicate(events, dat_det)
power_peaks = power_peaks[i]
spindles = []
for i, one_peak, one_pwr in zip(events, power_peaks, powers):
one_spindle = {'start': time[i[0]],
'end': time[i[2] - 1],
'peak_time': time[i[1]],
'peak_val_det': dat_det[i[1]],
'peak_val_orig': dat_orig[i[1]],
'dur': (i[2] - i[0]) / s_freq,
'auc_det': sum(dat_det[i[0]:i[2]]) / s_freq,
'auc_orig': sum(dat_orig[i[0]:i[2]]) / s_freq,
'rms_det': sqrt(mean(square(dat_det[i[0]:i[2]]))),
'rms_orig': sqrt(mean(square(dat_orig[i[0]:i[2]]))),
'power_orig': one_pwr,
'peak_freq': one_peak,
'ptp_det': ptp(dat_det[i[0]:i[2]]),
'ptp_orig': ptp(dat_orig[i[0]:i[2]])
}
spindles.append(one_spindle)
return spindles
|
Create dict for each spindle, based on events of time points.
Parameters
----------
events : ndarray (dtype='int')
N x 3 matrix with start, peak, end samples
power_peaks : ndarray (dtype='float')
peak in power spectrum for each event
powers : ndarray (dtype='float')
average power in power spectrum for each event
dat_det : ndarray (dtype='float')
vector with the data after detection-transformation (to compute peak)
dat_orig : ndarray (dtype='float')
vector with the raw data on which detection was performed
time : ndarray (dtype='float')
vector with time points
s_freq : float
sampling frequency
Returns
-------
list of dict
list of all the spindles, with information about start_time, peak_time,
end_time (s), peak_val (signal units), area_under_curve
(signal units * s), peak_freq (Hz)
|
def compact(db_spec, poll_interval=0):
"""
Compact a CouchDB database with optional synchronicity.
    The ``compact`` function will compact a CouchDB database stored on a
running CouchDB server. By default, this process occurs *asynchronously*,
meaning that the compaction will occur in the background. Often, you'll want
to know when the process has completed; for this reason, ``compact`` will
return a function which, when called, will return the state of the
compaction. If it has completed, ``True`` will be returned; otherwise,
``False``. This may be called multiple times.
Alternatively, you may opt to run ``compact`` in synchronous mode, for
debugging or profiling purposes. If this is the case, an optional keyword
argument ``poll_interval`` is accepted, which should be a number (in
    seconds) representing the time to wait between polls. A sensible default
may be around 0.5 (seconds).
Because this function operates on database specifiers, you can choose to
operate on the local server or any remote server.
"""
server = get_server_from_specifier(db_spec)
db = get_db_from_specifier(db_spec)
# Get logger
logger = logging.getLogger('relax.couchdb.compact')
logger.info('Pre-compact size of %r: %s' % (db_spec,
repr_bytes(db.info()['disk_size']),))
logger.debug('POST ' + urlparse.urljoin(db.resource.uri + '/', '_compact'))
# Start compaction process by issuing a POST to '/<db_name>/_compact'.
resp_headers, resp_body = db.resource.post('/_compact')
# Asynchronous compaction
if not poll_interval:
if not (resp_body.get('ok', False) and
resp_headers['status'] == '202'):
err = CompactionError('Compaction of %r failed.')
# Give the exception some useful information.
err.response = (resp_headers, resp_body)
raise err
# Return a function which, when called, will return whether or not the
# compaction process is still running.
def check_completed():
logger.debug(
'Polling database to check if compaction has completed')
logger.debug('GET ' + db.resource.uri + '/')
db_info = db.info()
completed = not db_info.get('compact_running', False)
if completed and db_info.get('disk_size', None):
logger.info('Post-compact size of %r: %s' % (db_spec,
repr_bytes(db_info['disk_size'])))
return completed
return check_completed
# Synchronous compaction
elif poll_interval > 0:
logger.debug(
'Polling database to check if compaction has completed')
logger.debug('GET ' + db.resource.uri + '/')
# Shows whether compaction is running or not.
running = db.info().get('compact_running', False)
# Poll the running state of the compaction.
while running:
time.sleep(poll_interval)
logger.debug(
'Polling database to check if compaction has completed')
logger.debug('GET ' + db.resource.uri + '/')
running = db.info().get('compact_running', False)
size_after = db.info().get('disk_size', None)
if size_after:
logger.info('Post-compact size of %r: %s' % (db_spec,
repr_bytes(size_after)))
return True
else:
raise ValueError('Poll interval must be greater than zero.')
|
Compact a CouchDB database with optional synchronicity.
The ``compact`` function will compact a CouchDB database stored on a
running CouchDB server. By default, this process occurs *asynchronously*,
meaning that the compaction will occur in the background. Often, you'll want
to know when the process has completed; for this reason, ``compact`` will
return a function which, when called, will return the state of the
compaction. If it has completed, ``True`` will be returned; otherwise,
``False``. This may be called multiple times.
Alternatively, you may opt to run ``compact`` in synchronous mode, for
debugging or profiling purposes. If this is the case, an optional keyword
argument ``poll_interval`` is accepted, which should be a number (in
seconds) representing the time to wait between polls. A sensible default
may be around 0.5 (seconds).
Because this function operates on database specifiers, you can choose to
operate on the local server or any remote server.
|
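In the asynchronous branch the caller receives a closure to poll. The generic shape of that pattern, decoupled from CouchDB and using a hypothetical is_running probe, looks like this:

import time

def start_task_and_get_checker(is_running):
    """Return a zero-argument callable reporting whether the task has finished.

    is_running is a hypothetical probe, e.g. a lambda that queries a server.
    """
    def check_completed():
        return not is_running()
    return check_completed

# Usage sketch: pretend the task finishes after roughly 0.2 seconds.
deadline = time.monotonic() + 0.2
check = start_task_and_get_checker(lambda: time.monotonic() < deadline)
while not check():
    time.sleep(0.05)
print("compaction finished")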
def triangulate(self):
"""
Triangulates the set of vertices and stores the triangles in faces and
the convex hull in convex_hull.
"""
npts = self._vertices.shape[0]
if np.any(self._vertices[0] != self._vertices[1]):
# start != end, so edges must wrap around to beginning.
edges = np.empty((npts, 2), dtype=np.uint32)
edges[:, 0] = np.arange(npts)
edges[:, 1] = edges[:, 0] + 1
edges[-1, 1] = 0
else:
# start == end; no wrapping required.
edges = np.empty((npts-1, 2), dtype=np.uint32)
edges[:, 0] = np.arange(npts)
edges[:, 1] = edges[:, 0] + 1
tri = Triangulation(self._vertices, edges)
tri.triangulate()
return tri.pts, tri.tris
|
Triangulates the set of vertices and stores the triangles in faces and
the convex hull in convex_hull.
|
def deep_del(data, fn):
"""Create dict copy with removed items.
Recursively remove items where fn(value) is True.
Returns:
dict: New dict with matching items removed.
"""
result = {}
for k, v in data.iteritems():
if not fn(v):
if isinstance(v, dict):
result[k] = deep_del(v, fn)
else:
result[k] = v
return result
|
Create dict copy with removed items.
Recursively remove items where fn(value) is True.
Returns:
dict: New dict with matching items removed.
|
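The helper above is Python 2 (iteritems); a Python 3 sketch of the same recursive filter, with a small demonstration:

def deep_del(data, fn):
    """Return a copy of data with every value v where fn(v) is true removed."""
    result = {}
    for k, v in data.items():           # .items() replaces Python 2 .iteritems()
        if not fn(v):
            result[k] = deep_del(v, fn) if isinstance(v, dict) else v
    return result

cfg = {"a": 1, "b": None, "c": {"d": None, "e": 2}}
print(deep_del(cfg, lambda v: v is None))   # {'a': 1, 'c': {'e': 2}}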
def set_key(key, value, host=None, port=None, db=None, password=None):
'''
Set redis key value
CLI Example:
.. code-block:: bash
salt '*' redis.set_key foo bar
'''
server = _connect(host, port, db, password)
return server.set(key, value)
|
Set redis key value
CLI Example:
.. code-block:: bash
salt '*' redis.set_key foo bar
|
def get(self, columns=None):
"""
Execute the query as a "select" statement.
:type columns: list
:rtype: orator.Collection
"""
if columns is None:
columns = ["*"]
if self._query.get_query().columns:
columns = []
select = self._get_select_columns(columns)
models = self._query.add_select(*select).get_models()
self._hydrate_pivot_relation(models)
if len(models) > 0:
models = self._query.eager_load_relations(models)
return self._related.new_collection(models)
|
Execute the query as a "select" statement.
:type columns: list
:rtype: orator.Collection
|
def contour_to_geojson(contour, geojson_filepath=None, min_angle_deg=None,
ndigits=5, unit='', stroke_width=1, geojson_properties=None, strdump=False,
serialize=True):
"""Transform matplotlib.contour to geojson."""
collections = contour.collections
contour_index = 0
line_features = []
for collection in collections:
color = collection.get_edgecolor()
for path in collection.get_paths():
v = path.vertices
if len(v) < 3:
continue
coordinates = keep_high_angle(v, min_angle_deg)
if ndigits:
coordinates = np.around(coordinates, ndigits)
line = LineString(coordinates.tolist())
properties = {
"stroke-width": stroke_width,
"stroke": rgb2hex(color[0]),
"title": "%.2f" % contour.levels[contour_index] + ' ' + unit,
"level-value": float("%.6f" % contour.levels[contour_index]),
"level-index": contour_index
}
if geojson_properties:
properties.update(geojson_properties)
line_features.append(Feature(geometry=line, properties=properties))
contour_index += 1
feature_collection = FeatureCollection(line_features)
return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize)
|
Transform matplotlib.contour to geojson.
|
def k_ion(self, E):
"""
Geometric focusing force due to ion column for given plasma density as a function of *E*
"""
return self.n_p * _np.power(_spc.e, 2) / (2*_sltr.GeV2joule(E) * _spc.epsilon_0)
|
Geometric focusing force due to ion column for given plasma density as a function of *E*
|
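A numeric sketch of the same expression using scipy.constants directly; GeV2joule is assumed to be the usual 1e9 times the elementary charge, which may differ from the module's own helper:

import scipy.constants as spc

def k_ion(n_p, E_GeV):
    """Focusing constant n_p * e^2 / (2 * E[J] * eps0), assuming GeV2joule = 1e9 * e."""
    E_joule = E_GeV * 1e9 * spc.e
    return n_p * spc.e ** 2 / (2 * E_joule * spc.epsilon_0)

# Example: a 1e23 m^-3 plasma and a 20 GeV beam (focusing strength in m^-2 for SI inputs)
print(k_ion(n_p=1e23, E_GeV=20.0))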
def env_present(name,
value=None,
user='root'):
'''
Verifies that the specified environment variable is present in the crontab
for the specified user.
name
The name of the environment variable to set in the user crontab
user
The name of the user whose crontab needs to be modified, defaults to
the root user
value
The value to set for the given environment variable
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if __opts__['test']:
status = _check_cron_env(user, name, value=value)
ret['result'] = None
if status == 'absent':
ret['comment'] = 'Cron env {0} is set to be added'.format(name)
elif status == 'present':
ret['result'] = True
ret['comment'] = 'Cron env {0} already present'.format(name)
elif status == 'update':
ret['comment'] = 'Cron env {0} is set to be updated'.format(name)
return ret
data = __salt__['cron.set_env'](user, name, value=value)
if data == 'present':
ret['comment'] = 'Cron env {0} already present'.format(name)
return ret
if data == 'new':
ret['comment'] = 'Cron env {0} added to {1}\'s crontab'.format(name, user)
ret['changes'] = {user: name}
return ret
if data == 'updated':
ret['comment'] = 'Cron env {0} updated'.format(name)
ret['changes'] = {user: name}
return ret
ret['comment'] = ('Cron env {0} for user {1} failed to commit with error \n{2}'
.format(name, user, data))
ret['result'] = False
return ret
|
Verifies that the specified environment variable is present in the crontab
for the specified user.
name
The name of the environment variable to set in the user crontab
user
The name of the user whose crontab needs to be modified, defaults to
the root user
value
The value to set for the given environment variable
|
def exclude(self, *fields):
"""
        Project the columns that are not included in the given fields
:param fields: field names
:return: new collection
:rtype: :class:`odps.df.expr.expression.CollectionExpr`
"""
if len(fields) == 1 and isinstance(fields[0], list):
exclude_fields = fields[0]
else:
exclude_fields = list(fields)
exclude_fields = [self._defunc(it) for it in exclude_fields]
exclude_fields = [field.name if not isinstance(field, six.string_types) else field
for field in exclude_fields]
fields = [name for name in self._schema.names
if name not in exclude_fields]
return self._project(fields)
|
Project the columns that are not included in the given fields
:param fields: field names
:return: new collection
:rtype: :class:`odps.df.expr.expression.CollectionExpr`
|
def sphinx(self):
"""Generate Sphinx-formatted documentation for the Property"""
try:
assert __IPYTHON__
classdoc = ''
except (NameError, AssertionError):
scls = self.sphinx_class()
classdoc = ' ({})'.format(scls) if scls else ''
prop_doc = '**{name}**{cls}: {doc}{info}'.format(
name=self.name,
cls=classdoc,
doc=self.doc,
info=', {}'.format(self.info) if self.info else '',
)
return prop_doc
|
Generate Sphinx-formatted documentation for the Property
|
def getProvince(self, default=None):
"""Return the Province from the Physical or Postal Address
"""
physical_address = self.getPhysicalAddress().get("state", default)
postal_address = self.getPostalAddress().get("state", default)
return physical_address or postal_address
|
Return the Province from the Physical or Postal Address
|
def create_review(self, commit=github.GithubObject.NotSet, body=None, event=github.GithubObject.NotSet, comments=github.GithubObject.NotSet):
"""
:calls: `POST /repos/:owner/:repo/pulls/:number/reviews <https://developer.github.com/v3/pulls/reviews/>`_
:param commit: github.Commit.Commit
:param body: string
:param event: string
:param comments: list
:rtype: :class:`github.PullRequestReview.PullRequestReview`
"""
assert commit is github.GithubObject.NotSet or isinstance(commit, github.Commit.Commit), commit
assert isinstance(body, str), body
assert event is github.GithubObject.NotSet or isinstance(event, str), event
assert comments is github.GithubObject.NotSet or isinstance(comments, list), comments
post_parameters = dict()
if commit is not github.GithubObject.NotSet:
post_parameters['commit_id'] = commit.sha
post_parameters['body'] = body
post_parameters['event'] = 'COMMENT' if event == github.GithubObject.NotSet else event
if comments is github.GithubObject.NotSet:
post_parameters['comments'] = []
else:
post_parameters['comments'] = comments
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/reviews",
input=post_parameters
)
self._useAttributes(data)
return github.PullRequestReview.PullRequestReview(self._requester, headers, data, completed=True)
|
:calls: `POST /repos/:owner/:repo/pulls/:number/reviews <https://developer.github.com/v3/pulls/reviews/>`_
:param commit: github.Commit.Commit
:param body: string
:param event: string
:param comments: list
:rtype: :class:`github.PullRequestReview.PullRequestReview`
|
def get_header(self, name, default=None):
"""
Retrieves the value of a header
"""
return self._handler.headers.get(name, default)
|
Retrieves the value of a header
|
def infer_active_forms(stmts):
"""Return inferred ActiveForm from RegulateActivity + Modification.
This function looks for combinations of Activation/Inhibition
Statements and Modification Statements, and infers an ActiveForm
from them. For example, if we know that A activates B and
A phosphorylates B, then we can infer that the phosphorylated form
of B is active.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer ActiveForms from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
"""
linked_stmts = []
for act_stmt in _get_statements_by_type(stmts, RegulateActivity):
# TODO: revise the conditions here
if not (act_stmt.subj.activity is not None and
act_stmt.subj.activity.activity_type == 'kinase' and
act_stmt.subj.activity.is_active):
continue
matching = []
ev = act_stmt.evidence
for mod_stmt in _get_statements_by_type(stmts, Modification):
if mod_stmt.enz is not None:
if mod_stmt.enz.entity_matches(act_stmt.subj) and \
mod_stmt.sub.entity_matches(act_stmt.obj):
matching.append(mod_stmt)
ev.extend(mod_stmt.evidence)
if not matching:
continue
mods = []
for mod_stmt in matching:
mod_type_name = mod_stmt.__class__.__name__.lower()
if isinstance(mod_stmt, AddModification):
is_modified = True
else:
is_modified = False
mod_type_name = mod_type_name[2:]
mc = ModCondition(mod_type_name, mod_stmt.residue,
mod_stmt.position, is_modified)
mods.append(mc)
source_stmts = [act_stmt] + [m for m in matching]
st = ActiveForm(Agent(act_stmt.obj.name, mods=mods,
db_refs=act_stmt.obj.db_refs),
act_stmt.obj_activity, act_stmt.is_activation,
evidence=ev)
linked_stmts.append(LinkedStatement(source_stmts, st))
logger.info('inferred: %s' % st)
return linked_stmts
|
Return inferred ActiveForm from RegulateActivity + Modification.
This function looks for combinations of Activation/Inhibition
Statements and Modification Statements, and infers an ActiveForm
from them. For example, if we know that A activates B and
A phosphorylates B, then we can infer that the phosphorylated form
of B is active.
Parameters
----------
stmts : list[indra.statements.Statement]
A list of Statements to infer ActiveForms from.
Returns
-------
linked_stmts : list[indra.mechlinker.LinkedStatement]
A list of LinkedStatements representing the inferred Statements.
|
|