signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_formats(self, id, **data):
    """Fetch a single format resource by its ID.

    Issues ``GET /formats/:id/``, forwarding any extra keyword arguments
    as query data, and returns the decoded response.
    """
    endpoint = "/formats/{0}/".format(id)
    return self.get(endpoint, data=data)
|
def run(self):
    """! @brief SWV reader thread routine.

    Starts SWO reception on the probe, then repeatedly pulls SWO data and
    feeds it to the parser created in init(). When the thread is signaled
    to stop, SWO is stopped again before exiting.
    """
    probe = self._session.probe
    # Stop SWO first in case the probe already had it started; a failure
    # here is harmless and ignored.
    try:
        probe.swo_stop()
    except exceptions.ProbeError:
        pass
    probe.swo_start(self._swo_clock)
    while not self._shutdown_event.is_set():
        received = probe.swo_read()
        if received:
            self._parser.parse(received)
        sleep(0.001)
    probe.swo_stop()
|
def execute(self, eopatch):
    """Compute local binary patterns for each configured feature.

    :param eopatch: Input eopatch
    :type eopatch: eolearn.core.EOPatch
    :return: EOPatch instance with new key holding the LBP image
    :rtype: eolearn.core.EOPatch
    """
    for ftype, fname, new_fname in self.feature:
        source = eopatch[ftype][fname]
        eopatch[ftype][new_fname] = self._compute_lbp(source)
    return eopatch
|
def xpnsl(h1, h2, use_threads=True):
    """Cross-population version of the NSL statistic.

    Parameters
    ----------
    h1 : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array for the first population.
    h2 : array_like, int, shape (n_variants, n_haplotypes)
        Haplotype array for the second population.
    use_threads : bool, optional
        If True use multiple threads to compute.

    Returns
    -------
    score : ndarray, float, shape (n_variants,)
        Unstandardized XPNSL scores.
    """
    # validate and normalise inputs
    h1 = asarray_ndim(h1, 2)
    check_integer_dtype(h1)
    h2 = asarray_ndim(h2, 2)
    check_integer_dtype(h2)
    check_dim0_aligned(h1, h2)
    h1 = memoryview_safe(h1)
    h2 = memoryview_safe(h2)

    if use_threads and multiprocessing.cpu_count() > 1:
        # run the four scans (forward/reverse for each population) in a pool
        pool = ThreadPool(min(4, multiprocessing.cpu_count()))
        jobs = [pool.apply_async(nsl_scan, args=(h,))
                for h in (h1, h2, h1[::-1], h2[::-1])]
        pool.close()
        pool.join()
        nsl1_fwd, nsl2_fwd, nsl1_rev, nsl2_rev = [job.get() for job in jobs]
        pool.terminate()
    else:
        nsl1_fwd = nsl_scan(h1)
        nsl2_fwd = nsl_scan(h2)
        nsl1_rev = nsl_scan(h1[::-1])
        nsl2_rev = nsl_scan(h2[::-1])

    # reverse scans come back in reversed order; flip them before combining
    nsl1 = nsl1_fwd + nsl1_rev[::-1]
    nsl2 = nsl2_fwd + nsl2_rev[::-1]
    # unstandardized score is the log ratio of the two populations
    return np.log(nsl1 / nsl2)
|
def set_xticklabels_position(self, row, column, position):
    """Specify where a subplot draws its x-axis tick labels.

    Generally only useful for multiplots containing one row, e.g. to
    alternately draw the tick labels on the bottom or top of each subplot.

    :param row, column: specify the subplot.
    :param position: 'top' or 'bottom' to specify the position of the
        tick labels.
    """
    self.get_subplot_at(row, column).set_xticklabels_position(position)
|
def user_topic_ids(user):
    """Retrieve the list of topic IDs a user has access to."""
    if user.is_super_admin() or user.is_read_only_user():
        # Privileged users see every topic.
        query = sql.select([models.TOPICS])
    else:
        join_clause = models.JOINS_TOPICS_TEAMS.join(
            models.TOPICS,
            sql.and_(
                models.JOINS_TOPICS_TEAMS.c.topic_id == models.TOPICS.c.id,
                models.TOPICS.c.state == 'active',
            ),
        )
        team_filter = sql.or_(
            models.JOINS_TOPICS_TEAMS.c.team_id.in_(user.teams_ids),
            models.JOINS_TOPICS_TEAMS.c.team_id.in_(user.child_teams_ids),
        )
        query = (
            sql.select([models.JOINS_TOPICS_TEAMS.c.topic_id])
            .select_from(join_clause)
            .where(team_filter)
        )
    rows = flask.g.db_conn.execute(query).fetchall()
    return [str(row[0]) for row in rows]
|
def after(self, i, sibling, name=None):
    """Insert *sibling* after the current tag (offset by *i*) and return
    self to allow chaining."""
    insert_at = self._own_index + 1 + i
    self.parent._insert(sibling, idx=insert_at, name=name)
    return self
|
def get_pages(self, url, page=1, page_size=100, yield_pages=False, **filters):
    """Iterate over all pages at *url*, yielding individual results.

    :param url: the url to fetch
    :param page: start from this page
    :param page_size: results per page
    :param yield_pages: yield whole pages rather than individual results
    :param filters: additional filters
    :return: a generator of objects (dicts) from the API
    """
    seen = 0
    for current in itertools.count(page):
        body = self.request(url, page=current, page_size=page_size, **filters)
        seen += len(body['results'])
        log.debug("Got {url} page {page} / {pages}".format(url=url, **body))
        if yield_pages:
            yield body
        else:
            yield from body['results']
        if body['next'] is None:
            break
|
def set_variant(self, identity, experiment_name, variant_name):
    """Set the variant for a specific user.

    :param identity: a unique user identifier
    :param experiment_name: the string name of the experiment
    :param variant_name: the string name of the variant
    """
    try:
        experiment = model.Experiment.get_by(name=experiment_name)
        variant = model.Variant.get_by(name=variant_name)
        if experiment and variant:
            existing = model.Participant.query.filter(
                and_(
                    model.Participant.identity == identity,
                    model.Participant.experiment_id == experiment.id,
                    model.Participant.variant_id == variant.id,
                )
            ).count()
            # Only create a participant record once per (user, experiment,
            # variant) combination.
            if existing == 0:
                model.Participant(identity=identity, experiment=experiment,
                                  variant=variant)
                self.Session.commit()
    finally:
        # Always release the session, even when lookups fail.
        self.Session.close()
|
async def cancel(self):
    """Coroutine to cancel this request/stream.

    Sends an RST_STREAM frame to the server so it is explicitly informed
    that there is nothing more to expect from the client on this stream.

    :raises ProtocolError: if the stream was already cancelled.
    """
    if self._cancel_done:
        raise ProtocolError('Stream was already cancelled')
    with self._wrapper:
        # TODO: specify error code
        await self._stream.reset()
    self._cancel_done = True
|
def from_dict(cls, data):
    """Create a new Measurement subclass instance using the given dict.

    If ``Measurement.name_from_class`` was previously called with this
    data's associated Measurement sub-class, the returned object will be an
    instance of that sub-class. If the measurement name in ``data`` is
    unrecognized, the returned object will be of the generic
    ``Measurement`` type. Dicts carrying both ``id`` and ``data`` keys are
    treated as CAN messages.

    Args:
        data (dict): the data for the new measurement, including at least a
            name and value.
    """
    if 'id' in data and 'data' in data:
        # TODO grab bus
        measurement_class = CanMessage
        args = ["Bus %s: 0x%x" % (data.get('bus', '?'), data['id']),
                data['data']]
    else:
        measurement_class = cls._class_from_name(data['name'])
        args = []
        # The generic type needs the name passed explicitly.
        if measurement_class == Measurement:
            args.append(data['name'])
        args.append(data['value'])
    return measurement_class(*args, event=data.get('event', None),
                             override_unit=True)
|
def check_mem_usage(soft_percent=None, hard_percent=None):
    """Warn (or abort) when the process is running out of memory.

    :param soft_percent: warning threshold; defaults to the configured
        soft memory limit
    :param hard_percent: abort threshold; defaults to the configured hard
        memory limit
    :raises MemoryError: when usage exceeds the hard limit
    :return: a warning message when usage exceeds the soft limit,
        otherwise ``None``
    """
    soft_limit = soft_percent or config.memory.soft_mem_limit
    hard_limit = hard_percent or config.memory.hard_mem_limit
    used = psutil.virtual_memory().percent
    if used > hard_limit:
        raise MemoryError('Using more memory than allowed by configuration '
                          '(Used: %d%% / Allowed: %d%%)! Shutting down.'
                          % (used, hard_limit))
    if used > soft_limit:
        return 'Using over %d%% of the memory in %s!' % (used, socket.gethostname())
|
def get(cls):
    """Return the current API key.

    If one has not been given via 'set', the env var STEAMODD_API_KEY
    value will be checked instead.

    :raises APIKeyMissingError: when no key is available.
    """
    key = cls.__api_key or cls.__api_key_env_var
    if not key:
        raise APIKeyMissingError("API key not set")
    return key
|
def sam_conversions(self, sam_file, depth=True):
    """Build shell commands converting a sam file to a sorted, indexed bam.

    :param str sam_file: path to the input .sam file
    :param bool depth: also calculate coverage over each position
    :return: newline-terminated string of samtools commands
    """
    bam = sam_file.replace(".sam", ".bam")
    sorted_bam = sam_file.replace(".sam", "_sorted.bam")
    samtools = self.tools.samtools
    lines = [
        samtools + " view -bS " + sam_file + " > " + bam,
        samtools + " sort " + bam + " -o " + sorted_bam,
        samtools + " index " + sorted_bam,
    ]
    if depth:
        lines.append(samtools + " depth " + sorted_bam + " > "
                     + sam_file.replace(".sam", "_sorted.depth"))
    return "\n".join(lines) + "\n"
|
def rpm_send(self, rpm1, rpm2, force_mavlink1=False):
    '''RPM sensor output

    rpm1 : RPM Sensor1 (float)
    rpm2 : RPM Sensor2 (float)
    '''
    packet = self.rpm_encode(rpm1, rpm2)
    return self.send(packet, force_mavlink1=force_mavlink1)
|
def calc_bounds(xy, entity):
    """Return the bounding box ``[left, top, right, bottom]`` of *entity*
    (which has ``width`` and ``height`` attributes) when positioned at
    ``(x, y)``."""
    x, y = xy
    return [x, y, x + entity.width, y + entity.height]
|
def path_manager_callback(self):
    """Open Spyder's path manager dialog and persist the resulting paths."""
    from spyder.widgets.pathmanager import PathManager
    # Temporarily drop the managed paths from sys.path while the user edits
    # them; they are re-added (possibly changed) after the dialog closes.
    self.remove_path_from_sys_path()
    project_path = self.projects.get_pythonpath()
    dialog = PathManager(self, self.path, project_path, self.not_active_path, sync=True)
    dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
    dialog.exec_()  # modal: blocks until the dialog is closed
    self.add_path_to_sys_path()
    # Saving path
    try:
        encoding.writelines(self.path, self.SPYDER_PATH)
        encoding.writelines(self.not_active_path, self.SPYDER_NOT_ACTIVE_PATH)
    except EnvironmentError:
        # Best-effort persistence: failure to write the files is ignored.
        pass
    self.sig_pythonpath_changed.emit()
|
def roundrobin(*iterables):
    """Interleave elements from the given iterables.

    roundrobin('ABC', 'D', 'EF') --> A D E B F C

    When an iterable is exhausted it is dropped from the rotation and the
    remaining ones continue to be cycled. This is the standard itertools
    "roundrobin" recipe.
    """
    # Fix: this function previously raised NotImplementedError, making the
    # (correct) recipe below unreachable dead code. The recipe is the one
    # documented in the itertools docs, so it is enabled here.
    num_active = len(iterables)
    nexts = cycle(iter(it).__next__ for it in iterables)
    while num_active:
        try:
            for get_next in nexts:
                yield get_next()
        except StopIteration:
            # One iterable is exhausted: remove its __next__ from rotation.
            num_active -= 1
            nexts = cycle(islice(nexts, num_active))
|
def get_logged_in_account(token_manager=None, app_url=defaults.APP_URL):
    """get the account details for logged in account of the auth token_manager

    NOTE(review): this function calls *itself* with the same arguments and
    subscripts the result, so as written it recurses without a base case and
    will raise RecursionError at runtime. It presumably should delegate to a
    differently named helper (e.g. an API call returning the account dict)
    and return its 'id' field -- confirm the intended callee and fix.
    """
    return get_logged_in_account(token_manager=token_manager, app_url=app_url)['id']
|
def resizeEvent(self, event):
    """Qt resize handler: updates the position of the additional buttons
    when this widget resizes.

    :param event: <QResizeEvent>
    """
    super(XTabBar, self).resizeEvent(event)
    # Notify listeners (e.g. button-positioning code) that geometry changed.
    self.resized.emit()
|
def apply_signature(cls, instance, asynchronous=True, countdown=None, is_heavy_task=False, **kwargs):
    """Serialize input data and apply the task signature.

    NOTE: the third parameter was previously named ``async``, which has been
    a reserved keyword since Python 3.7 and makes this module a SyntaxError;
    it is renamed to ``asynchronous`` (positional callers are unaffected).

    :param instance: model instance to serialize and hand to the task
    :param asynchronous: when True, dispatch via ``apply_async``; otherwise
        run the task synchronously and fire callbacks inline
    :param countdown: optional delay in seconds for asynchronous execution
    :param is_heavy_task: route the task to the 'heavy' queue when True
    """
    serialized_instance = utils.serialize_instance(instance)
    signature = cls.get_task_signature(instance, serialized_instance, **kwargs)
    link = cls.get_success_signature(instance, serialized_instance, **kwargs)
    link_error = cls.get_failure_signature(instance, serialized_instance, **kwargs)
    if asynchronous:
        return signature.apply_async(
            link=link,
            link_error=link_error,
            countdown=countdown,
            queue='heavy' if is_heavy_task else None,
        )
    result = signature.apply()
    # On synchronous execution, run the matching callback ourselves.
    callback = link if not result.failed() else link_error
    if callback is not None:
        cls._apply_callback(callback, result)
    return result.get()
|
def initialize_plot(self, data=None, ax=None, make_plot=True, clear=False, draw=False, remove=False, priority=None):
    """Initialize the plot for a data array.

    Parameters
    ----------
    data : InteractiveArray or ArrayList, optional
        Data object that shall be visualized.
        - If not None, the given data is visualized.
        - If None and the :attr:`data` attribute is not None, the data in
          the :attr:`data` attribute is visualized.
        - If both are None, nothing is done.
    ax : optional
        Axes to plot on; only stored when new *data* is given.
    make_plot : bool
        If False, stop after registering the data (no plotting).
    clear : bool
        If True, clear the axes before plotting.
    draw : bool or None
        Draw the figure afterwards; None falls back to rcParams['auto_draw'].
    remove : bool
        If True, old effects by the formatoptions in this plotter are
        undone first.
    priority : int, optional
        If given, initialize only the formatoptions with the given priority
        (:data:`START`, :data:`BEFOREPLOTTING` or :data:`END`).
    """
    if data is None and self.data is not None:
        # Fall back to previously stored data.
        data = self.data
    else:
        # New data given (or both None): store it. NOTE the axes are only
        # (re)assigned on this branch.
        self.data = data
        self.ax = ax
    if data is None:  # nothing to do if no data is given
        return
    # Auto-update stays enabled only if both the plotter and the data
    # currently allow it.
    self.no_auto_update = not (not self.no_auto_update or not data.psy.no_auto_update)
    data.psy.plotter = self
    if not make_plot:  # stop here if we shall not plot
        return
    self.logger.debug("Initializing plot...")
    if remove:
        self.logger.debug(" Removing old formatoptions...")
        for fmto in self._fmtos:
            try:
                fmto.remove()
            except Exception:
                # Best-effort removal: log and continue with the rest.
                self.logger.debug("Could not remove %s while initializing", fmto.key, exc_info=True)
    if clear:
        self.logger.debug(" Clearing axes...")
        self.ax.clear()
        self.cleared = True
    # get the formatoptions. We sort them here by key to make sure that the
    # order always stays the same (easier for debugging)
    fmto_groups = self._grouped_fmtos(self._sorted_by_priority(sorted(self._fmtos, key=lambda fmto: fmto.key)))
    self.plot_data = self.data
    self._updating = True
    for fmto_priority, grouper in fmto_groups:
        # Honour the optional priority filter.
        if priority is None or fmto_priority == priority:
            self._plot_by_priority(fmto_priority, grouper, initializing=True)
    self._release_all(True)
    # finish the update
    self.cleared = False
    self.replot = False
    self._initialized = True
    self._updating = False
    if draw is None:
        draw = rcParams['auto_draw']
    if draw:
        self.draw()
        if rcParams['auto_show']:
            self.show()
|
def eidos_process_jsonld():
    """Process an EIDOS JSON-LD request body and return INDRA Statements."""
    # CORS preflight: nothing to do.
    if request.method == 'OPTIONS':
        return {}
    raw = request.body.read().decode('utf-8')
    payload = json.loads(raw)
    ep = eidos.process_json_str(payload.get('jsonld'))
    return _stmts_from_proc(ep)
|
def better(old_value, new_value, mode):
    """Check whether *new_value* is an improvement over *old_value*.

    :param old_value: previous best value (may be None or NaN)
    :param new_value: candidate value (may be None or NaN)
    :param mode: 'min' (lower is better) or 'max' (higher is better)
    :return: True if *new_value* is better than *old_value*
    :raises RuntimeError: for an unsupported *mode*
    """
    def _missing(value):
        # None and NaN both count as "no value".
        return value is None or np.isnan(value)

    if _missing(new_value):
        # Fix: previously a missing new value fell through to the
        # comparison below and raised TypeError against None.
        return False
    if _missing(old_value):
        return True
    if mode == 'min':
        return new_value < old_value
    if mode == 'max':
        return new_value > old_value
    raise RuntimeError(f"Mode '{mode}' value is not supported")
|
def populate_extra_files():
    """Create a list of non-python data files to include in the package
    distribution: the settings file plus every example, template and web
    resource."""
    # Idiom fix: replaced three manual append loops with one comprehension.
    patterns = (
        'cauldron/resources/examples/**/*',
        'cauldron/resources/templates/**/*',
        'cauldron/resources/web/**/*',
    )
    return ['cauldron/settings.json'] + [
        entry
        for pattern in patterns
        for entry in glob.iglob(pattern, recursive=True)
    ]
|
def parse_assertion(self, keys=None):
    """Parse the assertions for a saml response.

    Handles both plain and encrypted assertions, decrypting iteratively
    until no further encrypted data is found, and collects all verified
    assertions in ``self.assertions``.

    :param keys: A string representing a RSA key or a list of strings
        containing RSA keys.
    :return: True if the assertions are parsed otherwise False.
    """
    if self.context == "AuthnQuery":
        # An AuthnQuery can contain one or more assertions; no count check.
        pass
    else:  # This is a saml2int limitation
        try:
            assert (len(self.response.assertion) == 1
                    or len(self.response.encrypted_assertion) == 1
                    or self.assertion is not None)
        except AssertionError:
            raise Exception("No assertion part")
    # First verify any assertions that arrived unencrypted.
    if self.response.assertion:
        logger.debug("***Unencrypted assertion***")
        for assertion in self.response.assertion:
            if not self._assertion(assertion, False):
                return False
    if self.find_encrypt_data(self.response):
        logger.debug("***Encrypted assertion/-s***")
        _enc_assertions = []
        resp = self.response
        decr_text = str(self.response)
        decr_text_old = None
        # First pass: repeatedly decrypt the response until nothing changes
        # or no encrypted data remains (handles nested encryption layers).
        while self.find_encrypt_data(resp) and decr_text_old != decr_text:
            decr_text_old = decr_text
            try:
                decr_text = self.sec.decrypt_keys(decr_text, keys)
            except DecryptError as e:
                # Could not decrypt with the given keys; retry loop will
                # terminate via the decr_text_old comparison.
                continue
            else:
                resp = samlp.response_from_string(decr_text)
            # check and prepare for comparison between str and unicode
            if type(decr_text_old) != type(decr_text):
                if isinstance(decr_text_old, six.binary_type):
                    decr_text_old = decr_text_old.decode("utf-8")
                else:
                    decr_text_old = decr_text_old.encode("utf-8")
        _enc_assertions = self.decrypt_assertions(resp.encrypted_assertion, decr_text)
        decr_text_old = None
        # Second pass: also decrypt encrypted data nested inside the
        # already-decrypted assertion list.
        while (self.find_encrypt_data(resp) or self.find_encrypt_data_assertion_list(_enc_assertions)) and decr_text_old != decr_text:
            decr_text_old = decr_text
            try:
                decr_text = self.sec.decrypt_keys(decr_text, keys)
            except DecryptError as e:
                continue
            else:
                resp = samlp.response_from_string(decr_text)
                _enc_assertions = self.decrypt_assertions(resp.encrypted_assertion, decr_text, verified=True)
            # check and prepare for comparison between str and unicode
            if type(decr_text_old) != type(decr_text):
                if isinstance(decr_text_old, six.binary_type):
                    decr_text_old = decr_text_old.decode("utf-8")
                else:
                    decr_text_old = decr_text_old.encode("utf-8")
        all_assertions = _enc_assertions
        if resp.assertion:
            all_assertions = all_assertions + resp.assertion
        if len(all_assertions) > 0:
            # Decrypt any encrypted assertions carried in an Advice element.
            for tmp_ass in all_assertions:
                if tmp_ass.advice and tmp_ass.advice.encrypted_assertion:
                    advice_res = self.decrypt_assertions(tmp_ass.advice.encrypted_assertion, decr_text, tmp_ass.issuer)
                    if tmp_ass.advice.assertion:
                        tmp_ass.advice.assertion.extend(advice_res)
                    else:
                        tmp_ass.advice.assertion = advice_res
                    if len(advice_res) > 0:
                        tmp_ass.advice.encrypted_assertion = []
        self.response.assertion = resp.assertion
        for assertion in _enc_assertions:
            # Verify each decrypted assertion before accepting it.
            if not self._assertion(assertion, True):
                return False
            else:
                self.assertions.append(assertion)
        self.xmlstr = decr_text
        if len(_enc_assertions) > 0:
            self.response.encrypted_assertion = []
    if self.response.assertion:
        for assertion in self.response.assertion:
            self.assertions.append(assertion)
    if self.assertions and len(self.assertions) > 0:
        self.assertion = self.assertions[0]
    if self.context == "AuthnReq" or self.context == "AttrQuery":
        self.ava = self.get_identity()
        logger.debug("--- AVA: %s", self.ava)
    return True
|
def make_requester(self, my_args=None):
    """Create a new requester instance managed by this driver.

    :param my_args: dict like {request_q}. Default: None
    :return: created requester proxy
    :raises ArianeConfError: when factory arguments or the NATS connection
        configuration are missing
    """
    LOGGER.debug("natsd.Driver.make_requester")
    if my_args is None:
        raise exceptions.ArianeConfError('requester factory arguments')
    if not self.configuration_OK or self.connection_args is None:
        raise exceptions.ArianeConfError('NATS connection arguments')
    proxy = Requester.start(my_args, self.connection_args).proxy()
    self.requester_registry.append(proxy)
    return proxy
|
def unregister_a_problem(self, prob):
    """Remove *prob* from our problems list and drop our 'impacted' state
    when no problems remain.

    :param prob: problem to remove
    :type prob: alignak.objects.schedulingitem.SchedulingItem
    :return: None
    """
    self.source_problems.remove(prob.uuid)
    # Our dependencies may not know about the removal yet (it is not
    # ordered), so just check whether any problem remains in our list.
    if not self.source_problems:
        # No longer an impact: clear the state and broadcast an update brok.
        self.is_impact = False
        self.unset_impact_state()
        self.broks.append(self.get_update_status_brok())
|
def _update_with_calls ( result_file , cnv_file ) :
"""Update bounds with calls from CNVkit , inferred copy numbers and p - values from THetA ."""
|
results = { }
with open ( result_file ) as in_handle :
in_handle . readline ( )
# header
_ , _ , cs , ps = in_handle . readline ( ) . strip ( ) . split ( )
for i , ( c , p ) in enumerate ( zip ( cs . split ( ":" ) , ps . split ( "," ) ) ) :
results [ i ] = ( c , p )
cnvs = { }
with open ( cnv_file ) as in_handle :
for line in in_handle :
chrom , start , end , _ , count = line . rstrip ( ) . split ( ) [ : 5 ]
cnvs [ ( chrom , start , end ) ] = count
def update ( i , line ) :
parts = line . rstrip ( ) . split ( "\t" )
chrom , start , end = parts [ 1 : 4 ]
parts += cnvs . get ( ( chrom , start , end ) , "." )
parts += list ( results [ i ] )
return "\t" . join ( parts ) + "\n"
return update
|
def AND(classical_reg1, classical_reg2):
    """Produce an AND instruction.

    NOTE: The order of operands was reversed in pyQuil <= 1.9.

    :param classical_reg1: The first classical register, which gets modified.
    :param classical_reg2: The second classical register or immediate value.
    :return: A ClassicalAnd instance.
    """
    target, operand = unpack_reg_val_pair(classical_reg1, classical_reg2)
    return ClassicalAnd(target, operand)
|
def move_to_element(self, to_element):
    """Move the mouse to the middle of an element.

    :Args:
     - to_element: The WebElement to move to.
    """
    if not self._driver.w3c:
        # Legacy protocol: queue a MOVE_TO command for later execution.
        self._actions.append(
            lambda: self._driver.execute(
                Command.MOVE_TO, {'element': to_element.id}))
    else:
        self.w3c_actions.pointer_action.move_to(to_element)
        self.w3c_actions.key_action.pause()
    return self
|
def _free_array ( self , handle : int ) :
"""Frees the memory for the array with the given handle .
Args :
handle : The handle of the array whose memory should be freed . This
handle must come from the _ create _ array method ."""
|
with self . _lock :
if self . _arrays [ handle ] is not None :
self . _arrays [ handle ] = None
self . _count -= 1
|
def _fast_write ( self , outfile , value ) :
"""Function for fast writing to motor files ."""
|
outfile . truncate ( 0 )
outfile . write ( str ( int ( value ) ) )
outfile . flush ( )
|
def pushd(directory: str) -> None:
    """Context-manager-style generator: change into *directory*, yield, and
    always restore the original working directory on exit.

    Example:

    .. code-block:: python

        with pushd(new_directory):
            # do things
    """
    previous_dir = os.getcwd()
    os.chdir(directory)
    try:
        yield
    finally:
        # Fix: restore the cwd even when the body raises; previously an
        # exception in the body left the process in *directory*.
        os.chdir(previous_dir)
|
def pre_freeze_hook(self):
    """Pre :meth:`dtoolcore.ProtoDataSet.freeze` actions.

    Called at the beginning of :meth:`dtoolcore.ProtoDataSet.freeze`.
    Validates that the base of the dataset contains only expected
    structural content; may be useful for remote storage backends to
    generate caches and avoid repetitive time-consuming calls.

    :raises DiskStorageBrokerValidationWarning: on rogue content.
    """
    allowed = {v[0] for v in _STRUCTURE_PARAMETERS.values()}
    for entry in os.listdir(self._abspath):
        if entry in allowed:
            continue
        msg = "Rogue content in base of dataset: {}".format(entry)
        raise DiskStorageBrokerValidationWarning(msg)
|
def printed_out(self, name):
    """Return a string describing the APIObject and its children."""
    subs = '[{}]'.format(self._id_variable) if self._id_variable else ''
    lines = ['|\n', '|---{}{}\n'.format(name, subs)]
    if self._description:
        lines.append('| | {}\n'.format(self._description))
    # Recurse into child actions, each rendering itself.
    for action_name, action in self._actions.items():
        lines.append(action.printed_out(action_name))
    return ''.join(lines)
|
def push(self, vs):
    """Move given sheet *vs* to index 0 of list ``sheets``.

    Behaviour depends on the sheet's current state:
    - already in ``sheets``: move it to the front;
    - not loaded yet: put it first and (re)load it;
    - otherwise: just insert it first.
    Precious sheets are also recorded in the global sheet registry.

    :return: *vs* (returned unchanged, including when it is falsy).
    """
    if vs:
        vs.vd = self
        if vs in self.sheets:
            # Already present: move to the front.
            self.sheets.remove(vs)
            self.sheets.insert(0, vs)
        elif not vs.loaded:
            self.sheets.insert(0, vs)
            vs.reload()
            vs.recalc()
            # set up Columns
        else:
            self.sheets.insert(0, vs)
        if vs.precious and vs not in vs.vd.allSheets:
            vs.vd.allSheets[vs] = vs.name
    return vs
|
def find_geom(geom, geoms):
    """Return the index of *geom* in *geoms* using identity comparison,
    avoiding the expensive equality checks of the ``in`` operator.

    Returns ``None`` when the geometry is not present.
    """
    for index, candidate in enumerate(geoms):
        if candidate is geom:
            return index
    return None
|
def signalMinimum(img, fitParams=None, n_std=3):
    '''Return the intersection between the signal and background peak.'''
    if fitParams is None:
        fitParams = FitHistogramPeaks(img).fitParams
    assert len(fitParams) > 1, 'need 2 peaks so get minimum signal'

    signal = fitParams[signalPeakIndex(fitParams)]
    bg = getBackgroundPeak(fitParams)
    signal_min = signal[1] - n_std * signal[2]
    bg_max = bg[1] + n_std * bg[2]
    if signal_min > bg_max:
        # Peaks are well separated: the signal lower bound is the answer.
        return signal_min

    # Peaks are overlapping: define the signal minimum as the intersection
    # between both Gaussians (roots of the quadratic in x).
    def _intersections(p1, p2):
        s1, m1, std1 = p1
        s2, m2, std2 = p2
        a = (1 / (2 * std1 ** 2)) - (1 / (2 * std2 ** 2))
        b = (m2 / (std2 ** 2)) - (m1 / (std1 ** 2))
        c = ((m1 ** 2 / (2 * std1 ** 2))
             - (m2 ** 2 / (2 * std2 ** 2))
             - np.log((std2 * s1) / (std1 * s2)))
        return np.roots([a, b, c])

    roots = _intersections(bg, signal)
    try:
        return roots[np.logical_and(roots > bg[1], roots < signal[1])][0]
    except IndexError:
        # Shouldn't normally occur; fall back to the larger bound.
        return max(signal_min, bg_max)
|
def handle_single_request(self, request_object):
    """Handle a single request object and return the raw response body.

    :param request_object: a MethodCall or Notification instance
    :raises TypeError: for any other request type
    :raises CalledServiceError: when the HTTP call fails
    :return: decoded JSON response dict, or None for notifications
    """
    if not isinstance(request_object, (MethodCall, Notification)):
        raise TypeError("Invalid type for request_object")
    req_id = request_object.id
    request_body = self.build_request_body(
        request_object.method_name,
        request_object.params,
        id=req_id,
    )
    http_request = self.build_http_request_obj(request_body)
    try:
        response = urllib.request.urlopen(http_request)
    except urllib.request.HTTPError as e:
        raise CalledServiceError(e)
    if not req_id:
        # Notifications carry no id and expect no response.
        return
    return json.loads(response.read().decode())
|
def indexByComponent(self, component):
    """Return the (row, column) location of *component*, or None if it is
    not in the model.

    :param component: Component to get index for
    :type component: :class:`AbstractStimulusComponent<sparkle.stim.abstract_component.AbstractStimulusComponent>`
    :returns: (int, int) -- (row, column) of component
    """
    for row_index, row_contents in enumerate(self._segments):
        try:
            return (row_index, row_contents.index(component))
        except ValueError:
            # Not in this row; keep looking.
            continue
|
def get_last_depth(self, symbol, _type):
    """Build a decorator that feeds market-depth data to the wrapped handler.

    :param symbol: market symbol
    :param _type: one of {percent10, step0, step1, step2, step3, step4, step5}
    :return: a decorator whose wrapped function is called with the HTTP GET
        result of /market/depth
    """
    params = {'symbol': symbol, 'type': _type}
    url = u.MARKET_URL + '/market/depth'

    def decorator(func):
        @wraps(func)
        def handle():
            func(http_get_request(url, params))
        return handle
    return decorator
|
def parse_int_token(token):
    """Convert *token* to an integer, inferring the base from its prefix:
    ``0x``/``0X`` hexadecimal, a leading ``0`` octal, otherwise decimal.

    :param token:
        The string to convert to an integer.
    :type token:
        ``str``
    :return:
        ``int`` or raises ``ValueError`` exception.

    Usage::

        >>> parse_int_token("0x40")
        64
        >>> parse_int_token("040")
        32
        >>> parse_int_token("40")
        40
        >>> parse_int_token("foobar")
        Traceback (most recent call last):
        ValueError: invalid literal for int() with base 10: 'foobar'
    """
    lowered = token.lower()
    if lowered.startswith("0x"):
        base = 16
    elif lowered.startswith("0"):
        base = 8
    else:
        base = 10
    return int(token, base)
|
def _dict_to_name_value ( data ) :
'''Convert a dictionary to a list of dictionaries to facilitate ordering'''
|
if isinstance ( data , dict ) :
sorted_data = sorted ( data . items ( ) , key = lambda s : s [ 0 ] )
result = [ ]
for name , value in sorted_data :
if isinstance ( value , dict ) :
result . append ( { name : _dict_to_name_value ( value ) } )
else :
result . append ( { name : value } )
else :
result = data
return result
|
def camel_to_under(name):
    """Convert a camel-case string to lowercase separated by underscores.

    Written by epost (http://stackoverflow.com/questions/1175208).

    :param name: String to be converted
    :return: new String with camel-case converted to lowercase, underscored
    """
    # First split acronym/word boundaries, then lower/upper boundaries.
    partial = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
    return re.sub("([a-z0-9])([A-Z])", r"\1_\2", partial).lower()
|
def receive(self, request: RequestType, user: UserType = None, sender_key_fetcher: Callable[[str], str] = None, skip_author_verification: bool = False) -> Tuple[str, dict]:
    """Receive a request.

    For testing purposes, ``skip_author_verification`` can be passed;
    authorship will then not be verified.
    """
    self.user = user
    self.get_contact_key = sender_key_fetcher
    self.payload = json.loads(decode_if_bytes(request.body))
    self.request = request
    self.extract_actor()
    if not skip_author_verification:
        # Verify the message is from who it claims to be.
        self.verify_signature()
    return self.actor, self.payload
|
def team(page):
    """Extract the team name from a scraped HTML page's title.

    :param page: HTML text of the page
    :return: the team name, or None when the title cannot be parsed
    """
    soup = BeautifulSoup(page)
    try:
        return soup.find('title').text.split(' | ')[0].split(' - ')[1]
    except (AttributeError, IndexError):
        # Fix: was a bare ``except`` which also swallowed SystemExit and
        # KeyboardInterrupt. A missing <title> raises AttributeError and an
        # unexpected title format raises IndexError.
        return None
|
def get_coordinates_by_full_name(self, name):
    """Retrieve a person's (latitude, longitude) by full name; returns a
    pair of empty strings when the person is unknown."""
    person = self.get_person_by_full_name(name)
    if person:
        return person.latitude, person.longitude
    return '', ''
|
def parse_ggKbase_tables(tables, id_type):
    """Convert ggKbase genome info tables to a dictionary keyed by genome ID.

    :param tables: iterable of paths to tab-separated ggKbase genome tables
    :param id_type: False to index by both name and code, or 'name'/'code'
        to index by a single column; any other value aborts the program
    :return: dict mapping ID -> {column header: value}
    """
    g2info = {}
    for table in tables:
        for line in open(table):
            line = line.strip().split('\t')
            if line[0].startswith('name'):
                # Header row: normalise a few column names.
                # NOTE(review): assumes the ggKbase layout puts genome size
                # at column 4 and SCG counts at columns 12/13 -- confirm.
                header = line
                header[4] = 'genome size (bp)'
                header[12] = '#SCGs'
                header[13] = '#SCG duplicates'
                continue
            name, code, info = line[0], line[1], line
            info = [to_int(i) for i in info]
            if id_type is False:  # try to use name and code ID
                if 'UNK' in code or 'unknown' in code:
                    code = name
                if (name != code) and (name and code in g2info):
                    # Refuse ambiguous tables: same name or code seen twice.
                    print('# duplicate name or code in table(s)', file=sys.stderr)
                    print('# %s and/or %s' % (name, code), file=sys.stderr)
                    exit()
                if name not in g2info:
                    g2info[name] = {item: stat for item, stat in zip(header, info)}
                if code not in g2info:
                    g2info[code] = {item: stat for item, stat in zip(header, info)}
            else:
                if id_type == 'name':
                    ID = name
                elif id_type == 'code':
                    ID = code
                else:
                    print('# specify name or code column using -id', file=sys.stderr)
                    exit()
                ID = ID.replace(' ', '')
                g2info[ID] = {item: stat for item, stat in zip(header, info)}
                # Missing genome size becomes 0 so numeric use downstream works.
                if g2info[ID]['genome size (bp)'] == '':
                    g2info[ID]['genome size (bp)'] = 0
    return g2info
|
def invalid_multipoly_handler ( gdf , relation , way_ids ) :
    """Handles invalid multipolygon geometries when there exists e.g. a feature without
    geometry (geometry == NaN).

    Parameters
    ----------
    gdf : gpd.GeoDataFrame
        GeoDataFrame with Polygon geometries that should be converted into a MultiPolygon object.
    relation : dict
        OSM 'relation' dictionary.
    way_ids : list
        A list of 'way' ids that should be converted into a MultiPolygon object.

    Returns
    -------
    MultiPolygon built from the non-null geometries, or None if construction
    still fails.
    """
    try :
        gdf_clean = gdf . dropna ( subset = [ 'geometry' ] )
        multipoly = MultiPolygon ( list ( gdf_clean [ 'geometry' ] ) )
        return multipoly
    except Exception :
        # BUG FIX: the original format string had a single %s but was given
        # two arguments, raising TypeError inside this handler and masking
        # the real geometry error.  Both values now have a placeholder.
        log ( "Invalid geometry at relation id %s.\nWay-ids of the invalid MultiPolygon: %s" % ( relation [ 'id' ] , str ( way_ids ) ) )
        return None
|
def parse ( theme_file ) :
    """Read a theme file and normalize it.

    Missing "wallpaper" / "alpha" entries receive default values, and themes
    in terminal.sexy format (detected by a "color" key) are converted to the
    wal format before being returned.
    """
    data = util . read_file_json ( theme_file )
    if "wallpaper" not in data :
        data [ "wallpaper" ] = "None"
    if "alpha" not in data :
        data [ "alpha" ] = util . Color . alpha_num
    # A "color" key marks the terminal.sexy format.
    return terminal_sexy_to_wal ( data ) if "color" in data else data
|
def get_data_context ( context_type , options , * args , ** kwargs ) :
    """Return a data_context object which exposes options to list datasets and get a dataset from
    that context. This is a new API in Great Expectations 0.4, and is subject to rapid change.

    :param context_type: (string) one of "SqlAlchemy" or "PandasCSV"
    :param options: options to be passed to the data context's connect method.
    :return: a new DataContext object
    :raises ValueError: if *context_type* is not a recognized context name.
    """
    if context_type == "SqlAlchemy" :
        return SqlAlchemyDataContext ( options , * args , ** kwargs )
    if context_type == "PandasCSV" :
        return PandasCSVDataContext ( options , * args , ** kwargs )
    # Include the offending value so callers can see what was passed.
    raise ValueError ( "Unknown data context type: %r" % ( context_type , ) )
|
def parse ( cls , stream ) :
    """Return an instance of |_TiffParser| containing the properties parsed
    from the TIFF image in *stream*."""
    reader = cls . _make_stream_reader ( stream )
    # Bytes 4-7 of the TIFF header hold the offset of the zeroth IFD.
    zeroth_ifd_offset = reader . read_long ( 4 )
    entries = _IfdEntries . from_stream ( reader , zeroth_ifd_offset )
    return cls ( entries )
|
def compose ( * funcs ) :
"""Compose any number of unary functions into a single unary function .
> > > import textwrap
> > > from six import text _ type
> > > stripped = text _ type . strip ( textwrap . dedent ( compose . _ _ doc _ _ ) )
> > > compose ( text _ type . strip , textwrap . dedent ) ( compose . _ _ doc _ _ ) = = stripped
True
Compose also allows the innermost function to take arbitrary arguments .
> > > round _ three = lambda x : round ( x , ndigits = 3)
> > > f = compose ( round _ three , int . _ _ truediv _ _ )
> > > [ f ( 3 * x , x + 1 ) for x in range ( 1,10 ) ]
[1.5 , 2.0 , 2.25 , 2.4 , 2.5 , 2.571 , 2.625 , 2.667 , 2.7]"""
|
def compose_two ( f1 , f2 ) :
return lambda * args , ** kwargs : f1 ( f2 ( * args , ** kwargs ) )
return functools . reduce ( compose_two , funcs )
|
def _process_incoming_data ( self ) :
    """Retrieve and process any incoming data.

    Loops until the owning thread clears ``self._running``; on each pass,
    appends any newly received bytes to ``self.data_in`` and hands the
    buffer to the read handler.

    :return: None
    """
    while self . _running . is_set ( ) :
        if self . poller . is_ready :
            self . data_in += self . _receive ( )
        # NOTE(review): _on_read_impl appears to consume what it can and
        # return the unprocessed remainder -- confirm against its contract.
        # Also note this loop spins without sleeping when the poller is
        # idle; presumably poller.is_ready or _receive blocks -- verify.
        self . data_in = self . _on_read_impl ( self . data_in )
|
def endInstance ( self ) :
    """Finalise the instance definition started by startInstance().

    Moves ``self.currentInstance`` into the document's instances container
    and clears it; a no-op when no instance is currently open.
    """
    if self . currentInstance is None :
        return
    # list.append returns None, so the previous assignment of its result to
    # a local ("allInstances") was dead code; append only for its side effect.
    self . root . findall ( '.instances' ) [ 0 ] . append ( self . currentInstance )
    self . currentInstance = None
|
def print_app_tb_only ( self , file ) :
    # Print only the application-level (interpreted-program) traceback to
    # *file*, walking the linked list of traceback frames.  This is PyPy
    # interpreter code and uses Python 2 "print >> file" syntax.
    "NOT_RPYTHON"
    tb = self . _application_traceback
    if tb :
        import linecache
        print >> file , "Traceback (application-level):"
        while tb is not None :
            co = tb . frame . pycode
            lineno = tb . get_lineno ( )
            fname = co . co_filename
            # NOTE(review): a filename starting with '<inline>\n' apparently
            # carries the source text itself after the first line -- the
            # requested line is then taken directly from that embedded source.
            if fname . startswith ( '<inline>\n' ) :
                lines = fname . split ( '\n' )
                fname = lines [ 0 ] . strip ( )
                try :
                    l = lines [ lineno ]
                except IndexError :
                    l = ''
            else :
                # Regular file on disk: let linecache fetch (and cache) it.
                l = linecache . getline ( fname , lineno )
            # Trailing comma keeps the location and line info on one line.
            print >> file , " File \"%s\"," % fname ,
            print >> file , "line" , lineno , "in" , co . co_name
            if l :
                if l . endswith ( '\n' ) :
                    l = l [ : - 1 ]
                l = " " + l . lstrip ( )
                print >> file , l
            tb = tb . next
|
def run_helper_process ( python_file , metadata_queue , quit_event , options ) :
    """Load and start the helper process defined in *python_file*.

    :param python_file: Absolute path of a python file defining a subclass
        of BotHelperProcess.
    :param metadata_queue: Queue from which the helper reads AgentMetadata
        updates.
    :param quit_event: Event set when rlbot is shutting down.
    :param options: Dict of arbitrary options passed through to the helper.
    """
    wrapper = import_class_with_base ( python_file , BotHelperProcess )
    process_class = wrapper . get_loaded_class ( )
    process_class ( metadata_queue , quit_event , options ) . start ( )
|
def from_dict ( cls , d ) :
    """Rebuild a WeightedNbSetChemenvStrategy from its dict representation.

    :param d: dict produced by the :meth:`as_dict` method.
    :return: the reconstructed WeightedNbSetChemenvStrategy object.
    """
    keys = ( "additional_condition" , "symmetry_measure_type" , "nb_set_weights" , "ce_estimator" )
    return cls ( ** { key : d [ key ] for key in keys } )
|
def recursively_register_child_states ( self , state ) :
    """Recursively register execution-status observers on *state* and all of
    its child states.

    :param state: root state of the (sub)tree to observe.
    :return: None
    """
    self . logger . info ( "Execution status observer add new state {}" . format ( state ) )
    if isinstance ( state , ContainerState ) :
        state . add_observer ( self , "add_state" , notify_after_function = self . on_add_state )
        # BUG FIX: the original loop reused the name ``state`` as its loop
        # variable, so the add_observer call after the loop attached the
        # observer to the *last child* instead of the container itself.
        for child in list ( state . states . values ( ) ) :
            self . recursively_register_child_states ( child )
        state . add_observer ( self , "state_execution_status" , notify_after_function = self . on_state_execution_status_changed_after )
    if isinstance ( state , LibraryState ) :
        # Library states wrap a copy of another state machine; observe it too.
        self . recursively_register_child_states ( state . state_copy )
        state . add_observer ( self , "state_execution_status" , notify_after_function = self . on_state_execution_status_changed_after )
|
def find_faces ( self , image , draw_box = False ) :
    """Detect faces in *image* with the instance's Haar cascade.

    Args:
        image: The image to scan (converted RGB -> grayscale internally).
        draw_box: When True, draw a green rectangle around each detection
            directly on *image*.
    Return:
        The detections as returned by OpenCV's detectMultiScale method
        (rows of x, y, w, h).
    """
    gray = cv2 . cvtColor ( image , cv2 . COLOR_RGB2GRAY )
    detections = self . cascade . detectMultiScale (
        gray , scaleFactor = 1.3 , minNeighbors = 5 , minSize = ( 50 , 50 ) , flags = 0 )
    if draw_box :
        for left , top , width , height in detections :
            cv2 . rectangle ( image , ( left , top ) , ( left + width , top + height ) , ( 0 , 255 , 0 ) , 2 )
    return detections
|
def message_iter ( evt ) :
    """Yield each message in *evt*, raising as soon as one carries a
    'responseError' element.

    Messages are also echoed to the debug log when debug logging is enabled.
    """
    for message in evt :
        if logger . isEnabledFor ( log . logging . DEBUG ) :
            logger . debug ( message . toString ( ) )
        if message . asElement ( ) . hasElement ( 'responseError' ) :
            raise Exception ( message . toString ( ) )
        yield message
|
def get_infrared ( self , callb = None ) :
    """Convenience method to request the infrared brightness from the device.

    Issues a LightGetInfrared request; the default response handler caches
    the value, and *callb* (if given) is executed when the response arrives.
    The currently cached value is returned immediately.

    :param callb: Callable to be used when the response is received. If not
        set, the default response handler is used.
    :type callb: callable
    :returns: The cached infrared brightness value.
    :rtype: int
    """
    # The request is issued purely for its side effect of refreshing the
    # cache; the previous code bound its return value to an unused local.
    self . req_with_resp ( LightGetInfrared , LightStateInfrared , callb = callb )
    return self . infrared_brightness
|
def get_unpatched_class ( cls ) :
    """Protect against re-patching the distutils if reloaded.

    Walks the MRO for the first base not defined in setuptools and asserts
    that it comes from distutils, ensuring no other distutils extension
    monkeypatched the distutils first.
    """
    for candidate in _get_mro ( cls ) :
        if candidate . __module__ . startswith ( 'setuptools' ) :
            continue
        if not candidate . __module__ . startswith ( 'distutils' ) :
            raise AssertionError ( "distutils has already been patched by %r" % cls )
        return candidate
|
def san_managers ( self ) :
    """Gets the SanManagers API client.

    Returns:
        SanManagers: client instance, created lazily on first access and
        cached for subsequent calls.
    """
    client = self . __san_managers
    if not client :
        client = SanManagers ( self . __connection )
        self . __san_managers = client
    return client
|
def get_form ( self ) :
    """Returns an instance of the form to be used in this view.

    The base form is filtered down to the derived field list, excluded and
    read-only fields are removed, a hidden 'loc' field seeded from the HTTP
    referer is appended, and each remaining field is passed through the
    ``customize_form_field`` hook.
    """
    self . form = super ( SmartFormMixin , self ) . get_form ( )
    fields = list ( self . derive_fields ( ) )
    # apply our field filtering on our form class
    exclude = self . derive_exclude ( )
    exclude += self . derive_readonly ( )
    # remove any excluded fields
    for field in exclude :
        if field in self . form . fields :
            del self . form . fields [ field ]
    # NOTE(review): `fields` is always a list here, so this `is not None`
    # guard can never be False -- possibly a leftover from an earlier
    # signature where derive_fields() could return None; confirm.
    if fields is not None : # filter out our form fields
        remove = [ name for name in self . form . fields . keys ( ) if name not in fields ]
        for name in remove :
            del self . form . fields [ name ]
    # stuff in our referer as the default location for where to return
    location = forms . CharField ( widget = forms . widgets . HiddenInput ( ) , required = False )
    if ( 'HTTP_REFERER' in self . request . META ) :
        location . initial = self . request . META [ 'HTTP_REFERER' ]
    # add the location to our form fields
    self . form . fields [ 'loc' ] = location
    if fields :
        fields . append ( 'loc' )
    # provides a hook to programmatically customize fields before rendering
    # (re-assigning existing keys is safe while iterating: the dict's key
    # set does not change)
    for ( name , field ) in self . form . fields . items ( ) :
        field = self . customize_form_field ( name , field )
        self . form . fields [ name ] = field
    return self . form
|
def set_archive_layout ( self , archive_id , layout_type , stylesheet = None ) :
    """Use this method to change the layout of videos in an OpenTok archive.

    :param String archive_id: The ID of the archive that will be updated.
    :param String layout_type: The layout type for the archive. Valid values
        are: 'bestFit', 'custom', 'horizontalPresentation', 'pip' and
        'verticalPresentation'.
    :param String stylesheet optional: CSS used to style the custom layout.
        Specify this only if you set the type property to 'custom'.
    :raises ArchiveError: on HTTP 400 (invalid JSON or layout options).
    :raises AuthError: on HTTP 403.
    :raises RequestError: on any other non-200 response.
    """
    payload = { 'type' : layout_type }
    # The stylesheet is only meaningful for the 'custom' layout type.
    if layout_type == 'custom' and stylesheet is not None :
        payload [ 'stylesheet' ] = stylesheet
    url = self . endpoints . set_archive_layout_url ( archive_id )
    response = requests . put ( url , data = json . dumps ( payload ) , headers = self . json_headers ( ) , proxies = self . proxies , timeout = self . timeout )
    status = response . status_code
    if status == 200 :
        return
    if status == 400 :
        raise ArchiveError ( 'Invalid request. This response may indicate that data in your request data is invalid JSON. It may also indicate that you passed in invalid layout options.' )
    if status == 403 :
        raise AuthError ( 'Authentication error.' )
    raise RequestError ( 'OpenTok server error.' , status )
|
def populate ( self , obj = None , section = None , parse_types = True ) :
    """Set attributes in ``obj`` with ``setattr`` from the all values in
    ``section``.

    When ``parse_types`` is True, each string value is coerced in order:
    the literal 'None', then float, then int, then boolean, and finally an
    eval() expression extracted by EVAL_REGEXP; unmatched values stay as
    strings.  ``obj`` may be a dict (keys assigned) or any object
    (attributes assigned); a fresh Settings() is created when omitted.
    """
    section = self . default_section if section is None else section
    obj = Settings ( ) if obj is None else obj
    is_dict = isinstance ( obj , dict )
    for k , v in self . get_options ( section ) . items ( ) :
        if parse_types :
            if v == 'None' :
                v = None
            # NOTE(review): FLOAT is deliberately tested before INT --
            # presumably FLOAT_REGEXP requires a decimal point so integers
            # fall through; confirm the regexps keep that ordering valid.
            elif self . FLOAT_REGEXP . match ( v ) :
                v = float ( v )
            elif self . INT_REGEXP . match ( v ) :
                v = int ( v )
            elif self . BOOL_REGEXP . match ( v ) :
                v = v == 'True'
            else :
                m = self . EVAL_REGEXP . match ( v )
                if m :
                    evalstr = m . group ( 1 )
                    # SECURITY: eval() on a config-file value executes
                    # arbitrary code -- only use with trusted config sources.
                    v = eval ( evalstr )
        logger . debug ( 'setting {} => {} on {}' . format ( k , v , obj ) )
        if is_dict :
            obj [ k ] = v
        else :
            setattr ( obj , k , v )
    return obj
|
async def send_audio ( self , chat_id : typing . Union [ base . Integer , base . String ] , audio : typing . Union [ base . InputFile , base . String ] , caption : typing . Union [ base . String , None ] = None , parse_mode : typing . Union [ base . String , None ] = None , duration : typing . Union [ base . Integer , None ] = None , performer : typing . Union [ base . String , None ] = None , title : typing . Union [ base . String , None ] = None , thumb : typing . Union [ base . InputFile , base . String , None ] = None , disable_notification : typing . Union [ base . Boolean , None ] = None , reply_to_message_id : typing . Union [ base . Integer , None ] = None , reply_markup : typing . Union [ types . InlineKeyboardMarkup , types . ReplyKeyboardMarkup , types . ReplyKeyboardRemove , types . ForceReply , None ] = None ) -> types . Message :
    """Use this method to send audio files, if you want Telegram clients to display them in the music player.
    Your audio must be in the .mp3 format.
    For sending voice messages, use the sendVoice method instead.

    Source: https://core.telegram.org/bots/api#sendaudio

    :param chat_id: Unique identifier for the target chat or username of the target channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :param audio: Audio file to send
    :type audio: :obj:`typing.Union[base.InputFile, base.String]`
    :param caption: Audio caption, 0-1024 characters
    :type caption: :obj:`typing.Union[base.String, None]`
    :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic,
        fixed-width text or inline URLs in your bot's message.
    :type parse_mode: :obj:`typing.Union[base.String, None]`
    :param duration: Duration of the audio in seconds
    :type duration: :obj:`typing.Union[base.Integer, None]`
    :param performer: Performer
    :type performer: :obj:`typing.Union[base.String, None]`
    :param title: Track name
    :type title: :obj:`typing.Union[base.String, None]`
    :param thumb: Thumbnail of the file sent
    :type thumb: :obj:`typing.Union[base.InputFile, base.String, None]`
    :param disable_notification: Sends the message silently. Users will receive a notification with no sound
    :type disable_notification: :obj:`typing.Union[base.Boolean, None]`
    :param reply_to_message_id: If the message is a reply, ID of the original message
    :type reply_to_message_id: :obj:`typing.Union[base.Integer, None]`
    :param reply_markup: Additional interface options
    :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, types.ReplyKeyboardMarkup,
        types.ReplyKeyboardRemove, types.ForceReply, None]`
    :return: On success, the sent Message is returned
    :rtype: :obj:`types.Message`
    """
    reply_markup = prepare_arg ( reply_markup )
    # generate_payload reads locals(), so every parameter name above becomes
    # an API field -- do not rename parameters or introduce new locals
    # before this call.  'audio' is excluded because it is sent as a file.
    payload = generate_payload ( ** locals ( ) , exclude = [ 'audio' ] )
    # Fall back to the bot-wide default parse mode when none was given.
    if self . parse_mode :
        payload . setdefault ( 'parse_mode' , self . parse_mode )
    files = { }
    prepare_file ( payload , files , 'audio' , audio )
    result = await self . request ( api . Methods . SEND_AUDIO , payload , files )
    return types . Message ( ** result )
|
def _session ( ) :
    '''Create a session to be used when connecting to Zenoss.'''
    config = __salt__ [ 'config.option' ] ( 'zenoss' )
    zenoss_session = requests . session ( )
    # Zenoss endpoints here use basic auth and JSON bodies; certificate
    # verification is disabled for the connection.
    zenoss_session . verify = False
    zenoss_session . auth = ( config . get ( 'username' ) , config . get ( 'password' ) )
    zenoss_session . headers . update ( { 'Content-type' : 'application/json; charset=utf-8' } )
    return zenoss_session
|
def clean ( self : 'TSelf' , * , atol : float = 1e-9 ) -> 'TSelf' :
    """Remove terms with coefficients of absolute value atol or less.

    Mutates ``self._terms`` in place and returns ``self`` for chaining.
    """
    doomed = [ term for term , coeff in self . _terms . items ( ) if abs ( coeff ) <= atol ]
    for term in doomed :
        del self . _terms [ term ]
    return self
|
def _check_child_limits ( self , child_pid ) :
"""Check that inserting a child is within the limits ."""
|
if self . max_children is not None and self . children . count ( ) >= self . max_children :
raise PIDRelationConsistencyError ( "Max number of children is set to {}." . format ( self . max_children ) )
if self . max_parents is not None and PIDRelation . query . filter_by ( child = child_pid , relation_type = self . relation_type . id ) . count ( ) >= self . max_parents :
raise PIDRelationConsistencyError ( "This pid already has the maximum number of parents." )
|
def next ( self ) :
    """Return the next window.

    Wraps around to the first window handle after the last one.
    """
    handles = self . _browser . driver . window_handles
    wrapped_index = ( self . index + 1 ) % len ( handles )
    return Window ( self . _browser , handles [ wrapped_index ] )
|
def _fix_bias_shape ( self , op_name , inputs , attrs ) :
    """A workaround to reshape bias term to (1, num_channel).

    Only applies to broadcasting 'Add'/'Mul' ops whose second input is a
    stored parameter; the parameter and its symbol are rewritten in place
    in ``self._params`` / ``self._nodes``.
    """
    if ( op_name == 'Add' or op_name == 'Mul' ) and ( int ( len ( self . _params ) ) > 0 ) and ( 'broadcast' in attrs and attrs [ 'broadcast' ] == 1 ) :
        assert len ( list ( inputs ) ) == 2
        # The bias input may have been renamed earlier in the conversion.
        bias_name = self . _renames . get ( inputs [ 1 ] , inputs [ 1 ] )
        bias = self . _params [ bias_name ]
        assert len ( bias . shape ) == 1
        # reshape to (1, n) -- NOTE(review): the actual reshape target is
        # (1, n, 1, 1), i.e. NCHW broadcasting over spatial dims; the old
        # comment understates this.
        bias = mx . nd . array ( bias . asnumpy ( ) . reshape ( ( 1 , - 1 , 1 , 1 ) ) )
        # broadcast_add expects shape with sym.variable
        self . _nodes [ bias_name ] = mx . sym . Variable ( name = bias_name , shape = bias . shape )
        self . _params [ bias_name ] = bias
|
def response_text ( self , status , text = None , content_type = 'text/plain' , encoding = 'utf-8' , headers = None ) :
    """Send a plain-text response.

    When *text* is omitted, the body is derived from *status*: the status
    value itself if it is a string, otherwise its ``phrase`` attribute.
    """
    if text is None :
        text = status if isinstance ( status , str ) else status . phrase
    body = [ text . encode ( encoding ) ]
    return self . response ( status , content_type , body , headers = headers )
|
def bgc ( mag_file , dir_path = "." , input_dir_path = "" , meas_file = 'measurements.txt' , spec_file = 'specimens.txt' , samp_file = 'samples.txt' , site_file = 'sites.txt' , loc_file = 'locations.txt' , append = False , location = "unknown" , site = "" , samp_con = '1' , specnum = 0 , meth_code = "LP-NO" , volume = 12 , user = "" , timezone = 'US/Pacific' , noave = False ) :
    """Convert BGC format file to MagIC file(s).

    Parameters
    ----------
    mag_file : str
        input file name
    dir_path : str
        working directory, default "."
    input_dir_path : str
        input file directory IF different from dir_path, default ""
    meas_file : str
        output measurement file name, default "measurements.txt"
    spec_file : str
        output specimen file name, default "specimens.txt"
    samp_file : str
        output sample file name, default "samples.txt"
    site_file : str
        output site file name, default "sites.txt"
    loc_file : str
        output location file name, default "locations.txt"
    append : bool
        append output files to existing files instead of overwrite, default False
    location : str
        location name, default "unknown"
    site : str
        site name, default ""
    samp_con : str
        sample/site naming convention, default '1', see info below
    specnum : int
        number of characters to designate a specimen, default 0
    meth_code : str
        orientation method codes, default "LP-NO"
        e.g. [SO-MAG, SO-SUN, SO-SIGHT, ...]
    volume : float
        volume in ccs, default 12.
    user : str
        user name, default ""
    timezone : str
        timezone in pytz library format, default "US/Pacific"
        list of timezones can be found at http://pytz.sourceforge.net/
    noave : bool
        do not average duplicate measurements, default False (so by default, DO average)

    Returns
    -------
    Tuple : (True or False indicating if conversion was successful, meas_file name written)

    Info
    ----
    Sample naming convention:
        [1] XXXXY: where XXXX is an arbitrary length site designation and Y
            is the single character sample designation.  e.g., TG001a is the
            first sample from site TG001.    [default]
        [2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
        [4-Z] XXXX[YYY]:  YYY is sample designation with Z characters from site XXX
        [5] site name same as sample
        [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
        [7-Z] [XXXX]YYY:  XXXX is site designation with Z characters with sample name XXXXYYYY
    """
    version_num = pmag . get_version ( )
    input_dir_path , output_dir_path = pmag . fix_directories ( input_dir_path , dir_path )
    samp_con = str ( samp_con )
    # Negative index used below to strip the specimen suffix: sample = specimen[:specnum]
    specnum = - int ( specnum )
    volume *= 1e-6
    # convert cc to m^3
    if "4" in samp_con :
        if "-" not in samp_con :
            print ( "option [4] must be in form 4-Z where Z is an integer" )
            return False , "option [4] must be in form 4-Z where Z is an integer"
        else :
            Z = int ( samp_con . split ( "-" ) [ 1 ] )
            samp_con = "4"
    # NOTE(review): this is "if ... / if ... else", not "elif": when
    # samp_con was "4-Z", the branch above rewrites it to "4", the "7"
    # test then fails and the else clause resets Z to 1, clobbering the
    # Z parsed for option 4 -- confirm whether that is intended.
    if "7" in samp_con :
        if "-" not in samp_con :
            print ( "option [7] must be in form 7-Z where Z is an integer" )
            return False , "option [7] must be in form 7-Z where Z is an integer"
        else :
            Z = int ( samp_con . split ( "-" ) [ 1 ] )
            samp_con = "7"
    else :
        Z = 1
    # format variables
    mag_file = os . path . join ( input_dir_path , mag_file )
    if not os . path . isfile ( mag_file ) :
        print ( "%s is not a BGC file" % mag_file )
        return False , 'You must provide a BCG format file'
    # Open up the BGC file and read the header information
    print ( 'mag_file in bgc_magic' , mag_file )
    pre_data = open ( mag_file , 'r' )
    line = pre_data . readline ( )
    line_items = line . split ( ' ' )
    specimen = line_items [ 2 ]
    specimen = specimen . replace ( '\n' , '' )
    line = pre_data . readline ( )
    line = pre_data . readline ( )
    # Third header line holds orientation / position / volume fields.
    line_items = line . split ( '\t' )
    azimuth = float ( line_items [ 1 ] )
    dip = float ( line_items [ 2 ] )
    bed_dip = line_items [ 3 ]
    sample_bed_azimuth = line_items [ 4 ]
    lon = line_items [ 5 ]
    lat = line_items [ 6 ]
    tmp_volume = line_items [ 7 ]
    # NOTE(review): tmp_volume is a str from split(), so this comparison
    # with the float 0.0 is always True; presumably the intent was to skip
    # a zero volume -- confirm ("0" would still override the default).
    if tmp_volume != 0.0 :
        volume = float ( tmp_volume ) * 1e-6
    pre_data . close ( )
    # Measurement rows start after the 4-line header.
    data = pd . read_csv ( mag_file , sep = '\t' , header = 3 , index_col = False )
    cart = np . array ( [ data [ 'X' ] , data [ 'Y' ] , data [ 'Z' ] ] ) . transpose ( )
    direction = pmag . cart2dir ( cart ) . transpose ( )
    data [ 'dir_dec' ] = direction [ 0 ]
    data [ 'dir_inc' ] = direction [ 1 ]
    # the data are in EMU - this converts to Am^2
    data [ 'magn_moment' ] = direction [ 2 ] / 1000
    data [ 'magn_volume' ] = ( direction [ 2 ] / 1000 ) / volume
    # EMU - data converted to A/m
    # Configure the magic_measurements table
    MeasRecs , SpecRecs , SampRecs , SiteRecs , LocRecs = [ ] , [ ] , [ ] , [ ] , [ ]
    for rowNum , row in data . iterrows ( ) :
        MeasRec , SpecRec , SampRec , SiteRec , LocRec = { } , { } , { } , { } , { }
        if specnum != 0 :
            sample = specimen [ : specnum ]
        else :
            sample = specimen
        # Once derived, `site` persists for all subsequent rows.
        if site == '' :
            site = pmag . parse_site ( sample , samp_con , Z )
        # Each of the four record types is only added once per unique ID.
        if specimen != "" and specimen not in [ x [ 'specimen' ] if 'specimen' in list ( x . keys ( ) ) else "" for x in SpecRecs ] :
            SpecRec [ 'specimen' ] = specimen
            SpecRec [ 'sample' ] = sample
            SpecRec [ 'volume' ] = volume
            SpecRec [ 'analysts' ] = user
            SpecRec [ 'citations' ] = 'This study'
            SpecRecs . append ( SpecRec )
        if sample != "" and sample not in [ x [ 'sample' ] if 'sample' in list ( x . keys ( ) ) else "" for x in SampRecs ] :
            SampRec [ 'sample' ] = sample
            SampRec [ 'site' ] = site
            SampRec [ 'azimuth' ] = azimuth
            SampRec [ 'dip' ] = dip
            SampRec [ 'bed_dip_direction' ] = sample_bed_azimuth
            SampRec [ 'bed_dip' ] = bed_dip
            SampRec [ 'method_codes' ] = meth_code
            SampRec [ 'analysts' ] = user
            SampRec [ 'citations' ] = 'This study'
            SampRecs . append ( SampRec )
        if site != "" and site not in [ x [ 'site' ] if 'site' in list ( x . keys ( ) ) else "" for x in SiteRecs ] :
            SiteRec [ 'site' ] = site
            SiteRec [ 'location' ] = location
            SiteRec [ 'lat' ] = lat
            SiteRec [ 'lon' ] = lon
            SiteRec [ 'analysts' ] = user
            SiteRec [ 'citations' ] = 'This study'
            SiteRecs . append ( SiteRec )
        if location != "" and location not in [ x [ 'location' ] if 'location' in list ( x . keys ( ) ) else "" for x in LocRecs ] :
            LocRec [ 'location' ] = location
            LocRec [ 'analysts' ] = user
            LocRec [ 'citations' ] = 'This study'
            LocRec [ 'lat_n' ] = lat
            LocRec [ 'lon_e' ] = lon
            LocRec [ 'lat_s' ] = lat
            LocRec [ 'lon_w' ] = lon
            LocRecs . append ( LocRec )
        MeasRec [ 'description' ] = 'Date: ' + str ( row [ 'Date' ] ) + ' Time: ' + str ( row [ 'Time' ] )
        # Accept '.', '/' or '-' separated dates; warn and blank otherwise.
        if '.' in row [ 'Date' ] :
            datelist = row [ 'Date' ] . split ( '.' )
        elif '/' in row [ 'Date' ] :
            datelist = row [ 'Date' ] . split ( '/' )
        elif '-' in row [ 'Date' ] :
            datelist = row [ 'Date' ] . split ( '-' )
        else :
            print ( "unrecogized date formating on one of the measurement entries for specimen %s" % specimen )
            datelist = [ '' , '' , '' ]
        if ':' in row [ 'Time' ] :
            timelist = row [ 'Time' ] . split ( ':' )
        else :
            print ( "unrecogized time formating on one of the measurement entries for specimen %s" % specimen )
            timelist = [ '' , '' , '' ]
        # Two-digit years are assumed to be 19xx.
        datelist [ 2 ] = '19' + datelist [ 2 ] if len ( datelist [ 2 ] ) <= 2 else datelist [ 2 ]
        dt = ":" . join ( [ datelist [ 1 ] , datelist [ 0 ] , datelist [ 2 ] , timelist [ 0 ] , timelist [ 1 ] , timelist [ 2 ] ] )
        # Localize in the given timezone, then express as UTC with a Z suffix.
        local = pytz . timezone ( timezone )
        naive = datetime . datetime . strptime ( dt , "%m:%d:%Y:%H:%M:%S" )
        local_dt = local . localize ( naive , is_dst = None )
        utc_dt = local_dt . astimezone ( pytz . utc )
        timestamp = utc_dt . strftime ( "%Y-%m-%dT%H:%M:%S" ) + "Z"
        MeasRec [ "timestamp" ] = timestamp
        MeasRec [ "citations" ] = "This study"
        MeasRec [ 'software_packages' ] = version_num
        MeasRec [ "treat_temp" ] = '%8.3e' % ( 273 )
        # room temp in kelvin
        MeasRec [ "meas_temp" ] = '%8.3e' % ( 273 )
        # room temp in kelvin
        MeasRec [ "quality" ] = 'g'
        MeasRec [ "standard" ] = 'u'
        MeasRec [ "treat_step_num" ] = rowNum
        MeasRec [ "specimen" ] = specimen
        MeasRec [ "treat_ac_field" ] = '0'
        # Map the BGC demag columns to MagIC method codes / treatments.
        if row [ 'DM Val' ] == '0' :
            meas_type = "LT-NO"
        elif int ( row [ 'DM Type' ] ) > 0.0 :
            meas_type = "LT-AF-Z"
            treat = float ( row [ 'DM Val' ] )
            MeasRec [ "treat_ac_field" ] = '%8.3e' % ( treat * 1e-3 )
            # convert from mT to tesla
        elif int ( row [ 'DM Type' ] ) == - 1 :
            meas_type = "LT-T-Z"
            treat = float ( row [ 'DM Val' ] )
            MeasRec [ "treat_temp" ] = '%8.3e' % ( treat + 273. )
            # temp in kelvin
        else :
            # NOTE(review): on an unknown type, meas_type keeps the value
            # from the previous row (or is unbound on the first row).
            print ( "measurement type unknown:" , row [ 'DM Type' ] , " in row " , rowNum )
        MeasRec [ "magn_moment" ] = str ( row [ 'magn_moment' ] )
        MeasRec [ "magn_volume" ] = str ( row [ 'magn_volume' ] )
        MeasRec [ "dir_dec" ] = str ( row [ 'dir_dec' ] )
        MeasRec [ "dir_inc" ] = str ( row [ 'dir_inc' ] )
        MeasRec [ 'method_codes' ] = meas_type
        MeasRec [ 'dir_csd' ] = '0.0'
        # added due to magic.write error
        MeasRec [ 'meas_n_orient' ] = '1'
        # added due to magic.write error
        MeasRecs . append ( MeasRec . copy ( ) )
    # Assemble and write all MagIC tables.
    con = cb . Contribution ( output_dir_path , read_tables = [ ] )
    con . add_magic_table_from_data ( dtype = 'specimens' , data = SpecRecs )
    con . add_magic_table_from_data ( dtype = 'samples' , data = SampRecs )
    con . add_magic_table_from_data ( dtype = 'sites' , data = SiteRecs )
    con . add_magic_table_from_data ( dtype = 'locations' , data = LocRecs )
    MeasOuts = pmag . measurements_methods3 ( MeasRecs , noave )
    con . add_magic_table_from_data ( dtype = 'measurements' , data = MeasOuts )
    con . write_table_to_file ( 'specimens' , custom_name = spec_file , append = append )
    con . write_table_to_file ( 'samples' , custom_name = samp_file , append = append )
    con . write_table_to_file ( 'sites' , custom_name = site_file , append = append )
    con . write_table_to_file ( 'locations' , custom_name = loc_file , append = append )
    meas_file = con . write_table_to_file ( 'measurements' , custom_name = meas_file , append = append )
    return True , meas_file
|
def _get_kwsdag ( self , goids , go2obj , ** kws_all ) :
"""Get keyword args for a GoSubDag ."""
|
kws_dag = { }
# Term Counts for GO Term information score
tcntobj = self . _get_tcntobj ( goids , go2obj , ** kws_all )
# TermCounts or None
if tcntobj is not None :
kws_dag [ 'tcntobj' ] = tcntobj
# GO letters specified by the user
if 'go_aliases' in kws_all :
fin_go_aliases = kws_all [ 'go_aliases' ]
if os . path . exists ( fin_go_aliases ) :
go2letter = read_d1_letter ( fin_go_aliases )
if go2letter :
kws_dag [ 'go2letter' ] = go2letter
return kws_dag
|
def computeRatModuleParametersFromCellCount ( cellsPerAxis , baselineCellsPerAxis = 6 ) :
    """Compute 'cellsPerAxis', 'bumpSigma', and 'activeFiringRate' parameters
    for :class:`ThresholdedGaussian2DLocationModule` given the number of cells
    per axis.  See :func:`createRatModuleFromCellCount`.
    """
    # Sigma shrinks proportionally as the axis resolution grows.
    scale = baselineCellsPerAxis / float ( cellsPerAxis )
    sigma = RAT_BUMP_SIGMA * scale
    firingRate = ThresholdedGaussian2DLocationModule . chooseReliableActiveFiringRate ( cellsPerAxis , sigma )
    return {
        "cellsPerAxis" : cellsPerAxis ,
        "bumpSigma" : sigma ,
        "activeFiringRate" : firingRate ,
    }
|
def change_encryption ( self , current_password , cipher , new_password , new_password_id ) :
    """Starts encryption of this medium, re-encrypting its stored data.

    The medium is placed in :py:attr:`MediumState.locked_write` state while
    the operation runs; completion is tracked via the returned progress
    object.

    in current_password of type str
        The current password the medium is protected with.  Use an empty
        string to indicate that the medium isn't encrypted.
    in cipher of type str
        The cipher to use for encryption.  An empty string indicates no
        encryption for the result.
    in new_password of type str
        The new password the medium should be protected with.  An empty
        password and password ID will result in the medium being encrypted
        with the current password.
    in new_password_id of type str
        The ID of the new password when unlocking the medium.
    return progress of type :class:`IProgress`
        Progress object to track the operation completion.
    raises :class:`VBoxErrorNotSupported`
        Encryption is not supported for this medium because it is attached
        to more than one VM or has children.
    """
    # Validate every string argument up front (Python 2 basestring).
    arguments = ( ( "current_password" , current_password ) ,
                  ( "cipher" , cipher ) ,
                  ( "new_password" , new_password ) ,
                  ( "new_password_id" , new_password_id ) )
    for arg_name , arg_value in arguments :
        if not isinstance ( arg_value , basestring ) :
            raise TypeError ( "%s can only be an instance of type basestring" % arg_name )
    raw_progress = self . _call ( "changeEncryption" , in_p = [ current_password , cipher , new_password , new_password_id ] )
    return IProgress ( raw_progress )
|
def _getDiagnosticString ( ) :
    """Generate a diagnostic string, showing the module version, the platform, current directory etc.

    Returns:
        A descriptive string.
    """
    text = '\n## Diagnostic output from minimalmodbus ## \n\n'
    text += 'Minimalmodbus version: ' + __version__ + '\n'
    text += 'Minimalmodbus status: ' + __status__ + '\n'
    text += 'File name (with relative path): ' + __file__ + '\n'
    text += 'Full file path: ' + os . path . abspath ( __file__ ) + '\n\n'
    text += 'pySerial version: ' + serial . VERSION + '\n'
    text += 'pySerial full file path: ' + os . path . abspath ( serial . __file__ ) + '\n\n'
    text += 'Platform: ' + sys . platform + '\n'
    text += 'Filesystem encoding: ' + repr ( sys . getfilesystemencoding ( ) ) + '\n'
    text += 'Byteorder: ' + sys . byteorder + '\n'
    text += 'Python version: ' + sys . version + '\n'
    text += 'Python version info: ' + repr ( sys . version_info ) + '\n'
    text += 'Python flags: ' + repr ( sys . flags ) + '\n'
    text += 'Python argv: ' + repr ( sys . argv ) + '\n'
    text += 'Python prefix: ' + repr ( sys . prefix ) + '\n'
    text += 'Python exec prefix: ' + repr ( sys . exec_prefix ) + '\n'
    text += 'Python executable: ' + repr ( sys . executable ) + '\n'
    try :
        # sys.long_info only exists on Python 2; AttributeError on Python 3.
        # (The previous bare "except:" also swallowed unrelated errors.)
        text += 'Long info: ' + repr ( sys . long_info ) + '\n'
    except AttributeError :
        text += 'Long info: (none)\n'
    # For Python3 compatibility
    try :
        # sys.float_repr_style was added in Python 2.6 / 3.0.
        text += 'Float repr style: ' + repr ( sys . float_repr_style ) + '\n\n'
    except AttributeError :
        text += 'Float repr style: (none) \n\n'
    # For Python 2.6 compatibility
    text += 'Variable __name__: ' + __name__ + '\n'
    text += 'Current directory: ' + os . getcwd ( ) + '\n\n'
    text += 'Python path: \n'
    text += '\n' . join ( sys . path ) + '\n'
    text += '\n## End of diagnostic output ## \n'
    return text
|
def _read_mode_rsralt(self, size, kind):
    """Read Router Alert option.

    Positional arguments:
        size - int, length of option
        kind - int, 148 (RTRALT)

    Returns:
        * dict -- extracted Router Alert (RTRALT) option

    Structure of Router Alert (RTRALT) option [RFC 2113]:
        |10010100|00000100|  2 octet value  |

        Octets  Bits  Name                   Description
        0       0     ip.rsralt.kind         Kind (148)
        0       0     ip.rsralt.type.copy    Copied Flag (1)
        0       1     ip.rsralt.type.class   Option Class (0)
        0       3     ip.rsralt.type.number  Option Number (20)
        1       8     ip.rsralt.length       Length (4)
        2       16    ip.rsralt.alert        Alert
        2       16    ip.rsralt.code         Alert Code
    """
    # RFC 2113 fixes the option length at exactly 4 octets.
    if size != 4:
        raise ProtocolError(f'{self.alias}: [Optno {kind}] invalid format')
    alert_code = self._read_unpack(2)
    return {
        'kind': kind,
        'type': self._read_opt_type(kind),
        'length': size,
        'alert': _ROUTER_ALERT.get(alert_code, 'Reserved'),
        'code': alert_code,
    }
|
def clean(outputdir, drivers=None):
    """Remove driver executables from the specified outputdir.

    :param outputdir: directory holding the downloaded driver executables
    :param drivers: optional list of driver names used to filter which
        executables to remove. A version may be pinned with an equal sign,
        i.e.: 'chrome=2.2'
    """
    if drivers:
        # Generate a list of tuples: [(driver_name, requested_version)]
        # If a driver string does not contain a version, the second element
        # of the tuple is None, e.g. [('driver_a', '2.2'), ('driver_b', None)]
        drivers_split = [helpers.split_driver_name_and_version(x) for x in drivers]
        file_data = [(helpers.normalize_driver_name(x[0]), x[1]) for x in drivers_split]
    else:
        file_data = [(x, None) for x in config.ALL_DRIVERS]
    filenames = [f for f in os.listdir(outputdir)
                 if os.path.isfile(os.path.join(outputdir, f))]
    for filename in filenames:
        for prefix, version in file_data:
            if not filename.startswith(prefix):
                continue
            if version is not None:
                # A version was pinned: only remove files whose embedded
                # version matches it exactly.
                if helpers.extract_version_from_filename(filename) != version:
                    continue
            filepath = os.path.join(outputdir, filename)
            try:
                os.remove(filepath)
            except OSError:
                # Removal failed; skip logging. (Bug fix: the old code logged
                # 'removed ...' in a finally block even when os.remove raised.)
                pass
            else:
                logger.info('removed {}'.format(filename))
            break
|
def get_relation(self, relation):
    """Get the relation instance for the given relation name.

    :rtype: orator.orm.relations.Relation
    """
    from .relations import Relation

    # Instantiate the relation without applying its default constraints.
    with Relation.no_constraints(True):
        relation_instance = getattr(self.get_model(), relation)()

    # Forward any nested eager-load specs (e.g. 'a.b') onto the relation query.
    nested = self._nested_relations(relation)
    if len(nested) > 0:
        relation_instance.get_query().with_(nested)

    return relation_instance
|
def scan(self):
    """Trigger the wifi interface to scan."""
    # Log which interface initiates the scan, then delegate to the
    # underlying wifi controller, passing the raw interface object.
    self._logger.info("iface '%s' scans", self.name())
    self._wifi_ctrl.scan(self._raw_obj)
|
def present(name,
            pipeline_objects=None,
            pipeline_objects_from_pillars='boto_datapipeline_pipeline_objects',
            parameter_objects=None,
            parameter_objects_from_pillars='boto_datapipeline_parameter_objects',
            parameter_values=None,
            parameter_values_from_pillars='boto_datapipeline_parameter_values',
            region=None, key=None, keyid=None, profile=None):
    '''
    Ensure the data pipeline exists with matching definition.

    name
        Name of the service to ensure a data pipeline exists for.

    pipeline_objects
        Pipeline objects to use. Will override objects read from pillars.

    pipeline_objects_from_pillars
        The pillar key to use for lookup.

    parameter_objects
        Parameter objects to use. Will override objects read from pillars.

    parameter_objects_from_pillars
        The pillar key to use for lookup.

    parameter_values
        Parameter values to use. Will override values read from pillars.

    parameter_values_from_pillars
        The pillar key to use for lookup.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    pipeline_objects = pipeline_objects or {}
    parameter_objects = parameter_objects or {}
    parameter_values = parameter_values or {}

    present, old_pipeline_definition = _pipeline_present_with_definition(
        name,
        _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
        _parameter_objects(parameter_objects_from_pillars, parameter_objects),
        _parameter_values(parameter_values_from_pillars, parameter_values),
        region=region, key=key, keyid=keyid, profile=profile,
    )
    if present:
        ret['comment'] = 'AWS data pipeline {0} present'.format(name)
        return ret

    if __opts__['test']:
        ret['comment'] = 'Data pipeline {0} is set to be created or updated'.format(name)
        ret['result'] = None
        return ret

    result_create_pipeline = __salt__['boto_datapipeline.create_pipeline'](
        name, name, region=region, key=key, keyid=keyid, profile=profile,
    )
    if 'error' in result_create_pipeline:
        ret['result'] = False
        ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
            name, result_create_pipeline['error'])
        return ret

    pipeline_id = result_create_pipeline['result']

    result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition'](
        pipeline_id,
        _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
        parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects),
        parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values),
        region=region, key=key, keyid=keyid, profile=profile,
    )
    if 'error' in result_pipeline_definition:
        if _immutable_fields_error(result_pipeline_definition):
            # If update not possible, delete and retry from scratch.
            result_delete_pipeline = __salt__['boto_datapipeline.delete_pipeline'](
                pipeline_id, region=region, key=key, keyid=keyid, profile=profile,
            )
            if 'error' in result_delete_pipeline:
                ret['result'] = False
                ret['comment'] = 'Failed to delete data pipeline {0}: {1}'.format(
                    pipeline_id, result_delete_pipeline['error'])
                return ret

            result_create_pipeline = __salt__['boto_datapipeline.create_pipeline'](
                name, name, region=region, key=key, keyid=keyid, profile=profile,
            )
            if 'error' in result_create_pipeline:
                ret['result'] = False
                ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
                    name, result_create_pipeline['error'])
                return ret

            pipeline_id = result_create_pipeline['result']

            result_pipeline_definition = __salt__['boto_datapipeline.put_pipeline_definition'](
                pipeline_id,
                _pipeline_objects(pipeline_objects_from_pillars, pipeline_objects),
                parameter_objects=_parameter_objects(parameter_objects_from_pillars, parameter_objects),
                parameter_values=_parameter_values(parameter_values_from_pillars, parameter_values),
                region=region, key=key, keyid=keyid, profile=profile,
            )
        if 'error' in result_pipeline_definition:
            # Still erroring after possible retry
            ret['result'] = False
            ret['comment'] = 'Failed to create data pipeline {0}: {1}'.format(
                name, result_pipeline_definition['error'])
            return ret

    result_activate_pipeline = __salt__['boto_datapipeline.activate_pipeline'](
        pipeline_id, region=region, key=key, keyid=keyid, profile=profile,
    )
    if 'error' in result_activate_pipeline:
        ret['result'] = False
        # Bug fix: this branch previously reported the (successful)
        # put_pipeline_definition result and claimed creation failed.
        ret['comment'] = 'Failed to activate data pipeline {0}: {1}'.format(
            name, result_activate_pipeline['error'])
        return ret

    pipeline_definition_result = __salt__['boto_datapipeline.get_pipeline_definition'](
        pipeline_id, version='active',
        region=region, key=key, keyid=keyid, profile=profile,
    )
    if 'error' in pipeline_definition_result:
        new_pipeline_definition = {}
    else:
        new_pipeline_definition = _standardize(pipeline_definition_result['result'])

    if not old_pipeline_definition:
        ret['changes']['new'] = 'Pipeline created.'
        ret['comment'] = 'Data pipeline {0} created'.format(name)
    else:
        ret['changes']['diff'] = _diff(old_pipeline_definition, new_pipeline_definition)
        ret['comment'] = 'Data pipeline {0} updated'.format(name)
    return ret
|
def query(self, sql, parameters=None):
    """A generator to issue a query on the server, mogrifying the
    parameters against the sql statement. Results are returned as a
    :py:class:`queries.Results` object which can act as an iterator and
    has multiple ways to access the result data.

    :param str sql: The SQL statement
    :param dict parameters: A dictionary of query parameters
    :rtype: queries.Results
    :raises: queries.DataError
    :raises: queries.DatabaseError
    :raises: queries.IntegrityError
    :raises: queries.InternalError
    :raises: queries.InterfaceError
    :raises: queries.NotSupportedError
    :raises: queries.OperationalError
    :raises: queries.ProgrammingError
    """
    try:
        self._cursor.execute(sql, parameters)
    except psycopg2.Error:
        self._incr_exceptions()
        # Bug fix: re-raise with a bare `raise` so the original traceback is
        # preserved (raising the bound name rewrote the traceback origin).
        raise
    finally:
        # Count every execution attempt, successful or not.
        self._incr_executions()
    return results.Results(self._cursor)
|
def from_array(cls, arr):
    """Convert a structured NumPy array into a Table.

    Each named field of ``arr`` becomes one column, keyed by the field name.
    """
    columns = [(name, arr[name]) for name in arr.dtype.names]
    return cls().with_columns(columns)
|
def stonith_present(name, stonith_id, stonith_device_type, stonith_device_options=None, cibname=None):
    '''
    Ensure that a fencing resource is created.

    Should be run on one cluster node only (there may be races).
    Can only be run on a node with a functional pacemaker/corosync.

    name
        Irrelevant, not used (recommended: pcs_stonith__created_{{stonith_id}})
    stonith_id
        name for the stonith resource
    stonith_device_type
        name of the stonith agent fence_eps, fence_xvm f.e.
    stonith_device_options
        additional options for creating the stonith resource
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        pcs_stonith__created_eps_fence:
            pcs.stonith_present:
                - stonith_id: eps_fence
                - stonith_device_type: fence_eps
                - stonith_device_options:
                    - 'pcmk_host_map=node1.example.org:01;node2.example.org:02'
                    - 'ipaddr=myepsdevice.example.org'
                    - 'power_wait=5'
                    - 'verbose=1'
                    - 'debug=/var/log/pcsd/eps_fence.log'
                    - 'login=hidden'
                    - 'passwd=hoonetorg'
                - cibname: cib_for_stonith
    '''
    # Thin wrapper: delegate to the generic item-present helper with the
    # stonith-specific item kind.
    return _item_present(
        name=name,
        item='stonith',
        item_id=stonith_id,
        item_type=stonith_device_type,
        extra_args=stonith_device_options,
        cibname=cibname,
    )
|
def shell_safe_json_parse(json_or_dict_string, preserve_order=False):
    """Allows the passing of JSON or Python dictionary strings. This is needed
    because certain JSON strings in CMD shell are not received in main's argv.
    This allows the user to specify the alternative notation, which does not
    have this problem (but is technically not JSON).
    """
    if preserve_order:
        from collections import OrderedDict
        pairs_hook = OrderedDict
    else:
        pairs_hook = None
    try:
        return json.loads(json_or_dict_string, object_pairs_hook=pairs_hook)
    except ValueError:
        # Not valid JSON; fall back to safely evaluating it as a Python
        # literal (handles single-quoted dict notation).
        import ast
        return ast.literal_eval(json_or_dict_string)
|
def visualize_diagram(bpmn_diagram):
    """Shows a simple visualization of diagram.

    :param bpmn_diagram: an instance of BPMNDiagramGraph class.
    """
    g = bpmn_diagram.diagram_graph
    pos = bpmn_diagram.get_nodes_positions()

    # One draw call per BPMN element type; shape encodes the element family:
    # 's'quare for activities, 'd'iamond for gateways, 'o' (circle) for events.
    # (Refactor: replaces eleven copy-pasted draw_networkx_nodes calls.)
    shape_by_type = [
        (consts.Consts.task, 's'),
        (consts.Consts.subprocess, 's'),
        (consts.Consts.complex_gateway, 'd'),
        (consts.Consts.event_based_gateway, 'o'),
        (consts.Consts.inclusive_gateway, 'd'),
        (consts.Consts.exclusive_gateway, 'd'),
        (consts.Consts.parallel_gateway, 'd'),
        (consts.Consts.start_event, 'o'),
        (consts.Consts.intermediate_catch_event, 'o'),
        (consts.Consts.end_event, 'o'),
        (consts.Consts.intermediate_throw_event, 'o'),
    ]
    for node_type, shape in shape_by_type:
        nx.draw_networkx_nodes(g, pos, node_shape=shape, node_color='white',
                               nodelist=bpmn_diagram.get_nodes_id_list_by_type(node_type))

    node_labels = {}
    for node_id, attrs in g.nodes(data=True):
        node_labels[node_id] = attrs.get(consts.Consts.node_name)
    nx.draw_networkx_labels(g, pos, node_labels)

    nx.draw_networkx_edges(g, pos)
    edge_labels = {}
    for source, target, attrs in g.edges(data=True):
        edge_labels[(source, target)] = attrs.get(consts.Consts.name)
    nx.draw_networkx_edge_labels(g, pos, edge_labels)

    plt.show()
|
def _writeResponse(self, response, request, status=200):
    """Write an XML response body onto a twisted-style request and finish it.

    request -- request message
    response -- response message
    status -- HTTP Status (default 200)
    """
    request.setResponseCode(status)
    # Advertise the charset only when an explicit encoding is configured.
    if self.encoding is None:
        content_type = "text/xml"
    else:
        content_type = 'text/xml; charset="%s"' % self.encoding
    request.setHeader("Content-Type", content_type)
    request.setHeader("Content-Length", str(len(response)))
    request.write(response)
    request.finish()
|
def parameters_changed(self):
    """Recompute gradients and the log marginal likelihood after the model
    parameters have changed.

    Pulls the state-space (SDE) matrices from the kernel, appends the
    Gaussian measurement noise variance as one extra parameter, optionally
    balances the state-space model, then runs the continuous-discrete Kalman
    filter to obtain the log likelihood and its gradient, which are pushed
    back into the likelihood and kernel objects.
    """
    # np.set_printoptions(16)
    # print(self.param_array)
    # Get the model matrices from the kernel
    (F, L, Qc, H, P_inf, P0, dFt, dQct, dP_inft, dP0t) = self.kern.sde()
    # necessary parameters
    measurement_dim = self.output_dim
    # +1 because we also add measurement noise as a parameter
    grad_params_no = dFt.shape[2] + 1
    # add measurement noise as a parameter and get the gradient matrices:
    # allocate one extra gradient slot (the last one) for the noise.
    dF = np.zeros([dFt.shape[0], dFt.shape[1], grad_params_no])
    dQc = np.zeros([dQct.shape[0], dQct.shape[1], grad_params_no])
    dP_inf = np.zeros([dP_inft.shape[0], dP_inft.shape[1], grad_params_no])
    dP0 = np.zeros([dP0t.shape[0], dP0t.shape[1], grad_params_no])
    # Assign the values for the kernel function (all slots except the last)
    dF[:, :, :-1] = dFt
    dQc[:, :, :-1] = dQct
    dP_inf[:, :, :-1] = dP_inft
    dP0[:, :, :-1] = dP0t
    # The sigma2 derivative: noise variance enters only through the
    # measurement covariance R, so its gradient slot is the identity.
    dR = np.zeros([measurement_dim, measurement_dim, grad_params_no])
    dR[:, :, -1] = np.eye(measurement_dim)
    # Balancing (numerical conditioning of the state-space matrices)
    if self.balance:
        (F, L, Qc, H, P_inf, P0, dF, dQc, dP_inf, dP0) = ssm.balance_ss_model(F, L, Qc, H, P_inf, P0, dF, dQc, dP_inf, dP0)
        print("SSM parameters_changed balancing!")
    # Use the Kalman filter to evaluate the likelihood
    grad_calc_params = {}
    grad_calc_params['dP_inf'] = dP_inf
    grad_calc_params['dF'] = dF
    grad_calc_params['dQc'] = dQc
    grad_calc_params['dR'] = dR
    grad_calc_params['dP_init'] = dP0
    kalman_filter_type = self.kalman_filter_type
    # The following code is required because sometimes the shape of self.Y
    # becomes 3D even though it must be 2D. The reason is undiscovered.
    # NOTE(review): this mutates self.Y's shape in place — TODO confirm
    # downstream code tolerates that.
    Y = self.Y
    if self.ts_number is None:
        Y.shape = (self.num_data, 1)
    else:
        Y.shape = (self.num_data, 1, self.ts_number)
    (filter_means, filter_covs, log_likelihood, grad_log_likelihood, SmootherMatrObject) = ssm.ContDescrStateSpace.cont_discr_kalman_filter(F, L, Qc, H, float(self.Gaussian_noise.variance), P_inf, self.X, Y, m_init=None, P_init=P0, p_kalman_filter_type=kalman_filter_type, calc_log_likelihood=True, calc_grad_log_likelihood=True, grad_params_no=grad_params_no, grad_calc_params=grad_calc_params)
    # Diagnostics only; NaNs are reported but not handled here.
    if np.any(np.isfinite(log_likelihood) == False):
        # import pdb; pdb.set_trace()
        print("State-Space: NaN valkues in the log_likelihood")
    if np.any(np.isfinite(grad_log_likelihood) == False):
        # import pdb; pdb.set_trace()
        print("State-Space: NaN values in the grad_log_likelihood")
    # print(grad_log_likelihood)
    # Sum gradient contributions over the time axis, then split: last entry
    # is the Gaussian-noise gradient, the rest belong to the kernel.
    grad_log_likelihood_sum = np.sum(grad_log_likelihood, axis=1)
    grad_log_likelihood_sum.shape = (grad_log_likelihood_sum.shape[0], 1)
    self._log_marginal_likelihood = np.sum(log_likelihood, axis=1)
    self.likelihood.update_gradients(grad_log_likelihood_sum[-1, 0])
    self.kern.sde_update_gradient_full(grad_log_likelihood_sum[:-1, 0])
|
def checkout_moses_tokenizer(workspace_dir: str):
    """Checkout Moses tokenizer (sparse checkout of Moses).

    :param workspace_dir: Workspace directory.
    """
    # Prerequisites
    check_git()
    check_perl()
    # Reuse an existing checkout when it is already at the pinned commit.
    dest = os.path.join(workspace_dir, DIR_THIRD_PARTY, MOSES_DEST)
    if confirm_checkout(dest, MOSES_COMMIT):
        logging.info("Usable: %s", dest)
        return
    # Need to (re-)checkout
    if os.path.exists(dest):
        shutil.rmtree(dest)
    logging.info("Checkout: %s -> %s", MOSES_REPO, dest)
    os.makedirs(dest)
    log_fname = os.path.join(workspace_dir, DIR_LOGS,
                             "checkout.{}.{}.log".format(MOSES_DEST, os.getpid()))
    with open(log_fname, "wb") as log:
        logging.info("Log: %s", log_fname)

        def run_git(args):
            # Every git command runs inside the checkout dir with output
            # captured in the shared log file.
            subprocess.call(["git"] + args, cwd=dest, stdout=log, stderr=log)

        run_git(["init"])
        run_git(["remote", "add", "origin", MOSES_REPO])
        run_git(["config", "core.sparsecheckout", "true"])
        # Restrict the checkout to the tokenizer paths only.
        with open(os.path.join(dest, ".git", "info", "sparse-checkout"), "w") as out:
            for path in MOSES_SPARSE_CHECKOUT:
                print(path, file=out)
        run_git(["pull", "origin", "master"])
        run_git(["checkout", MOSES_COMMIT])
|
def get_velocity(samplemat, Hz, blinks=None):
    '''Compute velocity of eye-movements.

    samplemat must contain fields 'x' and 'y', specifying the x, y coordinates
    of gaze location. The function assumes that the values in x, y are sampled
    continuously at a rate specified by 'Hz'.
    '''
    Hz = float(Hz)
    # Euclidean step length between consecutive samples.
    step_x = np.diff(samplemat.x)
    step_y = np.diff(samplemat.y)
    distance = (step_x ** 2 + step_y ** 2) ** .5
    # Duplicate the first step so `distance` has one entry per sample.
    distance = np.hstack(([distance[0]], distance))
    if blinks is not None:
        # Mask blink samples so they do not contribute to velocity.
        distance[blinks[1:]] = np.nan
    # Moving-average smoothing over `velocity_window_size` samples (module
    # level constant), then convert from distance/sample to distance/second.
    kernel = np.ones((velocity_window_size)) / float(velocity_window_size)
    velocity = np.convolve(distance, kernel, mode='same')
    velocity = velocity / (velocity_window_size / Hz)
    acceleration = np.diff(velocity) / (1. / Hz)
    acceleration = abs(np.hstack(([acceleration[0]], acceleration)))
    return velocity, acceleration
|
def cursor_query(self, collection, query):
    """Page through an entire result set using Solr cursorMark.

    :param str collection: The name of the collection for the request.
    :param dict query: Dictionary of solr args.

    Will page through the result set in increments using cursorMark until it
    has all items. Sort is required for cursorMark queries; if you don't
    specify it, the default is 'id desc'.

    Returns an iterator of SolrResponse objects. For example::

        >>> for res in solr.cursor_query('SolrClient_unittest', {'q': '*:*'}):
                print(res)
    """
    cursor = '*'
    if 'sort' not in query:
        # cursorMark queries require a deterministic sort.
        query['sort'] = 'id desc'
    while True:
        query['cursorMark'] = cursor
        # Fetch the next page starting at the current cursorMark.
        page = self.query(collection, query)
        if not page.get_results_count():
            self.logger.debug("Got zero Results with cursor: {}".format(cursor))
            return
        cursor = page.get_cursor()
        yield page
|
def waitForDeferred(d, result=None):
    """Block current greenlet for Deferred, waiting until result is not a
    Deferred or a failure is encountered.

    d -- the twisted Deferred to wait on
    result -- optional AsyncResult to deliver the value into; a fresh one is
        created when omitted (it is reused when a Deferred resolves to
        another Deferred, see cb below)
    """
    from twisted.internet import reactor
    # Blocking inside the reactor greenlet would deadlock the event loop.
    assert reactor.greenlet != getcurrent(), "can't invoke this in the reactor greenlet"
    if result is None:
        result = AsyncResult()
    def cb(res):
        # A Deferred may itself resolve to another Deferred; keep waiting on
        # the same AsyncResult until a concrete value arrives.
        if isinstance(res, defer.Deferred):
            waitForDeferred(res, result)
        else:
            result.set(res)
    def eb(res):
        # Propagate the twisted Failure to the blocked greenlet.
        result.set_exception(res)
    d.addCallbacks(cb, eb)
    try:
        return result.get()
    except failure.Failure, ex:
        # Python 2 except syntax. Unwrap the twisted Failure and re-raise the
        # underlying exception with its original traceback.
        ex.raiseException()
|
def get_zoneID(self, headers, zone):
    """Get the zone id for the zone."""
    # Query the API's zone listing filtered by name and take the first
    # match's id. NOTE(review): assumes at least one result is returned —
    # an empty result list would raise IndexError; confirm with callers.
    lookup_url = self.BASE_URL + '?name=' + zone
    response = requests.get(lookup_url, headers=headers)
    return response.json()['result'][0]['id']
|
def transform_search_hit(self, pid, record_hit, links_factory=None, **kwargs):
    """Transform search result hit into an intermediate representation."""
    # Ensure the marshmallow context carries the PID without clobbering a
    # value the caller already supplied.
    context = kwargs.get('marshmallow_context', {})
    context.setdefault('pid', pid)
    preprocessed = self.preprocess_search_hit(
        pid, record_hit, links_factory=links_factory, **kwargs)
    return self.dump(preprocessed, context)
|
def is_entailed_by(self, other):
    """Fewer members = more information (exception is the empty set, which
    means all members of the domain).

    (1) when self is empty and other is not (empty equals containing the
        entire domain)
    (2) when other contains more members than self
    """
    if not self.same_domain(other):
        return False
    if not other.values:
        # `other` stands for the whole domain: it entails self only when
        # self also stands for the whole domain.
        return not self.values
    return not self.values or self.values.issuperset(other.values)
|
def read_bucket(self, bucket_name):
    '''a method to retrieve properties of a bucket in s3

    :param bucket_name: string with name of bucket
    :return: dictionary with details of bucket
    '''
    title = '%s.read_bucket' % self.__class__.__name__

    # validate inputs
    input_fields = {'bucket_name': bucket_name}
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # validate existence of bucket (cached list first, then a live lookup)
    if bucket_name not in self.bucket_list:
        if bucket_name not in self.list_buckets():
            raise ValueError('S3 Bucket "%s" does not exist in aws region %s.' % (bucket_name, self.iam.region_name))

    # create details dictionary with defaults for every reported property
    bucket_details = {
        'bucket_name': bucket_name,
        'access_control': 'private',
        'version_control': False,
        'log_destination': {},
        'lifecycle_rules': [],
        'tag_list': [],
        'notification_settings': [],
        'region_replication': {},
        'access_policy': {}
    }

    # retrieve access control details
    # (bug fix throughout: bare `except:` clauses narrowed to
    # `except Exception:` so SystemExit/KeyboardInterrupt propagate)
    try:
        response = self.connection.get_bucket_acl(Bucket=bucket_name)
        if len(response['Grants']) > 1:
            log_user = 'http://acs.amazonaws.com/groups/s3/LogDelivery'
            log_delivery = False
            public_user = 'http://acs.amazonaws.com/groups/global/AllUsers'
            public_perm = []
            for grant in response['Grants']:
                if 'URI' in grant['Grantee']:
                    if grant['Grantee']['URI'] == log_user:
                        log_delivery = True
                    if grant['Grantee']['URI'] == public_user:
                        public_perm.append(grant['Permission'])
            if public_perm:
                if len(public_perm) > 1:
                    bucket_details['access_control'] = 'public-read-write'
                else:
                    bucket_details['access_control'] = 'public-read'
            elif log_delivery:
                bucket_details['access_control'] = 'log-delivery-write'
            else:
                bucket_details['access_control'] = 'authenticated-read'
    except Exception:
        raise AWSConnectionError(title)

    # retrieve version control details
    try:
        response = self.connection.get_bucket_versioning(Bucket=bucket_name)
        if 'Status' in response.keys():
            if response['Status'] == 'Enabled':
                bucket_details['version_control'] = True
    except Exception:
        raise AWSConnectionError(title)

    # retrieve log destination details
    try:
        response = self.connection.get_bucket_logging(Bucket=bucket_name)
        if 'LoggingEnabled' in response:
            res = response['LoggingEnabled']
            bucket_details['log_destination']['name'] = res['TargetBucket']
            bucket_details['log_destination']['prefix'] = ''
            if 'TargetPrefix' in res.keys():
                bucket_details['log_destination']['prefix'] = res['TargetPrefix']
    except Exception:
        raise AWSConnectionError(title)

    # retrieve lifecycle rules details
    # (best-effort: buckets without a lifecycle configuration raise here)
    try:
        response = self.connection.get_bucket_lifecycle(Bucket=bucket_name)
        for rule in response['Rules']:
            if rule['Status'] == 'Enabled':
                details = {"prefix": rule['Prefix']}
                if 'Transition' in rule.keys():
                    details['longevity'] = rule['Transition']['Days']
                    details['current_version'] = True
                    details['action'] = 'archive'
                elif 'Expiration' in rule.keys():
                    details['longevity'] = rule['Expiration']['Days']
                    details['current_version'] = True
                    details['action'] = 'delete'
                elif 'NoncurrentVersionTransition' in rule.keys():
                    details['longevity'] = rule['NoncurrentVersionTransition']['NoncurrentDays']
                    details['current_version'] = False
                    details['action'] = 'archive'
                elif 'NoncurrentVersionExpiration' in rule.keys():
                    details['longevity'] = rule['NoncurrentVersionExpiration']['NoncurrentDays']
                    details['current_version'] = False
                    details['action'] = 'delete'
                bucket_details['lifecycle_rules'].append(details)
    except Exception:
        pass

    # retrieve bucket tag details
    # (best-effort: untagged buckets raise here)
    try:
        response = self.connection.get_bucket_tagging(Bucket=bucket_name)
        for tag in response['TagSet']:
            bucket_details['tag_list'].append(tag)
    except Exception:
        pass

    # retrieve notification settings details
    try:
        response = self.connection.get_bucket_notification_configuration(Bucket=bucket_name)
        if 'TopicConfigurations' in response.keys():
            for notification in response['TopicConfigurations']:
                details = {'service': 'sns', 'arn': notification['TopicArn'], 'event': notification['Events'][0], 'filters': {}}
                if 'Filter' in notification.keys():
                    for rule in notification['Filter']['Key']['FilterRules']:
                        details['filters'][rule['Name']] = rule['Value']
                bucket_details['notification_settings'].append(details)
        if 'QueueConfigurations' in response.keys():
            for notification in response['QueueConfigurations']:
                details = {'service': 'sqs', 'arn': notification['QueueArn'], 'event': notification['Events'][0], 'filters': {}}
                if 'Filter' in notification.keys():
                    for rule in notification['Filter']['Key']['FilterRules']:
                        details['filters'][rule['Name']] = rule['Value']
                bucket_details['notification_settings'].append(details)
        if 'LambdaFunctionConfigurations' in response.keys():
            for notification in response['LambdaFunctionConfigurations']:
                details = {'service': 'lambda', 'arn': notification['LambdaFunctionArn'], 'event': notification['Events'][0], 'filters': {}}
                if 'Filter' in notification.keys():
                    for rule in notification['Filter']['Key']['FilterRules']:
                        details['filters'][rule['Name']] = rule['Value']
                bucket_details['notification_settings'].append(details)
    except Exception:
        raise AWSConnectionError(title)

    # TODO: retrieve region replication details
    # try:
    #     response = self.connection.get_bucket_replication(Bucket=bucket_name)
    # except Exception:
    #     pass

    # TODO: retrieve access policy details
    # try:
    #     response = self.connection.get_bucket_policy(Bucket=bucket_name)
    # except Exception:
    #     pass

    return self.iam.ingest(bucket_details)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.