signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _load_nested_libraries(self, library_path, target_dict):
    """Recursively collect libraries below ``library_path``.

    Walks the directory tree and records every library found into
    ``target_dict``: a leaf entry is simply the library's filesystem path,
    while intermediate directories become nested, name-sorted dictionaries.

    :param library_path: the path to add all libraries from
    :param target_dict: the target dictionary to store all loaded libraries to
    """
    for entry in os.listdir(library_path):
        _, entry = self.check_clean_path_of_library(library_path, entry)
        entry_path = os.path.join(library_path, entry)
        # Skip plain files and hidden (dot-prefixed) directories
        if not os.path.isdir(entry_path) or entry.startswith('.'):
            continue
        has_statemachine = any(
            os.path.exists(os.path.join(entry_path, sm_file))
            for sm_file in (storage.STATEMACHINE_FILE, storage.STATEMACHINE_FILE_OLD))
        if has_statemachine:
            # A state machine lives here: record the library by its path
            target_dict[entry] = entry_path
        else:
            # Plain folder: descend, then keep sub-entries sorted by name
            target_dict[entry] = {}
            self._load_nested_libraries(entry_path, target_dict[entry])
            target_dict[entry] = OrderedDict(sorted(target_dict[entry].items()))
|
def mousePressEvent(self, event):
    """In Auto-parameter selection mode, a mouse press over an item emits
    `componentSelected`.

    In BuildMode the press is handled by the superclass as usual; in any
    other mode it toggles selection of the component under the cursor.

    :param event: the Qt mouse press event
    """
    if self.mode == BuildMode:
        super(StimulusView, self).mousePressEvent(event)
    else:  # select and de-select components
        index = self.indexAt(event.pos())
        if index.isValid():
            # Toggle the clicked component's membership in the selection
            self.selectionModel().select(index, QtGui.QItemSelectionModel.Toggle)
            comp = self.model().data(index, AbstractDragView.DragRole)
            self.componentSelected.emit(comp)
            # NOTE(review): hint fires only for valid indexes here — confirm
            # it should not also fire on empty-area clicks
            self.hintRequested.emit('Click components to toggle more members of auto-parameter\n\n-or-\n\nEdit fields of auto-parameter (parameter type should be selected first)')
|
def drop_indexes(quiet=True, stdout=None):
    """Discover and drop all database indexes.

    :param quiet: if True (the default), suppress progress output.
        The original ignored this parameter and always wrote to ``stdout``,
        which crashed with AttributeError under the default ``stdout=None``.
    :type quiet: bool
    :param stdout: writable stream for progress messages; defaults to
        ``sys.stdout``
    :return: None
    """
    if stdout is None:
        stdout = sys.stdout
    results, meta = db.cypher_query("CALL db.indexes()")
    # Index descriptions look like "INDEX ON :Label(property)"
    pattern = re.compile(r':(.*)\((.*)\)')
    for index in results:
        db.cypher_query('DROP ' + index[0])
        if not quiet:
            match = pattern.search(index[0])
            stdout.write(' - Dropping index on label {0} with property {1}.\n'.format(
                match.group(1), match.group(2)))
    if not quiet:
        stdout.write("\n")
|
def get_base_url(self, request):
    """Create the base URL string for an OGC request.

    :param request: OGC-type request with specified bounding box, cloud
        coverage for specific product.
    :type request: OgcRequest or GeopediaRequest
    :return: base string for url to Sentinel Hub's OGC service for this product.
    :rtype: str
    """
    url = self.base_url + request.service_type.value
    if hasattr(request, 'data_source'):
        # These lines are temporal and will be removed once the uswest
        # url is no longer required:
        if request.data_source.is_uswest_source():
            url = 'https://services-uswest2.sentinel-hub.com/ogc/{}'.format(request.service_type.value)
        if request.data_source not in DataSource.get_available_sources():
            raise ValueError("{} is not available for service at ogc_base_url={}".format(
                request.data_source, SHConfig().ogc_base_url))
    return url
|
def get_slot(self):
    """Get the slot position of this corner/edge pair.

    Returns a ``(state, (corner_slot, edge_slot), (corner, edge))`` tuple,
    where state is one of SLOTFREE, CSLOTFREE, ESLOTFREE, DIFFSLOT,
    SOLVED or WRONGSLOT.
    """
    canonical = ["FR", "RB", "BL", "LF"]
    mirrored = ["RF", "BR", "LB", "FL"]

    def normalise(slot):
        # Map a mirrored slot name onto its canonical orientation
        if "U" not in slot and slot not in canonical:
            return canonical[mirrored.index(slot)]
        return slot

    corner, edge = self.get_pair()
    corner_slot = normalise(corner.location.replace("D", "", 1))
    edge_slot = normalise(edge.location)
    pair = (corner, edge)
    if "U" in corner_slot and "U" in edge_slot:
        return ("SLOTFREE", (None, None), pair)
    if "U" in corner_slot:
        return ("CSLOTFREE", (None, edge_slot), pair)
    if "U" in edge_slot:
        return ("ESLOTFREE", (corner_slot, None), pair)
    if corner_slot not in [edge_slot, edge_slot[::-1]]:
        return ("DIFFSLOT", (corner_slot, edge_slot), pair)
    if pair == self.estimated_position():
        return ("SOLVED", (corner_slot, edge_slot), pair)
    return ("WRONGSLOT", (corner_slot, edge_slot), pair)
|
def disable(self, ids):
    """Disable pool members by running the disable script.

    :param ids: list of pool-member ids
    :return: None on success
    :raise PoolMemberDoesNotExistException:
    :raise InvalidIdPoolMemberException:
    :raise ScriptDisablePoolException:
    :raise NetworkAPIException:
    """
    # POST the id list to the disable endpoint
    return self.post("api/pools/disable/", {"ids": ids})
|
def time_conflict(self, schedule):
    """Internal use. Determines whether the given time range conflicts
    with the set of excluded time ranges.

    :param schedule: the time range to test (may be nil)
    :return: False when ``schedule`` overlaps one of the excluded time
        ranges, True otherwise (including when ``schedule`` is nil)
    """
    # NOTE(review): despite the name, this returns False on a conflict and
    # True when the schedule is acceptable — confirm against callers.
    if is_nil(schedule):
        return True
    for timerange in self._excluded_times:
        if timerange.conflicts_with(schedule):
            return False
    return True
|
def score(args):
    """%prog score main_results/ cached_data/ contigsfasta

    Score the current LACHESIS CLM.
    """
    p = OptionParser(score.__doc__)
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())
    mdir, cdir, contigsfasta = args
    orderingfiles = natsorted(iglob(mdir, "*.ordering"))
    sizes = Sizes(contigsfasta)
    contig_names = list(sizes.iter_names())
    # Map contig name -> integer id (row/column index in the contact matrix)
    contig_ids = dict((name, i) for (i, name) in enumerate(contig_names))
    oo = []
    # Load contact matrix
    glm = op.join(cdir, "all.GLM")
    N = len(contig_ids)
    M = np.zeros((N, N), dtype=int)
    fp = open(glm)
    for row in fp:
        if row[0] == '#':
            continue
        x, y, z = row.split()
        if x == 'X':
            continue
        M[int(x), int(y)] = int(z)
    fwtour = open("tour", "w")

    def callback(tour, gen, oo):
        # Record each GA generation's best tour (plus its fitness, if known)
        fitness = tour.fitness if hasattr(tour, "fitness") else None
        label = "GA-{0}".format(gen)
        if fitness:
            fitness = "{0}".format(fitness).split(",")[0].replace("(", "")
            label += "-" + fitness
        print_tour(fwtour, tour, label, contig_names, oo)
        return tour

    # NOTE(review): only the first ordering file is scored (break below)
    for ofile in orderingfiles:
        co = ContigOrdering(ofile)
        for x in co:
            contig_id = contig_ids[x.contig_name]
            oo.append(contig_id)
        pf = op.basename(ofile).split(".")[0]
        print(pf)
        print(oo)
        tour, tour_sizes, tour_M = prepare_ec(oo, sizes, M)
        # Store INIT tour
        print_tour(fwtour, tour, "INIT", contig_names, oo)
        # Faster Cython version for evaluation
        from .chic import score_evaluate_M
        callbacki = partial(callback, oo=oo)
        toolbox = GA_setup(tour)
        toolbox.register("evaluate", score_evaluate_M, tour_sizes=tour_sizes, tour_M=tour_M)
        tour, tour.fitness = GA_run(toolbox, npop=100, cpus=opts.cpus, callback=callbacki)
        print(tour, tour.fitness)
        break
    fwtour.close()
|
def _needs_region_update ( out_file , samples ) :
"""Check if we need to update BED file of regions , supporting back compatibility ."""
|
nblock_files = [ x [ "regions" ] [ "nblock" ] for x in samples if "regions" in x ]
# For older approaches and do not create a new set of analysis
# regions , since the new algorithm will re - do all BAM and variant
# steps with new regions
for nblock_file in nblock_files :
test_old = nblock_file . replace ( "-nblocks" , "-analysisblocks" )
if os . path . exists ( test_old ) :
return False
# Check if any of the local files have changed so we need to refresh
for noblock_file in nblock_files :
if not utils . file_uptodate ( out_file , noblock_file ) :
return True
return False
|
def add_sma(self, periods=20, column=None, name='', str=None, **kwargs):
    """Add a Simple Moving Average (SMA) study to QuantFigure.studies.

    Parameters:
        periods : int or list(int)
            Number of periods.
        column : string
            Name of the data column over which the study is applied.
            Default: 'close'.
        name : string
            Name given to the study.
        str : string
            Label factory for studies. Wildcards:
            {name} (column name), {study} (study name), {period} (period used).
            Example: 'study:{study} - period:{period}'
        kwargs :
            legendgroup : bool
                If True, all legend items are grouped into a single one.
            All formatting values available on iplot().
    """
    if not column:
        column = self._d['close']
    # NOTE: 'str' shadows the builtin, but the name is part of the public API
    params = {'periods': periods, 'column': column, 'str': str}
    display = utils.merge_dict({'legendgroup': False}, kwargs)
    self._add_study({'kind': 'sma', 'name': name, 'params': params, 'display': display})
|
def list_channels(current):
    """List channel memberships of the current user.

    .. code-block:: python

        # request:
        'view': '_zops_list_channels',

        # response:
        'channels': [
            {'name': string,   # name of channel
             'key': key,       # key of channel
             'unread': int,    # unread message count
             'type': int,      # channel type:
                               #  15: public channels (chat room / broadcast
                               #      distinction comes from "read_only" flag)
                               #  10: direct channels
                               #   5: one and only private channel, "Notifications"
             'read_only': boolean,
                               # true for a read-only subscription to a broadcast
                               # channel, false for a public chat room
             'actions': [('action name', 'view name'), ]},
        ]
    """
    current.output = {'status': 'OK', 'code': 200, 'channels': []}
    # Only visible subscriptions are listed
    for sbs in current.user.subscriptions.objects.filter(is_visible=True):
        try:
            current.output['channels'].append(sbs.get_channel_listing())
        except ObjectDoesNotExist:
            # FIXME: This should not happen; remove the broken subscription
            # so it does not keep failing on every listing.
            log.exception("UNPAIRED DIRECT EXCHANGES!!!!")
            sbs.delete()
|
def _get_table_width(table_spec):
    """Calculate the width of a table based on its column spec.

    Args:
        table_spec: str — the LaTeX column specification for a table.

    Returns:
        int — the number of columns the specification declares.
    """
    # Strip argument groups such as {\bfseries} so their letters don't count
    spec = re.sub(r'{[^}]*}', '', table_spec)
    # Collapse tabu "X[...]" columns to a single letter so each counts once
    spec = re.sub(r'X\[(.*?(.))\]', r'\2', spec)
    letter_counts = Counter(spec)
    return sum(letter_counts[letter] for letter in COLUMN_LETTERS)
|
def to(self, unit):
    """Convert this velocity to the given AstroPy unit.

    :param unit: an AstroPy unit (e.g. km/s)
    :return: an AstroPy ``Quantity`` expressed in ``unit``
    """
    from astropy.units import au, d
    # Internally the velocity is stored as astronomical units per day
    return (self.au_per_d * au / d).to(unit)
|
def reduce_source_model(smlt_file, source_ids, remove=True):
    """Extract the given sources from the composite source model.

    Rewrites every source-model file referenced by the logic tree so that
    it contains only the sources in ``source_ids`` (a ``.bak`` backup of
    each file is kept); files left with no matching sources are deleted
    when ``remove`` is True.

    :param smlt_file: path of the source-model logic-tree file
    :param source_ids: ids of the sources to keep
    :param remove: if True, delete files that end up fully reduced
    """
    found = 0
    to_remove = []
    for paths in logictree.collect_info(smlt_file).smpaths.values():
        for path in paths:
            logging.info('Reading %s', path)
            root = nrml.read(path)
            model = Node('sourceModel', root[0].attrib)
            origmodel = root[0]
            if root['xmlns'] == 'http://openquake.org/xmlns/nrml/0.4':
                # NRML 0.4: sources are direct children of sourceModel
                for src_node in origmodel:
                    if src_node['id'] in source_ids:
                        model.nodes.append(src_node)
            else:  # nrml/0.5
                # NRML 0.5: sources are nested in source groups, possibly
                # with per-source weights that must be kept in sync
                for src_group in origmodel:
                    sg = copy.copy(src_group)
                    sg.nodes = []
                    weights = src_group.get('srcs_weights')
                    if weights:
                        assert len(weights) == len(src_group.nodes)
                    else:
                        weights = [1] * len(src_group.nodes)
                    src_group['srcs_weights'] = reduced_weigths = []
                    for src_node, weight in zip(src_group, weights):
                        if src_node['id'] in source_ids:
                            found += 1
                            sg.nodes.append(src_node)
                            reduced_weigths.append(weight)
                    if sg.nodes:
                        model.nodes.append(sg)
            # Keep a backup before rewriting the file in place
            shutil.copy(path, path + '.bak')
            if model:
                with open(path, 'wb') as f:
                    nrml.write([model], f, xmlns=root['xmlns'])
            elif remove:  # remove the files completely reduced
                to_remove.append(path)
    if found:
        for path in to_remove:
            os.remove(path)
|
def get_filelikeobject(filename: str = None, blob: bytes = None) -> BinaryIO:
    """Open a file-like object.

    Guard the use of this function with ``with``.

    Args:
        filename: for specifying via a filename
        blob: for specifying via an in-memory ``bytes`` object

    Returns:
        a :class:`BinaryIO` object

    Raises:
        ValueError: if neither or both of ``filename``/``blob`` are given
    """
    # Compare against None (not truthiness) so an empty-but-valid blob such
    # as b'' is accepted rather than rejected as "no blob".
    if filename is None and blob is None:
        raise ValueError("no filename and no blob")
    if filename is not None and blob is not None:
        raise ValueError("specify either filename or blob")
    if filename is not None:
        return open(filename, 'rb')
    return io.BytesIO(blob)
|
def get_network(network_id):
    """Get the network with the given id.

    :param network_id: primary key of the network
    :return: a success response carrying the network's JSON payload, or a
        403 error response when no such network exists
    """
    try:
        net = models.Network.query.filter_by(id=network_id).one()
    except NoResultFound:
        return error_response(error_type="/network GET: no network found", status=403)
    # return the data
    return success_response(field="network", data=net.__json__(), request_type="network get")
|
def from_bigquery(sql):
    """Create a Metrics instance from a BigQuery query or table.

    Args:
        sql: a BigQuery table name or a query.

    Returns:
        a Metrics instance.
    """
    if isinstance(sql, bq.Query):
        sql = sql._expanded_sql()
    parts = sql.split('.')
    # Heuristic: anything that isn't a 1-3 part dotted name without spaces
    # is treated as a query and parenthesized; table names get backquoted.
    looks_like_query = len(parts) == 1 or len(parts) > 3 or any(' ' in x for x in parts)
    sql = '(' + sql + ')' if looks_like_query else '`' + sql + '`'
    return Metrics(bigquery=sql)
|
def has_file_with_suffix(self, suffixes):
    """Find out if there is a file with one of the suffixes in the archive.

    Args:
        suffixes: list of suffixes or a single suffix to look for

    Returns:
        True if there is at least one file with at least one given suffix
        in the archive, False otherwise (or if the archive can't be opened)
    """
    if not isinstance(suffixes, list):
        suffixes = [suffixes]
    if self.handle:
        for member in self.handle.getmembers():
            if os.path.splitext(member.name)[1] in suffixes:
                return True
            else:
                # hack for .zip files, where directories are not returned
                # themselves, therefore we can't find e.g. .egg-info:
                # look for "<suffix>/" inside each member path instead
                for suffix in suffixes:
                    if '{0}/'.format(suffix) in member.name:
                        return True
    return False
|
def scale(self, data, unit):
    """Scale a quantity to obtain a dimensionful quantity.

    Args:
        data (numpy.array): the quantity that should be scaled.
        unit (str): the dimension of data as defined in phyvars.
    Return:
        (numpy.array, str): the scaled data and its unit string.
    Other Parameters:
        conf.scaling.dimensional: if set to False (default), the factor is
            always 1.
    """
    # NOTE(review): returning unscaled data when 'dimensional_units' is ON
    # looks inverted — confirm the intended switch semantics.
    if self.par['switches']['dimensional_units'] or not conf.scaling.dimensional or unit == '1':
        return data, ''
    scaling = phyvars.SCALES[unit](self.scales)
    factor = conf.scaling.factors.get(unit, ' ')
    if conf.scaling.time_in_y and unit == 's':
        # Express times in years instead of seconds
        scaling /= conf.scaling.yearins
        unit = 'yr'
    elif conf.scaling.vel_in_cmpy and unit == 'm/s':
        # Express velocities in centimetres per year
        scaling *= 100 * conf.scaling.yearins
        unit = 'cm/y'
    if factor in phyvars.PREFIXES:
        # Apply the configured metric prefix (k, M, G, ...)
        scaling *= 10 ** (-3 * (phyvars.PREFIXES.index(factor) + 1))
        unit = factor + unit
    return data * scaling, unit
|
def objectprep(self):
    """If the script is being run as part of a pipeline, create and
    populate the per-sample objects for the current analysis."""
    for sample in self.metadata:
        # Attach a fresh GenObject for this analysis type to the sample
        setattr(sample, self.analysistype, GenObject())
        # Set the destination folder
        sample[self.analysistype].outputdir = os.path.join(self.path, self.analysistype)
        # Make the destination folder
        make_path(sample[self.analysistype].outputdir)
        # Path where baited reads matching the targets will be written
        sample[self.analysistype].baitedfastq = os.path.join(
            sample[self.analysistype].outputdir,
            '{at}_targetMatches.fastq.gz'.format(at=self.analysistype))
        # Set the file type for the downstream analysis
        sample[self.analysistype].filetype = self.filetype
        if self.filetype == 'fasta':
            sample[self.analysistype].assemblyfile = sample.general.bestassemblyfile
|
def read_content_types(archive):
    """Yield ``(content_type, part_name)`` pairs from the archive's
    content-types part."""
    xml_source = archive.read(ARC_CONTENT_TYPES)
    tree = fromstring(xml_source)
    # 'override' instead of the original's builtin-shadowing 'type'
    for override in tree.findall('{%s}Override' % CONTYPES_NS):
        yield override.get('ContentType'), override.get('PartName')
|
def clean_up(self):
    """Clean up child and orphaned processes by closing the SSH tunnel
    when it is still open; a closed tunnel needs no action."""
    if self.tunnel.is_open:
        # print() calls (instead of Python-2-only print statements) keep
        # this working on both Python 2 and 3; the redundant `else: pass`
        # branch was dropped.
        print('Closing tunnel...')
        self.tunnel.close()
        print('Done.')
|
def trace_on(full=False):
    """Start tracing of the current thread (and the current thread only).

    :param full: when True install the verbose ``_trace_full`` hook,
        otherwise the lighter ``_trace`` hook.
    """
    tracer = _trace_full if full else _trace
    sys.settrace(tracer)
|
def is_longitudinal(self):
    """Report the longitudinal status of this project.

    Returns:
        boolean: True when the project defines at least one event and at
        least one arm (both numbers and names), False otherwise.
    """
    return bool(self.events) and bool(self.arm_nums) and bool(self.arm_names)
|
def get_scores(self, *args):
    '''Score terms by smoothed KL divergence between the category and
    non-category term distributions.

    In this case, parameters a and b aren't used, since this information
    is taken directly from the corpus categories.

    Returns the per-term KL divergence (in bits), zeroed wherever the
    t-test p-value is not below ``self.min_p_``.
    '''
    def jelinek_mercer_smoothing(cat):
        # Mix the category's term distribution with the corpus-wide one
        p_hat_w = self.tdf_[cat] * 1. / self.tdf_[cat].sum()
        # NOTE(review): lambda appears both here and in the mixture below,
        # effectively weighting the corpus term by lambda**2 — confirm.
        c_hat_w = (self.smoothing_lambda_) * self.tdf_.sum(axis=1) * 1. / self.tdf_.sum().sum()
        return (1 - self.smoothing_lambda_) * p_hat_w + self.smoothing_lambda_ * c_hat_w
    p_w = jelinek_mercer_smoothing('cat')
    q_w = jelinek_mercer_smoothing('ncat')
    # KL divergence converted to log base 2 (bits)
    kl_divergence = p_w * np.log(p_w / q_w) / np.log(2)
    tt, pvals = self.get_t_statistics()
    # Keep only terms whose difference is statistically significant
    return kl_divergence * (pvals < self.min_p_)
|
def replace_namespaced_deployment(self, name, namespace, body, **kwargs):
    """Replace the specified Deployment.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True:
    >>> thread = api.replace_namespaced_deployment(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Deployment (required)
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1Deployment body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should
        not be persisted. Valid values are: - All: all dry run stages will
        be processed
    :param str field_manager: fieldManager is a name associated with the
        actor or entity making these changes; must be under 128 printable
        characters.
    :return: V1Deployment. If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The underlying call returns the request thread for async_req and the
    # deserialized data otherwise, so a single delegation covers both.
    return self.replace_namespaced_deployment_with_http_info(name, namespace, body, **kwargs)
|
def _WriteSerializedAttributeContainerList(self, container_type):
    """Writes a serialized attribute container list to the SQLite store.

    Events are drained from the serialized event heap; every other
    container type is drained from its serialized container list. The
    drained containers are inserted with one batched executemany call.

    Args:
        container_type (str): attribute container type.
    """
    if container_type == self._CONTAINER_TYPE_EVENT:
        if not self._serialized_event_heap.data_size:
            return
        number_of_attribute_containers = (self._serialized_event_heap.number_of_events)
    else:
        container_list = self._GetSerializedAttributeContainerList(container_type)
        if not container_list.data_size:
            return
        number_of_attribute_containers = (container_list.number_of_attribute_containers)
    if self._serializers_profiler:
        self._serializers_profiler.StartTiming('write')
    # Events also carry their timestamp; other containers only the data blob
    if container_type == self._CONTAINER_TYPE_EVENT:
        query = 'INSERT INTO event (_timestamp, _data) VALUES (?, ?)'
    else:
        query = 'INSERT INTO {0:s} (_data) VALUES (?)'.format(container_type)
    # TODO: directly use container_list instead of values_tuple_list.
    values_tuple_list = []
    for _ in range(number_of_attribute_containers):
        if container_type == self._CONTAINER_TYPE_EVENT:
            timestamp, serialized_data = self._serialized_event_heap.PopEvent()
        else:
            serialized_data = container_list.PopAttributeContainer()
        if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
            compressed_data = zlib.compress(serialized_data)
            serialized_data = sqlite3.Binary(compressed_data)
        else:
            # No compression: record zero compressed bytes in the sample below
            compressed_data = ''
        if self._storage_profiler:
            self._storage_profiler.Sample('write', container_type, len(serialized_data), len(compressed_data))
        if container_type == self._CONTAINER_TYPE_EVENT:
            values_tuple_list.append((timestamp, serialized_data))
        else:
            values_tuple_list.append((serialized_data,))
    self._cursor.executemany(query, values_tuple_list)
    if self._serializers_profiler:
        self._serializers_profiler.StopTiming('write')
    # Everything was drained: reset the source heap/list
    if container_type == self._CONTAINER_TYPE_EVENT:
        self._serialized_event_heap.Empty()
    else:
        container_list.Empty()
|
def _decode_buffer(f):
    """Decode a bencoded (byte) string.

    Bencoded strings start with an integer length followed by ':'. Since
    bencoding has no way to mark text vs. raw bytes, UTF-8 decoding is
    attempted and the raw bytes are returned when it fails.
    """
    expected = int(_readuntil(f, _TYPE_SEP))
    payload = f.read(expected)
    if len(payload) != expected:
        raise ValueError('string expected to be {} bytes long but the file ended after {} bytes'.format(expected, len(payload)))
    try:
        return payload.decode()
    except UnicodeDecodeError:
        return payload
|
def like(self, **kwargs):
    """Like the list.

    :param kwargs: Extra request options
    :type kwargs: :class:`~python:dict`

    :return: Boolean to indicate if the request was successful
    :rtype: :class:`~python:bool`
    """
    # Delegate to the interface bound to the "users/*/lists/*" endpoint
    return self._client['users/*/lists/*'].like(self.username, self.id, **kwargs)
|
def strip_prompt(self, a_string):
    """Strip the trailing router prompt from the output.

    :param a_string: raw device output
    :return: output with prompt-bearing lines removed and context items stripped
    """
    kept_lines = [
        line for line in a_string.split(self.RESPONSE_RETURN)
        if self.base_prompt not in line
    ]
    cleaned = self.RESPONSE_RETURN.join(kept_lines)
    return self.strip_context_items(cleaned)
|
def p_assignment(self, p):
    'assignment : ASSIGN lvalue EQUALS rvalue SEMICOLON'
    # PLY grammar action: the docstring above IS the production rule and
    # must not be edited. Build an Assign AST node from lvalue/rvalue.
    p[0] = Assign(p[2], p[4], lineno=p.lineno(1))
    # Propagate the ASSIGN keyword's line number to the result symbol
    p.set_lineno(0, p.lineno(1))
|
def _read_header(self):
    """Process the header information (data model / grid spec)."""
    self._header_pos = self.fp.tell()
    # NOTE(review): self.fp is a custom binary reader — readline() takes a
    # struct-style format ('20s' model name + 2 floats + 2 ints); confirm
    # the reader type before changing this.
    line = self.fp.readline('20sffii')
    modelname, res0, res1, halfpolar, center180 = line
    self._attributes.update({
        "modelname": str(modelname, 'utf-8').strip(),
        "halfpolar": halfpolar,
        "center180": center180,
        "res": (res0, res1),
    })
    # Also expose the header fields directly as attributes
    self.__setattr__('modelname', modelname)
    self.__setattr__('res', (res0, res1))
    self.__setattr__('halfpolar', halfpolar)
    self.__setattr__('center180', center180)
    # Re-wind the file so later readers see the header again
    self.fp.seek(self._header_pos)
|
def choice(*es):
    """Create a PEG matcher that tries each expression in order and
    returns the first successful match.

    :param es: candidate PEG expression functions
    :return: a ``match_choice(s, grm=None, pos=0)`` function
    """
    msg = 'Expected one of: {}'.format(', '.join(map(repr, es)))

    def match_choice(s, grm=None, pos=0):
        failures = []
        for expr in es:
            try:
                return expr(s, grm, pos)
            except PegreError as err:
                failures.append((err.message, err.position))
        # All alternatives failed: report every failure at once
        if failures:
            raise PegreChoiceError(failures, pos)

    return match_choice
|
def _auth_headers(self):
    """Headers required to authenticate a request.

    Assumes this ``Context`` already has an authentication token or
    cookie, either provided explicitly or obtained by logging into the
    Splunk instance.

    :returns: A list of 2-tuples containing key and value
    """
    # Cookies take precedence when present
    if self.has_cookies():
        cookie_header = _make_cookie_header(list(self.get_cookies().items()))
        return [("Cookie", cookie_header)]
    # HTTP Basic auth when a username/password pair was supplied
    if self.basic and (self.username and self.password):
        credentials = ("%s:%s" % (self.username, self.password)).encode('utf-8')
        return [("Authorization", 'Basic %s' % b64encode(credentials).decode('ascii'))]
    # No token at all: send no auth headers
    if self.token is _NoAuthenticationToken:
        return []
    # Ensure the Splunk token carries the proper "Splunk " prefix
    token = self.token if self.token.startswith('Splunk ') else 'Splunk %s' % self.token
    return [("Authorization", token)]
|
def add_item(self, item):
    """Add a response to the batch.

    :param item: the :class:`JsonRpcResponse` to append
    :raises TypeError: when ``item`` is not a ``JsonRpcResponse``
    """
    if not isinstance(item, JsonRpcResponse):
        raise TypeError("Expected JsonRpcResponse but got {} instead".format(type(item).__name__))
    self.items.append(item)
|
def get(self, timeout=None, raise_error=True):
    """Wait for and return the first matching UI element.

    Args:
        timeout (float): timeout for query element, unit seconds.
            Defaults to ``self.timeout`` (10s).
        raise_error (bool): whether to raise error if element not found.

    Returns:
        Element: UI Element

    Raises:
        WDAElementNotFoundError if raise_error is True else None
    """
    start_time = time.time()
    if timeout is None:
        timeout = self.timeout
    # Poll until an element appears or the deadline passes
    while True:
        elems = self.find_elements()
        if len(elems) > 0:
            return elems[0]
        if start_time + timeout < time.time():
            break
        time.sleep(0.01)
    # Check alert again: dismissing a blocking alert via the callback may
    # reveal the element, so retry the full wait once afterwards.
    if self.session.alert.exists and self.http.alert_callback:
        self.http.alert_callback()
        return self.get(timeout, raise_error)
    if raise_error:
        raise WDAElementNotFoundError("element not found")
|
def delete(self, key, cas=0):
    """Delete a key/value from every server. If the key does not exist,
    the delete still reports success.

    :param key: key name to be deleted
    :param cas: CAS value of the key
    :return: True in case of success and False in case of failure
    """
    # Materialize the list so *every* server is asked to delete the key
    # (any() over a generator would stop at the first success).
    outcomes = [server.delete(key, cas) for server in self.servers]
    return any(outcomes)
|
def _validated ( self , data ) :
"""Validate data if any subschema validates it ."""
|
errors = [ ]
for sub in self . schemas :
try :
return sub ( data )
except NotValid as ex :
errors . extend ( ex . args )
raise NotValid ( ' and ' . join ( errors ) )
|
def saveCurrentNetworkToNdex(self, body, verbose=None):
    """Save the current network/collection to NDEx.

    :param body: Properties required to save the current network to NDEx.
    :param verbose: print more
    :returns: 200: successful operation; 404: Current network does not exist
    """
    surl = self.___url
    sv = surl.split('/')[-1]
    # Remove the exact "/<version>" suffix. The previous code used
    # surl.rstrip(sv + '/'), which strips a *character set* from the end
    # and could eat extra trailing characters from the host/path.
    surl = surl[:-(len(sv) + 1)]
    PARAMS = set_param(['body'], [body])
    response = api(url=surl + '/cyndex2/' + sv + '/networks/current',
                   PARAMS=PARAMS, method="POST", verbose=verbose)
    return response
|
def mounts(cls):
    """Return a tuple describing the current mount points.

    :return: tuple of WMountPoint
    """
    with open(cls.__mounts_file__) as mounts_file:
        # One WMountPoint per line of the mounts file
        return tuple(WMountPoint(record) for record in mounts_file)
|
def gen_lt(self):
    """Generate a new LoginTicket and append it to the session's list of
    valid LTs, keeping only the 100 most recent."""
    tickets = self.request.session.get('lt', []) + [utils.gen_lt()]
    # Slicing is a no-op when there are 100 tickets or fewer
    self.request.session['lt'] = tickets[-100:]
|
def get_file_list(self):
    """Retrieve absolute paths to all files in this data source.

    Returns:
        List[str]: list of absolute paths; when ``root_path`` is not a
        directory, it is returned as the single entry.
    """
    root = self.root_path
    if not os.path.isdir(root):
        return [root]
    candidates = (os.path.join(root, name) for name in os.listdir(root))
    # Keep regular files only (directories are skipped)
    return [path for path in candidates if os.path.isfile(path)]
|
def write(self, path, wrap_ttl=None, **kwargs):
    """POST /<path>

    :param path: path to write to (appended to ``/v1/``)
    :type path: str
    :param wrap_ttl: optional response-wrapping TTL
    :type wrap_ttl: str | int
    :param kwargs: key/value pairs sent as the JSON payload
    :type kwargs: dict
    :return: the decoded JSON response for HTTP 200, otherwise None
    :rtype: dict | None
    """
    url = '/v1/{0}'.format(path)
    response = self._adapter.post(url, json=kwargs, wrap_ttl=wrap_ttl)
    # Only a 200 carries a body worth decoding
    if response.status_code != 200:
        return None
    return response.json()
|
def fit(self, X, y=None, init=None):
    """Compute the position of the points in the embedding space.

    Parameters:
        X : array, shape=[n_samples, n_features], or
            [n_samples, n_samples] if dissimilarity='precomputed'.
            Input data.
        init : {None or ndarray, shape (n_samples,)}, optional
            If None, randomly chooses the initial configuration;
            if ndarray, initialize the SMACOF algorithm with this array.

    Returns:
        self : the fitted estimator.
    """
    # All the work happens in fit_transform; its return value is discarded
    self.fit_transform(X, init=init)
    return self
|
def add_tsig(self, keyname, secret, fudge, id, tsig_error, other_data,
             request_mac, algorithm=dns.tsig.default_algorithm):
    """Add a TSIG signature to the message.

    @param keyname: the TSIG key name
    @type keyname: dns.name.Name object
    @param secret: the secret to use
    @type secret: string
    @param fudge: TSIG time fudge
    @type fudge: int
    @param id: the message id to encode in the tsig signature
    @type id: int
    @param tsig_error: TSIG error code; default is 0.
    @type tsig_error: int
    @param other_data: TSIG other data.
    @type other_data: string
    @param request_mac: This message is a response to the request which
        had the specified MAC.
    @type request_mac: string
    @param algorithm: the TSIG algorithm to use
    @type algorithm: dns.name.Name object
    """
    self._set_section(ADDITIONAL)
    before = self.output.tell()
    # Sign everything rendered so far
    s = self.output.getvalue()
    (tsig_rdata, self.mac, ctx) = dns.tsig.sign(s, keyname, secret,
                                                int(time.time()), fudge, id,
                                                tsig_error, other_data,
                                                request_mac,
                                                algorithm=algorithm)
    keyname.to_wire(self.output, self.compress, self.origin)
    # TYPE=TSIG, CLASS=ANY, TTL=0, RDLENGTH placeholder (patched below)
    self.output.write(struct.pack('!HHIH', dns.rdatatype.TSIG, dns.rdataclass.ANY, 0, 0))
    rdata_start = self.output.tell()
    self.output.write(tsig_rdata)
    after = self.output.tell()
    assert after - rdata_start < 65536
    if after >= self.max_size:
        # Undo the partial record if the message would exceed max_size
        self._rollback(before)
        raise dns.exception.TooBig
    # Patch the real RDLENGTH now that the rdata size is known
    self.output.seek(rdata_start - 2)
    self.output.write(struct.pack('!H', after - rdata_start))
    self.counts[ADDITIONAL] += 1
    # Update ARCOUNT in the fixed header (offset 10), then return to the end
    self.output.seek(10)
    self.output.write(struct.pack('!H', self.counts[ADDITIONAL]))
    self.output.seek(0, 2)
|
def execute_java(classpath, main, jvm_options=None, args=None, executor=None,
                 workunit_factory=None, workunit_name=None, workunit_labels=None,
                 cwd=None, workunit_log_config=None, distribution=None,
                 create_synthetic_jar=True, synthetic_jar_dir=None, stdin=None):
    """Executes the java program defined by the classpath and main.

    If `workunit_factory` is supplied, does so in the context of a workunit.

    :param list classpath: the classpath for the java program
    :param string main: the fully qualified class name of the java program's entry point
    :param list jvm_options: an optional sequence of options for the underlying jvm
    :param list args: an optional sequence of args to pass to the java program
    :param executor: an optional java executor to use to launch the program; defaults to
        a subprocess spawn of the default java distribution
    :param workunit_factory: an optional callable that can produce a workunit context
    :param string workunit_name: an optional name for the work unit; defaults to the main
    :param list workunit_labels: an optional sequence of labels for the work unit
    :param string cwd: optionally set the working directory
    :param WorkUnit.LogConfig workunit_log_config: an optional tuple of options affecting reporting
    :param bool create_synthetic_jar: whether to create a synthetic jar that includes the
        original classpath in its manifest
    :param string synthetic_jar_dir: an optional directory to store the synthetic jar; if
        `None` a temporary directory will be provided and cleaned up upon process exit
    :param file stdin: the stdin handle to use; by default None, meaning stdin is not
        propagated into the process

    Returns the exit code of the java program.
    Raises `pants.java.Executor.Error` if there was a problem launching java itself.
    """
    runner = _get_runner(classpath, main, jvm_options, args, executor, cwd,
                         distribution, create_synthetic_jar, synthetic_jar_dir)
    # The work unit defaults to the entry-point class name
    return execute_runner(runner,
                          workunit_factory=workunit_factory,
                          workunit_name=workunit_name or main,
                          workunit_labels=workunit_labels,
                          workunit_log_config=workunit_log_config,
                          stdin=stdin)
|
def get_compound_ids(self):
    """Extract the compound ids currently stored in the database.

    Appends any inchikey ids from the ``metab_compound`` table that are not
    already present in ``self.compound_ids``.
    """
    cur = self.conn.cursor()
    cur.execute('SELECT inchikey_id FROM metab_compound')
    self.conn.commit()
    known = self.compound_ids
    for (inchikey_id,) in cur:
        if inchikey_id not in known:
            known.append(inchikey_id)
|
def create_user(self, user_id, roles=None, netmask=None, secret=None, pubkey=None):
    """Create a user for the merchant given in the X-Mcash-Merchant header.

    :param user_id: identifier for the user
    :param roles: role
    :param netmask: limit user connections by netmask, e.g. 192.168.1.0/24
    :param secret: secret used when authenticating with mCASH
    :param pubkey: RSA key used for authenticating by signing
    :return: the decoded JSON response of the POST request
    """
    payload = {
        'id': user_id,
        'roles': roles,
        'netmask': netmask,
        'secret': secret,
        'pubkey': pubkey,
    }
    response = self.do_req('POST', self.merchant_api_base_url + '/user/', payload)
    return response.json()
|
def auto_set_dir(action=None, name=None):
    """Set the log directory to "./train_log/{scriptname}:{name}" via
    :func:`logger.set_logger_dir`, where "scriptname" is the name of the
    main python file currently running."""
    script = os.path.basename(sys.modules['__main__'].__file__)
    # NOTE(review): rfind returns -1 when there is no '.', which would chop
    # the last character — preserved as-is from the original behavior.
    dirname = os.path.join('train_log', script[:script.rfind('.')])
    if name:
        # ':' is not a legal path character on Windows
        sep = '_' if os.name == 'nt' else ':'
        dirname += sep + name
    set_logger_dir(dirname, action=action)
|
def variablename(var):
    """Return the upper-cased name of the first global bound to *var*.

    Identity (``is``) comparison is used, so only the exact same object
    matches.

    :param var: object to look up in this module's globals
    :return: the matching global name, upper-cased
    :raises IndexError: if no global variable is bound to *var*
    """
    # A plain comprehension replaces the Python-2-only ``itertools.ifilter``
    # so this also works on Python 3.
    matches = [name for name, value in globals().items() if value is var]
    return matches[0].upper()
|
def _R2deriv ( self , R , z , phi = 0. , t = 0. ) :
"""NAME :
_ Rforce
PURPOSE :
evaluate the second radial derivative for this potential
INPUT :
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT :
the second radial derivative
HISTORY :
2016-05-13 - Written - Aladdin"""
|
return self . _denom ( R , z ) ** - 1.5 - 3. * R ** 2 * self . _denom ( R , z ) ** - 2.5
|
def save(self, fname=None, link_copy=False, raiseError=False):
    """Persist this object to *fname* (defaults to ``self.filename``).

    link_copy: only works in hdf5 format — saves space by creating links
    when identical arrays are found. It may slow saving 3-4x but avoids
    duplicating shared arrays across datasets.
    """
    target = self.filename if fname is None else fname
    assert target is not None
    # Delegate to the module-level save() function.
    save(target, self, link_copy=link_copy, raiseError=raiseError)
|
def jacobian(self, maps):
    """Return the Jacobian for transforming (mchirp, eta) to (mass1, mass2)."""
    mc = maps[parameters.mchirp]
    sym_mass_ratio = maps[parameters.eta]
    mass1 = conversions.mass1_from_mchirp_eta(mc, sym_mass_ratio)
    mass2 = conversions.mass2_from_mchirp_eta(mc, sym_mass_ratio)
    return mc * (mass1 - mass2) / (mass1 + mass2) ** 3
|
def add(self, song):
    """Append a song to the end of the playlist (no-op if already present)."""
    if song not in self._songs:
        self._songs.append(song)
        logger.debug('Add %s to player playlist', song)
|
def is_valid(self, wordid) -> bool:
    """Ensure </s> is only generated once the hypothesis is completed.

    :param wordid: The word id to validate.
    :return: True if all constraints are met, the word id is not the EOS id,
        or EOS is allowed and exactly one constraint remains.
    """
    if self.finished():
        return True
    if wordid != self.eos_id:
        return True
    return self.num_needed() == 1 and self.eos_id in self.allowed()
|
def send(self, request):
    """Send a request to the server and block until its response arrives.

    Args:
        request (Request): request object pushed to the server.

    Returns:
        Response: the unpickled response from the server.
    """
    conn = self._connection.connection
    conn.rpush(self._request_key, pickle.dumps(request))
    response_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)
    # Poll the response key until the server publishes a result.
    while True:
        if self._connection.polling_time > 0.0:
            sleep(self._connection.polling_time)
        payload = conn.get(response_key)
        if payload is not None:
            conn.delete(response_key)
            return pickle.loads(payload)
|
def stats(self):
    '''Return the final high level stats from the Ansible run, or None if
    no ``playbook_on_stats`` event was recorded.

    Example:
        {'dark': {}, 'failures': {}, 'skipped': {}, 'ok': {u'localhost': 2}, 'processed': {u'localhost': 1}}
    '''
    stats_events = [e for e in self.events if 'event' in e and e['event'] == 'playbook_on_stats']
    if not stats_events:
        return None
    data = stats_events[0]['event_data']
    return dict(
        skipped=data['skipped'],
        ok=data['ok'],
        dark=data['dark'],
        failures=data['failures'],
        processed=data['processed'],
    )
|
def get_url(self, action, obj=None, domain=True):
    """Return an RFC3987 IRI for a HTML representation of (obj, action).

    If *domain* is true, the current site's domain is prepended.
    """
    if not obj:
        url = reverse('actstream_detail', None, (action.pk,))
    elif hasattr(obj, 'get_absolute_url'):
        url = obj.get_absolute_url()
    else:
        content_type = ContentType.objects.get_for_model(obj)
        url = reverse('actstream_actor', None, (content_type.pk, obj.pk))
    if not domain:
        return url
    return add_domain(Site.objects.get_current().domain, url)
|
def from_json(cls, data):
    """Create an analysis period from a dictionary.

    Args:
        data: dict with optional keys st_month, st_day, st_hour, end_month,
            end_day, end_hour, timestep, is_leap_year. Missing keys are
            treated as None so the constructor's own defaults apply
            (st_month=1, st_day=1, st_hour=0, end_month=12, end_day=31,
            end_hour=23 per the original docs).
    """
    keys = ('st_month', 'st_day', 'st_hour', 'end_month', 'end_day',
            'end_hour', 'timestep', 'is_leap_year')
    # dict.get avoids mutating the caller's dictionary; the previous
    # implementation inserted a None entry for every missing key.
    return cls(*(data.get(key) for key in keys))
|
def read_json(self, params=None):
    """Fetch the current entity and return its decoded JSON body.

    Calls :meth:`read_raw`, checks the response status code, then decodes
    the JSON payload.

    :return: dict with the server's response, all JSON decoded.
    :raises requests.exceptions.HTTPError: on an HTTP 4XX or 5XX response.
    :raises ValueError: if the response JSON cannot be decoded.
    """
    raw_response = self.read_raw(params=params)
    raw_response.raise_for_status()
    return raw_response.json()
|
def select_catalogue_events(self, id0):
    '''Order the events in the catalogue according to an indexing vector.

    :param numpy.ndarray id0:
        Pointer array indicating the locations of selected events
    '''
    for key, values in self.data.items():
        if isinstance(values, np.ndarray) and len(values) > 0:
            # numpy arrays support fancy indexing directly
            self.data[key] = values[id0]
        elif isinstance(values, list) and len(values) > 0:
            # plain lists are re-built element by element
            self.data[key] = [values[i] for i in id0]
|
def add_image_info_cb(self, gshell, channel, iminfo):
    """Add a history entry for a newly added image.

    Images without a modification time are not tracked and are skipped.
    """
    if iminfo.time_modified is None:
        # Not an image we are interested in tracking
        return
    self.add_entry(channel.name, iminfo)
|
def create_parser():
    """Create a commandline parser for epubcheck.

    :return ArgumentParser:
    """
    arg_parser = ArgumentParser(prog='epubcheck', description="EpubCheck v%s - Validate your ebooks" % __version__)
    # Positional argument: path to validate (defaults to the cwd).
    arg_parser.add_argument('path', nargs='?', default=getcwd(), help="Path to EPUB-file or folder for batch validation. " "The current directory will be processed if this argument " "is not specified.")
    # Report options.
    arg_parser.add_argument('-x', '--xls', nargs='?', type=FileType(mode='wb'), const='epubcheck_report.xls', help='Create a detailed Excel report.')
    arg_parser.add_argument('-c', '--csv', nargs='?', type=FileType(mode='wb'), const='epubcheck_report.csv', help='Create a CSV report.')
    arg_parser.add_argument('-r', '--recursive', action='store_true', help='Recurse into subfolders.')
    return arg_parser
|
def get_unique_id(element):
    """Return an id for *element* that is not yet in the global ``ids`` list.

    Keeps asking ``make_id`` for new candidates until an unused one appears,
    records it in ``ids`` and returns it.
    """
    candidate = make_id(element)
    while candidate in ids:
        candidate = make_id(element)
    ids.append(candidate)
    return ids[-1]
|
def search_cloud_integration_entities(self, **kwargs):  # noqa: E501
    """Search over a customer's non-deleted cloud integrations.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.search_cloud_integration_entities(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param SortableSearchRequest body:
    :return: ResponseContainerPagedCloudIntegration
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the *_with_http_info variant;
    # with async_req set it returns the request thread instead of the data.
    return self.search_cloud_integration_entities_with_http_info(**kwargs)  # noqa: E501
|
def join(self, right_table=None, fields=None, condition=None, join_type='JOIN', schema=None, left_table=None, extract_fields=True, prefix_fields=False, field_prefix=None, allow_duplicates=False):
    """Join a table to another table based on a condition and add fields from
    the joined table to the returned fields.

    :type right_table: str or dict or :class:`Table <querybuilder.tables.Table>`
    :param right_table: The table being joined with. A table-name string, a
        dict of {'alias': table}, or a ``Table`` instance.
    :type fields: str or tuple or list or :class:`Field <querybuilder.fields.Field>`
    :param fields: The fields to select from ``right_table``. Defaults to
        ``None``. A single field, a tuple, or a list; each field may be a
        string or ``Field`` instance.
    :type condition: str
    :param condition: The join condition specifying the fields being joined.
        If both tables are ``ModelTable`` instances the condition should be
        created automatically.
    :type join_type: str
    :param join_type: The type of join (JOIN, LEFT JOIN, INNER JOIN, etc).
        Defaults to 'JOIN'.
    :type schema: str
    :param schema: Not implemented; will be the db schema name.
    :type left_table: str or dict or Table
    :param left_table: The left table being joined with. Defaults to the
        first table in the query.
    :type extract_fields: bool
    :param extract_fields: If True and joining with a ``ModelTable``, '*'
        fields are expanded to individual fields per column. Defaults to True.
    :type prefix_fields: bool
    :param prefix_fields: If True, each joined field name is prefixed with
        ``field_prefix`` (or a generated prefix based on the join field name).
        Usually used to nest results into models. Defaults to True.
    :type field_prefix: str
    :param field_prefix: The prefix placed before each field name when
        ``prefix_fields`` is set; auto-generated when omitted.
    :rtype: :class:`Query <querybuilder.query.Query>`
    :return: self
    """
    # self.mark_dirty()
    # TODO: fix bug when joining from simple table to model table with no condition
    # it assumes left_table.model
    # if there is no left table, assume the query's first table
    # TODO: add test for auto left table to replace old auto left table
    # if left_table is None and len(self.tables):
    #     left_table = self.tables[0]
    # left_table = TableFactory(left_table)
    # right_table = TableFactory(right_table)
    # create the join item
    new_join_item = Join(left_table=left_table, right_table=right_table, fields=fields, condition=condition, join_type=join_type, schema=schema, owner=self, extract_fields=extract_fields, prefix_fields=prefix_fields, field_prefix=field_prefix, )
    # check if this table is already joined upon
    # TODO: add test for this
    if allow_duplicates is False:
        # Skip the append when an identical (left, right) identifier pair
        # is already present among the query's joins.
        for join_item in self.joins:
            if join_item.right_table.get_identifier() == new_join_item.right_table.get_identifier():
                if join_item.left_table.get_identifier() == new_join_item.left_table.get_identifier():
                    return self
    self.joins.append(new_join_item)
    return self
|
def by_id(self, region, encrypted_summoner_id):
    """Get a summoner by summoner ID.

    :param string region: the region to execute this request on
    :param string encrypted_summoner_id: Summoner ID
    :returns: SummonerDTO: represents a summoner
    """
    endpoint, params = SummonerApiV4Urls.by_id(region=region, encrypted_summoner_id=encrypted_summoner_id)
    return self._raw_request(self.by_id.__name__, region, endpoint, params)
|
def _create_regex_pattern_add_optional_spaces_to_word_characters ( word ) :
r"""Add the regex special characters ( \ s * ) to allow optional spaces .
: param word : ( string ) the word to be inserted into a regex pattern .
: return : ( string ) the regex pattern for that word with optional spaces
between all of its characters ."""
|
new_word = u""
for ch in word :
if ch . isspace ( ) :
new_word += ch
else :
new_word += ch + r'\s*'
return new_word
|
def copy_results(self, copy_to_dir, rename_model_to=None, force_rerun=False):
    """Copy the raw information from I-TASSER modeling to a new folder.

    Copies all files listed in ``_attrs_to_copy`` and updates the
    corresponding attributes to point at the copies.

    Args:
        copy_to_dir (str): Directory to copy the minimal set of results per sequence.
        rename_model_to (str): New file name (without extension).
        force_rerun (bool): If existing models and results should be overwritten.
    """
    # Save path to the structure and copy it if specified
    if not rename_model_to:
        rename_model_to = self.model_to_use
    new_model_path = op.join(copy_to_dir, '{}.pdb'.format(rename_model_to))
    if self.structure_path:
        if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_model_path):
            # Clean and save it
            custom_clean = CleanPDB()
            my_pdb = StructureIO(self.structure_path)
            new_model_path = my_pdb.write_pdb(custom_selection=custom_clean, custom_name=rename_model_to, out_dir=copy_to_dir, force_rerun=force_rerun)
        # Update the structure_path to be the new clean file
        self.load_structure_path(structure_path=new_model_path, file_type='pdb')
    # Other modeling results - store in a new folder
    dest_itasser_dir = op.join(copy_to_dir, '{}_itasser'.format(rename_model_to))
    if not op.exists(dest_itasser_dir):
        os.mkdir(dest_itasser_dir)
    for attr in self._attrs_to_copy:
        old_file_path = getattr(self, attr)
        new_file_path = op.join(dest_itasser_dir, op.basename(old_file_path))
        if ssbio.utils.force_rerun(flag=force_rerun, outfile=new_file_path):
            shutil.copy2(old_file_path, new_file_path)
            log.debug('{}: copied from {}'.format(new_file_path, old_file_path))
        else:
            log.debug('{}: file already exists'.format(new_file_path))
        # Re-point the attribute at the copied location.
        setattr(self, attr, new_file_path)
|
def get_referencer(registry):
    """Get the referencer class.

    :rtype: pyramid_urireferencer.referencer.AbstractReferencer
    """
    # The argument might be a config or request object, which wraps the
    # actual registry in a ``registry`` attribute; fall back to the
    # argument itself otherwise.
    actual_registry = getattr(registry, 'registry', None)
    if actual_registry is None:
        actual_registry = registry
    return actual_registry.queryUtility(IReferencer)
|
def _fix_permissions(self):
    """Fix permission and ownership of container volumes.

    Because docker runs as root we need to fix permission and ownership to
    allow the user to interact with the files from their filesystem and do
    operations like file delete.
    """
    state = yield from self._get_container_state()
    if state == "stopped" or state == "exited":
        # We need to restart it to fix permissions
        yield from self.manager.query("POST", "containers/{}/start".format(self._cid))
    for volume in self._volumes:
        log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(name=self._name, image=self._image, path=volume))
        # Record each file's original mode/uid/gid into .gns3_perms, then
        # make the tree readable and owned by the host user so the host
        # can manipulate the files.
        process = yield from asyncio.subprocess.create_subprocess_exec("docker", "exec", self._cid, "/gns3/bin/busybox", "sh", "-c", "(" "/gns3/bin/busybox find \"{path}\" -depth -print0" " | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c '%a:%u:%g:%n' > \"{path}/.gns3_perms\"" ")" " && /gns3/bin/busybox chmod -R u+rX \"{path}\"" " && /gns3/bin/busybox chown {uid}:{gid} -R \"{path}\"".format(uid=os.getuid(), gid=os.getgid(), path=volume), )
        yield from process.wait()
|
async def persist_event(topic, event, pool):
    """Track an event to prevent duplication of work and potential loss.

    Serializes *event* to JSON and inserts it (with a UTC timestamp) into a
    per-topic table, creating the table on first use.

    :param topic: The event topic (used as the table name)
    :param event: The event object; its ``__dict__`` must be JSON-serializable
    :param pool: connection pool providing ``acquire``/``release``
    """
    # Event to json
    json_event = json.dumps(event.__dict__)
    # Connect to database or create and connect if non existent
    conn = await pool.acquire()
    # Insert event if not processed
    try:
        query = """
 CREATE TABLE IF NOT EXISTS public."topic_placeholder"
 (
 id SERIAL PRIMARY KEY,
 event json NOT NULL,
 issued_at timestamp without time zone NOT NULL
 )
 WITH (
 OIDS=FALSE
 );
 ALTER TABLE public."topic_placeholder"
 OWNER TO root;
 """
        # NOTE(review): the topic is interpolated directly into SQL as an
        # identifier (both here and in the INSERT below); if topics can come
        # from untrusted input this is a SQL-injection risk — confirm topics
        # are internally controlled.
        query = query.replace('topic_placeholder', topic)
        await conn.execute(query)
        issued_at = datetime.utcnow()
        query = 'INSERT INTO "%s" (event, issued_at) VALUES ($1, $2)' % topic
        await conn.execute(query, json_event, issued_at)
    finally:
        # Always return the connection to the pool, even on failure.
        await pool.release(conn)
|
def get_formatter_report(f: logging.Formatter) -> Optional[Dict[str, str]]:
    """Return information on a log formatter as a dictionary (for debugging).

    Returns ``None`` when given ``None``.
    """
    if f is None:
        return None
    return {
        '_fmt': f._fmt,
        'datefmt': f.datefmt,
        '_style': str(f._style),
    }
|
def make_nfs_path(path):
    """Make a nfs version of a file path.

    This just puts /nfs at the beginning instead of /gpfs. Relative paths
    are made absolute first; paths shorter than 6 characters (which cannot
    start with '/gpfs/') are returned unchanged.
    """
    if os.path.isabs(path):
        fullpath = path
    else:
        fullpath = os.path.abspath(path)
    if len(fullpath) < 6:
        return fullpath
    if fullpath.startswith('/gpfs/'):
        # Rewrite only the leading mount prefix; str.replace would also
        # rewrite any later '/gpfs/' occurrences inside the path.
        fullpath = '/nfs/' + fullpath[len('/gpfs/'):]
    return fullpath
|
def _read_miraligner(fn):
    """Read output of miraligner and create compatible output.

    Parses the tab-separated miraligner file into a dict mapping query name
    to a ``realign`` record carrying the read sequence and one ``isomir``
    per alignment line.

    NOTE(review): uses ``in_handle.next()`` to skip the header line, which is
    Python 2 only — would need ``next(in_handle)`` on Python 3.
    """
    reads = defaultdict(realign)
    with open(fn) as in_handle:
        # Skip the header line.
        in_handle.next()
        for line in in_handle:
            cols = line.strip().split("\t")
            iso = isomir()
            query_name, seq = cols[1], cols[0]
            chrom, reference_start = cols[-2], cols[3]
            iso.mirna = cols[3]
            subs, add, iso.t5, iso.t3 = cols[6:10]
            if query_name not in reads:
                reads[query_name].sequence = seq
            iso.align = line
            iso.start = reference_start
            iso.subs, iso.add = _parse_mut(subs), add
            logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add))
            reads[query_name].set_precursor(chrom, iso)
    return reads
|
def fetch_by_client_id(self, client_id):
    """Retrieve a client by its identifier.

    :param client_id: The identifier of a client.
    :return: An instance of :class:`oauth2.datatype.Client`.
    :raises: :class:`oauth2.error.ClientError` if no client could be
        retrieved.
    """
    client_data = self.fetchone(self.fetch_client_query, client_id)
    if client_data is None:
        raise ClientNotFoundError

    def _first_columns(query):
        # Fetch all rows for this client and keep the first column of each;
        # None (not an empty list) when nothing was found.
        rows = self.fetchall(query, client_data[0])
        if rows:
            return [row[0] for row in rows]
        return None

    grants = _first_columns(self.fetch_grants_query)
    redirect_uris = _first_columns(self.fetch_redirect_uris_query)
    response_types = _first_columns(self.fetch_response_types_query)
    return Client(identifier=client_data[1], secret=client_data[2], authorized_grants=grants, authorized_response_types=response_types, redirect_uris=redirect_uris)
|
def Element(self, elem, **params):
    """Apply the transformation to a deep copy of *elem*, so the input
    element stays immutable; return the first resulting element, or None
    when the transformation produced nothing."""
    transformed = self.__call__(deepcopy(elem), **params)
    return transformed[0] if len(transformed) > 0 else None
|
async def load_cache(self, archive: bool = False) -> int:
    """Load caches and archive enough to go offline and be able to verify
    proof on content marked of interest in configuration.

    Return timestamp (epoch seconds) of cache load event, also used as
    subdirectory for cache archives.

    :param archive: whether to archive caches to disk
    :return: cache load event timestamp (epoch seconds)
    """
    LOGGER.debug('Verifier.load_cache >>> archive: %s', archive)
    rv = int(time())
    # Warm the schema cache for every schema id marked for archiving.
    for s_id in self.cfg.get('archive-on-close', {}).get('schema_id', {}):
        with SCHEMA_CACHE.lock:
            await self.get_schema(s_id)
    # Warm the credential-definition cache.
    for cd_id in self.cfg.get('archive-on-close', {}).get('cred_def_id', {}):
        with CRED_DEF_CACHE.lock:
            await self.get_cred_def(cd_id)
    # Warm the revocation-registry cache and refresh each registry state.
    for rr_id in self.cfg.get('archive-on-close', {}).get('rev_reg_id', {}):
        await self._get_rev_reg_def(rr_id)
        with REVO_CACHE.lock:
            revo_cache_entry = REVO_CACHE.get(rr_id, None)
            if revo_cache_entry:
                try:
                    await revo_cache_entry.get_state_json(self._build_rr_state_json, rv, rv)
                except ClosedPool:
                    # Offline from the pool: log and keep the stale state.
                    LOGGER.warning('Verifier %s is offline from pool %s, cannot update revo cache reg state for %s to %s', self.wallet.name, self.pool.name, rr_id, rv)
    if archive:
        Caches.archive(self.dir_cache)
    LOGGER.debug('Verifier.load_cache <<< %s', rv)
    return rv
|
def listdir_nohidden(path):
    """Yield the non-hidden entries (no leading '.') of *path* as text.

    :param path: directory to list
    :yields: entry names as text strings; byte names are decoded as UTF-8
    """
    for entry in os.listdir(path):
        if isinstance(entry, bytes):
            # Python 2's listdir (and Python 3 with a bytes path) returns
            # byte strings; decode so callers always receive text. The
            # original used the Python-2-only ``unicode`` builtin here.
            entry = entry.decode("utf-8")
        if not entry.startswith('.'):
            yield entry
|
def on_enter_specimen(self, event):
    """Upon enter on the specimen box, make that specimen the current
    specimen; reject (and restore the previous value) if the typed name has
    no measurement data."""
    new_specimen = self.specimens_box.GetValue()
    if new_specimen not in self.specimens:
        self.user_warning("%s is not a valid specimen with measurement data, aborting" % (new_specimen))
        # Restore the previously selected specimen in the combo box.
        self.specimens_box.SetValue(self.s)
        return
    self.select_specimen(new_specimen)
    # Keep the interpretation editor (if open) in sync with the new fit.
    if self.ie_open:
        self.ie.change_selected(self.current_fit)
    self.update_selection()
|
def coarse_grain(self, user_sets):
    r"""Coarse-grains the flux onto user-defined sets.

    Parameters
    ----------
    user_sets : list of int-iterables
        sets of states that shall be distinguished in the coarse-grained flux.

    Returns
    -------
    (sets, tpt) : (list of int-iterables, tpt-object)
        sets contains the sets tpt is computed on. The tpt states of the new
        tpt object correspond to these sets of states in this order. Sets
        might be identical, if the user has already provided a complete
        partition that respects the boundary between A, B and the
        intermediates. If not, sets will have more members than provided by
        the user, containing the "remainder" states and reflecting the
        splitting at the A and B boundaries. tpt contains a new tpt object
        for the coarse-grained flux. All its quantities (gross_flux,
        net_flux, A, B, committor, backward_committor) are coarse-grained
        to sets.

    Notes
    -----
    All user-specified sets will be split (if necessary) to preserve the
    boundary between A, B and the intermediate states.
    """
    # coarse-grain sets
    (tpt_sets, Aindexes, Bindexes) = self._compute_coarse_sets(user_sets)
    nnew = len(tpt_sets)
    # coarse-grain flux. TODO(review): here we should branch between sparse
    # and dense implementations, but currently there is only one path.
    F_coarse = tptapi.coarsegrain(self._gross_flux, tpt_sets)
    Fnet_coarse = tptapi.to_netflux(F_coarse)
    # coarse-grain stationary probability and committors - this can be done all dense
    pstat_coarse = np.zeros((nnew))
    forward_committor_coarse = np.zeros((nnew))
    backward_committor_coarse = np.zeros((nnew))
    for i in range(0, nnew):
        I = list(tpt_sets[i])
        muI = self._mu[I]
        pstat_coarse[i] = np.sum(muI)
        # normalized stationary probability over I
        partialI = muI / pstat_coarse[i]
        forward_committor_coarse[i] = np.dot(partialI, self._qplus[I])
        backward_committor_coarse[i] = np.dot(partialI, self._qminus[I])
    res = ReactiveFlux(Aindexes, Bindexes, Fnet_coarse, mu=pstat_coarse, qminus=backward_committor_coarse, qplus=forward_committor_coarse, gross_flux=F_coarse)
    return (tpt_sets, res)
|
def show(self, wildcard='*'):
    '''Print all parameters whose name matches *wildcard*
    (case-insensitive), in sorted order.'''
    pattern = wildcard.upper()
    for param in sorted(self.keys()):
        if fnmatch.fnmatch(str(param).upper(), pattern):
            print("%-16.16s %f" % (str(param), self.get(param)))
|
def v2(self):
    """Lazily construct and cache the v2 chat version.

    :returns: Version v2 of chat
    :rtype: twilio.rest.chat.v2.V2
    """
    cached = self._v2
    if cached is None:
        cached = self._v2 = V2(self)
    return cached
|
def _ParseStorageMediaImageOptions(self, options):
    """Parses the storage media image options.

    Args:
        options (argparse.Namespace): command line arguments.

    Raises:
        BadConfigOption: if the options are invalid.
    """
    # Validate the partition identifiers (prefix 'p', e.g. 'p1,p2').
    self._partitions = getattr(options, 'partitions', None)
    if self._partitions:
        try:
            self._ParseVolumeIdentifiersString(self._partitions, prefix='p')
        except ValueError:
            raise errors.BadConfigOption('Unsupported partitions')
    # Validate the APFS volume identifiers (prefix 'apfs').
    self._volumes = getattr(options, 'volumes', None)
    if self._volumes:
        try:
            self._ParseVolumeIdentifiersString(self._volumes, prefix='apfs')
        except ValueError:
            raise errors.BadConfigOption('Unsupported volumes')
|
def get_draft(self, layer_id, expand=None):
    """Get the current draft version of a layer.

    :param layer_id: id of the layer whose draft version to fetch.
    :param expand: optional list of attributes to expand in the response.
    :raises NotFound: if there is no draft version.
    """
    # Avoid a mutable default argument; downstream still always receives
    # a list, as before.
    if expand is None:
        expand = []
    target_url = self.client.get_url('VERSION', 'GET', 'draft', {'layer_id': layer_id})
    return self._get(target_url, expand=expand)
|
def modprobe(state, host, name, present=True, force=False):
    '''Load/unload kernel modules.

    + name: name of the module to manage
    + present: whether the module should be loaded or not
    + force: whether to force any add/remove modules
    '''
    loaded = name in host.fact.kernel_modules
    flag = ' -f' if force else ''
    # Module isn't loaded and we want it?
    if present and not loaded:
        yield 'modprobe{0} {1}'.format(flag, name)
    # Module is loaded and we don't want it?
    elif not present and loaded:
        yield 'modprobe{0} -r {1}'.format(flag, name)
|
def add_transform(self, key, xslt):
    """Add or update a transform.

    @param key: Transform key to use when executing transformations
    @param xslt: Text or file name of an xslt transform
    """
    # Remove any converter already registered under this key so an
    # existing transform is replaced rather than duplicated.
    self._remove_converter(key)
    self._xsltLibrary[key] = xslt
    self._add_converter(key)
|
def wnexpd(left, right, window):
    """Expand each of the intervals of a double precision window.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnexpd_c.html

    :param left: Amount subtracted from each left endpoint.
    :type left: float
    :param right: Amount added to each right endpoint.
    :type right: float
    :param window: Window to be expanded (modified in place).
    :type window: spiceypy.utils.support_types.SpiceCell
    :return: Expanded Window.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(window, stypes.SpiceCell)
    # dtype 1 marks a double-precision cell.
    assert window.dtype == 1
    c_left = ctypes.c_double(left)
    c_right = ctypes.c_double(right)
    libspice.wnexpd_c(c_left, c_right, ctypes.byref(window))
    return window
|
def save_config(self, cmd="write mem", confirm=False, confirm_response=""):
    """Save the running configuration by delegating to the base SSH class
    with ASA-appropriate defaults ("write mem", no confirmation prompt)."""
    return super(CiscoAsaSSH, self).save_config(cmd=cmd, confirm=confirm, confirm_response=confirm_response)
|
def wiki_update(self, page_id, title=None, body=None, other_names=None, is_locked=None, is_deleted=None):
    """Action to lets you update a wiki page (Requires login) (UNTESTED).

    Parameters:
        page_id (int): Where page_id is the wiki page id.
        title (str): Page title.
        body (str): Page content.
        other_names (str): Other names.
        is_locked (int): Can be: 0, 1 (Builder+).
        is_deleted (int): Can be: 0, 1 (Builder+).
    """
    params = {
        'wiki_page[title]': title,
        'wiki_page[body]': body,
        'wiki_page[other_names]': other_names,
        # These two documented parameters were previously accepted but
        # silently dropped from the request.
        'wiki_page[is_locked]': is_locked,
        'wiki_page[is_deleted]': is_deleted,
    }
    return self._get('wiki_pages/{0}.json'.format(page_id), params, method='PUT', auth=True)
|
def check_scan_process(self, scan_id):
    """Check the scan's process, and mark the scan stopped if it died
    before completion."""
    process = self.scan_processes[scan_id]
    progress = self.get_scan_progress(scan_id)
    if progress < 100 and not process.is_alive():
        # The worker died mid-scan: record the failure.
        self.set_scan_status(scan_id, ScanStatus.STOPPED)
        self.add_scan_error(scan_id, name="", host="", value="Scan process failure.")
        logger.info("%s: Scan stopped with errors.", scan_id)
    elif progress == 100:
        # Finished normally: reap the worker.
        process.join()
|
def get_colr(txt, argd):
    """Return a Colr instance based on user args.

    Dispatches on the mutually-exclusive --gradient / --gradientrgb /
    --rainbow flags, falling back to plain fore/back/style coloring.
    """
    fore = parse_colr_arg(get_name_arg(argd, '--fore', 'FORE', default=None), rgb_mode=argd['--truecolor'], )
    back = parse_colr_arg(get_name_arg(argd, '--back', 'BACK', default=None), rgb_mode=argd['--truecolor'], )
    style = get_name_arg(argd, '--style', 'STYLE', default=None)
    if argd['--gradient']:
        # Build a gradient from user args.
        return C(txt).gradient(name=argd['--gradient'], spread=try_int(argd['--spread'], 1, minimum=0), fore=fore, back=back, style=style, rgb_mode=argd['--truecolor'], )
    if argd['--gradientrgb']:
        # Build an rgb gradient from user args.
        rgb_start, rgb_stop = parse_gradient_rgb_args(argd['--gradientrgb'])
        return C(txt).gradient_rgb(fore=fore, back=back, style=style, start=rgb_start, stop=rgb_stop, )
    if argd['--rainbow']:
        # Rainbow with user-tunable frequency/offset/spread.
        return C(txt).rainbow(fore=fore, back=back, style=style, freq=try_float(argd['--frequency'], 0.1, minimum=0), offset=try_int(argd['--offset'], randint(0, 255), minimum=0), spread=try_float(argd['--spread'], 3.0, minimum=0), rgb_mode=argd['--truecolor'], )
    # Normal colored output.
    return C(txt, fore=fore, back=back, style=style)
|
def gumbel_softmax(x, z_size, mode, softmax_k=0, temperature_warmup_steps=150000, summary=True, name=None):
    """Gumbel softmax discretization bottleneck.

    Args:
      x: Input to the discretization bottleneck.
      z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
      mode: tf.estimator.ModeKeys.
      softmax_k: If > 0 then do top-k softmax.
      temperature_warmup_steps: Number of steps it takes to decay temperature to
        its final value (also controls the Gumbel-noise warmup below).
      summary: Whether to write summaries.
      name: Name for the bottleneck scope.

    Returns:
      Embedding function, discrete code, and loss.
    """
    with tf.variable_scope(name, default_name="gumbel_softmax"):
        # One logit per discrete code.
        m = tf.layers.dense(x, 2**z_size, name="mask")
        if softmax_k > 0:
            # Top-k softmax path returns early with its own KL-based loss.
            m, kl = top_k_softmax(m, softmax_k)
            return m, m, 1.0 - tf.reduce_mean(kl)
        logsm = tf.nn.log_softmax(m)
        # Gumbel-softmax sample.
        gumbel_samples = gumbel_sample(common_layers.shape_list(m))
        steps = temperature_warmup_steps
        # Ramp the Gumbel noise in over the first steps // 5 training steps.
        gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5
        # Anneal temperature linearly from 1.2 down to 0.2 over `steps`.
        temperature = 1.2 - common_layers.inverse_lin_decay(steps)
        # 10% of the time keep reasonably high temperature to keep learning.
        temperature = tf.cond(
            tf.less(tf.random_uniform([]), 0.9),
            lambda: temperature,
            lambda: tf.random_uniform([], minval=0.5, maxval=1.0))
        # Relaxed (differentiable) one-hot sample.
        s = tf.nn.softmax((logsm + gumbel_samples) / temperature)
        m = tf.nn.softmax(m)
        # Per-position KL term: negative max log-probability.
        kl = -tf.reduce_max(logsm, axis=-1)
        if summary:
            tf.summary.histogram("max-log", tf.reshape(kl, [-1]))
        # Calculate the argmax and construct hot vectors.
        maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1])
        maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size))
        # Add losses that prevent too few being used.
        distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot
        d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True)
        d_variance = tf.reduce_mean(tf.squared_difference(distrib, d_mean), axis=[0])
        # Negated variance: minimizing it encourages spread across codes.
        d_dev = -tf.reduce_mean(d_variance)
        ret = s
        if mode != tf.estimator.ModeKeys.TRAIN:
            # Just hot @ eval: use the hard one-hot code instead of the soft sample.
            ret = tf.reshape(maxvhot, common_layers.shape_list(s))
        return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002
|
def session(self):
    """This is what you should use to make requests. It will authenticate for you.

    The session is created lazily on first access and cached on the
    instance; the bearer token is attached to its default headers.

    :return: requests.sessions.Session
    """
    cached = self._session
    if cached:
        return cached
    fresh = requests.Session()
    fresh.headers.update({'Authorization': 'Bearer {0}'.format(self.token)})
    self._session = fresh
    return fresh
|
def get_method(self, name, arg_types=()):
    """Search for the method with the given name and argument descriptors.

    Parameters
    ----------
    name : str
        Method name to look up.
    arg_types : sequence of strings
        Each string is a parameter type, in the non-pretty format.

    Returns
    -------
    method : `JavaMemberInfo` or `None`
        The single matching, non-bridging method of matching name
        and parameter types.
    """
    # Normalize to a tuple so comparison against the tuple returned by
    # get_arg_type_descriptors() works for lists and other iterables.
    wanted = tuple(arg_types)
    for candidate in self.get_methods_by_name(name):
        if candidate.is_bridge():
            continue
        if candidate.get_arg_type_descriptors() == wanted:
            return candidate
    return None
|
def write_pbm(matrix, version, out, scale=1, border=None, plain=False):
    """Serializes the matrix as `PBM <http://netpbm.sourceforge.net/doc/pbm.html>`_
    image.

    :param matrix: The matrix to serialize.
    :param int version: The (Micro) QR code version
    :param out: Filename or a file-like object supporting to write binary data.
    :param scale: Indicates the size of a single module (default: 1 which
            corresponds to 1 x 1 pixel per module).
    :param int border: Integer indicating the size of the quiet zone.
            If set to ``None`` (default), the recommended border size
            will be used (``4`` for QR Codes, ``2`` for a Micro QR Codes).
    :param bool plain: Indicates if a P1 (ASCII encoding) image should be
            created (default: False). By default a (binary) P4 image is created.
    """
    rows = matrix_iter(matrix, version, scale, border)
    width, height = get_symbol_size(version, scale=scale, border=border)
    # P1 = plain ASCII bitmap, P4 = packed binary bitmap.
    header = '{0}\n# Created by {1}\n{2} {3}\n'.format(
        'P1' if plain else 'P4', CREATOR, width, height)
    with writable(out, 'wb') as f:
        f.write(header.encode('ascii'))
        if plain:
            # One ASCII digit per module, one line per row.
            for row in rows:
                f.write(b''.join(str(module).encode('ascii') for module in row))
                f.write(b'\n')
        else:
            # Pack each row's modules into bytes, MSB first.
            for row in rows:
                f.write(bytearray(_pack_bits_into_byte(row)))
|
def plot_parallel(data, var_names=None, coords=None, figsize=None, textsize=None, legend=True, colornd="k", colord="C1", shadend=0.025, ax=None, norm_method=None):
    """Plot parallel coordinates plot showing posterior points with and without divergences.

    Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen

    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object.
        Refer to documentation of az.convert_to_dataset for details.
    var_names : list of variable names
        Variables to be plotted, if None all variable are plotted. Can be used
        to change the order of the plotted variables.
    coords : mapping, optional
        Coordinates of var_names to be plotted. Passed to `Dataset.sel`.
    figsize : tuple
        Figure size. If None it will be defined automatically.
    textsize : float
        Text size scaling factor for labels, titles and lines. If None it will
        be autoscaled based on figsize.
    legend : bool
        Flag for plotting legend (defaults to True).
    colornd : valid matplotlib color
        Color for non-divergent points. Defaults to 'k'.
    colord : valid matplotlib color
        Color for divergent points. Defaults to 'C1'.
    shadend : float
        Alpha blending value for non-divergent points, between 0 (invisible)
        and 1 (opaque). Defaults to .025.
    ax : axes
        Matplotlib axes.
    norm_method : str
        Method for normalizing the data. Methods include normal, minmax and
        rank. Defaults to none.

    Returns
    -------
    ax : matplotlib axes

    Raises
    ------
    ValueError
        If fewer than two variables are selected, or norm_method is unknown.
    """
    if coords is None:
        coords = {}
    # Get diverging draws and combine chains
    divergent_data = convert_to_dataset(data, group="sample_stats")
    _, diverging_mask = xarray_to_ndarray(divergent_data, var_names=("diverging",), combined=True)
    diverging_mask = np.squeeze(diverging_mask)
    # Get posterior draws and combine chains
    posterior_data = convert_to_dataset(data, group="posterior")
    var_names = _var_names(var_names, posterior_data)
    var_names, _posterior = xarray_to_ndarray(
        get_coords(posterior_data, coords), var_names=var_names, combined=True
    )
    if len(var_names) < 2:
        raise ValueError("This plot needs at least two variables")
    if norm_method is not None:
        # _posterior rows are variables, columns are draws; normalize each
        # variable independently. Broadcasting with keepdims replaces the
        # former per-row Python loops with a single vectorized expression.
        if norm_method == "normal":
            mean = np.mean(_posterior, axis=1, keepdims=True)
            standard_deviation = np.std(_posterior, axis=1, keepdims=True)
            _posterior = (_posterior - mean) / standard_deviation
        elif norm_method == "minmax":
            min_elem = np.min(_posterior, axis=1, keepdims=True)
            max_elem = np.max(_posterior, axis=1, keepdims=True)
            _posterior = (_posterior - min_elem) / (max_elem - min_elem)
        elif norm_method == "rank":
            _posterior = rankdata(_posterior, axis=1)
        else:
            raise ValueError("{} is not supported. Use normal, minmax or rank.".format(norm_method))
    figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)
    if ax is None:
        _, ax = plt.subplots(figsize=figsize, constrained_layout=True)
    # Faint lines for non-divergent draws, bold lines for divergent ones.
    ax.plot(_posterior[:, ~diverging_mask], color=colornd, alpha=shadend)
    if np.any(diverging_mask):
        ax.plot(_posterior[:, diverging_mask], color=colord, lw=1)
    ax.tick_params(labelsize=textsize)
    ax.set_xticks(range(len(var_names)))
    ax.set_xticklabels(var_names)
    if legend:
        # Empty plots create legend handles without drawing extra data.
        ax.plot([], color=colornd, label="non-divergent")
        if np.any(diverging_mask):
            ax.plot([], color=colord, label="divergent")
        ax.legend(fontsize=xt_labelsize)
    return ax
|
def infer_enum_class(node):
    """Specific inference for enums.

    Replaces each enum member assignment in ``node.locals`` with an
    instance of a synthesized class exposing ``value`` and ``name``
    properties, so attribute access on members can be inferred.
    """
    for basename in node.basenames:
        # TODO: doesn't handle subclasses yet. This implementation
        # is a hack to support enums.
        if basename not in ENUM_BASE_NAMES:
            continue
        if node.root().name == "enum":
            # Skip if the class is directly from enum module.
            break
        for local, values in node.locals.items():
            # Only plain name assignments are enum members; skip anything
            # else (methods, nested classes, ...).
            if any(not isinstance(value, nodes.AssignName) for value in values):
                continue
            targets = []
            stmt = values[0].statement()
            if isinstance(stmt, nodes.Assign):
                if isinstance(stmt.targets[0], nodes.Tuple):
                    # Tuple assignment: A, B = 1, 2 -> one target per member.
                    targets = stmt.targets[0].itered()
                else:
                    targets = stmt.targets
            elif isinstance(stmt, nodes.AnnAssign):
                targets = [stmt.target]
            inferred_return_value = None
            if isinstance(stmt, nodes.Assign):
                if isinstance(stmt.value, nodes.Const):
                    if isinstance(stmt.value.value, str):
                        # repr() keeps the quotes when the value is substituted
                        # into the generated class source below.
                        inferred_return_value = repr(stmt.value.value)
                    else:
                        inferred_return_value = stmt.value.value
                else:
                    inferred_return_value = stmt.value.as_string()
            new_targets = []
            for target in targets:
                # Replace all the assignments with our mocked class.
                classdef = dedent(
                    """
                class {name}({types}):
                    @property
                    def value(self):
                        return {return_value}
                    @property
                    def name(self):
                        return "{name}"
                """.format(
                        name=target.name,
                        types=", ".join(node.basenames),
                        return_value=inferred_return_value,
                    )
                )
                if "IntFlag" in basename:
                    # Alright, we need to add some additional methods.
                    # Unfortunately we still can't infer the resulting objects as
                    # Enum members, but once we'll be able to do that, the following
                    # should result in some nice symbolic execution
                    classdef += INT_FLAG_ADDITION_METHODS.format(name=target.name)
                fake = AstroidBuilder(MANAGER).string_build(classdef)[target.name]
                fake.parent = target.parent
                # Carry over methods defined on the real enum class.
                for method in node.mymethods():
                    fake.locals[method.name] = [method]
                new_targets.append(fake.instantiate_class())
            node.locals[local] = new_targets
        break
    return node
|
def OnDestroy(self, event):
    """Called on panel destruction.

    Deregisters every observer this panel registered on the card and
    reader monitors, guarding each monitor access with ``hasattr`` in
    case the panel was only partially initialized, then lets the event
    propagate via ``event.Skip()``.
    """
    # deregister observers
    if hasattr(self, 'cardmonitor'):
        self.cardmonitor.deleteObserver(self.cardtreecardobserver)
    if hasattr(self, 'readermonitor'):
        self.readermonitor.deleteObserver(self.readertreereaderobserver)
        # BUG FIX: readertreecardobserver is registered on the *card*
        # monitor, so verify it exists before touching it; previously a
        # panel with a reader monitor but no card monitor raised
        # AttributeError here during teardown.
        if hasattr(self, 'cardmonitor'):
            self.cardmonitor.deleteObserver(self.readertreecardobserver)
    event.Skip()
|
def _is_active_model(cls, model):
    """Check is model app name is in list of INSTALLED_APPS"""
    # We need to use such tricky way to check because of inconsistent apps names:
    # some apps are included in format "<module_name>.<app_name>" like "waldur_core.openstack"
    # other apps are included in format "<app_name>" like "nodecondcutor_sugarcrm"
    module_parts = model.__module__.split('.')
    installed = settings.INSTALLED_APPS
    # Try the two-component prefix first, then the bare top-level name.
    return any('.'.join(module_parts[:depth]) in installed for depth in (2, 1))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.