signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def potentiallayers(self, x, y, layers, aq=None):
    '''Returns array of size len(layers); only used in building equations.'''
    if aq is None:
        aq = self.model.aq.find_aquifer_data(x, y)
    # Project the per-eigenvector potentials onto the layers via eigvec.
    weighted = self.potential(x, y, aq) * aq.eigvec
    return weighted.sum(axis=1)[layers]
|
def hash_file(file_obj, hash_function=hashlib.md5):
    """Get the hash of an open file-like object.

    Parameters:
        file_obj: file-like object
        hash_function: function to use to hash data

    Returns:
        hashed: str, hex version of result
    """
    # Remember where the caller left the stream so it can be restored.
    original_position = file_obj.tell()
    hasher = hash_function()
    hasher.update(file_obj.read())
    digest = hasher.hexdigest()
    # Leave the stream exactly as we found it.
    file_obj.seek(original_position)
    return digest
|
def __validate_logical(self, operator, definitions, field, value):
    """Validates value against all definitions and logs errors according
    to the operator.

    :param operator: name of the logical rule (used in the schema crumb
        so error paths point at the right definition).
    :param definitions: list of candidate schema definitions for ``field``.
    :param field: the document field being validated.
    :param value: present for rule-signature compatibility; the child
        validator re-validates ``self.document`` itself.
    :returns: tuple ``(valid_counter, _errors)`` — how many definitions
        validated successfully, and the collected errors of the failures.
    """
    valid_counter = 0
    _errors = errors.ErrorList()
    for i, definition in enumerate(definitions):
        # Validate the field against one candidate definition at a time.
        schema = {field: definition.copy()}
        # Inherit 'allow_unknown' and 'type' from the parent schema when
        # the candidate definition does not set them itself.
        for rule in ('allow_unknown', 'type'):
            if rule not in schema[field] and rule in self.schema[field]:
                schema[field][rule] = self.schema[field][rule]
        if 'allow_unknown' not in schema[field]:
            schema[field]['allow_unknown'] = self.allow_unknown
        validator = self._get_child_validator(
            schema_crumb=(field, operator, i), schema=schema,
            allow_unknown=True)
        if validator(self.document, update=self.update, normalize=False):
            valid_counter += 1
        else:
            # NOTE(review): presumably strips the definition index at
            # position 3 from the error paths — confirm against
            # _drop_nodes_from_errorpaths.
            self._drop_nodes_from_errorpaths(validator._errors, [], [3])
            _errors.extend(validator._errors)
    return valid_counter, _errors
|
def parse_cell(cell, rules):
    """Applies the rules to the bunch of text describing a cell.

    @param string cell: A network/cell from iwlist scan.
    @param dictionary rules: A dictionary of parse rules.
    @return dictionary: parsed network.
    """
    # Each rule extracts one field from the raw cell text.
    return {key: rule(cell) for key, rule in rules.items()}
|
def handle(self, state, message=False):
    """Handle a state update.

    :param state: the new chat state
    :type state: :class:`~aioxmpp.chatstates.ChatState`
    :param message: pass true to indicate that we handle the
        :data:`ACTIVE` state that is implied by sending a content message.
    :type message: :class:`bool`
    :returns: whether a standalone notification must be sent for this
        state update, respectively whether a chat state notification must
        be included with the message.
    :raises ValueError: if `message` is true and a state other than
        :data:`ACTIVE` is passed.
    """
    if message and state != chatstates_xso.ChatState.ACTIVE:
        raise ValueError("Only the state ACTIVE can be sent with messages.")
    # A repeated standalone state needs no new notification.
    if not message and self._state == state:
        return False
    self._state = state
    return self._strategy.sending
|
def is_python_interpreter(filename):
    """Evaluate whether a file is a python interpreter or not."""
    # Resolve symlinks so the checks below operate on the real target.
    real_filename = os.path.realpath(filename)
    # To follow symlink if existent
    if (not osp.isfile(real_filename) or
            not is_python_interpreter_valid_name(filename)):
        # Missing file, or the name doesn't look like a python binary.
        return False
    elif is_pythonw(filename):
        if os.name == 'nt':
            # pythonw is a binary on Windows
            if not encoding.is_text_file(real_filename):
                return True
            else:
                return False
        elif sys.platform == 'darwin':
            # pythonw is a text file in Anaconda but a binary in the system
            if is_anaconda() and encoding.is_text_file(real_filename):
                return True
            elif not encoding.is_text_file(real_filename):
                return True
            else:
                return False
        else:
            # There's no pythonw in other systems
            return False
    elif encoding.is_text_file(real_filename):
        # At this point we can't have a text file
        return False
    else:
        # Fall back to actually running `filename --help`-style probing.
        return check_python_help(filename)
|
def generate_content_encoding(self):
    """Emit code that base64-decodes the value when it's encoded by base64.

    .. code-block:: python

        'contentEncoding': 'base64',
    """
    if self._definition['contentEncoding'] != 'base64':
        return
    with self.l('if isinstance({variable}, str):'):
        with self.l('try:'):
            self.l('import base64')
            self.l('{variable} = base64.b64decode({variable})')
        with self.l('except Exception:'):
            self.l('raise JsonSchemaException("{name} must be encoded by base64")')
    with self.l('if {variable} == "":'):
        self.l('raise JsonSchemaException("contentEncoding must be base64")')
|
def cluster(args):
    """%prog cluster prefix fastqfiles

    Use `vsearch` to remove duplicate reads. This routine is heavily
    influenced by PyRAD: <https://github.com/dereneaton/pyrad>.
    """
    p = OptionParser(cluster.__doc__)
    add_consensus_options(p)
    p.set_align(pctid=95)
    p.set_outdir()
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) < 2:
        sys.exit(not p.print_help())
    prefix = args[0]
    fastqfiles = args[1:]
    cpus = opts.cpus
    pctid = opts.pctid
    mindepth = opts.mindepth
    minlength = opts.minlength
    # Convert FASTQ to FASTA (+qual) with seqtk, writing <prefix>.fasta
    # into the chosen output directory.
    fastafile, qualfile = fasta(fastqfiles + ["--seqtk",
                                              "--outdir={0}".format(opts.outdir),
                                              "--outfile={0}".format(prefix + ".fasta")])
    prefix = op.join(opts.outdir, prefix)
    pf = prefix + ".P{0}".format(pctid)
    # Each pipeline stage below only reruns when its inputs are newer
    # than its outputs (need_update), making the pipeline restartable.
    derepfile = prefix + ".derep"
    if need_update(fastafile, derepfile):
        derep(fastafile, derepfile, minlength, cpus)
    userfile = pf + ".u"
    notmatchedfile = pf + ".notmatched"
    if need_update(derepfile, userfile):
        cluster_smallmem(derepfile, userfile, notmatchedfile,
                         minlength, pctid, cpus)
    clustfile = pf + ".clust"
    if need_update((derepfile, userfile, notmatchedfile), clustfile):
        makeclust(derepfile, userfile, notmatchedfile, clustfile,
                  mindepth=mindepth)
    clustSfile = pf + ".clustS"
    if need_update(clustfile, clustSfile):
        parallel_musclewrap(clustfile, cpus)
    statsfile = pf + ".stats"
    if need_update(clustSfile, statsfile):
        makestats(clustSfile, statsfile, mindepth=mindepth)
|
def get_votes(self):
    """Get all votes for this election.

    Returns the union of the vote querysets of every CandidateElection
    attached to this election, or ``None`` when there are none.
    """
    candidate_elections = CandidateElection.objects.filter(election=self)
    votes = None
    for ce in candidate_elections:
        ce_votes = ce.votes.all()
        # BUG FIX: `None | queryset` raises TypeError, so seed `votes`
        # with the first queryset and OR the remaining ones onto it.
        votes = ce_votes if votes is None else votes | ce_votes
    return votes
|
def _request_modify_dns_record ( self , record ) :
"""Sends Modify _ DNS _ Record request"""
|
return self . _request_internal ( "Modify_DNS_Record" , domain = self . domain , record = record )
|
def merge_wcs_counts_cubes(filelist):
    """Merge all the files in filelist, assuming that they are WCS counts cubes.

    The primary and EBOUNDS HDUs are seeded from the first file, GTI data
    from every file are merged, exposures are summed, and DATE-END is taken
    from the last file.  Returns a `fits.HDUList`.
    """
    out_prim = None
    out_ebounds = None
    datalist_gti = []
    exposure_sum = 0.
    nfiles = len(filelist)
    ngti = np.zeros(nfiles, int)
    first = None
    date_end = None
    for i, filename in enumerate(filelist):
        fin = fits.open(filename)
        # Progress indicator: one dot per file.
        sys.stdout.write('.')
        sys.stdout.flush()
        if i == 0:
            out_prim = update_primary(fin[0], out_prim)
            out_ebounds = update_ebounds(fin["EBOUNDS"], out_ebounds)
        (gti_data, exposure, tstop) = extract_gti_data(fin["GTI"])
        datalist_gti.append(gti_data)
        exposure_sum += exposure
        ngti[i] = len(gti_data)
        # BUG FIX: DATE-END must be read from the last file even when it is
        # also the first (nfiles == 1); the previous if/elif chain skipped
        # it in that case and raised NameError below.
        if i == nfiles - 1:
            date_end = fin[0].header['DATE-END']
        if i == 0:
            # Keep the first file open: its GTI HDU is needed for the merge.
            first = fin
        elif i != nfiles - 1:
            fin.close()
    out_gti = merge_all_gti_data(datalist_gti, ngti, first['GTI'])
    out_gti.header['EXPOSURE'] = exposure_sum
    # NOTE(review): TSTOP comes from the last file processed — confirm the
    # input list is time-ordered.
    out_gti.header['TSTOP'] = tstop
    hdulist = [out_prim, out_ebounds, out_gti]
    for hdu in hdulist:
        hdu.header['DATE-END'] = date_end
    out_prim.update_header()
    sys.stdout.write("!\n")
    return fits.HDUList(hdulist)
|
def store(self):
    '''Return a thread local :class:`dossier.store.Store` client.'''
    # Lazily create the client on first access, then reuse it.
    if self._store is not None:
        return self._store
    self._store = self.create(ElasticStore,
                              config=global_config('dossier.store'))
    return self._store
|
def persist(self, key):
    """Remove the existing timeout on key.

    The reply is converted to :class:`bool`.
    """
    return wait_convert(self.execute(b'PERSIST', key), bool)
|
def main() -> None:
    """Command-line processor. See ``--help`` for details."""
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument("inputfile", nargs="?", help="Input file name")
    parser.add_argument(
        "--availability", nargs='*',
        help="File extensions to check availability for (use a '.' prefix, "
             "and use the special extension 'None' to check the fallback "
             "processor")
    parser.add_argument(
        "--plain", action="store_true",
        help="Keep it plain: minimize layout (e.g. of tables) and maximize "
             "the likelihood of source sentence flowing properly in the "
             "output (e.g. for natural language processing).")
    parser.add_argument(
        "--width", type=int, default=DEFAULT_WIDTH,
        help="Word wrapping width (default {})".format(DEFAULT_WIDTH))
    parser.add_argument(
        "--min-col-width", type=int, default=DEFAULT_MIN_COL_WIDTH,
        help="Minimum column width for tables (default {})".format(
            DEFAULT_MIN_COL_WIDTH))
    args = parser.parse_args()
    # NOTE(review): a bare "--availability" (empty list) is falsy and falls
    # through to normal processing — confirm that is intended.
    if args.availability:
        for ext in args.availability:
            if ext.lower() == 'none':
                # The literal extension 'None' probes the fallback processor.
                ext = None
            available = is_text_extractor_available(ext)
            print("Extractor for extension {} present: {}".format(
                ext, available))
        return
    if not args.inputfile:
        parser.print_help(sys.stderr)
        return
    config = TextProcessingConfig(
        width=args.width,
        min_col_width=args.min_col_width,
        plain=args.plain,
    )
    result = document_to_text(filename=args.inputfile, config=config)
    if result is None:
        return
    else:
        print(result)
|
def fill_document(doc):
    """Add a section, a subsection and some text to the document.

    :param doc: the document
    :type doc: :class:`pylatex.document.Document` instance
    """
    with doc.create(Section('A section')):
        doc.append('Some regular text and some ')
        doc.append(italic('italic text. '))
        # The subsection is nested inside the section's context.
        with doc.create(Subsection('A subsection')):
            doc.append('Also some crazy characters: $&#{}')
|
def compute_amount(self):
    """Auto-assign and return the total amount for this tax."""
    # aliquot is a percentage of the base amount.
    computed = (self.base_amount * self.aliquot) / 100
    self.amount = computed
    return computed
|
def _from_dict ( cls , _dict ) :
"""Initialize a RuntimeIntent object from a json dictionary ."""
|
args = { }
xtra = _dict . copy ( )
if 'intent' in _dict :
args [ 'intent' ] = _dict . get ( 'intent' )
del xtra [ 'intent' ]
else :
raise ValueError ( 'Required property \'intent\' not present in RuntimeIntent JSON' )
if 'confidence' in _dict :
args [ 'confidence' ] = _dict . get ( 'confidence' )
del xtra [ 'confidence' ]
else :
raise ValueError ( 'Required property \'confidence\' not present in RuntimeIntent JSON' )
args . update ( xtra )
return cls ( ** args )
|
def _build_schema(self, s):
    """Recursive schema builder, called by `json_schema`.

    :param s: schema spec node (list, dict, or scalar leaf).
    :returns: the corresponding JSON-schema fragment as a dict.
    """
    w = self._whatis(s)
    if w == self.IS_LIST:
        # Assumes a non-empty list whose first element is representative of
        # the item type — TODO confirm; s[0] raises IndexError on [].
        w0 = self._whatis(s[0])
        js = {"type": "array", "items": {"type": self._jstype(w0, s[0])}}
    elif w == self.IS_DICT:
        js = {"type": "object",
              "properties": {key: self._build_schema(val)
                             for key, val in s.items()}}
        # Non-optional child values become 'required' entries.
        req = [key for key, val in s.items() if not val.is_optional]
        if req:
            js["required"] = req
    else:
        # Scalar leaf value.
        js = {"type": self._jstype(w, s)}
    # Merge shared schema keys without overwriting per-node values.
    for k, v in self._json_schema_keys.items():
        if k not in js:
            js[k] = v
    return js
|
def asdict(self):
    """Return dict presentation of this service.

    Useful for dumping the device information into JSON.
    """
    methods = {m.name: m.asdict() for m in self.methods}
    notifications = {n.name: n.asdict() for n in self.notifications}
    return {
        "methods": methods,
        "protocols": self.protocols,
        "notifications": notifications,
    }
|
def drag_events(self):
    """Return a list of all mouse events in the current drag operation.

    Returns None if there is no current drag operation.
    """
    if not self.is_dragging:
        return None
    trail = []
    event = self
    # Walk backwards through the event chain; a mouse_press (or a missing
    # predecessor) marks the start of the trail and is not included.
    while event is not None and event.type != 'mouse_press':
        trail.append(event)
        event = event.last_event
    trail.reverse()
    return trail
|
def _get_parameter_symbols(self, n_counter, k_counter):
    r"""Calculates parameters Y expressions and beta coefficients in
    :math:`X = {A(\beta_0, \beta_1 \ldots \beta_n) \cdot Y}`

    :param n_counter: a list of :class:`~means.core.descriptors.Moment`\s
        representing central moments
    :type n_counter: list[:class:`~means.core.descriptors.Moment`]
    :param k_counter: a list of :class:`~means.core.descriptors.Moment`\s
        representing raw moments
    :type k_counter: list[:class:`~means.core.descriptors.Moment`]
    :return: two column matrices Y expressions and beta multipliers
    """
    n_moment = self.max_order + 1
    # First-order raw moments are the species expectations.
    expectation_symbols = sp.Matrix([n.symbol for n in k_counter if n.order == 1])
    n_species = len(expectation_symbols)
    # Create auxiliary symbolic species Y_{ij}, for i, j = 0...(n-1),
    # and mirror, so that Y_{ij} = Y_{ji}.
    symbolic_species = sp.Matrix(
        [[sp.Symbol(('Y_{0}'.format(str(j))) + '{0}'.format(str(i)))
          for i in range(n_species)] for j in range(n_species)])
    for i in range(n_species):
        for j in range(i + 1, n_species):
            symbolic_species[j, i] = symbolic_species[i, j]
    # Obtain beta terms explaining how original variables are derived
    # from auxiliary ones.
    if self.is_multivariate:
        # :math:`X_i = \sum_j Y_{ij}`
        beta_in_matrix = sp.Matrix([sum(symbolic_species[i, :]) for i in range(n_species)])
    else:
        # In the univariate case, only the diagonal elements are needed.
        beta_in_matrix = sp.Matrix([symbolic_species[i, i] for i in range(n_species)])
    # Covariance symbols are read into a matrix. Variances are the
    # diagonal elements.
    covariance_matrix = sp.Matrix(n_species, n_species,
                                  lambda x, y: self._get_covariance_symbol(n_counter, x, y))
    variance_symbols = sp.Matrix([covariance_matrix[i, i] for i in range(n_species)])
    # Compute :math:`\beta_i = Var(X_i) / \mathbb{E}(X_i)` and
    # :math:`\bar\alpha_i = \mathbb{E}(X_i)^2 / Var(X_i)`.
    beta_exprs = sp.Matrix([v / e for e, v in zip(expectation_symbols, variance_symbols)])
    alpha_bar_exprs = sp.Matrix([(e ** 2) / v for e, v in zip(expectation_symbols, variance_symbols)])
    if self.is_multivariate:
        # Calculate nondiagonal elements from covariances.
        alpha_exprs = sp.Matrix(n_species, n_species,
                                lambda i, j: covariance_matrix[i, j] / (beta_exprs[i] * beta_exprs[j]))
    else:
        # Covariances are zero in the univariate case.
        alpha_exprs = sp.Matrix(n_species, n_species, lambda i, j: 0)
    for sp_idx in range(n_species):
        # Compute diagonal elements as
        # :math:`\alpha_{ii} = \bar\alpha_i - \sum(\alpha_{ij})`
        # (equivalent to \bar\alpha_i in the univariate case).
        # Zero the diagonal first so it is excluded from the row sum.
        alpha_exprs[sp_idx, sp_idx] = 0
        alpha_exprs[sp_idx, sp_idx] = alpha_bar_exprs[sp_idx] - sum(alpha_exprs[sp_idx, :])
    # Each row in the moment matrix contains the exponents of Xs for a
    # given moment. Each row in Y_exprs and beta_multipliers has elements
    # on the appropriate power determined by the corresponding row in the
    # moment matrix.
    Y_exprs = []
    beta_multipliers = []
    positive_n_counter = [n for n in n_counter if n.order > 0]
    for mom in positive_n_counter:
        Y_exprs.append(product([(b ** s).expand() for b, s in zip(beta_in_matrix, mom.n_vector)]))
        beta_multipliers.append(product([(b ** s).expand() for b, s in zip(beta_exprs, mom.n_vector)]))
    Y_exprs = sp.Matrix(Y_exprs).applyfunc(sp.expand)
    beta_multipliers = sp.Matrix(beta_multipliers)
    # Substitute alpha expressions in place of symbolic species Ys by
    # going through all powers up to the moment order for closure.
    subs_pairs = []
    for i, a in enumerate(alpha_exprs):
        Y_to_substitute = [symbolic_species[i] ** n for n in range(2, n_moment + 1)]
        # Obtain alpha term for higher order moments:
        # :math:`\mathbb{E}(Y_{ij}^n) \rightarrow (\alpha_{ij})_n`
        alpha_m = [self._gamma_factorial(a, n) for n in range(2, n_moment + 1)]
        # Substitute alpha term for symbolic species.
        subs_pairs += zip(Y_to_substitute, alpha_m)
        # Add first order expression to the end.
        subs_pairs.append((symbolic_species[i], a))
    Y_exprs = substitute_all(Y_exprs, subs_pairs)
    return Y_exprs, beta_multipliers
|
def resample(self, destination=None, datasets=None, generate=True,
             unload=True, resampler=None, reduce_data=True,
             **resample_kwargs):
    """Resample datasets and return a new scene.

    Args:
        destination (AreaDefinition, GridDefinition): area definition to
            resample to. If not specified then the area returned by
            `Scene.max_area()` will be used.
        datasets (list): Limit datasets to resample to these specified
            `DatasetID` objects. By default all currently loaded
            datasets are resampled.
        generate (bool): Generate any requested composites that could not
            be previously due to incompatible areas (default: True).
        unload (bool): Remove any datasets no longer needed after
            requested composites have been generated (default: True).
        resampler (str): Name of resampling method to use. By default,
            this is a nearest neighbor KDTree-based resampling
            ('nearest'). Other possible values include 'native', 'ewa',
            etc. See the :mod:`~satpy.resample` documentation for more
            information.
        reduce_data (bool): Reduce data by matching the input and output
            areas and slicing the data arrays (default: True)
        resample_kwargs: Remaining keyword arguments to pass to individual
            resampler classes. See the individual resampler class
            documentation :mod:`here <satpy.resample>` for available
            arguments.
    """
    # An empty/None `datasets` means "resample everything loaded".
    to_resample_ids = [dsid for (dsid, dataset) in self.datasets.items()
                       if (not datasets) or dsid in datasets]
    if destination is None:
        destination = self.max_area(to_resample_ids)
    new_scn = self.copy(datasets=to_resample_ids)
    # We may have some datasets we asked for but don't exist yet.
    new_scn.wishlist = self.wishlist.copy()
    self._resampled_scene(new_scn, destination, resampler=resampler,
                          reduce_data=reduce_data, **resample_kwargs)
    # Regenerate anything from the wishlist that needs it (combining
    # multiple resolutions, etc.).
    if generate:
        keepables = new_scn.generate_composites()
    else:
        # Don't lose datasets that we may need later for generating
        # composites.
        keepables = set(new_scn.datasets.keys()) | new_scn.wishlist
    if new_scn.missing_datasets:
        # Copy the set of missing datasets because they won't be valid
        # after they are removed in the next line.
        missing = new_scn.missing_datasets.copy()
        new_scn._remove_failed_datasets(keepables)
        missing_str = ", ".join(str(x) for x in missing)
        LOG.warning("The following datasets "
                    "were not created: {}".format(missing_str))
    if unload:
        new_scn.unload(keepables)
    return new_scn
|
def write_representative_sequences_file(self, outname, outdir=None,
                                        set_ids_from_model=True):
    """Write all the model's sequences as a single FASTA file.

    By default, sets IDs to model gene IDs.

    Args:
        outname (str): Name of the output FASTA file without the extension
        outdir (str): Path to output directory of downloaded files, must be
            set if GEM-PRO directories were not created initially
        set_ids_from_model (bool): If the gene ID source should be the model
            gene IDs, not the original sequence ID
    """
    if not outdir:
        outdir = self.data_dir
    if not outdir:
        raise ValueError('Output directory must be specified')
    outfile = op.join(outdir, outname + '.faa')
    tmp = []
    for x in self.genes_with_a_representative_sequence:
        repseq = x.protein.representative_sequence
        # Copy so renaming the record does not mutate the stored sequence.
        copied_seq_record = copy(repseq)
        if set_ids_from_model:
            copied_seq_record.id = x.id
        tmp.append(copied_seq_record)
    SeqIO.write(tmp, outfile, "fasta")
    log.info('{}: wrote all representative sequences to file'.format(outfile))
    # Remember where the combined genome FASTA lives.
    self.genome_path = outfile
    return self.genome_path
|
def build_system_components(device_type, os_id, navigator_id):
    """For given os_id build random platform and oscpu components.

    Returns dict {platform_version, platform, ua_platform, oscpu}:
      * platform_version is the OS name used in different places
      * ua_platform goes to navigator.platform
      * platform is used in building navigator.userAgent
      * oscpu goes to navigator.oscpu

    Raises:
        ValueError: if os_id is not 'win', 'linux', 'mac' or 'android'
            (previously an unknown os_id fell through to a NameError).
    """
    if os_id == 'win':
        platform_version = choice(OS_PLATFORM['win'])
        cpu = choice(OS_CPU['win'])
        if cpu:
            platform = '%s; %s' % (platform_version, cpu)
        else:
            # Some entries carry no CPU token.
            platform = platform_version
        res = {
            'platform_version': platform_version,
            'platform': platform,
            'ua_platform': platform,
            'oscpu': platform,
        }
    elif os_id == 'linux':
        cpu = choice(OS_CPU['linux'])
        platform_version = choice(OS_PLATFORM['linux'])
        platform = '%s %s' % (platform_version, cpu)
        res = {
            'platform_version': platform_version,
            'platform': platform,
            'ua_platform': platform,
            'oscpu': 'Linux %s' % cpu,
        }
    elif os_id == 'mac':
        cpu = choice(OS_CPU['mac'])
        platform_version = choice(OS_PLATFORM['mac'])
        platform = platform_version
        if navigator_id == 'chrome':
            platform = fix_chrome_mac_platform(platform)
        res = {
            'platform_version': platform_version,
            'platform': 'MacIntel',
            'ua_platform': platform,
            'oscpu': 'Intel Mac OS X %s' % platform.split(' ')[-1],
        }
    elif os_id == 'android':
        assert navigator_id in ('firefox', 'chrome')
        assert device_type in ('smartphone', 'tablet')
        platform_version = choice(OS_PLATFORM['android'])
        if navigator_id == 'firefox':
            if device_type == 'smartphone':
                ua_platform = '%s; Mobile' % platform_version
            elif device_type == 'tablet':
                ua_platform = '%s; Tablet' % platform_version
        elif navigator_id == 'chrome':
            device_id = choice(SMARTPHONE_DEV_IDS)
            ua_platform = 'Linux; %s; %s' % (platform_version, device_id)
        oscpu = 'Linux %s' % choice(OS_CPU['android'])
        res = {
            'platform_version': platform_version,
            'ua_platform': ua_platform,
            'platform': oscpu,
            'oscpu': oscpu,
        }
    else:
        raise ValueError('Unknown os_id: %s' % os_id)
    return res
|
def register_app(app_name, app_setting, web_application_setting, mainfile,
                 package_space):
    """Insert current project root path into sys path and configure the app.

    :param app_name: application name stored on the global app_config.
    :param app_setting: settings object; must expose a ``log`` attribute
        and may contain a 'session_config' key.
    :param web_application_setting: settings merged into the global web
        application settings.
    :param mainfile: path of the entry file; the directory two levels up
        provides the project name.
    :param package_space: namespace dict handed to _install_app.
    """
    from turbo import log
    app_config.app_name = app_name
    app_config.app_setting = app_setting
    # Project name = basename of the directory two levels above mainfile.
    app_config.project_name = os.path.basename(get_base_dir(mainfile, 2))
    app_config.web_application_setting.update(web_application_setting)
    if app_setting.get('session_config'):
        app_config.session_config.update(app_setting['session_config'])
    # Configure logging from the app's log settings.
    log.getLogger(**app_setting.log)
    _install_app(package_space)
|
def _update_validation_response ( error , response ) :
"""Actualiza la respuesta por default acorde a un error de
validación ."""
|
new_response = response . copy ( )
# El status del catálogo entero será ERROR
new_response [ "status" ] = "ERROR"
# Adapto la información del ValidationError recibido a los fines
# del validador de DataJsons
error_info = { # Error Code 1 para " campo obligatorio faltante "
# Error Code 2 para " error en tipo o formato de campo "
"error_code" : 1 if error . validator == "required" else 2 , "message" : error . message , "validator" : error . validator , "validator_value" : error . validator_value , "path" : list ( error . path ) , # La instancia validada es irrelevante si el error es de tipo 1
"instance" : ( None if error . validator == "required" else error . instance ) }
# Identifico a qué nivel de jerarquía sucedió el error .
if len ( error . path ) >= 2 and error . path [ 0 ] == "dataset" : # El error está a nivel de un dataset particular o inferior
position = new_response [ "error" ] [ "dataset" ] [ error . path [ 1 ] ]
else : # El error está a nivel de catálogo
position = new_response [ "error" ] [ "catalog" ]
position [ "status" ] = "ERROR"
position [ "errors" ] . append ( error_info )
return new_response
|
def roles_accepted(*role_names):
    """| This decorator ensures that the current user is logged in,
    | and has *at least one* of the specified roles (OR operation).

    Example::

        @route('/edit_article')
        @roles_accepted('Writer', 'Editor')
        def edit_article():  # User must be 'Writer' OR 'Editor'

    | Calls unauthenticated_view() when the user is not logged in
      or when user has not confirmed their email address.
    | Calls unauthorized_view() when the user does not have the required roles.
    | Calls the decorated view otherwise.
    """
    # The tuple is passed whole to has_roles:
    # roles_required(a, b) requires A AND B,
    # while roles_accepted([a, b]) requires A OR B.
    def wrapper(view_function):
        @wraps(view_function)  # Tells debuggers that this is a function wrapper
        def decorator(*args, **kwargs):
            user_manager = current_app.user_manager
            # User must be logged in with a confirmed email address.
            allowed = _is_logged_in_with_confirmed_email(user_manager)
            if not allowed:
                # Redirect to unauthenticated page.
                return user_manager.unauthenticated_view()
            # User must have at least one of the required roles.
            # NB: roles_required would call has_roles(*role_names): ('A', 'B') --> ('A', 'B')
            # But: roles_accepted must call has_roles(role_names): ('A', 'B') --< (('A', 'B'),)
            if not current_user.has_roles(role_names):
                # Redirect to the unauthorized page.
                return user_manager.unauthorized_view()
            # It's OK to call the view.
            return view_function(*args, **kwargs)
        return decorator
    return wrapper
|
def dump_bulk(cls, parent=None, keep_ids=True):
    """Dumps a tree branch to a python data structure.

    :param parent: node whose subtree to dump; the whole tree when None.
    :param keep_ids: include each node's primary key under 'id'.
    :returns: a list of nested node dicts ({'data': ..., 'id': ...,
        'children': [...]}) suitable for load_bulk.
    """
    cls = get_result_class(cls)
    # Because of fix_tree, this method assumes that the depth
    # and numchild properties in the nodes can be incorrect,
    # so no helper methods are used.
    qset = cls._get_serializable_model().objects.all()
    if parent:
        qset = qset.filter(path__startswith=parent.path)
    ret, lnk = [], {}
    for pyobj in serializers.serialize('python', qset):
        # django's serializer stores the attributes in 'fields'
        fields = pyobj['fields']
        path = fields['path']
        # Depth is derived from the materialized path length.
        depth = int(len(path) / cls.steplen)
        # this will be useless in load_bulk
        del fields['depth']
        del fields['path']
        del fields['numchild']
        if 'id' in fields:
            # this happens immediately after a load_bulk
            del fields['id']
        newobj = {'data': fields}
        if keep_ids:
            newobj['id'] = pyobj['pk']
        if (not parent and depth == 1) or \
                (parent and len(path) == len(parent.path)):
            # Top of the requested branch: goes straight into the result.
            ret.append(newobj)
        else:
            # Attach under the parent found via its base path; relies on
            # qset ordering yielding parents before their children.
            parentpath = cls._get_basepath(path, depth - 1)
            parentobj = lnk[parentpath]
            if 'children' not in parentobj:
                parentobj['children'] = []
            parentobj['children'].append(newobj)
        lnk[path] = newobj
    return ret
|
def listen(self, callback=None, timeout=(5, 300)):
    """Start the &listen long poll and return immediately.

    Returns False when a poll is already running, True otherwise.
    """
    if self._running:
        return False
    self._queue = Queue()
    self._running = True
    self._timeout = timeout
    self._callback_listen = callback
    # One thread polls, the other consumes the queue.
    for target in (self._thread_listen, self._thread_worker):
        threading.Thread(target=target, args=()).start()
    return True
|
def set_relocated_name(self, name, rr_name):
    # type: (str, str) -> None
    '''Set the name of the relocated directory on a Rock Ridge ISO.

    The ISO must be a Rock Ridge one, and must not have previously had
    the relocated name set (setting the same pair again is a no-op).

    Parameters:
        name - The name for a relocated directory.
        rr_name - The Rock Ridge name for a relocated directory.
    Returns:
        Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInvalidInput('This object is not yet initialized; call either open() or new() to create an ISO')
    if not self.rock_ridge:
        raise pycdlibexception.PyCdlibInvalidInput('Can only set the relocated name on a Rock Ridge ISO')
    encoded_name = name.encode('utf-8')
    encoded_rr_name = rr_name.encode('utf-8')
    if self._rr_moved_name is not None:
        # Re-setting the identical pair is allowed; anything else is not.
        unchanged = (self._rr_moved_name == encoded_name and
                     self._rr_moved_rr_name == encoded_rr_name)
        if not unchanged:
            raise pycdlibexception.PyCdlibInvalidInput('Changing the existing rr_moved name is not allowed')
        return
    _check_iso9660_directory(encoded_name, self.interchange_level)
    self._rr_moved_name = encoded_name
    self._rr_moved_rr_name = encoded_rr_name
|
def _generate_examples ( self , archive , directory ) :
"""Generate IMDB examples ."""
|
reg = re . compile ( os . path . join ( "^%s" % directory , "(?P<label>neg|pos)" , "" ) )
for path , imdb_f in archive :
res = reg . match ( path )
if not res :
continue
text = imdb_f . read ( ) . strip ( )
yield { "text" : text , "label" : res . groupdict ( ) [ "label" ] , }
|
def process(self, plugin, context, instance=None, action=None):
    """Transmit a `process` request to host.

    Arguments:
        plugin (PluginProxy): Plug-in to process
        context (ContextProxy): Filtered context; kept for interface
            compatibility (not transmitted).
        instance (InstanceProxy, optional): Instance to process
        action (str, optional): Action to process
    """
    serialised_plugin = plugin.to_json()
    serialised_instance = None if instance is None else instance.to_json()
    return self._dispatch("process",
                          args=[serialised_plugin, serialised_instance, action])
|
async def _init_clean(self):
    """Must be called when the agent is starting.

    Resets container bookkeeping, recreates the temporary directory,
    (re)connects to Docker, discovers containers and guesses the external
    host IP for remote SSH debugging.
    """
    # Data about running containers
    self._containers_running = {}
    self._container_for_job = {}
    self._student_containers_running = {}
    self._student_containers_for_job = {}
    self._containers_killed = dict()
    # Delete tmp_dir, and recreate it again; both steps are best-effort.
    try:
        await self._ashutil.rmtree(self._tmp_dir)
    except OSError:
        pass
    try:
        await self._aos.mkdir(self._tmp_dir)
    except OSError:
        pass
    # Docker
    self._docker = AsyncProxy(DockerInterface())
    # Auto discover containers
    self._logger.info("Discovering containers")
    self._containers = await self._docker.get_containers()
    # container_id: [external_ports]
    self._assigned_external_ports = {}
    if self._address_host is None and len(self._containers) != 0:
        # Guess the external IP from any one of the known containers.
        self._logger.info("Guessing external host IP")
        self._address_host = await self._docker.get_host_ip(next(iter(self._containers.values()))["id"])
    if self._address_host is None:
        self._logger.warning("Cannot find external host IP. Please indicate it in the configuration. Remote SSH debug has been deactivated.")
        self._external_ports = None
    else:
        self._logger.info("External address for SSH remote debug is %s", self._address_host)
    # Watchers
    self._timeout_watcher = TimeoutWatcher(self._docker)
|
def asStructTime(self, tzinfo=None):
    """Return this time represented as a time.struct_time.

    tzinfo is a datetime.tzinfo instance corresponding to the desired
    timezone of the output. If it is the default None, UTC is assumed.
    """
    dtime = self.asDatetime(tzinfo)
    # With no timezone requested, normalise to UTC.
    return dtime.utctimetuple() if tzinfo is None else dtime.timetuple()
|
def make_cand_plot(d, im, data, loclabel, version=2, snrs=None, outname=''):
    """Build a new candidate plot, distinct from the original plots
    produced by make_cand_plot.

    Expects phased, dedispersed data (cut out in time, dual-pol), an
    image, and metadata.

    :param d: metadata dict used to label the plots (keys read here:
        'radec', 'fileroot', 'dmarr', 'dtarr', 'freq', 'freq_orig',
        'inttime', 'source', 'uvres', 'urange', 'vrange',
        'uvoversample', 'workdir').
    :param im: 2d candidate image (numpy array).
    :param data: complex cutout with axes (time, freq, pol) -- assumed
        from the swapaxes/indexing below; TODO confirm against caller.
    :param loclabel: (scan, segment, candint, dmind, dtind, beamnum),
        used to label the plot.
    :param version: 2 is the new plot (thanks to Bridget Andersen);
        1 is the initial one.
    :param snrs: optional array of SNRs for a histogram panel (version 2).
    :param outname: output png path; autogenerated from ``d`` if empty.
    """
    if snrs is None:
        # Avoid a mutable default argument; an empty array preserves the
        # original "no histogram" behavior (len(snrs) == 0).
        snrs = np.array([])
    # given d, im, data, make plot
    logger.info('Plotting...')
    logger.debug('(image, data) shape: (%s, %s)' % (str(im.shape), str(data.shape)))
    assert len(loclabel) == 6, 'loclabel should have (scan, segment, candint, dmind, dtind, beamnum)'
    scan, segment, candint, dmind, dtind, beamnum = loclabel

    # calc source location: use whichever image extremum has the larger
    # absolute significance
    snrmin = im.min() / im.std()
    snrmax = im.max() / im.std()
    if snrmax > -1 * snrmin:
        l1, m1 = rt.calc_lm(d, im, minmax='max')
        snrobs = snrmax
    else:
        l1, m1 = rt.calc_lm(d, im, minmax='min')
        snrobs = snrmin
    pt_ra, pt_dec = d['radec']
    src_ra, src_dec = source_location(pt_ra, pt_dec, l1, m1)
    logger.info('Peak (RA, Dec): %s, %s' % (src_ra, src_dec))
    # convert l1 and m1 from radians to arcminutes
    l1arcm = l1 * 180. * 60. / np.pi
    m1arcm = m1 * 180. * 60. / np.pi

    if version == 1:  # build plot
        fig = plt.Figure(figsize=(8.5, 8))
        # 'axisbg' was removed in matplotlib 2.2; 'facecolor' is the
        # supported spelling of the same option.
        ax = fig.add_subplot(221, facecolor='white')

        # add annotating info
        ax.text(0.1, 0.9, d['fileroot'], fontname='sans-serif', transform=ax.transAxes)
        ax.text(0.1, 0.8, 'sc %d, seg %d, int %d, DM %.1f, dt %d' % (scan, segment, candint, d['dmarr'][dmind], d['dtarr'][dtind]), fontname='sans-serif', transform=ax.transAxes)
        ax.text(0.1, 0.7, 'Peak: (' + str(np.round(l1, 3)) + ' ,' + str(np.round(m1, 3)) + '), SNR: ' + str(np.round(snrobs, 1)), fontname='sans-serif', transform=ax.transAxes)

        # plot dynamic spectra
        left, width = 0.6, 0.2
        bottom, height = 0.2, 0.7
        rect_dynsp = [left, bottom, width, height]
        rect_lc = [left, bottom - 0.1, width, 0.1]
        rect_sp = [left + width, bottom, 0.1, height]
        ax_dynsp = fig.add_axes(rect_dynsp)
        ax_lc = fig.add_axes(rect_lc)
        ax_sp = fig.add_axes(rect_sp)
        spectra = np.swapaxes(data.real, 0, 1)
        # seems that latest pickle actually contains complex values in spectra...
        # make array for display with white space between the two pols
        dd = np.concatenate((spectra[..., 0], np.zeros_like(spectra[..., 0]), spectra[..., 1]), axis=1)
        logger.debug('{0}'.format(dd.shape))
        impl = ax_dynsp.imshow(dd, origin='lower', interpolation='nearest', aspect='auto', cmap=plt.get_cmap('Greys'))
        ax_dynsp.text(0.5, 0.95, 'RR LL', horizontalalignment='center', verticalalignment='center', fontsize=16, color='w', transform=ax_dynsp.transAxes)
        ax_dynsp.set_yticks(range(0, len(d['freq']), 30))
        ax_dynsp.set_yticklabels(d['freq'][::30])
        ax_dynsp.set_ylabel('Freq (GHz)')
        ax_dynsp.set_xlabel('Integration (rel)')
        # assume pulse in middle bin. get stokes I spectrum.
        # **this is wrong in a minority of cases.**
        # // keeps the index integral under Python 3 (true division would
        # produce a float and raise IndexError).
        spectrum = spectra[:, len(spectra[0]) // 2].mean(axis=1)
        ax_sp.plot(spectrum, range(len(spectrum)), 'k.')
        ax_sp.plot(np.zeros(len(spectrum)), range(len(spectrum)), 'k:')
        ax_sp.set_ylim(0, len(spectrum))
        ax_sp.set_yticklabels([])
        xmin, xmax = ax_sp.get_xlim()
        ax_sp.set_xticks(np.linspace(xmin, xmax, 3).round(2))
        ax_sp.set_xlabel('Flux (Jy)')
        lc = dd.mean(axis=0)
        lenlc = len(data)
        # old (stupid) way: lenlc = np.where(lc == 0)[0][0]
        # ranges must be materialized as lists before concatenation in
        # Python 3 (range objects do not support +).
        ax_lc.plot(list(range(0, lenlc)) + list(range(2 * lenlc, 3 * lenlc)), list(lc)[:lenlc] + list(lc)[-lenlc:], 'k.')
        ax_lc.plot(list(range(0, lenlc)) + list(range(2 * lenlc, 3 * lenlc)), list(np.zeros(lenlc)) + list(np.zeros(lenlc)), 'k:')
        ax_lc.set_xlabel('Integration')
        ax_lc.set_ylabel('Flux (Jy)')
        ax_lc.set_xticks([0, 0.5 * lenlc, lenlc, 1.5 * lenlc, 2 * lenlc, 2.5 * lenlc, 3 * lenlc])
        ax_lc.set_xticklabels(['0', str(lenlc // 2), str(lenlc), '', '0', str(lenlc // 2), str(lenlc)])
        ymin, ymax = ax_lc.get_ylim()
        ax_lc.set_yticks(np.linspace(ymin, ymax, 3).round(2))

        # image
        ax = fig.add_subplot(223)
        fov = np.degrees(1. / d['uvres']) * 60.
        logger.debug('{0}'.format(im.shape))
        impl = ax.imshow(im.transpose(), aspect='equal', origin='upper', interpolation='nearest', extent=[fov / 2, -fov / 2, -fov / 2, fov / 2], cmap=plt.get_cmap('Greys'), vmin=0, vmax=0.5 * im.max())
        ax.set_xlabel('RA Offset (arcmin)')
        ax.set_ylabel('Dec Offset (arcmin)')
    elif version == 2:  # build overall plot
        fig = plt.Figure(figsize=(12.75, 8))

        # add metadata in subfigure ('facecolor' replaces the removed
        # 'axisbg' keyword)
        ax = fig.add_subplot(2, 3, 1, facecolor='white')

        # calculate the overall dispersion delay: dd
        f1 = d['freq_orig'][0]
        f2 = d['freq_orig'][len(d['freq_orig']) - 1]
        dd = 4.15 * d['dmarr'][dmind] * (f1 ** (-2) - f2 ** (-2))

        # add annotating info; these values determine the spacing and
        # location of the annotating information
        start = 1.1
        space = 0.07
        left = 0.0
        ax.text(left, start, d['fileroot'], fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - space, 'Peak (arcmin): (' + str(np.round(l1arcm, 3)) + ', ' + str(np.round(m1arcm, 3)) + ')', fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        # split the RA and Dec and display in a nice format
        ra = src_ra.split()
        dec = src_dec.split()
        ax.text(left, start - 2 * space, 'Peak (RA, Dec): (' + ra[0] + ':' + ra[1] + ':' + ra[2][0:4] + ', ' + dec[0] + ':' + dec[1] + ':' + dec[2][0:4] + ')', fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - 3 * space, 'Source: ' + str(d['source']), fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - 4 * space, 'scan: ' + str(scan), fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - 5 * space, 'segment: ' + str(segment), fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - 6 * space, 'integration: ' + str(candint), fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - 7 * space, 'DM = ' + str(d['dmarr'][dmind]) + ' (index ' + str(dmind) + ')', fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - 8 * space, 'dt = ' + str(np.round(d['inttime'] * d['dtarr'][dtind], 3) * 1e3) + ' ms' + ' (index ' + str(dtind) + ')', fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - 9 * space, 'disp delay = ' + str(np.round(dd, 1)) + ' ms', fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        ax.text(left, start - 10 * space, 'SNR: ' + str(np.round(snrobs, 1)), fontname='sans-serif', transform=ax.transAxes, fontsize='small')
        # set the plot invisible so that it doesn't interfere with annotations
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.spines['bottom'].set_color('white')
        ax.spines['top'].set_color('white')
        ax.spines['right'].set_color('white')
        ax.spines['left'].set_color('white')

        # plot full dynamic spectra
        left, width = 0.75, 0.2 * 2. / 3.
        bottom, height = 0.2, 0.7
        # three rectangles for each panel of the spectrum (RR, RR+LL, LL)
        rect_dynsp1 = [left, bottom, width / 3., height]
        rect_dynsp2 = [left + width / 3., bottom, width / 3., height]
        rect_dynsp3 = [left + 2. * width / 3., bottom, width / 3., height]
        rect_lc1 = [left, bottom - 0.1, width / 3., 0.1]
        rect_lc2 = [left + width / 3., bottom - 0.1, width / 3., 0.1]
        rect_lc3 = [left + 2. * width / 3., bottom - 0.1, width / 3., 0.1]
        rect_sp = [left + width, bottom, 0.1 * 2. / 3., height]
        ax_dynsp1 = fig.add_axes(rect_dynsp1)
        # sharey so that axes line up
        ax_dynsp2 = fig.add_axes(rect_dynsp2, sharey=ax_dynsp1)
        ax_dynsp3 = fig.add_axes(rect_dynsp3, sharey=ax_dynsp1)
        # make RR+LL and LL dynamic spectra y labels invisible so they
        # don't interfere with the plots
        [label.set_visible(False) for label in ax_dynsp2.get_yticklabels()]
        [label.set_visible(False) for label in ax_dynsp3.get_yticklabels()]
        ax_sp = fig.add_axes(rect_sp, sharey=ax_dynsp3)
        [label.set_visible(False) for label in ax_sp.get_yticklabels()]
        ax_lc1 = fig.add_axes(rect_lc1)
        ax_lc2 = fig.add_axes(rect_lc2, sharey=ax_lc1)
        ax_lc3 = fig.add_axes(rect_lc3, sharey=ax_lc1)
        [label.set_visible(False) for label in ax_lc2.get_yticklabels()]
        [label.set_visible(False) for label in ax_lc3.get_yticklabels()]

        # now actually plot the data
        spectra = np.swapaxes(data.real, 0, 1)
        dd1 = spectra[..., 0]
        dd2 = spectra[..., 0] + spectra[..., 1]
        dd3 = spectra[..., 1]
        colormap = 'viridis'
        logger.debug('{0}'.format(dd1.shape))
        logger.debug('{0}'.format(dd2.shape))
        logger.debug('{0}'.format(dd3.shape))
        impl1 = ax_dynsp1.imshow(dd1, origin='lower', interpolation='nearest', aspect='auto', cmap=plt.get_cmap(colormap))
        impl2 = ax_dynsp2.imshow(dd2, origin='lower', interpolation='nearest', aspect='auto', cmap=plt.get_cmap(colormap))
        impl3 = ax_dynsp3.imshow(dd3, origin='lower', interpolation='nearest', aspect='auto', cmap=plt.get_cmap(colormap))
        ax_dynsp1.set_yticks(range(0, len(d['freq']), 30))
        ax_dynsp1.set_yticklabels(d['freq'][::30])
        ax_dynsp1.set_ylabel('Freq (GHz)')
        ax_dynsp1.set_xlabel('RR')
        ax_dynsp1.xaxis.set_label_position('top')
        ax_dynsp2.set_xlabel('RR+LL')
        ax_dynsp2.xaxis.set_label_position('top')
        ax_dynsp3.set_xlabel('LL')
        ax_dynsp3.xaxis.set_label_position('top')
        # set xlabels invisible so that they don't interfere with lc plots
        [label.set_visible(False) for label in ax_dynsp1.get_xticklabels()]
        # this one y label was getting in the way
        ax_dynsp1.get_yticklabels()[0].set_visible(False)

        # plot stokes I spectrum of the candidate pulse (assume middle
        # bin); // keeps the index integral under Python 3
        spectrum = spectra[:, len(spectra[0]) // 2].mean(axis=1)
        ax_sp.plot(spectrum, range(len(spectrum)), 'k.')
        # plot 0 Jy dotted line
        ax_sp.plot(np.zeros(len(spectrum)), range(len(spectrum)), 'r:')
        xmin, xmax = ax_sp.get_xlim()
        ax_sp.set_xticks(np.linspace(xmin, xmax, 3).round(2))
        ax_sp.set_xlabel('Flux (Jy)')

        # plot mean flux values for each time bin
        lc1 = dd1.mean(axis=0)
        lc2 = dd2.mean(axis=0)
        lc3 = dd3.mean(axis=0)
        lenlc = len(data)
        ax_lc1.plot(range(0, lenlc), list(lc1)[:lenlc], 'k.')
        ax_lc2.plot(range(0, lenlc), list(lc2)[:lenlc], 'k.')
        ax_lc3.plot(range(0, lenlc), list(lc3)[:lenlc], 'k.')
        # plot 0 Jy dotted line for each plot
        ax_lc1.plot(range(0, lenlc), list(np.zeros(lenlc)), 'r:')
        ax_lc2.plot(range(0, lenlc), list(np.zeros(lenlc)), 'r:')
        ax_lc3.plot(range(0, lenlc), list(np.zeros(lenlc)), 'r:')
        ax_lc2.set_xlabel('Integration (rel)')
        ax_lc1.set_ylabel('Flux (Jy)')
        # note: only show the '0' label for one of the plots to avoid
        # messy overlap
        ax_lc1.set_xticks([0, 0.5 * lenlc, lenlc])
        ax_lc1.set_xticklabels(['0', str(lenlc // 2), str(lenlc)])
        ax_lc2.set_xticks([0, 0.5 * lenlc, lenlc])
        ax_lc2.set_xticklabels(['', str(lenlc // 2), str(lenlc)])
        ax_lc3.set_xticks([0, 0.5 * lenlc, lenlc])
        ax_lc3.set_xticklabels(['', str(lenlc // 2), str(lenlc)])
        ymin, ymax = ax_lc1.get_ylim()
        ax_lc1.set_yticks(np.linspace(ymin, ymax, 3).round(2))
        # readjust the x tick marks on the dynamic spectra so that they
        # line up with the lc plots
        ax_dynsp1.set_xticks([0, 0.5 * lenlc, lenlc])
        ax_dynsp2.set_xticks([0, 0.5 * lenlc, lenlc])
        ax_dynsp3.set_xticks([0, 0.5 * lenlc, lenlc])

        # plot second set of dynamic spectra (averaged across frequency
        # bins to get SNR=2 for the detected candidate)
        left, width = 0.45, 0.1333
        bottom, height = 0.1, 0.4
        rect_dynsp1 = [left, bottom, width / 3., height]
        rect_dynsp2 = [left + width / 3., bottom, width / 3., height]
        rect_dynsp3 = [left + 2. * width / 3., bottom, width / 3., height]
        rect_sp = [left + width, bottom, 0.1 * 2. / 3., height]
        ax_dynsp1 = fig.add_axes(rect_dynsp1)
        ax_dynsp2 = fig.add_axes(rect_dynsp2, sharey=ax_dynsp1)
        ax_dynsp3 = fig.add_axes(rect_dynsp3, sharey=ax_dynsp1)
        # make RR+LL and LL dynamic spectra y labels invisible so they
        # don't interfere with the plots
        [label.set_visible(False) for label in ax_dynsp2.get_yticklabels()]
        [label.set_visible(False) for label in ax_dynsp3.get_yticklabels()]
        ax_sp = fig.add_axes(rect_sp, sharey=ax_dynsp3)
        [label.set_visible(False) for label in ax_sp.get_yticklabels()]

        # calculate the number of frequency rows to average together
        # (make the plot have an SNR of 2)
        n = int((2. * (len(spectra)) ** 0.5 / snrobs) ** 2)
        if n == 0:
            # don't average any (avoids errors for modding and dividing by 0)
            dd1avg = dd1
            dd3avg = dd3
        else:
            # otherwise, pad the data with zeros so that its length is
            # cleanly divisible by n (makes it easier to average over)
            dd1zerotemp = np.concatenate((np.zeros((n - len(spectra) % n, len(spectra[0])), dtype=dd1.dtype), dd1), axis=0)
            dd3zerotemp = np.concatenate((np.zeros((n - len(spectra) % n, len(spectra[0])), dtype=dd3.dtype), dd3), axis=0)
            # make them masked arrays so that the appended zeros do not
            # affect the average calculation
            zeros = np.zeros((len(dd1), len(dd1[0])))
            ones = np.ones((n - len(spectra) % n, len(dd1[0])))
            masktemp = np.concatenate((ones, zeros), axis=0)
            dd1zero = ma.masked_array(dd1zerotemp, mask=masktemp)
            dd3zero = ma.masked_array(dd3zerotemp, mask=masktemp)
            # average together the data, channel by channel
            dd1avg = np.array([], dtype=dd1.dtype)
            for i in range(len(spectra[0])):
                temp = dd1zero[:, i].reshape(-1, n)
                tempavg = np.reshape(np.mean(temp, axis=1), (len(temp), 1))
                # repeat the mean values to create more pixels (easier to
                # properly crop when it is finally displayed)
                temprep = np.repeat(tempavg, n, axis=0)
                if i == 0:
                    dd1avg = temprep
                else:
                    dd1avg = np.concatenate((dd1avg, temprep), axis=1)
            dd3avg = np.array([], dtype=dd3.dtype)
            for i in range(len(spectra[0])):
                temp = dd3zero[:, i].reshape(-1, n)
                tempavg = np.reshape(np.mean(temp, axis=1), (len(temp), 1))
                temprep = np.repeat(tempavg, n, axis=0)
                if i == 0:
                    dd3avg = temprep
                else:
                    dd3avg = np.concatenate((dd3avg, temprep), axis=1)
        # add together to get averaged RR+LL spectrum
        dd2avg = dd1avg + dd3avg
        colormap = 'viridis'
        if n == 0:
            # again, if n == 0 then don't crop the spectra because no
            # zeros were appended
            dd1avgcrop = dd1avg
            dd2avgcrop = dd2avg
            dd3avgcrop = dd3avg
        else:
            # otherwise, crop off the appended zeros
            dd1avgcrop = dd1avg[len(ones):len(dd1avg), :]
            dd2avgcrop = dd2avg[len(ones):len(dd2avg), :]
            dd3avgcrop = dd3avg[len(ones):len(dd3avg), :]
        logger.debug('{0}'.format(dd1avgcrop.shape))
        logger.debug('{0}'.format(dd2avgcrop.shape))
        logger.debug('{0}'.format(dd3avgcrop.shape))
        impl1 = ax_dynsp1.imshow(dd1avgcrop, origin='lower', interpolation='nearest', aspect='auto', cmap=plt.get_cmap(colormap))
        impl2 = ax_dynsp2.imshow(dd2avgcrop, origin='lower', interpolation='nearest', aspect='auto', cmap=plt.get_cmap(colormap))
        impl3 = ax_dynsp3.imshow(dd3avgcrop, origin='lower', interpolation='nearest', aspect='auto', cmap=plt.get_cmap(colormap))
        ax_dynsp1.set_yticks(range(0, len(d['freq']), 30))
        ax_dynsp1.set_yticklabels(d['freq'][::30])
        ax_dynsp1.set_ylabel('Freq (GHz)')
        ax_dynsp1.set_xlabel('RR')
        ax_dynsp1.xaxis.set_label_position('top')
        ax_dynsp2.set_xlabel('Integration (rel)')
        ax2 = ax_dynsp2.twiny()
        ax2.set_xlabel('RR+LL')
        [label.set_visible(False) for label in ax2.get_xticklabels()]
        ax_dynsp3.set_xlabel('LL')
        ax_dynsp3.xaxis.set_label_position('top')

        # plot stokes I spectrum of the candidate pulse in the averaged
        # data (assume middle bin; // for an integral index)
        ax_sp.plot(dd2avgcrop[:, len(dd2avgcrop[0]) // 2] / 2., range(len(dd2avgcrop)), 'k.')
        ax_sp.plot(np.zeros(len(dd2avgcrop)), range(len(dd2avgcrop)), 'r:')
        xmin, xmax = ax_sp.get_xlim()
        ax_sp.set_xticks(np.linspace(xmin, xmax, 3).round(2))
        ax_sp.get_xticklabels()[0].set_visible(False)
        ax_sp.set_xlabel('Flux (Jy)')
        # readjust the x tick marks on the dynamic spectra
        ax_dynsp1.set_xticks([0, 0.5 * lenlc, lenlc])
        ax_dynsp1.set_xticklabels(['0', str(lenlc // 2), str(lenlc)])
        ax_dynsp2.set_xticks([0, 0.5 * lenlc, lenlc])
        ax_dynsp2.set_xticklabels(['', str(lenlc // 2), str(lenlc)])
        ax_dynsp3.set_xticks([0, 0.5 * lenlc, lenlc])
        ax_dynsp3.set_xticklabels(['', str(lenlc // 2), str(lenlc)])

        # plot the image and zoomed cutout
        ax = fig.add_subplot(2, 3, 4)
        fov = np.degrees(1. / d['uvres']) * 60.
        impl = ax.imshow(im.transpose(), aspect='equal', origin='upper', interpolation='nearest', extent=[fov / 2, -fov / 2, -fov / 2, fov / 2], cmap=plt.get_cmap('viridis'), vmin=0, vmax=0.5 * im.max())
        ax.set_xlabel('RA Offset (arcmin)')
        ax.set_ylabel('Dec Offset (arcmin)')
        # stop the plot from autoscaling when we plot the triangles that
        # label the location
        ax.autoscale(False)
        # add markers on the axes to indicate the measured position of
        # the candidate
        ax.scatter(x=[l1arcm], y=[-fov / 2], c='#ffff00', s=60, marker='^', clip_on=False)
        ax.scatter(x=[fov / 2], y=[m1arcm], c='#ffff00', s=60, marker='>', clip_on=False)
        # makes it so the axis does not intersect the location triangles
        # (for cosmetic reasons)
        ax.set_frame_on(False)

        # add a zoomed cutout image of the candidate (set width at
        # 5*synthesized beam). dict views are not indexable in Python 3,
        # so take an arbitrary first key with next(iter(...)).
        key = next(iter(d['vrange']))
        umax = d['urange'][key]
        vmax = d['vrange'][key]
        uvdist = (umax ** 2 + vmax ** 2) ** 0.5
        # calculate synthesized beam in arcminutes
        sbeam = np.degrees(d['uvoversample'] / uvdist) * 60.
        # figure out the location to center the zoomed image on
        xratio = len(im[0]) / fov  # pix/arcmin
        yratio = len(im) / fov  # pix/arcmin
        mult = 5  # sets how many times the synthesized beam the zoomed FOV is
        # floor division keeps the pixel midpoint integral, matching the
        # original (Python 2) arithmetic
        xmin = max(0, int(len(im[0]) // 2 - (m1arcm + sbeam * mult) * xratio))
        xmax = int(len(im[0]) // 2 - (m1arcm - sbeam * mult) * xratio)
        ymin = max(0, int(len(im) // 2 - (l1arcm + sbeam * mult) * yratio))
        ymax = int(len(im) // 2 - (l1arcm - sbeam * mult) * yratio)
        left, width = 0.231, 0.15
        bottom, height = 0.465, 0.15
        rect_imcrop = [left, bottom, width, height]
        ax_imcrop = fig.add_axes(rect_imcrop)
        logger.debug('{0}'.format(im.transpose()[xmin:xmax, ymin:ymax].shape))
        logger.debug('{0} {1} {2} {3}'.format(xmin, xmax, ymin, ymax))
        impl = ax_imcrop.imshow(im.transpose()[xmin:xmax, ymin:ymax], aspect=1, origin='upper', interpolation='nearest', extent=[-1, 1, -1, 1], cmap=plt.get_cmap('viridis'), vmin=0, vmax=0.5 * im.max())
        # setup the axes
        ax_imcrop.set_ylabel('Dec (arcmin)')
        ax_imcrop.set_xlabel('RA (arcmin)')
        ax_imcrop.xaxis.set_label_position('top')
        ax_imcrop.xaxis.tick_top()
        xlabels = [str(np.round(l1arcm + sbeam * mult / 2, 1)), '', str(np.round(l1arcm, 1)), '', str(np.round(l1arcm - sbeam * mult / 2, 1))]
        ylabels = [str(np.round(m1arcm - sbeam * mult / 2, 1)), '', str(np.round(m1arcm, 1)), '', str(np.round(m1arcm + sbeam * mult / 2, 1))]
        ax_imcrop.set_xticklabels(xlabels)
        ax_imcrop.set_yticklabels(ylabels)
        # change axis label location of inset so it doesn't interfere
        # with the full picture
        ax_imcrop.get_yticklabels()[0].set_verticalalignment('bottom')

        # create SNR versus N histogram for the whole observation
        # (properties for each candidate in the observation given by prop)
        if len(snrs):
            left, width = 0.45, 0.2
            bottom, height = 0.6, 0.3
            rect_snr = [left, bottom, width, height]
            ax_snr = fig.add_axes(rect_snr)
            pos_snrs = snrs[snrs >= 0]
            neg_snrs = snrs[snrs < 0]
            if not len(neg_snrs):
                # if working with subset and only positive snrs
                neg_snrs = pos_snrs
                nonegs = True
            else:
                nonegs = False
            minval = 5.5
            maxval = 8.0
            # determine the min and max values of the x axis
            if min(pos_snrs) < min(np.abs(neg_snrs)):
                minval = min(pos_snrs)
            else:
                minval = min(np.abs(neg_snrs))
            if max(pos_snrs) > max(np.abs(neg_snrs)):
                maxval = max(pos_snrs)
            else:
                maxval = max(np.abs(neg_snrs))
            # positive SNR bins are in blue; absolute values of negative
            # SNR bins are taken and plotted as red x's on top of the
            # positive blue bins for compactness
            counts, edges, patches = ax_snr.hist(pos_snrs, 50, (minval, maxval), facecolor='blue', zorder=1)
            vals, bin_edges = np.histogram(np.abs(neg_snrs), 50, (minval, maxval))
            bins = np.array([(bin_edges[i] + bin_edges[i + 1]) / 2. for i in range(len(vals))])
            vals = np.array(vals)
            if not nonegs:
                ax_snr.scatter(bins[vals > 0], vals[vals > 0], marker='x', c='orangered', alpha=1.0, zorder=2)
            ax_snr.set_xlabel('SNR')
            ax_snr.set_xlim(left=minval - 0.2)
            ax_snr.set_xlim(right=maxval + 0.2)
            ax_snr.set_ylabel('N')
            ax_snr.set_yscale('log')
            # draw vertical line where the candidate SNR is
            ax_snr.axvline(x=np.abs(snrobs), linewidth=1, color='y', alpha=0.7)
    else:
        # logging's 'warn' is deprecated; 'warning' is the supported name
        logger.warning('make_cand_plot version not recognized.')

    if not outname:
        outname = os.path.join(d['workdir'], 'cands_{}_sc{}-seg{}-i{}-dm{}-dt{}.png'.format(d['fileroot'], scan, segment, candint, dmind, dtind))
    try:
        canvas = FigureCanvasAgg(fig)
        canvas.print_figure(outname)
    except ValueError:
        logger.warning('Could not write figure to %s' % outname)
|
def command_u2k(string, vargs):
    """Print the Kirshenbaum ASCII string corresponding to the given
    Unicode IPA string.

    :param str string: the string to act upon
    :param dict vargs: the command line arguments
    """
    try:
        pieces = KirshenbaumMapper().map_unicode_string(
            unicode_string=string,
            ignore=vargs["ignore"],
            single_char_parsing=vargs["single_char_parsing"],
            return_as_list=True,
        )
        print(vargs["separator"].join(pieces))
    except ValueError as exc:
        print_error(str(exc))
|
def can_patch(self, filename):
    """Check if the specified filename can be patched.

    :returns: None if the file cannot be found among the source
        filenames, False if the patch cannot be applied cleanly,
        True otherwise.
    """
    target = abspath(filename)
    for item in self.items:
        if abspath(item.source) != target:
            continue
        return self._match_file_hunks(target, item.hunks)
    return None
|
def _convert_to_folder ( self , packages ) :
"""Silverstripe ' s page contains a list of composer packages . This
function converts those to folder names . These may be different due
to installer - name .
Implemented exponential backoff in order to prevent packager from
being overly sensitive about the number of requests I was making .
@ see : https : / / github . com / composer / installers # custom - install - names
@ see : https : / / github . com / richardsjoqvist / silverstripe - localdate / issues / 7"""
|
url = 'http://packagist.org/p/%s.json'
with ThreadPoolExecutor ( max_workers = 12 ) as executor :
futures = [ ]
for package in packages :
future = executor . submit ( self . _get , url , package )
futures . append ( { 'future' : future , 'package' : package } )
folders = [ ]
for i , future in enumerate ( futures , start = 1 ) :
r = future [ 'future' ] . result ( )
package = future [ 'package' ]
if not 'installer-name' in r . text :
folder_name = package . split ( '/' ) [ 1 ]
else :
splat = list ( filter ( None , re . split ( r'[^a-zA-Z0-9-_.,]' , r . text ) ) )
folder_name = splat [ splat . index ( 'installer-name' ) + 1 ]
if not folder_name in folders :
folders . append ( folder_name )
else :
print ( "Folder %s is duplicated (current %s, previous %s)" % ( folder_name , package , folders . index ( folder_name ) ) )
if i % 25 == 0 :
print ( "Done %s." % i )
return folders
|
def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    parts = [] if header is None else [header]
    for name, val in iteritems(options):
        # A None value means a bare option name with no '=value' part.
        parts.append(name if val is None else "%s=%s" % (name, quote_header_value(val)))
    return "; ".join(parts)
|
def _log_players ( self , players ) :
""": param players : list of catan . game . Player objects"""
|
self . _logln ( 'players: {0}' . format ( len ( players ) ) )
for p in self . _players :
self . _logln ( 'name: {0}, color: {1}, seat: {2}' . format ( p . name , p . color , p . seat ) )
|
def get_url(width, height=None, background_color="cccccc", text_color="969696", text=None, random_background_color=False):
    """Craft the URL for a placeholder image.

    Background color, text color and text can be customized via the
    optional keyword arguments. Pass random_background_color=True to
    use a randomly chosen background color instead.
    """
    if random_background_color:
        background_color = _get_random_color()
    # If height is not provided, presume the image will be a square.
    if not height:
        height = width
    url = URL % dict(width=width, height=height, bcolor=background_color, tcolor=text_color)
    if text:
        url += "?text=" + text.replace(" ", "+")
    return url
|
def validate_character_instance_valid_for_arc(sender, instance, action, reverse, pk_set, *args, **kwargs):
    '''Evaluate attempts to assign a character instance to ensure it is
    from the same outline.'''
    if action != 'pre_add':
        return
    if reverse:
        # Fetch arc definition through link.
        for apk in pk_set:
            arc_node = ArcElementNode.objects.get(pk=apk)
            if arc_node.parent_outline != instance.outline:
                raise IntegrityError(_('Character Instance and Arc Element must be from same outline.'))
    else:
        for cpk in pk_set:
            char_instance = CharacterInstance.objects.get(pk=cpk)
            if char_instance.outline != instance.parent_outline:
                raise IntegrityError(_('Character Instance and Arc Element must be from the same outline.'))
|
def fire_event(self, event_name, wait=False, *args, **kwargs):
    """Fire an event to plugins.

    PluginManager schedules coroutine calls for each plugin on the
    method named "on_" + event_name; for example, on_connect will be
    called on event 'connect'. Method calls are scheduled in the async
    loop.

    :param event_name: name of the event (without the "on_" prefix)
    :param args: positional arguments forwarded to each plugin handler
    :param kwargs: keyword arguments forwarded to each plugin handler
    :param wait: if True, yield until all scheduled handler tasks have
        completed; if False, return immediately after scheduling
    :return:
    """
    tasks = []
    event_method_name = "on_" + event_name
    for plugin in self._plugins:
        event_method = getattr(plugin.object, event_method_name, None)
        if event_method:
            try:
                task = self._schedule_coro(event_method(*args, **kwargs))
                tasks.append(task)

                # Bind ``task`` as a default argument: a plain closure
                # would late-bind to the loop variable, so every
                # done-callback would try to remove only the *last*
                # scheduled task from _fired_events, leaking the others.
                def clean_fired_events(future, task=task):
                    try:
                        self._fired_events.remove(task)
                    except (KeyError, ValueError):
                        pass

                task.add_done_callback(clean_fired_events)
            except AssertionError:
                self.logger.error("Method '%s' on plugin '%s' is not a coroutine" % (event_method_name, plugin.name))
    self._fired_events.extend(tasks)
    if wait:
        if tasks:
            yield from asyncio.wait(tasks, loop=self._loop)
|
def sink_create(self, project, sink_name, filter_, destination, unique_writer_identity=False):
    """API call: create a sink resource.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create

    :type project: str
    :param project: ID of the project in which to create the sink.

    :type sink_name: str
    :param sink_name: the name of the sink

    :type filter_: str
    :param filter_: the advanced logs filter expression defining the
        entries exported by the sink.

    :type destination: str
    :param destination: destination URI for the entries exported by
        the sink.

    :type unique_writer_identity: bool
    :param unique_writer_identity: (Optional) determines the kind of
        IAM identity returned as writer_identity in the new sink.

    :rtype: dict
    :returns: The sink resource returned from the API (converted from a
        protobuf to a dictionary).
    """
    resource = LogSink(name=sink_name, filter=filter_, destination=destination)
    parent_path = "projects/%s" % (project,)
    response = self._gapic_api.create_sink(parent_path, resource, unique_writer_identity=unique_writer_identity)
    return MessageToDict(response)
|
def Partial(func, **kwargs):
    """Allow the use of partially applied functions in the
    configuration.

    ``func`` may be a callable or a dotted-path string, which is
    resolved first. The returned partial carries the wrapped function's
    metadata (name, docstring, ...).
    """
    target = resolve_dotted_name(func) if isinstance(func, str) else func
    wrapped = partial(target, **kwargs)
    update_wrapper(wrapped, target)
    return wrapped
|
def download(queries, user=None, pwd=None, email=None, pred_type='and'):
    """Spin up a download request for GBIF occurrence data.

    :param queries: One or more query strings of the form
        ``'<key> <operator> <value>'`` (e.g. ``'country = US'``), with a
        space between key, operator, and value; each is parsed internally
        into a download predicate.  See the GBIF predicate docs for the
        accepted keys and operators:
        http://www.gbif.org/developer/occurrence#predicates
    :type queries: str or list
    :param user: (character) User name within GBIF's website. Required.
        Falls back to the ``GBIF_USER`` environment variable.
    :param pwd: (character) User password within GBIF's website. Required.
        Falls back to the ``GBIF_PWD`` environment variable.
    :param email: (character) Email address to receive the download-done
        notice. Required. Falls back to the ``GBIF_EMAIL`` env variable.
    :param pred_type: (character) Predicate combinator when several queries
        are given, e.g. ``and`` (``&``), ``or`` (``|``), ``equals`` (``=``),
        ``lessThan`` (``<``), ``greaterThanOrEquals`` (``>=``), ``in``,
        ``within``, ``not`` (``!``), ``like``.

    GBIF has a limit of 12,000 characters for download queries, so very
    long, complex requests should be broken up into multiple requests.

    :return: tuple of the server response and the request payload

    Usage::

        from pygbif import occurrences as occ
        occ.download('basisOfRecord = LITERATURE')
        occ.download('taxonKey = 3119195')
        occ.download(['taxonKey = 7264332', 'hasCoordinate = TRUE'])
        # pass output to download_meta for more information
        occ.download_meta(occ.download('decimalLatitude > 75'))
    """
    # Resolve credentials, falling back to environment variables.
    user = _check_environ('GBIF_USER', user)
    pwd = _check_environ('GBIF_PWD', pwd)
    email = _check_environ('GBIF_EMAIL', email)
    if isinstance(queries, str):
        queries = [queries]
    # Parse each 'key op value' string into a predicate triple.
    predicates = [_parse_args(query) for query in queries]
    # Use the GbifDownload class to assemble the predicates.
    request = GbifDownload(user, email)
    request.main_pred_type = pred_type
    for pred in predicates:
        request.add_predicate(pred['key'], pred['value'], pred['type'])
    response = request.post_download(user, pwd)
    return response, request.payload
|
def url(self) -> str:
    """Return the request path plus (when present) its query string."""
    parsed = self.parse_url
    base = parsed.path if parsed.path else ""
    if parsed.querystring is None:
        return base
    return base + "?" + parsed.querystring
|
def play(quiet, session_file, shell, speed, prompt, commentecho):
    """Play a session file."""
    lines = session_file.readlines()
    run(
        lines,
        shell=shell,
        speed=speed,
        quiet=quiet,
        test_mode=TESTING,
        prompt_template=prompt,
        commentecho=commentecho,
    )
|
def apply(self, data):
    """Apply gain and delay calibration solutions to a visibility array in place.

    Assumes ``data`` has structure (nint, nbl, nch, npol).

    :param data: complex visibility array; modified in place.
    """
    # find best skyfreq for each channel; one solution set per spw
    skyfreqs = n.unique(self.skyfreq[self.select])
    nch_tot = len(self.freqs)
    nspw = len(skyfreqs)
    # Divide channels evenly among the spw solutions.  Integer division
    # (//) is required: true division yields floats on Python 3, which
    # break range() and array indexing.
    chan_bandnum = [range(nch_tot * i // nspw, nch_tot * (i + 1) // nspw)
                    for i in range(nspw)]
    self.logger.info('Solutions for %d spw: (%s)' % (nspw, skyfreqs))
    for j in range(nspw):
        skyfreq = skyfreqs[j]
        chans = chan_bandnum[j]
        self.logger.info('Applying gain solution for chans from %d-%d' % (chans[0], chans[-1]))
        # Frequency grid relative to the central reference channel, used
        # to turn delays into per-channel phase rotations.
        nch = len(chans)
        chanref = nch // 2  # reference channel at center (integer index)
        relfreq = self.chansize * (n.arange(nch) - chanref)  # relative frequency
        for i in range(len(self.blarr)):
            ant1, ant2 = self.blarr[i]  # ant numbers (1-based)
            for pol in self.polind:
                # apply gain correction
                invg1g2 = self.calcgain(ant1, ant2, skyfreq, pol)
                data[:, i, chans, pol - self.polind[0]] = data[:, i, chans, pol - self.polind[0]] * invg1g2
                # hack: lousy data pol indexing
                # apply delay correction: phase ramp across the band
                d1d2 = self.calcdelay(ant1, ant2, skyfreq, pol)
                delayrot = 2 * n.pi * (d1d2[0] * 1e-9) * relfreq
                data[:, i, chans, pol - self.polind[0]] = data[:, i, chans, pol - self.polind[0]] * n.exp(-1j * delayrot[None, None, :])
|
def is_label_dataframe(label, df):
    """Check that every entry of *label* exists as a column of *df*.

    :param label: iterable of column names to check
    :param df: pandas DataFrame whose columns are checked
    :return: True when all labels are present, False otherwise
    """
    # set.issubset states the intent directly, replacing the explicit
    # set-difference plus if/else-over-a-boolean of the original.
    return set(label).issubset(df.columns)
|
def flush_all(self, time):
    """Send a command to server flush|delete all keys.

    :param time: Time to wait until flush in seconds.
    :type time: int
    :return: True in case of success (on failure a MemcachedException is
        raised rather than False being returned)
    :rtype: bool
    :raises MemcachedException: when the server replies with a status that
        is neither success nor server_disconnected
    """
    logger.info('Flushing memcached')
    # Pack and send the binary "flush" request; the trailing fields are the
    # header values (data type, status, body length, opaque, cas) followed
    # by the flush delay as the 4-byte extras payload.
    self._send(struct.pack(self.HEADER_STRUCT + self.COMMANDS['flush']['struct'], self.MAGIC['request'], self.COMMANDS['flush']['command'], 0, 4, 0, 0, 4, 0, 0, time))
    # Unpack the server's response header.
    (magic, opcode, keylen, extlen, datatype, status, bodylen, opaque, cas, extra_content) = self._get_response()
    # A disconnected server is tolerated; any other non-success status is
    # surfaced to the caller as an exception.
    if status not in (self.STATUS['success'], self.STATUS['server_disconnected']):
        raise MemcachedException('Code: %d message: %s' % (status, extra_content), status)
    logger.debug('Memcached flushed')
    return True
|
def translify(in_string, strict=True):
    """Translify russian text.

    @param in_string: input string
    @type in_string: C{unicode}
    @param strict: raise error if transliteration is incomplete
        (True by default)
    @type strict: C{bool}
    @return: transliterated string
    @rtype: C{str}
    @raise ValueError: when string doesn't transliterate completely.
        Raised only if strict=True
    """
    result = in_string
    for src, dst in TRANSTABLE:
        result = result.replace(src, dst)
    # In strict mode, any remaining non-ASCII character means the
    # transliteration table did not cover the input completely.
    if strict and not all(ord(ch) <= 128 for ch in result):
        raise ValueError("Unicode string doesn't transliterate completely, " + "is it russian?")
    return result
|
def min(self):
    """Return the minimum value in this histogram.

    If there are no values in the histogram at all, return 10.

    Returns:
        int: The minimum value in the histogram.
    """
    if not self._data:
        return 10
    # smallest key of the histogram's backing mapping
    return sorted(self._data)[0]
|
def variant_filtration(call_file, ref_file, vrn_files, data, items):
    """Filter variant calls using Variant Quality Score Recalibration.

    Newer GATK with Haplotype calling has combined SNP/indel filtering.
    """
    caller = data["config"]["algorithm"].get("variantcaller")
    if "gvcf" not in dd.get_tools_on(data):
        call_file = ploidy.filter_vcf_by_sex(call_file, items)
    if caller == "freebayes":
        return vfilter.freebayes(call_file, ref_file, vrn_files, data)
    if caller == "platypus":
        return vfilter.platypus(call_file, data)
    if caller == "samtools":
        return vfilter.samtools(call_file, data)
    if caller in ("gatk", "gatk-haplotype", "haplotyper"):
        if dd.get_analysis(data).lower().find("rna-seq") >= 0:
            from bcbio.rnaseq import variation as rnaseq_variation
            return rnaseq_variation.gatk_filter_rnaseq(call_file, data)
        return gatkfilter.run(call_file, ref_file, vrn_files, data)
    # no additional filtration for callers that filter as part of call process
    return call_file
|
def _macros2pys(self):
    """Write the grid's macros to the pys file.

    Format: <macro code line>\\n
    """
    source = self.code_array.dict_grid.macros
    self.pys_file.write(source.encode("utf-8"))
|
def get_experiment_spec(self, matrix_declaration):
    """Return an experiment spec for this group spec and the given matrix declaration."""
    parsed = Parser.parse(self, self._data, matrix_declaration)
    # the hyperparameter-tuning section does not belong in a single experiment
    parsed.pop(self.HP_TUNING)
    validator.validate(spec=self, data=parsed)
    return ExperimentSpecification(values=[parsed, {'kind': self._EXPERIMENT}])
|
def tie_properties(self, class_list):
    """Run through the classes and tie the properties to the class.

    args:
        class_list: a list of class names to run
    """
    log.setLevel(self.log_level)
    start = datetime.datetime.now()
    log.info(" Tieing properties to the class")
    for cls_name in class_list:
        cls_obj = getattr(MODULE.rdfclass, cls_name)
        # Bind each property object to its owning class.  (The original
        # built an unused snapshot dict of cls_obj.properties here; it has
        # been removed as dead code.)
        for prop_name, prop_obj in cls_obj.properties.items():
            setattr(cls_obj, prop_name, link_property(prop_obj, cls_obj))
    log.info(" Finished tieing properties in: %s", (datetime.datetime.now() - start))
|
def new_code_block(self, **kwargs):
    """Create a new code block.

    Any keyword arguments override the default fields of the prototype.
    """
    return {
        'content': '',
        'type': self.code,
        'IO': '',
        'attributes': '',
        **kwargs,
    }
|
def _call_method(self, request):
    """Calls given method with given params and returns it value.

    Dispatches on the shape of ``request['params']``: a list is applied
    positionally (with arity checks), a dict is applied as keyword
    arguments (only for JSON-RPC >= 1.1), anything else means no params.
    The result is delivered through ``defer.returnValue`` (this is a
    Twisted inlineCallbacks-style generator).
    """
    method = self.method_data[request['method']]['method']
    params = request['params']
    result = None
    try:
        if isinstance(params, list):
            # Does it have enough arguments?
            if len(params) < self._man_args(method):
                raise InvalidParamsError('not enough arguments')
            # Does it have too many arguments?
            if not self._vargs(method) and len(params) > self._max_args(method):
                raise InvalidParamsError('too many arguments')
            result = yield defer.maybeDeferred(method, *params)
        elif isinstance(params, dict):
            # Do not accept keyword arguments if the jsonrpc version is
            # not >= 1.1.  NOTE(review): the version appears to be stored
            # as an int (11 == 1.1) — confirm against the request parser.
            if request['jsonrpc'] < 11:
                raise KeywordError
            result = yield defer.maybeDeferred(method, **params)
        else:
            # No params
            result = yield defer.maybeDeferred(method)
    except JSONRPCError:
        # Protocol-level errors propagate unchanged to the caller.
        raise
    except Exception:
        # Exception was raised inside the method; log it and report a
        # generic server error instead of leaking internals.
        log.msg('Exception raised while invoking RPC method "{}".'.format(request['method']))
        log.err()
        raise ServerError
    defer.returnValue(result)
|
def sample_storage_size(self):
    """Get the storage size of the samples storage collection (in MB)."""
    try:
        coll_stats = self.database.command('collStats', 'fs.chunks')
    except pymongo.errors.OperationFailure:
        # stats unavailable (e.g. collection missing) — report zero
        return 0
    return coll_stats['size'] / 1024.0 / 1024.0
|
def optimize(self, **kwargs):
    """Iteratively optimize the ROI model.

    The optimization is performed in three sequential steps:

    * Free the normalization of the N largest components (as determined
      from NPred) that contain a fraction ``npred_frac`` of the total
      predicted counts in the model and perform a simultaneous fit of
      the normalization parameters of these components.
    * Individually fit the normalizations of all sources that were not
      included in the first step in order of their npred values.  Skip
      any sources that have NPred < ``npred_threshold``.
    * Individually fit the shape and normalization parameters of all
      sources with TS > ``shape_ts_threshold`` where TS is determined
      from the first two steps of the ROI optimization.

    To ensure that the model is fully optimized this method can be run
    multiple times.

    Parameters
    ----------
    npred_frac : float
        Threshold on the fractional number of counts in the N largest
        components in the ROI.  This parameter determines the set of
        sources that are fit in the first optimization step.
    npred_threshold : float
        Threshold on the minimum number of counts of individual sources.
        This parameter determines the sources that are fit in the second
        optimization step.
    shape_ts_threshold : float
        Threshold on source TS used for determining the sources that
        will be fit in the third optimization step.
    max_free_sources : int
        Maximum number of sources that will be fit simultaneously in
        the first optimization step.
    skip : list
        List of str source names to skip while optimizing.
    optimizer : dict
        Dictionary that overrides the default optimizer settings.
    """
    loglevel = kwargs.pop('loglevel', self.loglevel)
    timer = Timer.create(start=True)
    self.logger.log(loglevel, 'Starting')
    loglike0 = -self.like()
    self.logger.debug('LogLike: %f' % loglike0)
    # Extract options from kwargs
    config = copy.deepcopy(self.config['roiopt'])
    config['optimizer'] = copy.deepcopy(self.config['optimizer'])
    fermipy.config.validate_config(kwargs, config)
    config = merge_dict(config, kwargs)
    npred_frac_threshold = config['npred_frac']
    npred_threshold = config['npred_threshold']
    shape_ts_threshold = config['shape_ts_threshold']
    max_free_sources = config['max_free_sources']
    skip = copy.deepcopy(config['skip'])
    o = defaults.make_default_dict(defaults.roiopt_output)
    o['config'] = config
    o['loglike0'] = loglike0
    # preserve free parameters so they can be restored at the end
    free = self.get_free_param_vector()
    # Fix all parameters
    self.free_sources(free=False, loglevel=logging.DEBUG)
    # Free norms of sources for which the sum of npred is a
    # fraction > npred_frac of the total model counts in the ROI
    npred_sum = 0
    # fixed: compare against None with identity, not equality
    skip_sources = skip if skip is not None else []
    joint_norm_fit = []
    # EAC: older versions of the ST don't have the has_weights function
    try:
        if self.like.logLike.has_weights():
            npred_str = 'npred_wt'
        else:
            npred_str = 'npred'
    except AttributeError:
        npred_str = 'npred'
    for s in sorted(self.roi.sources, key=lambda t: t[npred_str], reverse=True):
        npred_sum += s[npred_str]
        npred_frac = npred_sum / self._roi_data[npred_str]
        if s.name in skip_sources:
            continue
        self.free_norm(s.name, loglevel=logging.DEBUG)
        joint_norm_fit.append(s.name)
        if npred_frac > npred_frac_threshold:
            break
        if s[npred_str] < npred_threshold:
            break
        if len(joint_norm_fit) >= max_free_sources:
            break
    # fixed: stray debug print() replaced with logger output
    self.logger.debug('Joint fit: %s', joint_norm_fit)
    self.fit(loglevel=logging.DEBUG, **config['optimizer'])
    self.free_sources(free=False, loglevel=logging.DEBUG)
    # Step through remaining sources and re-fit normalizations
    for s in sorted(self.roi.sources, key=lambda t: t[npred_str], reverse=True):
        if s.name in skip_sources or s.name in joint_norm_fit:
            continue
        if s[npred_str] < npred_threshold:
            self.logger.debug('Skipping %s with npred %10.3f', s.name, s[npred_str])
            continue
        self.logger.debug('Fitting %s npred: %10.3f TS: %10.3f', s.name, s[npred_str], s['ts'])
        self.free_norm(s.name, loglevel=logging.DEBUG)
        self.fit(loglevel=logging.DEBUG, **config['optimizer'])
        self.logger.debug('Post-fit Results npred: %10.3f TS: %10.3f', s[npred_str], s['ts'])
        self.free_norm(s.name, free=False, loglevel=logging.DEBUG)
    # Refit spectral shape parameters for sources with TS > shape_ts_threshold
    for s in sorted(self.roi.sources, key=lambda t: t['ts'] if np.isfinite(t['ts']) else 0, reverse=True):
        if s.name in skip_sources:
            continue
        if s['ts'] < shape_ts_threshold or not np.isfinite(s['ts']):
            continue
        # fixed: removed stray debug print() that duplicated this log line
        self.logger.debug('Fitting shape %s TS: %10.3f', s.name, s['ts'])
        self.free_source(s.name, loglevel=logging.DEBUG)
        self.fit(loglevel=logging.DEBUG, **config['optimizer'])
        self.free_source(s.name, free=False, loglevel=logging.DEBUG)
    self.set_free_param_vector(free)
    loglike1 = -self.like()
    o['loglike1'] = loglike1
    o['dloglike'] = loglike1 - loglike0
    self.logger.log(loglevel, 'Finished')
    self.logger.log(loglevel, 'LogLike: %f Delta-LogLike: %f', loglike1, loglike1 - loglike0)
    self.logger.log(loglevel, 'Execution time: %.2f s', timer.elapsed_time)
    return o
|
def in6_getscope(addr):
    """Returns the scope of the address."""
    # unicast scopes
    if in6_isgladdr(addr) or in6_isuladdr(addr):
        return IPV6_ADDR_GLOBAL
    if in6_islladdr(addr):
        return IPV6_ADDR_LINKLOCAL
    if in6_issladdr(addr):
        return IPV6_ADDR_SITELOCAL
    # multicast scopes
    if in6_ismaddr(addr):
        if in6_ismgladdr(addr):
            return IPV6_ADDR_GLOBAL
        if in6_ismlladdr(addr):
            return IPV6_ADDR_LINKLOCAL
        if in6_ismsladdr(addr):
            return IPV6_ADDR_SITELOCAL
        if in6_ismnladdr(addr):
            return IPV6_ADDR_LOOPBACK
        return -1
    if addr == '::1':
        return IPV6_ADDR_LOOPBACK
    # unknown scope
    return -1
|
def format(amount, currency=None):
    """Formats a decimal or Money object into an unambiguous string
    representation for the purpose of invoices in English.

    :param amount:
        A Decimal or Money object
    :param currency:
        If the amount is a Decimal, the currency of the amount
    :return:
        A string representation of the amount in the currency
    """
    if currency is None and hasattr(amount, 'currency'):
        currency = amount.currency
    # Allow Money objects
    if not isinstance(amount, Decimal) and hasattr(amount, 'amount'):
        amount = amount.amount
    if not isinstance(currency, str_cls):
        raise ValueError('The currency specified is not a string')
    if currency not in FORMATTING_RULES:
        supported = ', '.join(sorted(FORMATTING_RULES.keys()))
        raise ValueError('The currency specified, "%s", is not a supported currency: %s' % (currency, supported))
    if not isinstance(amount, Decimal):
        raise ValueError('The amount specified is not a Decimal')
    rules = FORMATTING_RULES[currency]
    digits = builtin_format(amount, ',.%sf' % rules['decimal_places'])
    # Swap through placeholder characters so replacing ',' and '.' cannot
    # collide with the currency's own separator characters.
    digits = digits.replace(',', '_').replace('.', '|')
    digits = digits.replace('_', rules['thousands_separator'])
    digits = digits.replace('|', rules['decimal_mark'])
    if rules['symbol_first']:
        return rules['symbol'] + digits
    return digits + rules['symbol']
|
def start_crawler(self, index, daemonize=False):
    """Starts a crawler from the input-array.

    :param int index: The array-index of the site
    :param int daemonize: Bool if the crawler is supposed to be daemonized
        (to delete the JOBDIR)
    """
    args = [
        sys.executable,
        self.__single_crawler,
        self.cfg_file_path,
        self.json_file_path,
        "%s" % index,
        "%s" % self.shall_resume,
        "%s" % daemonize,
    ]
    self.log.debug("Calling Process: %s", args)
    child = Popen(args, stderr=None, stdout=None)
    # NOTE(review): communicate() blocks until the child exits, so the
    # process appended below has already finished — confirm intended.
    child.communicate()
    self.crawlers.append(child)
|
def update_delivery_note(self, delivery_note_id, delivery_note_dict):
    """Updates a delivery note.

    :param delivery_note_id: the delivery note id
    :param delivery_note_dict: dict
    :return: dict
    """
    return self._create_put_request(
        resource=DELIVERY_NOTES,
        billomat_id=delivery_note_id,
        send_data=delivery_note_dict,
    )
|
def refresh(self):
    '''Refresh the list and the screen'''
    screen = self._screen
    screen.force_update()
    screen.refresh()
    self._update(1)
|
def get_node_type(self, node, parent=None):
    """If node is a document, the type is page.

    If node is a binder with no parent, the type is book.  If node is a
    translucent binder, the type is either chapter (only contains pages)
    or unit (contains at least one translucent binder).
    """
    if isinstance(node, CompositeDocument):
        return 'composite-page'
    if isinstance(node, (Document, DocumentPointer)):
        return 'page'
    if isinstance(node, Binder) and parent is None:
        return 'book'
    # a translucent binder containing another translucent binder is a unit
    has_translucent_child = any(isinstance(child, TranslucentBinder) for child in node)
    return 'unit' if has_translucent_child else 'chapter'
|
def _symbolic_product_helper():
    """Use SymPy to generate the 3D products for diffusion_stencil_3d."""
    from sympy import symbols, Matrix
    D11, D12, D13, D21, D22, D23, D31, D32, D33 = symbols('D11, D12, D13, D21, D22, D23, D31, D32, D33')
    tensor = Matrix([[D11, D12, D13],
                     [D21, D22, D23],
                     [D31, D32, D33]])
    grad = Matrix([['dx', 'dy', 'dz']]).T
    # expand div(D * grad) symbolically and print the scalar result
    product = grad.T * tensor * grad
    print(product[0])
|
def _set_interface_PO_ospf_conf(self, v, load=False):
    """Setter method for interface_PO_ospf_conf, mapped from YANG variable
    /interface/port_channel/ip/interface_PO_ospf_conf (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_interface_PO_ospf_conf is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_interface_PO_ospf_conf() directly.
    """
    # Unwrap values that carry their own YANG type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated container type; raises on any
        # value that is not compatible with the YANG container schema.
        t = YANGDynClass(v, base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """interface_PO_ospf_conf must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=interface_PO_ospf_conf.interface_PO_ospf_conf, is_container='container', presence=False, yang_name="interface-PO-ospf-conf", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'callpoint': u'OSPFPoInterfaceCallPoint'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""", })
    self.__interface_PO_ospf_conf = t
    # Notify the parent object of the change, if it supports it.
    if hasattr(self, '_set'):
        self._set()
|
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_rx_fdiscs(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    fcoe_get_interface = ET.Element("fcoe_get_interface")
    config = fcoe_get_interface
    output = ET.SubElement(fcoe_get_interface, "output")
    intf_list = ET.SubElement(output, "fcoe-intf-list")
    port_id_key = ET.SubElement(intf_list, "fcoe-intf-fcoe-port-id")
    port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    rx_fdiscs = ET.SubElement(intf_list, "fcoe-intf-rx-fdiscs")
    rx_fdiscs.text = kwargs.pop('fcoe_intf_rx_fdiscs')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def validate(self):
    """validate: Makes sure document node contains at least one EPUB or PDF.

    Args: None
    Returns: boolean indicating if document is valid
    """
    from .files import DocumentFile, EPubFile
    try:
        assert self.kind == content_kinds.DOCUMENT, "Assumption Failed: Node should be a document"
        assert self.questions == [], "Assumption Failed: Document should not have questions"
        assert len(self.files) > 0, "Assumption Failed: Document should have at least one file"
        has_document_file = any(isinstance(f, (DocumentFile, EPubFile)) for f in self.files)
        assert has_document_file, "Assumption Failed: Document should have at least one document file"
        return super(DocumentNode, self).validate()
    except AssertionError as ae:
        # re-raise assumption failures as node validation errors
        raise InvalidNodeException("Invalid node ({}): {} - {}".format(ae.args[0], self.title, self.__dict__))
|
def nanopub_to_edges(nanopub: dict = {}, rules: List[str] = [], orthologize_targets: list = []):
    """Process nanopub into edges and load into EdgeStore.

    NOTE(review): the mutable default arguments ({} and []) are risky in
    general; they appear safe here because neither ``nanopub`` nor
    ``orthologize_targets`` is mutated in place (``orthologize_targets``
    is only rebound) — confirm before refactoring.  The ``rules``
    parameter is currently unused in this body.

    Args:
        nanopub: BEL Nanopub
        rules: list of compute rules to process
        orthologize_targets: list of species in TAX:<int> format

    Returns:
        dict with keys ``edges`` (list of edge objects), ``nanopub_id``,
        ``nanopub_url``, ``success`` and ``errors``.  Each edge object has
        the shape::

            "edge": {
                "subject":  {"name", "name_lc", "label", "label_lc", "components"},
                "relation": {"relation", "edge_hash", "edge_dt", "nanopub_url",
                             "nanopub_id", "citation", "subject_canon", "subject",
                             "object_canon", "object", "annotations", "metadata",
                             "public_flag", "edge_types", "species_id",
                             "species_label"},
                "object":   {"name", "name_lc", "label", "label_lc", "components"},
            }
    """
    # Collect input values #####
    nanopub_url = nanopub.get("source_url", "")
    edge_dt = utils.dt_utc_formatted()
    # don't want this in relation_id
    # Extract BEL Version and make sure we can process this
    if nanopub["nanopub"]["type"]["name"].upper() == "BEL":
        bel_version = nanopub["nanopub"]["type"]["version"]
        versions = bel.lang.bel_specification.get_bel_versions()
        if bel_version not in versions:
            log.error(f"Do not know this BEL Version: {bel_version}, these are the ones I can process: {versions.keys()}")
            return []
    else:
        log.error(f"Not a BEL Nanopub according to nanopub.type.name: {nanopub['nanopub']['type']['name']}")
        return []
    # Required for BEL parsing/canonicalization/orthologization
    api_url = config["bel_api"]["servers"]["api_url"]
    try:
        citation_string = normalize_nanopub_citation(nanopub)
    except Exception as e:
        # best-effort: a missing citation must not block edge generation
        log.error(f"Could not create citation string for {nanopub_url}")
        citation_string = ""
    if orthologize_targets == []:
        if config["bel_api"].get("edges", None):
            orthologize_targets = config["bel_api"]["edges"].get("orthologize_targets", [])
    # orig_species_id = [anno['id'] for anno in nanopub['nanopub']['annotations'] if anno['type'] == 'Species']
    # if orig_species_id:
    #     orig_species_id = orig_species_id[0]
    # deep-copied so per-edge copies below cannot mutate the nanopub
    master_annotations = copy.deepcopy(nanopub["nanopub"]["annotations"])
    master_metadata = copy.deepcopy(nanopub["nanopub"]["metadata"])
    master_metadata.pop("gd_abstract", None)
    nanopub_type = nanopub["nanopub"]["metadata"].get("nanopub_type")
    # Create Edge Assertion Info #####
    # r = generate_assertion_edge_info(nanopub['nanopub']['assertions'], orig_species_id, orthologize_targets, bel_version, api_url, nanopub_type)
    r = generate_assertion_edge_info(nanopub["nanopub"]["assertions"], orthologize_targets, bel_version, api_url, nanopub_type)
    edge_info_list = r["edge_info_list"]
    # Build Edges #####
    edges = []
    errors = []
    for edge_info in edge_info_list:
        annotations = copy.deepcopy(master_annotations)
        metadata = copy.deepcopy(master_metadata)
        errors.extend(edge_info["errors"])
        # assertions that failed canonicalization produce no edge
        if not edge_info.get("canonical"):
            continue
        # TODO - remove this
        # if edge_info.get('species_id', False):
        #     annotations = orthologize_context(edge_info['species_id'], annotations)
        # hash of "subject relation object" uniquely identifies the edge
        edge_hash = utils._create_hash(f'{edge_info["canonical"]["subject"]} {edge_info["canonical"]["relation"]} {edge_info["canonical"]["object"]}')
        edge = {
            "edge": {
                "subject": {
                    "name": edge_info["canonical"]["subject"],
                    "name_lc": edge_info["canonical"]["subject"].lower(),
                    "label": edge_info["decanonical"]["subject"],
                    "label_lc": edge_info["decanonical"]["subject"].lower(),
                    "components": edge_info["subject_comp"],
                },
                "relation": {
                    "relation": edge_info["canonical"]["relation"],
                    "edge_hash": edge_hash,
                    "edge_dt": edge_dt,
                    "nanopub_url": nanopub_url,
                    "nanopub_id": nanopub["nanopub"]["id"],
                    "citation": citation_string,
                    "subject_canon": edge_info["canonical"]["subject"],
                    "subject": edge_info["decanonical"]["subject"],
                    "object_canon": edge_info["canonical"]["object"],
                    "object": edge_info["decanonical"]["object"],
                    "annotations": copy.deepcopy(annotations),
                    "metadata": copy.deepcopy(metadata),
                    "public_flag": True,  # will be added when groups/permissions feature is finished
                    "edge_types": edge_info["edge_types"],
                    "species_id": edge_info["species_id"],
                    "species_label": edge_info["species_label"],
                },
                "object": {
                    "name": edge_info["canonical"]["object"],
                    "name_lc": edge_info["canonical"]["object"].lower(),
                    "label": edge_info["decanonical"]["object"],
                    "label_lc": edge_info["decanonical"]["object"].lower(),
                    "components": edge_info["object_comp"],
                },
            }
        }
        edges.append(copy.deepcopy(edge))
    return {"edges": edges, "nanopub_id": nanopub["nanopub"]["id"], "nanopub_url": nanopub_url, "success": True, "errors": errors, }
|
def create(self, name, **kwargs):
    """Create new role.

    http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_roles_resource

    :param str name: Name for the role
    :param str description: (optional)
    :param str id: (optional)
    :param bool client_role: (optional)
    :param bool composite: (optional)
    :param object composites: (optional)
    :param str container_id: (optional)
    :param bool scope_param_required: (optional)
    """
    payload = OrderedDict(name=name)
    # copy only the recognized optional keys, converted to camelCase
    payload.update(
        (to_camel_case(key), kwargs[key]) for key in ROLE_KWARGS if key in kwargs
    )
    return self._client.post(
        url=self._client.get_full_url(
            self.get_path('collection', realm=self._realm_name, id=self._client_id)
        ),
        data=json.dumps(payload, sort_keys=True),
    )
|
def unregister(self, slug):
    """Unregisters the given url.

    If a slug isn't already registered, this will raise NotRegistered.
    """
    if slug not in self._registry:
        raise NotRegistered('The slug %s is not registered' % slug)
    bundle = self._registry[slug]
    meta = bundle._meta
    # Primary model bundles also own a model registration; drop it first,
    # while the bundle is still present in the registry.
    if meta.model and meta.primary_model_bundle:
        self.unregister_model(meta.model)
    del self._registry[slug]
    del self._order[slug]
|
def plot_edoses(self, dos_pos=None, method="gaussian", step=0.01, width=0.1, **kwargs):
    """Plot the band structure and the DOS.

    Args:
        dos_pos: Index of the task from which the DOS should be obtained.
            None if all DOSes should be displayed. Accepts integer or list
            of integers.
        method: String defining the method for the computation of the DOS.
        step: Energy step (eV) of the linear mesh.
        width: Standard deviation (eV) of the gaussian.
        kwargs: Keyword arguments passed to `plot` method to customize the plot.

    Returns:
        `matplotlib` figure.
    """
    # Normalize a scalar index to a container so the membership test works.
    if dos_pos is not None and not isinstance(dos_pos, (list, tuple)):
        dos_pos = [dos_pos]

    from abipy.electrons.ebands import ElectronDosPlotter
    plotter = ElectronDosPlotter()
    for pos, task in enumerate(self.dos_tasks):
        if dos_pos is not None and pos not in dos_pos:
            continue  # the caller asked only for a subset of DOS tasks
        with task.open_gsr() as gsr:
            edos = gsr.ebands.get_edos(method=method, step=step, width=width)
            ngkpt = task.get_inpvar("ngkpt")
            plotter.add_edos("ngkpt %s" % str(ngkpt), edos)
    return plotter.combiplot(**kwargs)
|
def escape(u):
    """Escape a string in an OAuth-compatible fashion.

    TODO: verify whether this can in fact be used for OAuth 2
    """
    if isinstance(u, unicode_type):
        # Percent-encode the UTF-8 bytes; only "~" is kept unescaped.
        return quote(u.encode('utf-8'), safe=b'~')
    raise ValueError('Only unicode objects are escapable.')
|
def folderitem(self, obj, item, index):
    """Augment folder listing item with additional data.

    :param obj: content object backing this listing row
    :param item: listing item (dict-like) to decorate in place
    :param index: position of the item in the listing (unused here)
    :returns: the decorated item
    """
    url = item.get("url")
    title = item.get("DocumentID")
    item["replace"]["DocumentID"] = get_link(url, title)
    item["FileDownload"] = ""
    item["replace"]["FileDownload"] = ""
    # Renamed from `file` to avoid shadowing the builtin of the same name.
    file_obj = self.get_file(obj)
    # Only offer a download link for a non-empty attachment.
    if file_obj and file_obj.get_size() > 0:
        filename = file_obj.filename
        download_url = "{}/at_download/File".format(url)
        item["FileDownload"] = filename
        item["replace"]["FileDownload"] = get_link(download_url, filename)
    item["DocumentVersion"] = obj.getDocumentVersion()
    item["DocumentLocation"] = obj.getDocumentLocation()
    item["DocumentType"] = obj.getDocumentType()
    return item
|
def build(self, X, Y, w=None, edges=None):
    """Assigns data to this object and builds the Morse-Smale Complex.

    @ In, X, an m-by-n array of values specifying m n-dimensional samples
    @ In, Y, a m vector of values specifying the output responses
        corresponding to the m samples specified by X
    @ In, w, an optional m vector of values specifying the weights
        associated to each of the m samples used. Default of None means
        all points will be equally weighted
    @ In, edges, an optional list of custom edges to use as a starting
        point for pruning, or in place of a computed graph.
    """
    super(ContourTree, self).build(X, Y, w, edges)
    # Build the join and split trees that we will merge into the
    # contour tree
    joinTree = MergeTree(debug=self.debug)
    splitTree = MergeTree(debug=self.debug)
    joinTree.build_for_contour_tree(self, True)
    splitTree.build_for_contour_tree(self, False)
    self.augmentedEdges = dict(joinTree.augmentedEdges)
    self.augmentedEdges.update(dict(splitTree.augmentedEdges))
    if self.short_circuit:
        jt = self._construct_nx_tree(joinTree, splitTree)
        st = self._construct_nx_tree(splitTree, joinTree)
    else:
        jt = self._construct_nx_tree(joinTree)
        st = self._construct_nx_tree(splitTree)
    self._process_tree(jt, st)
    self._process_tree(st, jt)
    # Now we have a fully augmented contour tree stored in nodes and
    # edges. The rest is some convenience stuff for querying later
    self._identifyBranches()
    self._identifySuperGraph()
    if self.debug:
        sys.stdout.write("Sorting Nodes: ")
        # BUGFIX: time.clock() was removed in Python 3.8;
        # time.perf_counter() is the documented replacement for
        # measuring elapsed wall-clock intervals.
        start = time.perf_counter()
    self.sortedNodes = sorted(enumerate(self.Y), key=operator.itemgetter(1))
    if self.debug:
        end = time.perf_counter()
        sys.stdout.write("%f s\n" % (end - start))
|
def reset(self):
    """Reset the analyzer back to its initial (idle) state."""
    # Forget the last frame and clear all motion bookkeeping flags.
    self.prevframe = None
    self.t0 = 0
    self.ismoving = False
    self.wasmoving = False
|
def hist(self, *columns, overlay=True, bins=None, bin_column=None,
         unit=None, counts=None, group=None, side_by_side=False,
         width=6, height=4, **vargs):
    """Plots one histogram for each column in columns. If no column is
    specified, plot all columns.

    Kwargs:
        overlay (bool): If True, plots 1 chart with all the histograms
            overlaid on top of each other (instead of the default behavior
            of one histogram for each column in the table). Also adds a
            legend that matches each bar color to its column. Note that
            if the histograms are not overlaid, they are not forced to the
            same scale.
        bins (list or int): Lower bound for each bin in the histogram or
            number of bins. If None, bins will be chosen automatically.
        bin_column (column name or index): A column of bin lower bounds.
            All other columns are treated as counts of these bins.
            If None, each value in each row is assigned a count of 1.
        counts (column name or index): Deprecated name for bin_column.
        unit (string): A name for the units of the plotted column (e.g.
            'kg'), to be used in the plot.
        group (column name or index): A column of categories. The rows are
            grouped by the values in this column, and a separate histogram
            is generated for each group. The histograms are overlaid or
            plotted separately depending on the overlay argument. If None,
            no such grouping is done.
        side_by_side (bool): Whether histogram bins should be plotted side
            by side (instead of directly overlaid). Makes sense only when
            plotting multiple histograms, either by passing several columns
            or by using the group option.
        vargs: Additional arguments that get passed into :func:plt.hist.
            See http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hist
            for additional arguments that can be passed into vargs. These
            include: `range`, `normed`/`density`, `cumulative`, and
            `orientation`, to name a few.

    >>> t = Table().with_columns(
    ...     'count', make_array(9, 3, 3, 1),
    ...     'points', make_array(1, 2, 2, 10))
    >>> t.hist()  # doctest: +SKIP
    <histogram of values in count>
    <histogram of values in points>
    >>> t = Table().with_columns(
    ...     'value', make_array(101, 102, 103),
    ...     'proportion', make_array(0.25, 0.5, 0.25))
    >>> t.hist(bin_column='value')  # doctest: +SKIP
    <histogram of values weighted by corresponding proportions>
    >>> t = Table().with_columns(
    ...     'value', make_array(1, 2, 3, 2, 5),
    ...     'category', make_array('a', 'a', 'a', 'b', 'b'))
    >>> t.hist('value', group='category')  # doctest: +SKIP
    <two overlaid histograms of the data [1, 2, 3] and [2, 5]>
    """
    if counts is not None and bin_column is None:
        warnings.warn("counts arg of hist is deprecated; use bin_column")
        bin_column = counts
    if columns:
        # Restrict the table to the plotted columns (plus the bin/group
        # columns, which the preparation helpers below still need).
        columns_included = list(columns)
        if bin_column is not None:
            columns_included.append(bin_column)
        if group is not None:
            columns_included.append(group)
        self = self.select(*columns_included)
    if group is not None:
        if bin_column is not None:
            raise ValueError("Using bin_column and group together is "
                             "currently unsupported.")
        if len(columns) > 1:
            raise ValueError("Using group with multiple histogram value "
                             "columns is currently unsupported.")
    # Check for non-numerical values and raise a ValueError if any found
    for col in self:
        if col != group and any(isinstance(cell, np.flexible) for cell in self[col]):
            raise ValueError("The column '{0}' contains non-numerical "
                             "values. A histogram cannot be drawn for this table."
                             .format(col))
    if bin_column is not None and bins is None:
        bins = np.unique(self.column(bin_column))
    if bins is not None:
        vargs['bins'] = bins
    # Matplotlib has deprecated the normed keyword; translate it to density.
    # TODO consider changing this function to use density= instead too
    if 'normed' not in vargs and 'density' not in vargs:
        vargs['density'] = True
    elif 'normed' in vargs and 'density' not in vargs:
        vargs['density'] = vargs.pop('normed')
    elif 'normed' in vargs and 'density' in vargs:
        raise ValueError("You can't specify both normed and density. "
                         "Use one or the other.")

    def prepare_hist_with_bin_column(bin_column):
        # This code is factored as a function for clarity only.
        # All non-bin columns hold the weights for the shared bins.
        weight_columns = [c for c in self.labels if c != bin_column]
        bin_values = self.column(bin_column)

        def strip_count_suffix(label):
            # BUGFIX: the original used label.rstrip(' count'), which strips
            # any run of trailing characters from the *set*
            # {' ', 'c', 'o', 'u', 'n', 't'} (e.g. 'proportion' ->
            # 'proporti'). Only remove a literal ' count' suffix.
            return label[:-len(' count')] if label.endswith(' count') else label

        return [(strip_count_suffix(w), (bin_values, self.column(w)))
                for w in weight_columns]

    def prepare_hist_with_group(group):
        # This code is factored as a function for clarity only.
        grouped = self.group(group, np.array)
        if grouped.num_rows > 20:
            warnings.warn("It looks like you're making a grouped histogram with "
                          "a lot of groups ({:d}), which is probably incorrect."
                          .format(grouped.num_rows))
        return [("{}={}".format(group, k), (v[0][1],))
                for k, v in grouped.index_by(group).items()]

    # Populate values_dict: An ordered dict from column name to singleton
    # tuple of array of values or a (values, weights) pair of arrays. If
    # any values have weights, they all must have weights.
    if bin_column is not None:
        values_dict = prepare_hist_with_bin_column(bin_column)
    elif group is not None:
        values_dict = prepare_hist_with_group(group)
    else:
        values_dict = [(k, (self.column(k),)) for k in self.labels]
    values_dict = collections.OrderedDict(values_dict)

    def draw_hist(values_dict):
        # This code is factored as a function for clarity only.
        n = len(values_dict)
        colors = [rgb_color + (self.default_alpha,)
                  for rgb_color in itertools.islice(
                      itertools.cycle(self.chart_colors), n)]
        hist_names = list(values_dict.keys())
        values = [v[0] for v in values_dict.values()]
        weights = [v[1] for v in values_dict.values() if len(v) > 1]
        if n > len(weights) > 0:
            raise ValueError("Weights were provided for some columns, but "
                             "not all, and that's not supported.")
        if vargs['density']:
            y_label = 'Percent per ' + (unit if unit else 'unit')
            percentage = plt.FuncFormatter(lambda x, _: "{:g}".format(100 * x))
        else:
            y_label = 'Count'
        if overlay and n > 1:
            # Reverse because legend prints bottom-to-top
            values = values[::-1]
            weights = weights[::-1]
            colors = list(colors)[::-1]
            if len(weights) == n:
                vargs['weights'] = weights
            if not side_by_side:
                vargs.setdefault('histtype', 'stepfilled')
            figure = plt.figure(figsize=(width, height))
            plt.hist(values, color=colors, **vargs)
            axis = figure.get_axes()[0]
            _vertical_x(axis)
            axis.set_ylabel(y_label)
            if vargs['density']:
                axis.yaxis.set_major_formatter(percentage)
            if unit:
                axis.set_xlabel('(' + unit + ')', fontsize=16)
            plt.legend(hist_names, loc=2, bbox_to_anchor=(1.05, 1))
            type(self).plots.append(axis)
        else:
            _, axes = plt.subplots(n, 1, figsize=(width, height * n))
            if 'bins' in vargs:
                bins = vargs['bins']
                if (isinstance(bins, numbers.Integral) and bins > 76
                        or hasattr(bins, '__len__') and len(bins) > 76):
                    # Use stepfilled when there are too many bins
                    vargs.setdefault('histtype', 'stepfilled')
            if n == 1:
                axes = [axes]
            for i, (axis, hist_name, values_for_hist, color) in enumerate(
                    zip(axes, hist_names, values, colors)):
                axis.set_ylabel(y_label)
                if vargs['density']:
                    axis.yaxis.set_major_formatter(percentage)
                x_unit = ' (' + unit + ')' if unit else ''
                if len(weights) == n:
                    vargs['weights'] = weights[i]
                axis.set_xlabel(hist_name + x_unit, fontsize=16)
                axis.hist(values_for_hist, color=color, **vargs)
                _vertical_x(axis)
                type(self).plots.append(axis)

    draw_hist(values_dict)
|
def items(self):
    """Items are the discussions on the entries."""
    entry_ct = ContentType.objects.get_for_model(Entry)
    discussions = comments.get_model().objects.filter(
        content_type=entry_ct, is_public=True)
    # Newest discussions first, capped at the configured limit.
    return discussions.order_by('-submit_date')[:self.limit]
|
def build_permission_name(model_class, prefix):
    """Build permission name for model_class (like 'app.add_model')."""
    meta = model_class._meta
    # Format: "<app_label>.<prefix>_<lowercased model name>"
    return '{}.{}_{}'.format(meta.app_label, prefix, meta.object_name.lower())
|
def check_scicrunch_for_label(self, label: str) -> dict:
    """Sees if a label with your user ID already exists.

    There can be multiples of the same label in interlex, but there should
    only be one label with your user id. Therefore you can create labels
    that already technically exist, but not if you are the one who
    created them.
    """
    target = label.lower().strip()
    for candidate in self.crude_search_scicrunch_via_label(label):
        # Skip crude hits whose label does not actually match.
        if candidate['label'].lower().strip() != target:
            continue
        entity = self.get_entity(candidate['ilx'])
        # It only counts if the matching entity was created by this user.
        if str(self.user_id) == str(entity['uid']):
            return entity
    # No label AND user id match.
    return {}
|
def get_cache_settings(self, service_id, version_number, name):
    """Get a specific cache settings object."""
    path = "/service/%s/version/%d/cache_settings/%s" % (
        service_id, version_number, name)
    return FastlyCacheSettings(self, self._fetch(path))
|
def handle_starttag(self, tag, attrs):
    """This method handles any HTML tags that have a matching
    closing tag. So elements like <p> and <div> are handled
    by this method.

    @param <string> tag
        An html tag that has a separate closing tag such as <p>
        <div> or <body>
    @param <tuple> attrs
        A tuple of HTML element attributes such as 'class', 'id',
        'style', etc. The tuple is of the form ('html_attribute',
        'attr1', 'attr2', 'attr3' ... 'attrN')
    """
    dattrs = dict(attrs)
    # Look for "<link type='text/css' rel='stylesheet' href='...'>" tags.
    # To see if looking for link tags makes sense here, we need to know
    # a little more about the implementation: whether HTMLParser looks for
    # the trailing slash at the end of an element, or just knows which
    # elements should be paired or not.
    if tag.lower() == 'link':
        # BUGFIX: this was a Python 2 print statement ('print "Found link"'),
        # which is a SyntaxError on Python 3. Converted to the print()
        # function, preserving the original debug output.
        print("Found link")
        if all(k in dattrs for k in ('rel', 'href', 'type')):
            if (dattrs['rel'].lower() == 'stylesheet'
                    and dattrs['type'].lower() == 'text/css'):
                # Add the url to the stack; relative hrefs are resolved
                # against the page's url root.
                if (dattrs['href'][:5].lower() == 'http:'
                        or dattrs['href'][:6].lower() == 'https:'):
                    self.linked_sheets.append(dattrs['href'])
                else:
                    self.linked_sheets.append(self.url_root + dattrs['href'])
    # Look for <style type='text/css' .../> tags and add their rules
    # into the list.
    elif (tag.lower() == 'style' and 'type' in dattrs
          and dattrs['type'].lower() == 'text/css'):
        self.get_data = True
        self.append_styles(tag, attrs)
|
def attribute_value(self, doc: Document, attribute_name: str):
    """Access data using attribute name rather than the numeric indices.

    Returns: the value for the attribute
    """
    # Translate the human-readable attribute name to the document key,
    # then look it up (returns None when the key is absent).
    key = self.header_translation_table[attribute_name]
    return doc.cdr_document.get(key)
|
def add_cluster(self, name, server=None, certificate_authority_data=None, **attrs):
    """Add a cluster to config."""
    if self.cluster_exists(name):
        raise KubeConfError("Cluster with the given name already exists.")
    clusters = self.get_clusters()
    # Assemble the cluster attributes, skipping unset standard fields.
    cluster_attrs = {}
    if server is not None:
        cluster_attrs['server'] = server
    if certificate_authority_data is not None:
        cluster_attrs['certificate-authority-data'] = certificate_authority_data
    cluster_attrs.update(attrs)
    clusters.append({'name': name, 'cluster': cluster_attrs})
|
def get_stack_frame(N=0, strict=True):
    """Return a frame object from the current call stack.

    Args:
        N (int): N=0 means the frame you called this function in.
            N=1 is the parent frame.
        strict (bool): if True, raise when the stack is shallower than
            requested; otherwise return the root frame. (default=True)
    """
    frame = inspect.currentframe()
    # Walk up N+1 levels; the extra level skips this function's own frame.
    level = 0
    while level <= N:
        parent = frame.f_back
        if parent is None:
            if strict:
                raise AssertionError('Frame level %r is root' % level)
            break
        frame = parent
        level += 1
    return frame
|
def GetKeyByPath(self, key_path):
    """Retrieves the key for a specific path.

    Args:
      key_path (str): Windows Registry key path.

    Returns:
      WinRegistryKey: Windows Registry key or None if not available.
    """
    if key_path.upper().startswith(self._key_path_prefix_upper):
        # Drop the prefix to get a path relative to the root key.
        relative_key_path = key_path[self._key_path_prefix_length:]
    elif key_path.startswith(definitions.KEY_PATH_SEPARATOR):
        relative_key_path = key_path
        # Rebuild the absolute path (preserved from the original; note the
        # result does not appear to be used further in this method).
        key_path = ''.join([self._key_path_prefix, key_path])
    else:
        return None

    path_segments = key_paths.SplitKeyPath(relative_key_path)
    registry_key = self._root_key
    if not registry_key:
        return None
    # Descend from the root, one path segment at a time.
    for segment in path_segments:
        registry_key = registry_key.GetSubkeyByName(segment)
        if not registry_key:
            return None
    return registry_key
|
def remove_time_dependent_effects(self, ts):
    """Given a timeseries, apply inverse operations to obtain the original
    series of underlying errors.

    Parameters
    ----------
    ts:
        Time series of observations with this model's characteristics as a
        Numpy array

    returns the time series with removed time-dependent effects as a Numpy array
    """
    # Destination vector the JVM side fills in, pre-sized to match ts.
    dest = Vectors.dense(np.array([0] * len(ts)))
    java_result = self._jmodel.removeTimeDependentEffects(
        _py2java(self._ctx, Vectors.dense(ts)),
        _py2java(self._ctx, dest))
    return _java2py(self._ctx, java_result.toArray())
|
def is_unstructured(self, var):
    """Test if a variable is on an unstructered grid

    Parameters
    ----------
    %(CFDecoder.is_triangular.parameters)s

    Returns
    -------
    %(CFDecoder.is_triangular.returns)s

    Notes
    -----
    Currently this is the same as :meth:`is_triangular` method, but may
    change in the future to support hexagonal grids
    """
    # Explicit attribute wins outright.
    if str(var.attrs.get('grid_type')) == 'unstructured':
        return True
    xcoord = self.get_x(var)
    if xcoord is not None:
        bounds = self._get_coord_cell_node_coord(xcoord)
        # More than two nodes per cell indicates an unstructured mesh.
        if bounds is not None and bounds.shape[-1] > 2:
            return True
    # Falls through to None (falsy) when no evidence of an unstructured
    # grid was found -- preserved from the original implementation.
|
def assign_indent_numbers(lst, inum, dic=None):
    """Associate keywords with their respective indentation numbers.

    Args:
        lst: iterable of keywords to register.
        inum: indentation number assigned to every keyword in ``lst``.
        dic: optional mapping to update in place. When omitted, a fresh
            ``defaultdict(int)`` is created. (BUGFIX: the original used a
            mutable default argument, so the *same* dictionary was silently
            shared and accumulated across every call.)

    Returns:
        The updated mapping.
    """
    if dic is None:
        dic = collections.defaultdict(int)
    for keyword in lst:
        dic[keyword] = inum
    return dic
|
def render_pull_base_image(self):
    """Configure pull_base_image"""
    digests = self.user_params.parent_images_digests.value
    # Only forward the argument when digests were actually supplied.
    if digests:
        self.pt.set_plugin_arg(
            'prebuild_plugins', 'pull_base_image',
            'parent_images_digests', digests)
|
def _bsecurate_cli_elements_in_files(args):
    '''Handles the elements-in-files subcommand'''
    element_data = curate.elements_in_files(args.files)
    # Render one formatted row per (file, elements) pair.
    rows = format_columns(element_data.items())
    return '\n'.join(rows)
|
def validate(request: Union[Dict, List], schema: dict) -> Union[Dict, List]:
    """Wraps jsonschema.validate, returning the same object passed in.

    Args:
        request: The deserialized-from-json request.
        schema: The jsonschema schema to validate against.

    Raises:
        jsonschema.ValidationError
    """
    # jsonschema raises on failure, so reaching the return statement
    # means the request conforms to the schema.
    jsonschema_validate(request, schema)
    return request
|
def film_search(self, title):
    """film search using fuzzy matching"""
    # Lazily populate the film cache on first use.
    if not hasattr(self, 'film_list'):
        self.get_film_list()
    matches = []
    for film in self.film_list:
        score = WRatio(title, film['title'])
        # Keep only sufficiently strong fuzzy matches, recording the score
        # on the film dict itself.
        if score > 80:
            film.update({u'strength': score})
            matches.append(film)
    # Strongest matches first.
    return sorted(matches, key=itemgetter('strength'), reverse=True)
|
def errbackForName(self, instance, commandName, errorName):
    """Retrieve an errback - a callable object that accepts a L{Failure} as an
    argument - that is exposed on the given instance, given an AMP
    commandName and a name in that command's error mapping.
    """
    # Exposed errbacks are keyed by the (command, error-name) pair.
    parent = super(_AMPErrorExposer, self)
    return parent.get(instance, (commandName, errorName))
|
def remove_vip(self, vip_request_ids):
    """Method to delete vip request

    param vip_request_ids: vip_request ids
    """
    return super(ApiVipRequest, self).delete(
        'api/v3/vip-request/deploy/%s/' % vip_request_ids)
|
def list_members(context, request):
    """Return the list of users in the group."""
    def serialize(member):
        # One JSON-able record per group member.
        return {
            'username': member.identifier,
            'userid': member.userid,
            'roles': context.get_member_roles(member.userid),
            'links': [rellink(member, request)],
        }

    return {'users': [serialize(m) for m in context.members()]}
|
def get_minions():
    '''Return a list of minions'''
    log.debug('sqlite3 returner <get_minions> called')
    conn = _get_conn(ret=None)
    cur = conn.cursor()
    cur.execute('''SELECT DISTINCT id FROM salt_returns''')
    # Each fetched row is a 1-tuple holding the minion id.
    minions = [row[0] for row in cur.fetchall()]
    _close_conn(conn)
    return minions
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.