signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def show(config_file=False):
    '''Return a list of sysctl parameters for this minion

    CLI Example:

    .. code-block:: bash

        salt '*' sysctl.show
    '''
    # Top-level sysctl namespaces we recognise as the start of a new entry.
    roots = ('kern', 'vm', 'vfs', 'net', 'hw', 'machdep', 'user', 'ddb',
             'proc', 'emul', 'security', 'init')
    output = __salt__['cmd.run']('sysctl -ae', output_loglevel='trace')
    ret = {}
    parts = ['']  # parts[0] is the key of the entry currently being built
    for line in output.splitlines():
        if any(line.startswith('{0}.'.format(root)) for root in roots):
            # New "key=value" (or "key: value") entry.
            parts = re.split('[=:]', line, 1)
            ret[parts[0]] = parts[1]
        elif parts[0]:
            # Continuation line of a multi-line value.
            ret[parts[0]] += '{0}\n'.format(line)
        else:
            # Preamble before the first recognised key: ignore.
            continue
    return ret
def _hash_task ( task ) :
"""Returns a unique hash for identify a task and its params""" | params = task . get ( "params" )
if params :
params = json . dumps ( sorted ( list ( task [ "params" ] . items ( ) ) , key = lambda x : x [ 0 ] ) )
# pylint : disable = no - member
full = [ str ( task . get ( x ) ) for x in [ "path" , "interval" , "dailytime" , "weekday" , "monthday" , "queue" ] ]
full . extend ( [ str ( params ) ] )
return " " . join ( full ) |
def display_message(self, clear, beep, timeout, line1, line2):
    """Display a message on all of the keypads in this area."""
    # Encode a display-message (dm) command for this area's index and send it
    # over the shared Elk controller connection.
    self._elk.send(dm_encode(self._index, clear, beep, timeout, line1, line2))
def set_build_image(self):
    """Overrides build_image for worker, to be same as in orchestrator build"""
    # platform.processor() can return '' on some systems; fall back to x86_64.
    current_platform = platform.processor()
    orchestrator_platform = current_platform or 'x86_64'
    current_buildimage = self.get_current_buildimage()
    # Apply explicit per-platform build image overrides first.
    for plat, build_image in self.build_image_override.items():
        self.log.debug('Overriding build image for %s platform to %s', plat, build_image)
        self.build_image_digests[plat] = build_image
    # Platforms whose build image still has to be resolved via a manifest list.
    manifest_list_platforms = self.platforms - set(self.build_image_override.keys())
    if not manifest_list_platforms:
        self.log.debug('Build image override used for all platforms, '
                       'skipping build image manifest list checks')
        return
    # orchestrator platform is same as platform on which we want to build on,
    # so we can use the same image
    if manifest_list_platforms == set([orchestrator_platform]):
        self.build_image_digests[orchestrator_platform] = current_buildimage
        return
    # BuildConfig exists
    build_image, imagestream = self.get_image_info_from_buildconfig()
    if not (build_image or imagestream):
        # get image build from build metadata, which is set for direct builds
        # this is explicitly set by osbs-client, it isn't default OpenShift behaviour
        build_image, imagestream = self.get_image_info_from_annotations()
    # if imageStream is used
    if imagestream:
        build_image = self.get_build_image_from_imagestream(imagestream)
    # we have build_image with tag, so we can check for manifest list
    if build_image:
        self.check_manifest_list(build_image, orchestrator_platform,
                                 manifest_list_platforms, current_buildimage)
def parse_coach_ec_df(infile):
    """Parse the EC.dat output file of COACH and return a dataframe of results.

    EC.dat contains the predicted EC number and active residues.
    The columns are: PDB_ID, TM-score, RMSD, Sequence identity,
    Coverage, Confidence score, EC number, and Active site residues

    Args:
        infile (str): Path to EC.dat

    Returns:
        DataFrame: Pandas DataFrame summarizing EC number predictions
    """
    # pd.read_table and delim_whitespace are deprecated (read_table removed in
    # pandas 2.0); read_csv with a whitespace regex separator is equivalent.
    ec_df = pd.read_csv(infile, sep=r'\s+',
                        names=['pdb_template', 'tm_score', 'rmsd', 'seq_ident',
                               'seq_coverage', 'c_score', 'ec_number',
                               'binding_residues'])
    # The template identifier is a 4-char PDB id followed by a 1-char chain id.
    ec_df['pdb_template_id'] = ec_df['pdb_template'].apply(lambda x: x[:4])
    ec_df['pdb_template_chain'] = ec_df['pdb_template'].apply(lambda x: x[4])
    ec_df = ec_df[['pdb_template_id', 'pdb_template_chain', 'tm_score', 'rmsd',
                   'seq_ident', 'seq_coverage', 'c_score', 'ec_number',
                   'binding_residues']]
    # Confidence scores may contain non-numeric placeholders; coerce them to NaN.
    ec_df['c_score'] = pd.to_numeric(ec_df.c_score, errors='coerce')
    return ec_df
def noisy_moment(self, moment: 'cirq.Moment', system_qubits: Sequence['cirq.Qid']) -> 'cirq.OP_TREE':
    """Adds noise to the operations from a moment.

    Args:
        moment: The moment to add noise to.
        system_qubits: A list of all qubits in the system.

    Returns:
        An OP_TREE corresponding to the noisy operations for the moment.
    """
    # The base class tags its default hook implementations with a
    # ``_not_overridden`` attribute.  If the subclass overrode noisy_moments
    # (attribute absent), delegate to it with a single-moment list.
    if not hasattr(self.noisy_moments, '_not_overridden'):
        return self.noisy_moments([moment], system_qubits)
    # Otherwise fall back to the per-operation noise hook, if overridden.
    if not hasattr(self.noisy_operation, '_not_overridden'):
        return [self.noisy_operation(op) for op in moment]
    # A subclass must override at least one of the noise hooks; if none are,
    # an earlier check is expected to have prevented reaching this point.
    assert False, 'Should be unreachable.'
def create_translate_dictionaries(symbols):
    u"""create translate dictionaries for text, google, docomo, kddi and softbank via `symbols`

    create dictionaries for translate emoji character to carrier from unicode (forward) or to unicode from carrier (reverse).
    method return dictionary instance which key is carrier name and value format is `(forward_dictionary, reverse_dictionary)`

    each dictionary expect `unicode` format. any text not decoded have to be decode before using this dictionary (like matching key)

    DO NOT CONFUSE with carrier's UNICODE emoji. UNICODE emoji like `u"\uE63E"` for DoCoMo's sun emoji is not expected. expected character
    for DoCoMo's sun is decoded character from `"\xF8\x9F"` (actually decoded unicode of `"\xF8\xF9"` is `u"\uE63E"` however not all emoji
    can convert with general encode/decode method. conversion of UNICODE <-> ShiftJIS is operated in Symbol constructor and stored in Symbol's `sjis`
    attribute and unicode formatted is `usjis` attribute.)
    """
    # NOTE: this code uses the Python 2 ``unicode`` builtin.
    # Reverse maps (standard-unicode form -> carrier representation).
    # 'text' and the '*_img' entries have no forward map (see return value).
    unicode_to_text = {}
    unicode_to_docomo_img = {}
    unicode_to_kddi_img = {}
    unicode_to_softbank_img = {}
    unicode_to_google = {}
    unicode_to_docomo = {}
    unicode_to_kddi = {}
    unicode_to_softbank = {}
    # Forward maps (carrier representation -> standard-unicode form).
    google_to_unicode = {}
    docomo_to_unicode = {}
    kddi_to_unicode = {}
    softbank_to_unicode = {}
    for x in symbols:
        # Only symbols flagged "keyable" may be used as dictionary keys.
        if x.unicode.keyable:
            unicode_to_text[unicode(x.unicode)] = x.unicode.fallback
            unicode_to_docomo_img[unicode(x.unicode)] = x.docomo.thumbnail
            unicode_to_kddi_img[unicode(x.unicode)] = x.kddi.thumbnail
            unicode_to_softbank_img[unicode(x.unicode)] = x.softbank.thumbnail
            unicode_to_google[unicode(x.unicode)] = unicode(x.google)
            unicode_to_docomo[unicode(x.unicode)] = unicode(x.docomo)
            unicode_to_kddi[unicode(x.unicode)] = unicode(x.kddi)
            unicode_to_softbank[unicode(x.unicode)] = unicode(x.softbank)
        if x.google.keyable:
            google_to_unicode[unicode(x.google)] = unicode(x.unicode)
        if x.docomo.keyable:
            docomo_to_unicode[unicode(x.docomo)] = unicode(x.unicode)
        if x.kddi.keyable:
            kddi_to_unicode[unicode(x.kddi)] = unicode(x.unicode)
        if x.softbank.keyable:
            softbank_to_unicode[unicode(x.softbank)] = unicode(x.unicode)
    return {  # forward                 reverse
        'text': (None, unicode_to_text),
        'docomo_img': (None, unicode_to_docomo_img),
        'kddi_img': (None, unicode_to_kddi_img),
        'softbank_img': (None, unicode_to_softbank_img),
        'google': (google_to_unicode, unicode_to_google),
        'docomo': (docomo_to_unicode, unicode_to_docomo),
        'kddi': (kddi_to_unicode, unicode_to_kddi),
        'softbank': (softbank_to_unicode, unicode_to_softbank),
    }
def timeit(func):
    """Simple decorator to time functions

    :param func: function to decorate
    :type func: callable
    :return: wrapped function
    :rtype: callable
    """
    @wraps(func)
    def _timed(*args, **kwargs):
        # Wall-clock the call and log the rounded duration.
        begin = time.time()
        outcome = func(*args, **kwargs)
        LOGGER.info('%s took %s seconds to complete',
                    func.__name__, round(time.time() - begin, 2))
        return outcome
    return _timed
def stop_executing_svc_checks(self):
    """Disable service check execution (globally)

    Format of the line that triggers function call::

        STOP_EXECUTING_SVC_CHECKS

    :return: None
    """
    # Only act if service check execution is currently enabled.
    if self.my_conf.execute_service_checks:
        # Record the runtime modification in the MODATTR bookkeeping.
        # NOTE(review): the constant used is MODATTR_ACTIVE_CHECKS_ENABLED even
        # though this toggles service check *execution* -- confirm intended.
        self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
        self.my_conf.execute_service_checks = False
        # Propagate the global configuration change to all config objects.
        self.my_conf.explode_global_conf()
        # Ask the daemon to broadcast the updated program status.
        self.daemon.update_program_status()
def use_file(self, enabled=True, file_name=None, level=logging.WARNING, when='d', interval=1, backup_count=30, delay=False, utc=False, at_time=None, log_format=None, date_format=None):
    """Handler for logging to a file, rotating the log file at certain timed intervals.

    :param enabled: install (or keep) the file handler when True; remove it when False
    :param file_name: path of the log file; required the first time the handler is enabled
    :param level: minimum logging level for this handler
    :param when: rotation interval unit (see TimedRotatingFileHandler)
    :param interval: number of ``when`` units between rotations
    :param backup_count: how many rotated files to keep
    :param delay: defer opening the file until the first record is emitted
    :param utc: use UTC times for rotation
    :param at_time: time of day at which to rotate (Python 3 only)
    :param log_format: record format string; a verbose default is used when None
    :param date_format: date format passed to the formatter
    """
    if enabled:
        # Install the handler only once; subsequent calls are no-ops.
        if not self.__file_handler:
            assert file_name, 'File name is missing!'
            # Create new TimedRotatingFileHandler instance
            kwargs = {
                'filename': file_name,
                'when': when,
                'interval': interval,
                'backupCount': backup_count,
                'encoding': 'UTF-8',
                'delay': delay,
                'utc': utc,
            }
            # ``atTime`` is only supported by Python 3's handler.
            if sys.version_info[0] >= 3:
                kwargs['atTime'] = at_time
            self.__file_handler = TimedRotatingFileHandler(**kwargs)
            # Use this format for default case
            if not log_format:
                log_format = '%(asctime)s %(name)s[%(process)d] ' '%(programname)s/%(module)s/%(funcName)s[%(lineno)d] ' '%(levelname)s %(message)s'
            # Set formatter
            formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
            self.__file_handler.setFormatter(fmt=formatter)
            # Set level for this handler
            self.__file_handler.setLevel(level=level)
            # Add this handler to logger
            self.add_handler(hdlr=self.__file_handler)
    elif self.__file_handler:
        # Remove handler from logger
        self.remove_handler(hdlr=self.__file_handler)
        self.__file_handler = None
import re

# Matches either a single lowercase letter, or any string that starts and
# ends with the same lowercase letter.  Raw string avoids the escaped
# backreference ('\\1') of the original non-raw literal; value is unchanged.
pattern = r'^[a-z]$|^([a-z]).*\1$'
# Compile once at import time instead of re-matching the raw pattern per call.
_compiled_pattern = re.compile(pattern)


def validate_string(s):
    """Function to validate whether the input string begins and ends with the
    same character using regular expressions.

    Example:
        >>> validate_string('mom')
        'Valid'
        >>> validate_string('z')
        'Valid'
        >>> validate_string('cars')
        'Invalid'
    """
    return 'Valid' if _compiled_pattern.match(s) else 'Invalid'
def _section_from_spec ( elf_file , spec ) :
'''Retrieve a section given a " spec " ( either number or name ) .
Return None if no such section exists in the file .''' | if isinstance ( spec , int ) :
num = int ( spec )
if num < elf_file . num_sections ( ) :
return elf_file . get_section ( num )
# Not a number . Must be a name then
if isinstance ( spec , str ) :
try :
return elf_file . get_section_by_name ( spec )
except AttributeError :
return None |
def wait_time(self, value):
    """Setter for **self.__wait_time** attribute.

    :param value: Attribute value.
    :type value: int or float
    """
    # ``None`` clears the attribute without validation.
    if value is not None:
        allowed = (int, float)
        assert type(value) in allowed, (
            "'{0}' attribute: '{1}' type is not 'int' or 'float'!".format("wait_time", value))
        assert value >= 0, (
            "'{0}' attribute: '{1}' need to be positive!".format("wait_time", value))
    self.__wait_time = value
def compute_radius(wcs):
    """Compute the radius from the center to the furthest edge of the WCS."""
    # CRVAL holds the reference (center) sky coordinate, in degrees.
    ra, dec = wcs.wcs.crval
    img_center = SkyCoord(ra=ra * u.degree, dec=dec * u.degree)
    # calc_footprint returns the image corners as (RA, Dec) pairs.
    wcs_foot = wcs.calc_footprint()
    img_corners = SkyCoord(ra=wcs_foot[:, 0] * u.degree, dec=wcs_foot[:, 1] * u.degree)
    # The radius (degrees) is the largest center-to-corner angular separation.
    radius = img_center.separation(img_corners).max().value
    return radius
def set_pixel(self, x, y, state):
    """Set pixel at "x, y" to "state" where state can be one of "ON", "OFF"
    or "TOGGLE"
    """
    # Device protocol is 1-based, hence the +1 on both coordinates.
    command = "P{0},{1},{2}".format(x + 1, y + 1, state)
    self.send_cmd(command)
def map(self, func: Callable[[Tuple[Any, Log]], Tuple[Any, Log]]) -> 'Writer':
    """Map a function func over the Writer value.

    Haskell:
    fmap f m = Writer $ let (a, w) = runWriter m in (f a, w)

    Keyword arguments:
    func -- Mapper function, applied to the whole (value, log) pair.
    """
    # Unwrap the (value, log) pair, apply func to the pair, and re-wrap
    # the (possibly transformed) pair in a new Writer.
    a, w = self.run()
    b, _w = func((a, w))
    return Writer(b, _w)
def match_function_pattern(self, first, rest=None, least=1, offset=0):
    """Match each char sequentially from current SourceString position
    until the pattern doesnt match and return all maches.

    Integer argument least defines and minimum amount of chars that can
    be matched.

    This version takes functions instead of string patterns.
    Each function must take one argument, a string, and return a
    value that can be evauluated as True or False.

    If rest is defined then first is used only to match the first arg
    and the rest of the chars are matched against rest.
    """
    if not self.has_space(offset=offset):
        return ''
    start = self.pos + offset
    head = self.string[start]
    # The first character must satisfy ``first``.
    if not first(head):
        return ''
    matched = [head]
    # Remaining characters are matched against ``rest`` when given,
    # otherwise against ``first`` again.
    predicate = first if rest is None else rest
    for char in self.string[start + 1:]:
        if not predicate(char):
            break
        matched.append(char)
    # Enforce the minimum match length.
    return ''.join(matched) if len(matched) >= least else ''
def _resolve(self, path, migration_file):
    """Resolve a migration instance from a file.

    :param path: Directory containing the migration files
    :type path: str

    :param migration_file: The migration file
    :type migration_file: str

    :rtype: eloquent.migrations.migration.Migration
    """
    variables = {}
    # Migration file names start with a timestamp made of four
    # underscore-separated parts (e.g. 2015_01_01_000000_create_users);
    # the remainder is the migration name.
    name = '_'.join(migration_file.split('_')[4:])
    migration_file = os.path.join(path, '%s.py' % migration_file)
    # Execute the migration module's source and capture its definitions.
    # NOTE(review): exec of file contents -- assumes migration files are
    # trusted project code, never user input.
    with open(migration_file) as fh:
        exec(fh.read(), {}, variables)
    # The migration class name is the CamelCase form of the file name.
    klass = variables[inflection.camelize(name)]
    instance = klass()
    # Wire the instance to the repository connection's schema builder.
    instance.set_schema_builder(self.get_repository().get_connection().get_schema_builder())
    return instance
def validate_get_dbs(connection):
    """validates the connection object is capable of read access to rethink

    should be at least one test database by default

    :param connection: <rethinkdb.net.DefaultConnection>
    :return: <set> list of databases
    :raises: ReqlDriverError AssertionError
    """
    # db_list raises ReqlDriverError when the connection is unusable.
    remote_dbs = set(rethinkdb.db_list().run(connection))
    # A reachable server always exposes at least one database.
    assert remote_dbs
    return remote_dbs
def componentSelection(self, comp):
    """Toggles the selection of *comp* from the currently active parameter.

    The currently selected row in the auto-parameters view determines which
    parameter's component selection is toggled.
    """
    indexes = self.selectedIndexes()
    if not indexes:
        # No row selected: there is no active parameter to toggle against.
        # (The original indexed indexes[0] unconditionally and raised
        # IndexError on an empty selection.)
        return
    self.model().toggleSelection(indexes[0], comp)
def device_not_active(self, addr):
    """Handle inactive devices."""
    # Mark the address as processed before notifying subscribers.
    self.aldb_device_handled(addr)
    for notify in self._cb_device_not_active:
        notify(addr)
def _read_coord_h5(files, shapes, header, twod):
    """Read all coord hdf5 files of a snapshot.

    Args:
        files (list of pathlib.Path): list of NodeCoordinates files of
            a snapshot.
        shapes (list of (int, int)): shape of mesh grids.
        header (dict): geometry info; updated in place with mesh and
            coordinate metadata.
        twod (str): 'XZ', 'YZ' or None depending on what is relevant.
    """
    meshes = []
    for h5file, shape in zip(files, shapes):
        meshes.append({})
        with h5py.File(h5file, 'r') as h5f:
            for coord, mesh in h5f.items():
                # for some reason, the array is transposed!
                meshes[-1][coord] = mesh[()].reshape(shape).T
                meshes[-1][coord] = _make_3d(meshes[-1][coord], twod)
    # Number of cores per direction, and total grid points per direction
    # (each core's mesh shares one boundary node with its neighbour).
    header['ncs'] = _ncores(meshes, twod)
    header['nts'] = list((meshes[0]['X'].shape[i] - 1) * header['ncs'][i] for i in range(3))
    header['nts'] = np.array([max(1, val) for val in header['nts']])
    # meshes could also be defined in legacy parser, so that these can be used
    # in geometry setup
    meshes = _conglomerate_meshes(meshes, header)
    if np.any(meshes['Z'][:, :, 0] != 0):  # spherical
        header['x_mesh'] = np.copy(meshes['Y'])
        # annulus geometry...
        header['y_mesh'] = np.copy(meshes['Z'])
        header['z_mesh'] = np.copy(meshes['X'])
        # Convert the Cartesian meshes to spherical (r, theta, phi) meshes.
        header['r_mesh'] = np.sqrt(header['x_mesh']**2 + header['y_mesh']**2 + header['z_mesh']**2)
        header['t_mesh'] = np.arccos(header['z_mesh'] / header['r_mesh'])
        header['p_mesh'] = np.roll(np.arctan2(header['y_mesh'], -header['x_mesh']) + np.pi, -1, 1)
        header['e1_coord'] = header['t_mesh'][:, 0, 0]
        header['e2_coord'] = header['p_mesh'][0, :, 0]
        header['e3_coord'] = header['r_mesh'][0, 0, :]
    else:
        # Cartesian geometry: the raw X/Y/Z meshes give the axis coordinates.
        header['e1_coord'] = meshes['X'][:, 0, 0]
        header['e2_coord'] = meshes['Y'][0, :, 0]
        header['e3_coord'] = meshes['Z'][0, 0, :]
    # NOTE(review): both aspect components are identical (e1 span used twice);
    # the second component may have been meant to span e2 -- confirm.
    header['aspect'] = (header['e1_coord'][-1] - header['e2_coord'][0],
                        header['e1_coord'][-1] - header['e2_coord'][0])
    header['rcmb'] = header['e3_coord'][0]
    if header['rcmb'] == 0:
        header['rcmb'] = -1
    else:  # could make the difference between r_coord and z_coord
        header['e3_coord'] = header['e3_coord'] - header['rcmb']
    # Drop the duplicated last node along each relevant direction.
    if twod is None or 'X' in twod:
        header['e1_coord'] = header['e1_coord'][:-1]
    if twod is None or 'Y' in twod:
        header['e2_coord'] = header['e2_coord'][:-1]
    header['e3_coord'] = header['e3_coord'][:-1]
def remove_align_qc_tools(data):
    """Remove alignment based QC tools we don't need for data replicates.

    When we do multiple variant calling on a sample file (somatic/germline),
    avoid re-running QC.
    """
    # QC tools that operate on the alignment and would be redundant here.
    align_qc = set(["qsignature", "coverage", "picard", "samtools", "fastqc"])
    kept_tools = [tool for tool in dd.get_algorithm_qc(data) if tool not in align_qc]
    data["config"]["algorithm"]["qc"] = kept_tools
    return data
def _set_current_page ( self , current_page , last_page ) :
"""Get the current page for the request .
: param current _ page : The current page of results
: type current _ page : int
: param last _ page : The last page of results
: type last _ page : int
: rtype : int""" | if not current_page :
current_page = self . resolve_current_page ( )
if current_page > last_page :
if last_page > 0 :
return last_page
return 1
if not self . _is_valid_page_number ( current_page ) :
return 1
return current_page |
def run_hook(self, app: FlaskUnchained, bundles: List[Bundle]):
    """Hook entry point. Override to disable standard behavior of iterating
    over bundles to discover objects and processing them.
    """
    # Default pipeline: collect discovered objects across all bundles, then
    # hand the combined collection to this hook's processing step.
    self.process_objects(app, self.collect_from_bundles(bundles))
def get_file_size(path):
    """Return the size of a file in bytes.

    Parameters
    ----------
    path : str
        The path of the file.

    Returns
    -------
    int
        The size of the file in bytes.

    Raises
    ------
    IOError
        If the file does not exist (raised as FileNotFoundError, a subclass).
    OSError
        If a file system error occurs.
    """
    if not os.path.isfile(path):
        # The original did ``raise IOError(msg, path)`` which passed the path
        # as a spare exception argument instead of %-formatting it into the
        # message.  FileNotFoundError subclasses IOError/OSError, so callers
        # catching IOError keep working.
        raise FileNotFoundError('File "%s" does not exist.' % path)
    return os.path.getsize(path)
def to_positive_multiple_of_10 ( string ) :
"""Converts a string into its encoded positive integer ( greater zero ) or throws an exception .""" | try :
value = int ( string )
except ValueError :
msg = '"%r" is not a positive multiple of 10 (greater zero).' % string
raise argparse . ArgumentTypeError ( msg )
if value <= 0 or value % 10 != 0 :
msg = '"%r" is not a positive multiple of 10 (greater zero).' % string
raise argparse . ArgumentTypeError ( msg )
return value |
def get_proteins_from_psm(line):
    """From a line, return list of proteins reported by Mzid2TSV.

    When unrolled lines are given, this returns the single protein from
    the line.
    """
    proteins = line[mzidtsvdata.HEADER_PROTEIN].split(';')
    # Strip the "(pre=X post=Y)" context annotation appended to each
    # accession.  Raw string: '\(' in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning in Python 3.6+, error in future versions).
    return [re.sub(r'\(pre=.*post=.*\)', '', protein).strip()
            for protein in proteins]
def dict(self):
    """Returns dictionary of post fields and attributes"""
    post_dict = {
        'id': self.id,
        'link': self.link,
        'permalink': self.permalink,
        'content_type': self.content_type,
        'slug': self.slug,
        'updated': self.updated,  # .strftime(conf.GOSCALE_ATOM_DATETIME_FORMAT),
        'published': self.published,  # .strftime(conf.GOSCALE_ATOM_DATETIME_FORMAT),
        'title': self.title,
        'description': self.description,
        'author': self.author,
        # categories is stored as a bracketed comma-separated string;
        # strip the surrounding brackets and split into a list.
        'categories': self.categories[1:-1].split(',') if self.categories else None,
        'summary': self.summary,
    }
    # Extra attributes are stored as a JSON blob; merge them into the result.
    if self.attributes:
        attributes = simplejson.loads(self.attributes)
        post_dict.update(attributes)
    return post_dict
def normalizeX(value):
    """Normalizes x coordinate.

    * **value** must be an :ref:`type-int-float`.
    * Returned value is the same type as the input value.
    """
    if isinstance(value, (int, float)):
        return value
    raise TypeError("X coordinates must be instances of "
                    ":ref:`type-int-float`, not %s." % type(value).__name__)
def shutdown(server, graceful=True):
    """Shut down the application.

    If a graceful stop is requested, waits for all of the IO loop's
    handlers to finish before shutting down the rest of the process.
    We impose a 10 second timeout.

    Based on http://tornadogists.org/3428652/
    """
    ioloop = IOLoop.instance()
    logging.info("Stopping server...")
    # Stop listening for new connections
    server.stop()

    def final_stop():
        # Halt the loop and terminate the process.
        ioloop.stop()
        logging.info("Stopped.")
        sys.exit(0)

    def poll_stop(counts={'remaining': None, 'previous': None}):
        # NOTE: the mutable default dict is intentional -- it persists state
        # across periodic invocations of this callback.
        remaining = len(ioloop._handlers)
        counts['remaining'], counts['previous'] = remaining, counts['remaining']
        previous = counts['previous']
        # Wait until we only have only one IO handler remaining. That
        # final handler will be our PeriodicCallback polling task.
        if remaining == 1:
            final_stop()
        # Log only when the handler count changes (or on first poll).
        if previous is None or remaining != previous:
            logging.info("Waiting on IO %d remaining handlers", remaining)

    if graceful:
        # Callback to check on remaining handlers.
        poller = PeriodicCallback(poll_stop, 250, io_loop=ioloop)
        poller.start()
        # Give up after 10 seconds of waiting.
        ioloop.add_timeout(time.time() + 10, final_stop)
    else:
        final_stop()
def create(cls, exiter, args, env, target_roots=None, daemon_graph_session=None, options_bootstrapper=None):
    """Creates a new LocalPantsRunner instance by parsing options.

    :param Exiter exiter: The Exiter instance to use for this run.
    :param list args: The arguments (e.g. sys.argv) for this run.
    :param dict env: The environment (e.g. os.environ) for this run.
    :param TargetRoots target_roots: The target roots for this run.
    :param LegacyGraphSession daemon_graph_session: The graph helper for this session.
    :param OptionsBootstrapper options_bootstrapper: The OptionsBootstrapper instance to reuse.
    """
    build_root = get_buildroot()
    options, build_config, options_bootstrapper = cls.parse_options(
        args,
        env,
        setup_logging=True,
        options_bootstrapper=options_bootstrapper,
    )
    global_options = options.for_global_scope()
    # Option values are usually computed lazily on demand,
    # but command line options are eagerly computed for validation.
    for scope in options.scope_to_flags.keys():
        options.for_scope(scope)
    # Verify configs.
    if global_options.verify_config:
        options_bootstrapper.verify_configs_against_options(options)
    # If we're running with the daemon, we'll be handed a session from the
    # resident graph helper - otherwise initialize a new one here.
    graph_session = cls._maybe_init_graph_session(daemon_graph_session, options_bootstrapper, build_config, options)
    target_roots = cls._maybe_init_target_roots(target_roots, graph_session, options, build_root)
    # PANTS_PROFILE enables profiling output for this run when set.
    profile_path = env.get('PANTS_PROFILE')
    return cls(
        build_root,
        exiter,
        options,
        options_bootstrapper,
        build_config,
        target_roots,
        graph_session,
        daemon_graph_session is not None,  # whether this run came from the daemon
        profile_path,
    )
def _get_library_search_paths ( ) :
"""Returns a list of library search paths , considering of the current working
directory , default paths and paths from environment variables .""" | search_paths = [ '' , '/usr/lib64' , '/usr/local/lib64' , '/usr/lib' , '/usr/local/lib' , '/run/current-system/sw/lib' , '/usr/lib/x86_64-linux-gnu/' , os . path . abspath ( os . path . dirname ( __file__ ) ) ]
if sys . platform == 'darwin' :
path_environment_variable = 'DYLD_LIBRARY_PATH'
else :
path_environment_variable = 'LD_LIBRARY_PATH'
if path_environment_variable in os . environ :
search_paths . extend ( os . environ [ path_environment_variable ] . split ( ':' ) )
return search_paths |
def emitResetAxisSignal(self, axisNumber):
    """Emits the sigResetAxis with the axisNumber as parameter

    axisNumber should be 0 for X, 1 for Y, and 2 for both axes.
    """
    assert axisNumber in (VALID_AXES_NUMBERS), \
        "Axis Nr should be one of {}, got {}".format(VALID_AXES_NUMBERS, axisNumber)
    # Hide 'auto-scale (A)' button
    logger.debug("ArgosPgPlotItem.autoBtnClicked, mode:{}".format(self.autoBtn.mode))
    if self.autoBtn.mode == 'auto':
        self.autoBtn.hide()
    else:
        # Does this occur?
        msg = "Unexpected autobutton mode: {}".format(self.autoBtn.mode)
        if DEBUGGING:
            raise ValueError(msg)
        else:
            # NOTE(review): logger.warn is a deprecated alias of logger.warning.
            logger.warn(msg)
    logger.debug("Emitting sigAxisReset({}) for {!r}".format(axisNumber, self))
    self.sigAxisReset.emit(axisNumber)
def add(self, date_range, library_name):
    """Adds the library with the given date range to the underlying collection of libraries used by this store.

    The underlying libraries should not overlap as the date ranges are assumed to be CLOSED_CLOSED by this function
    and the rest of the class.

    Arguments:

    date_range: A date range provided on the assumption that it is CLOSED_CLOSED. If for example the underlying
    libraries were split by year, the start of the date range would be datetime.datetime(year, 1, 1) and the end
    would be datetime.datetime(year, 12, 31, 23, 59, 59, 999000). The date range must fall on UTC day boundaries,
    that is the start must be at midnight and the end must be 1 millisecond before midnight.

    library_name: The name of the underlying library. This must be the name of a valid Arctic library
    """
    # check that the library is valid
    try:
        self._arctic_lib.arctic[library_name]
    except Exception as e:
        logger.error("Could not load library")
        raise e
    assert date_range.start and date_range.end, "Date range should have start and end properties {}".format(date_range)
    # Normalize both endpoints to UTC; naive datetimes are assumed UTC.
    start = date_range.start.astimezone(mktz('UTC')) if date_range.start.tzinfo is not None else date_range.start.replace(tzinfo=mktz('UTC'))
    end = date_range.end.astimezone(mktz('UTC')) if date_range.end.tzinfo is not None else date_range.end.replace(tzinfo=mktz('UTC'))
    # Enforce the day-boundary contract described in the docstring.
    assert start.time() == time.min and end.time() == end_time_min, "Date range should fall on UTC day boundaries {}".format(date_range)
    # check that the date range does not overlap
    library_metadata = self._get_library_metadata(date_range)
    if len(library_metadata) > 1 or (len(library_metadata) == 1 and library_metadata[0] != library_name):
        raise OverlappingDataException("""There are libraries that overlap with the date range:
library: {}
overlapping libraries: {}""".format(library_name, [l.library for l in library_metadata]))
    # Upsert so re-adding the same library just updates its range.
    self._collection.update_one({'library_name': library_name},
                                {'$set': {'start': start, 'end': end}},
                                upsert=True)
def _validate_iss(claims, issuer=None):
    """Validates that the 'iss' claim is valid.

    The "iss" (issuer) claim identifies the principal that issued the
    JWT. The processing of this claim is generally application specific.
    The "iss" value is a case-sensitive string containing a StringOrURI
    value. Use of this claim is OPTIONAL.

    Args:
        claims (dict): The claims dictionary to validate.
        issuer (str or iterable): Acceptable value(s) for the issuer that
            signed the token.

    Raises:
        JWTClaimsError: If acceptable issuers are given and the token's
            'iss' claim is not among them.
    """
    # No acceptable issuers configured means no validation is performed.
    if issuer is not None:
        # Normalize a single issuer string into a one-element tuple so the
        # membership test below works uniformly.
        if isinstance(issuer, string_types):
            issuer = (issuer,)
        # A missing 'iss' claim (claims.get() -> None) also fails this test.
        if claims.get('iss') not in issuer:
            raise JWTClaimsError('Invalid issuer')
def factory_reset(self, ids, except_ids=False, except_baudrate_and_ids=False):
    """Reset all motors on the bus to their factory default settings.

    :param ids: iterable of motor ids to reset
    :param except_ids: preserve the motors' ids through the reset
    :param except_baudrate_and_ids: preserve both baudrate and ids
        (takes precedence over ``except_ids``)
    """
    # Reset modes sent on the wire: 0xFF = full reset, 0x01 = keep ids,
    # 0x02 = keep baudrate and ids.
    mode = (0x02 if except_baudrate_and_ids else
            0x01 if except_ids else
            0xFF)
    for id in ids:  # NOTE(review): shadows the builtin ``id``
        try:
            self._send_packet(self._protocol.DxlResetPacket(id, mode))
        except (DxlTimeoutError, DxlCommunicationError):
            # Communication errors are deliberately ignored so the remaining
            # motors are still reset -- presumably a resetting motor may not
            # answer; confirm this is the intended best-effort behaviour.
            pass
def get_user_by_auth_token(token=None):
    """Return the AuthUser associated to the token, otherwise it will return None.

    If token is not provided, it will pull it from the headers: Authorization

    Exception:
        Along with AuthError, the JWT verification in ``utils.unsign_jwt``
        may raise its own errors.

    :param token: JWT bearer token; pulled from the request when omitted
    :return: AuthUser
    """
    if not token:
        # Fall back to the Authorization header of the current request.
        token = request.get_auth_token()
    secret_key = get_jwt_secret()
    # Verify the JWT signature and decode its payload.
    s = utils.unsign_jwt(token=token, secret_key=secret_key, salt=get_jwt_salt())
    if "id" not in s:
        raise exceptions.AuthError("Invalid Authorization Bearer Token")
    return get_user_by_id(int(s["id"]))
def dna(self, dna):
    """Replace this chromosome's DNA with new DNA of equal length,
    assigning the new DNA to the chromosome's genes sequentially.

    For example, if a chromosome contains these genes...
    1. 100100
    2. 011011
    ...and the new DNA is 1111100000, the genes become:
    1. 11111
    2. 00000
    """
    # The replacement DNA must cover the chromosome exactly.
    assert self.length == len(dna)
    offset = 0
    for gene in self.genes:
        # Hand each gene its slice of the new DNA, in order.
        gene.dna = dna[offset:offset + gene.length]
        offset += gene.length
def _parse_entry(self, cols):
    """Parses an entry's row and adds the result to :py:attr:`entries`.

    Parameters
    ----------
    cols : :class:`bs4.ResultSet`
        The list of columns for that entry.
    """
    # Columns start with rank, name, vocation; the rest depends on category.
    # '\xa0' (non-breaking space) is normalized to a regular space.
    rank, name, vocation, *values = [c.text.replace('\xa0', ' ').strip() for c in cols]
    rank = int(rank)
    # Experience and loyalty tables carry the extra column *before* the value;
    # other categories may have extra columns after it.
    if self.category == Category.EXPERIENCE or self.category == Category.LOYALTY_POINTS:
        extra, value = values
    else:
        value, *extra = values
    # Values are thousands-separated with commas.
    value = int(value.replace(',', ''))
    if self.category == Category.EXPERIENCE:
        # extra parses as an int here -- presumably the character level.
        entry = ExpHighscoresEntry(name, rank, vocation, value, int(extra))
    elif self.category == Category.LOYALTY_POINTS:
        entry = LoyaltyHighscoresEntry(name, rank, vocation, value, extra)
    else:
        entry = HighscoresEntry(name, rank, vocation, value)
    self.entries.append(entry)
def static2dplot_timeaveraged(var, time):
    """Pop up a static 2D plot of time-averaged spectrogram values.

    If the static_taverage option is set in tplot and is supplied with a
    time range, then the spectrogram plot(s) for which it is set will have
    another window pop up, where the displayed y and z values are averaged
    over the seconds between the specified time range.

    :param var: name of the tplot variable to average and display
    :param time: two-element sequence of time strings (%Y-%m-%d %H:%M:%S)
        giving the left and right bounds of the averaging window
    """
    # Grab names of data loaded in as tplot variables.
    names = list(pytplot.data_quants.keys())
    # Get data we'll actually work with here (only spectrogram variables).
    valid_variables = tplot_utilities.get_data(names)
    # Don't plot anything unless we have spectrograms with which to work.
    if valid_variables:
        # Get z label
        labels = tplot_utilities.get_labels_axis_types(names)
        # Put together data in easy-to-access format for plots.
        data = {}
        for name in valid_variables:
            bins = tplot_utilities.get_bins(name)
            time_values, z_values = tplot_utilities.get_z_t_values(name)
            data[name] = [bins, z_values, time_values]
        # NOTE(review): after this loop, ``name`` and ``time_values`` hold
        # the values of the *last* iterated variable, not necessarily
        # ``var``; several statements below read them.  This is only
        # correct when ``var`` is the last (or only) valid variable —
        # confirm whether ``data[var]`` / ``labels[var]`` were intended.
        # Set up the 2D static plot window.
        pytplot.static_tavg_window = pg.GraphicsWindow()
        pytplot.static_tavg_window.resize(1000, 600)
        pytplot.static_tavg_window.setWindowTitle('Time-Averaged Values Static Window')
        plot = pytplot.static_tavg_window.addPlot(title='2D Static Plot for Time-Averaged Values', row=0, col=0)
        # Make it so that whenever this first starts up, you just have an empty plot
        plot_data = plot.plot([], [])
        if var in valid_variables:
            # Get min/max values of data's time range (in both datetime and
            # seconds since epoch).
            t_min = np.nanmin(time_values)
            t_min_str = tplot_utilities.int_to_str(np.nanmin(time_values))
            t_min_conv_back = tplot_utilities.str_to_int(t_min_str)
            t_max = np.nanmax(time_values)
            t_max_str = tplot_utilities.int_to_str(np.nanmax(time_values))
            t_max_conv_back = tplot_utilities.str_to_int(t_max_str)
            # Convert user input to seconds since epoch.
            user_time = [tplot_utilities.str_to_int(i) for i in time]
            # Covering situation where user entered a time not in the dataset!
            # As long as they used a time in the dataset, this will not trigger.
            for t, datetime in enumerate(user_time):
                if datetime not in range(t_min_conv_back, t_max_conv_back + 1):
                    # Keep prompting until a parseable, in-range time is given.
                    while True:
                        try:
                            if t == 0:
                                time_bound = 'left bound'
                            else:
                                time_bound = 'right bound'
                            user_time[t] = tplot_utilities.str_to_int(input('Chosen {} time [{}] not in range of data [{} to {}]. Input new time (%Y-%m-%d %H:%M:%S).'.format(time_bound, tplot_utilities.int_to_str(datetime), t_min_str, t_max_str)))
                        except ValueError:
                            # Unparseable input: prompt again.
                            continue
                        else:
                            if user_time[t] not in range(int(t_min), int(t_max)):
                                continue
                            else:
                                break
            # Get index of the time closest to the user's time choice.
            time_array = np.array(data[var][2])
            array = np.asarray(time_array)
            idx = [(np.abs(array - i)).argmin() for i in user_time]
            # Average values based on the chosen time range's indices.
            time_diff = abs(idx[0] - idx[1])
            # Make sure to account for edge problem
            if idx[1] != -1:
                y_values_slice = data[name][1][idx[0]:idx[1] + 1]
            else:
                y_values_slice = data[name][1][idx[0]:]
            # NOTE(review): ``np.float`` is deprecated and removed in
            # NumPy >= 1.24; ``float`` is the drop-in replacement.
            y_values_avgd = np.nansum(y_values_slice, axis=0) / np.float(time_diff)
            # If user indicated they wanted the plot's axes to be logged,
            # log 'em.  But first make sure that values in x and y are loggable!
            x_axis = False
            y_axis = False
            # Checking x axis
            if np.nanmin(data[name][0][:]) < 0:
                print('Negative data is incompatible with log plotting.')
            elif np.nanmin(data[name][0][:]) >= 0 and labels[name][2] == 'log':
                x_axis = True
            # Checking y axis
            if np.nanmin(list(data[name][1][idx[0]])) < 0 or np.nanmin(list(data[name][1][idx[1]])) < 0:
                print('Negative data is incompatible with log plotting')
            elif np.nanmin(list(data[name][1][idx[0]])) >= 0 and np.nanmin(list(data[name][1][idx[1]])) >= 0 and labels[name][3] == 'log':
                y_axis = True
            # Set plot labels
            plot.setLabel('bottom', '{}'.format(labels[name][0]))
            plot.setLabel('left', '{}'.format(labels[name][1]))
            plot.setLogMode(x=x_axis, y=y_axis)
            # Update x and y range if user modified it
            tplot_utilities.set_x_range(name, x_axis, plot)
            tplot_utilities.set_y_range(name, y_axis, plot)
            # Plot the averaged values over the chosen time range.
            plot_data.setData(data[var][0][:], y_values_avgd)
def _close_remaining_channels(self):
    """Forcefully close all open channels.

    :return:
    """
    # Iterate over a snapshot of the ids since cleanup mutates the dict.
    for chan_id in list(self._channels):
        channel = self._channels[chan_id]
        channel.set_state(Channel.CLOSED)
        channel.close()
        self._cleanup_channel(chan_id)
def _adjust_offset ( self , real_wave_mfcc , algo_parameters ) :
"""OFFSET""" | self . log ( u"Called _adjust_offset" )
self . _apply_offset ( offset = algo_parameters [ 0 ] ) |
def autodiscover():
    """Import each installed app's ``page_processors`` module so that any
    ``processor_for`` decorators get executed.

    Modelled on ``django.contrib.admin.autodiscover``.  Apps that fail to
    import, or that have no ``page_processors`` submodule, are skipped
    silently; genuine errors *inside* an existing submodule are re-raised.
    Runs only once per process (guarded by the module-level ``LOADED``).
    """
    global LOADED
    if LOADED:
        return
    LOADED = True
    for app in get_app_name_list():
        try:
            module = import_module(app)
        except ImportError:
            # The app itself failed to import; nothing to discover.
            continue
        try:
            import_module("%s.page_processors" % app)
        except Exception:
            # Was a bare ``except:``, which also swallowed SystemExit and
            # KeyboardInterrupt.  Only re-raise when the submodule exists,
            # i.e. the failure happened inside page_processors itself.
            if module_has_submodule(module, "page_processors"):
                raise
def get_initializer(default_init_type: str, default_init_scale: float,
                    default_init_xavier_rand_type: str,
                    default_init_xavier_factor_type: str,
                    embed_init_type: str, embed_init_sigma: float,
                    rnn_init_type: str,
                    extra_initializers: Optional[List[Tuple[str, mx.initializer.Initializer]]] = None) -> mx.initializer.Initializer:
    """Return a mixed MXNet initializer.

    :param default_init_type: The default weight initializer type.
    :param default_init_scale: The scale used for default weight
        initialization (only used with uniform initialization).
    :param default_init_xavier_rand_type: Xavier random number generator type.
    :param default_init_xavier_factor_type: Xavier factor type.
    :param embed_init_type: Embedding matrix initialization type.
    :param embed_init_sigma: Sigma for normal initialization of embedding matrix.
    :param rnn_init_type: Initialization type for RNN h2h matrices.
    :param extra_initializers: Optional initializers provided from other sources.
    :return: Mixed initializer.
    """
    # Default initializer (validated first, matching the argument order).
    if default_init_type == C.INIT_XAVIER:
        default_initializer = mx.init.Xavier(
            rnd_type=default_init_xavier_rand_type,
            factor_type=default_init_xavier_factor_type,
            magnitude=default_init_scale)
    elif default_init_type == C.INIT_UNIFORM:
        default_initializer = mx.init.Uniform(scale=default_init_scale)
    else:
        raise ValueError("Unknown default initializer %s." % default_init_type)
    # Embedding initializer; an empty list means "fall through to default".
    if embed_init_type == C.EMBED_INIT_NORMAL:
        embed_init = [(C.EMBED_INIT_PATTERN, mx.init.Normal(sigma=embed_init_sigma))]
    elif embed_init_type == C.EMBED_INIT_DEFAULT:
        embed_init = []
    else:
        raise ValueError('Unknown embedding initializer: %s' % embed_init_type)
    # RNN h2h initializer.
    if rnn_init_type == C.RNN_INIT_ORTHOGONAL:
        rnn_init = [(C.RNN_INIT_PATTERN, mx.initializer.Orthogonal())]
    elif rnn_init_type == C.RNN_INIT_ORTHOGONAL_STACKED:
        rnn_init = [(C.RNN_INIT_PATTERN, StackedOrthogonalInit(scale=1.0, rand_type="eye"))]
    elif rnn_init_type == C.RNN_INIT_DEFAULT:
        rnn_init = []
    else:
        raise ValueError('Unknown RNN initializer: %s' % rnn_init_type)
    # Earlier patterns take precedence in mx.initializer.Mixed, so extras
    # (if any) come first, then embed/rnn patterns, then the default.
    params_init_pairs = embed_init + rnn_init + [(C.DEFAULT_INIT_PATTERN, default_initializer)]
    if extra_initializers is not None:
        params_init_pairs = extra_initializers + params_init_pairs
    return mx.initializer.Mixed(*zip(*params_init_pairs))
def propinfo(cls, value, prop, visitor):
    """Like :py:meth:`normalize.visitor.VisitorPattern.apply`, but takes a
    property and returns a dict with some basic info: the property's name
    and, when available, the name(s) of its value type(s).  With no
    property, the dict describes the type *value* itself."""
    if not prop:
        return {"name": value.__name__}
    info = {"name": prop.name}
    valuetype = prop.valuetype
    if valuetype:
        if isinstance(valuetype, tuple):
            info['type'] = [t.__name__ for t in valuetype]
        else:
            info['type'] = valuetype.__name__
    return info
def as_hdu(self):
    """Return this image as an ``astropy.io.fits.PrimaryHDU`` object."""
    from astropy.io import fits
    # Build the FITS header from our own header, preserving key ordering.
    fits_header = fits.Header(self.get_header().items())
    return fits.PrimaryHDU(data=self.get_mddata(), header=fits_header)
def det4D(m):
    '''det4D(array) yields the determinant of the given 4x4 matrix array,
    which may have more than 2 dimensions, in which case the later
    dimensions are multiplied and added point-wise.'''
    # Leibniz expansion over all 24 permutations; terms are accumulated in
    # a fixed order so floating-point results match the closed-form sum.
    a, b, c, d = m[0], m[1], m[2], m[3]
    acc = a[3] * b[2] * c[1] * d[0]
    acc = acc - a[2] * b[3] * c[1] * d[0]
    acc = acc - a[3] * b[1] * c[2] * d[0]
    acc = acc + a[1] * b[3] * c[2] * d[0]
    acc = acc + a[2] * b[1] * c[3] * d[0]
    acc = acc - a[1] * b[2] * c[3] * d[0]
    acc = acc - a[3] * b[2] * c[0] * d[1]
    acc = acc + a[2] * b[3] * c[0] * d[1]
    acc = acc + a[3] * b[0] * c[2] * d[1]
    acc = acc - a[0] * b[3] * c[2] * d[1]
    acc = acc - a[2] * b[0] * c[3] * d[1]
    acc = acc + a[0] * b[2] * c[3] * d[1]
    acc = acc + a[3] * b[1] * c[0] * d[2]
    acc = acc - a[1] * b[3] * c[0] * d[2]
    acc = acc - a[3] * b[0] * c[1] * d[2]
    acc = acc + a[0] * b[3] * c[1] * d[2]
    acc = acc + a[1] * b[0] * c[3] * d[2]
    acc = acc - a[0] * b[1] * c[3] * d[2]
    acc = acc - a[2] * b[1] * c[0] * d[3]
    acc = acc + a[1] * b[2] * c[0] * d[3]
    acc = acc + a[2] * b[0] * c[1] * d[3]
    acc = acc - a[0] * b[2] * c[1] * d[3]
    acc = acc - a[1] * b[0] * c[2] * d[3]
    acc = acc + a[0] * b[1] * c[2] * d[3]
    return acc
def contains_rva(self, rva):
    """Check whether the section contains the relative virtual address
    provided, using an alignment-adjusted start address and an effective
    size that compensates for misleading header values."""
    # Check if the SizeOfRawData is realistic. If it's bigger than the size
    # of the whole PE file minus the start address of the section it could
    # be either truncated or the SizeOfRawData contains a misleading value.
    # In either of those cases we take the VirtualSize.
    if len(self.pe.__data__) - self.pe.adjust_FileAlignment(self.PointerToRawData, self.pe.OPTIONAL_HEADER.FileAlignment) < self.SizeOfRawData:
        # PECOFF documentation v8 says:
        # VirtualSize: The total size of the section when loaded into memory.
        # If this value is greater than SizeOfRawData, the section is zero-padded.
        # This field is valid only for executable images and should be set to zero
        # for object files.
        size = self.Misc_VirtualSize
    else:
        size = max(self.SizeOfRawData, self.Misc_VirtualSize)
    VirtualAddress_adj = self.pe.adjust_SectionAlignment(self.VirtualAddress, self.pe.OPTIONAL_HEADER.SectionAlignment, self.pe.OPTIONAL_HEADER.FileAlignment)
    # Check whether there's any section after the current one that starts
    # before the calculated end for the current one. If so, cut the current
    # section's size to fit in the range up to where the next section starts.
    if (self.next_section_virtual_address is not None and self.next_section_virtual_address > self.VirtualAddress and VirtualAddress_adj + size > self.next_section_virtual_address):
        size = self.next_section_virtual_address - VirtualAddress_adj
    return VirtualAddress_adj <= rva < VirtualAddress_adj + size
def add_source(self, name, src_dict, free=None, save_source_maps=True, use_pylike=True, use_single_psf=False):
    """Add a new source to the model.  Source properties
    (spectrum, spatial model) are set with the src_dict argument.

    Parameters
    ----------
    name : str
        Source name.
    src_dict : dict or `~fermipy.roi_model.Source` object
        Dictionary or Source object defining the properties of the source.
        NOTE(review): not referenced in this body — the source is looked
        up via ``self.roi[name]``, so it is presumably registered in the
        ROI before this is called; confirm against the caller.
    free : bool
        Initialize the source with the normalization parameter free.
    save_source_maps : bool
        Write the source map for this source to the source maps file.
    use_pylike : bool
        When False, the source map file is updated directly via
        ``_update_srcmap_file`` before the source is created.
    use_single_psf : bool
    """
    # if self.roi.has_source(name):
    #     msg = 'Source %s already exists.' % name
    #     self.logger.error(msg)
    #     raise Exception(msg)
    # Drop any stale source map for this name before (re-)adding it.
    srcmap_utils.delete_source_map(self.files['srcmap'], name)
    src = self.roi[name]
    # Record the configured exposure scale for this source if not set yet.
    if self.config['gtlike']['expscale'] is not None and name not in self._src_expscale:
        self._src_expscale[name] = self.config['gtlike']['expscale']
    if self._like is None:
        # No likelihood object yet: only bookkeeping above applies.
        return
    if not use_pylike:
        self._update_srcmap_file([src], True)
    pylike_src = self._create_source(src)
    # Initialize source as free/fixed.
    if free is not None:
        pylike_src.spectrum().normPar().setFree(free)
    # Newer ScienceTools expose per-source single-PSF integration config.
    if hasattr(pyLike, 'PsfIntegConfig') and hasattr(pyLike.PsfIntegConfig, 'set_use_single_psf'):
        config = pyLike.BinnedLikeConfig(self.like.logLike.config())
        config.psf_integ_config().set_use_single_psf(use_single_psf)
        self.like.addSource(pylike_src, config)
    else:
        self.like.addSource(pylike_src)
    self.like.syncSrcParams(str(name))
    self.like.logLike.buildFixedModelWts()
    if save_source_maps and not self.config['gtlike']['use_external_srcmap']:
        self.like.logLike.saveSourceMaps(str(self.files['srcmap']))
    self.set_exposure_scale(name)
def update_groups_for_state(self, state: State):
    """Update all the Group memberships for the users who have *state*.

    :param state: State to update for
    :return:
    """
    affected_users = get_users_for_state(state)
    for config in self.filter(states=state):
        logger.debug("in state loop")
        for member in affected_users:
            logger.debug("in user loop for {}".format(member))
            config.update_group_membership_for_user(member)
def revocation_date(self, value):
    """Set when the certificate was revoked.

    :param value: a ``datetime.datetime`` of the revocation time, or
        ``None`` when the status is "good" or "unknown"
    :raises TypeError: if *value* is neither ``None`` nor a datetime
    """
    if value is not None and not isinstance(value, datetime):
        raise TypeError(_pretty_message(
            '''
            revocation_date must be an instance of datetime.datetime, not %s
            ''',
            _type_name(value)
        ))
    self._revocation_date = value
def getISAStudy(studyNum, pathToISATABFile, noAssays=True):
    """Return a (deep-copied) Study object given its number in an ISA file.

    Typically, you should use the exploreISA function to check the contents
    of the ISA file and retrieve the study number you are interested in!

    :param studyNum: The Study number (notice it's not a zero-based index).
    :type studyNum: int
    :param pathToISATABFile: The path to the ISATAB file
    :type pathToISATABFile: str
    :param noAssays: whether to remove all assays (i.e. return a copy of the study only)
    :type noAssays: bool
    :raise FileNotFoundError: If pathToISATABFile does not contain file 'i_Investigation.txt'.
    """
    from isatools import isatab
    import copy
    # isatab.load raises FileNotFoundError itself when the investigation
    # file is missing; the previous catch-and-re-raise wrapper was
    # redundant (and clobbered the original traceback).
    isa = isatab.load(pathToISATABFile, skip_load_tables=True)
    # Deep copy so mutating the returned study never touches the loaded ISA.
    study = copy.deepcopy(isa.studies[studyNum - 1])
    if noAssays:
        study.assays = []
    return study
def camel_2_snake(name):
    """Converts CamelCase to camel_case."""
    # First split an uppercase letter that begins a lowercase run from
    # whatever precedes it, then split any lower/digit -> upper boundary.
    step1 = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    step2 = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', step1)
    return step2.lower()
def get_url_image_prediction(self, model_id, picture_url, token=None, url=API_GET_PREDICTION_IMAGE_URL):
    """Gets a prediction for a publicly accessible image URL based on a
    previously trained model.

    :param model_id: string, once you train a model you'll be given a model
        id to use.
    :param picture_url: string, in the form of a url pointing to a publicly
        accessible image file.
    returns: requests object
    """
    auth_header = 'Bearer ' + self.check_for_token(token)
    encoder = MultipartEncoder(
        fields={'sampleLocation': picture_url, 'modelId': model_id})
    headers = {
        'Authorization': auth_header,
        'Cache-Control': 'no-cache',
        'Content-Type': encoder.content_type,
    }
    return requests.post(url, headers=headers, data=encoder)
def date_added(self, date_added):
    """Updates the security label's dateAdded field locally and remotely.

    Args:
        date_added: converted to %Y-%m-%dT%H:%M:%SZ date format
    """
    formatted = self._utils.format_datetime(
        date_added, date_format='%Y-%m-%dT%H:%M:%SZ')
    self._data['dateAdded'] = formatted
    request = self._base_request
    request['dateAdded'] = formatted
    return self._tc_requests.update(request, owner=self.owner)
def add_grant(self, grant):
    """Adds a Grant that the provider should support.

    :param grant: An instance of a class that extends
        :class:`oauth2.grant.GrantHandlerFactory`
    :type grant: oauth2.grant.GrantHandlerFactory
    """
    # Sentinel-based getattr keeps hasattr semantics: an attribute that
    # exists but is None is still copied over.
    _missing = object()
    expires = getattr(grant, "expires_in", _missing)
    if expires is not _missing:
        self.token_generator.expires_in[grant.grant_type] = expires
    refresh = getattr(grant, "refresh_expires_in", _missing)
    if refresh is not _missing:
        self.token_generator.refresh_expires_in = refresh
    self.grant_types.append(grant)
def Muller_Steinhagen_Heck(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1):
    r'''Calculates two-phase pressure drop with the Muller-Steinhagen and Heck
    (1986) correlation from [1]_, also in [2]_ and [3]_.

    .. math::
        \Delta P_{tp} = G_{MSH}(1-x)^{1/3} + \Delta P_{go} x^3

    .. math::
        G_{MSH} = \Delta P_{lo} + 2\left[\Delta P_{go} - \Delta P_{lo}\right]x

    Parameters
    ----------
    m : float
        Mass flow rate of fluid, [kg/s]
    x : float
        Quality of fluid, [-]
    rhol : float
        Liquid density, [kg/m^3]
    rhog : float
        Gas density, [kg/m^3]
    mul : float
        Viscosity of liquid, [Pa*s]
    mug : float
        Viscosity of gas, [Pa*s]
    D : float
        Diameter of pipe, [m]
    roughness : float, optional
        Roughness of pipe for use in calculating friction factor, [m]
    L : float, optional
        Length of pipe, [m]

    Returns
    -------
    dP : float
        Pressure drop of the two-phase flow, [Pa]

    Notes
    -----
    Applicable for 0 < x < 1. Developed to be easily integrated. The
    contribution of each term to the overall pressure drop can be
    understood in this model.

    Examples
    --------
    >>> Muller_Steinhagen_Heck(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
    ... mug=14E-6, D=0.05, roughness=0, L=1)
    793.4465457435081

    References
    ----------
    .. [1] Müller-Steinhagen, H, and K Heck. "A Simple Friction Pressure Drop
       Correlation for Two-Phase Flow in Pipes." Chemical Engineering and
       Processing: Process Intensification 20, no. 6 (November 1, 1986):
       297-308. doi:10.1016/0255-2701(86)80008-3.
    .. [2] Mekisso, Henock Mateos. "Comparison of Frictional Pressure Drop
       Correlations for Isothermal Two-Phase Horizontal Flow." Thesis,
       Oklahoma State University, 2013.
       https://shareok.org/handle/11244/11109.
    .. [3] Thome, John R. "Engineering Data Book III." Wolverine Tube Inc
       (2004). http://www.wlv.com/heat-transfer-databook/
    '''
    def _single_phase_dP(rho, mu):
        # Darcy pressure drop if the entire mass flow were this one phase.
        v = m / rho / (pi / 4 * D ** 2)
        Re = Reynolds(V=v, rho=rho, mu=mu, D=D)
        fd = friction_factor(Re=Re, eD=roughness / D)
        return fd * L / D * (0.5 * rho * v ** 2)

    dP_lo = _single_phase_dP(rhol, mul)  # liquid-only term
    dP_go = _single_phase_dP(rhog, mug)  # gas-only term
    G_MSH = dP_lo + 2 * (dP_go - dP_lo) * x
    return G_MSH * (1 - x) ** (1 / 3.) + dP_go * x ** 3
def parse(self):
    """Apply search template: translate the (possibly bytes) search
    pattern, resolving global Unicode/ASCII/verbose flags, and retry the
    whole parse when a global flag is only discovered mid-pattern.

    :return: the translated pattern (bytes if the input was bytes)
    """
    self.verbose = bool(self.re_verbose)
    self.unicode = bool(self.re_unicode)
    # Track which global flags have already triggered a re-parse so a
    # pattern that keeps toggling them is detected as a loop.
    self.global_flag_swap = {"unicode": ((self.re_unicode is not None) if not _util.PY37 else False), "verbose": False}
    self.temp_global_flag_swap = {"unicode": False, "verbose": False}
    self.ascii = self.re_unicode is not None and not self.re_unicode
    if not self.unicode and not self.ascii:
        self.unicode = True
    new_pattern = []
    # Bytes patterns are processed as latin-1 text and re-encoded at the end.
    text = self.process_quotes(self.search.decode('latin-1') if self.is_bytes else self.search)
    i = _util.StringIter(text)
    iter(i)
    retry = True
    while retry:
        retry = False
        try:
            new_pattern = self.main_group(i)
        except GlobalRetryException:
            # Prevent a loop of retry over and over for a pattern like ((?u)(?a))
            # or (?-x:(?x))
            if self.temp_global_flag_swap['unicode']:
                if self.global_flag_swap['unicode']:
                    raise LoopException('Global unicode flag recursion.')
                else:
                    self.global_flag_swap["unicode"] = True
            if self.temp_global_flag_swap['verbose']:
                if self.global_flag_swap['verbose']:
                    raise LoopException('Global verbose flag recursion.')
                else:
                    self.global_flag_swap['verbose'] = True
            self.temp_global_flag_swap = {"unicode": False, "verbose": False}
            # Restart parsing from the beginning of the pattern.
            i.rewind(i.index)
            retry = True
    return "".join(new_pattern).encode('latin-1') if self.is_bytes else "".join(new_pattern)
def classify_by_name(names):
    """Classify a (composite) ligand by the HETID(s)."""
    if len(names) > 3:
        # More than three residue names: treat as a polymer chain.
        if set(names) & set(config.RNA):
            ligtype = 'RNA'
        elif set(names) & set(config.DNA):
            ligtype = 'DNA'
        else:
            ligtype = "POLYMER"
    else:
        ligtype = 'SMALLMOLECULE'
    # Tag metal ions: a lone ion is classified as ION outright; within a
    # composite, append "+ION" once no matter how many ions occur.
    for name in names:
        if name not in config.METAL_IONS:
            continue
        if len(names) == 1:
            ligtype = 'ION'
        elif "ION" not in ligtype:
            ligtype += '+ION'
    return ligtype
def strip_accents(x):
    u"""Strip accents in the input phrase X.

    Strip accents in the input phrase X (assumed in UTF-8) by replacing
    accented characters with their unaccented cousins (e.g. é by e).

    :param x: the input phrase to strip.
    :type x: string
    :return: Return such a stripped X.
    """
    # De-LaTeXify accent macros first: lowercase...
    x = re_latex_lowercase_a.sub("a", x)
    x = re_latex_lowercase_ae.sub("ae", x)
    x = re_latex_lowercase_oe.sub("oe", x)
    x = re_latex_lowercase_e.sub("e", x)
    x = re_latex_lowercase_i.sub("i", x)
    x = re_latex_lowercase_o.sub("o", x)
    x = re_latex_lowercase_u.sub("u", x)
    # BUGFIX: previously replaced accented y with "x" (e.g. \'y -> x).
    x = re_latex_lowercase_y.sub("y", x)
    x = re_latex_lowercase_c.sub("c", x)
    x = re_latex_lowercase_n.sub("n", x)
    # ...then uppercase:
    x = re_latex_uppercase_a.sub("A", x)
    x = re_latex_uppercase_ae.sub("AE", x)
    x = re_latex_uppercase_oe.sub("OE", x)
    x = re_latex_uppercase_e.sub("E", x)
    x = re_latex_uppercase_i.sub("I", x)
    x = re_latex_uppercase_o.sub("O", x)
    x = re_latex_uppercase_u.sub("U", x)
    x = re_latex_uppercase_y.sub("Y", x)
    x = re_latex_uppercase_c.sub("C", x)
    x = re_latex_uppercase_n.sub("N", x)
    # convert input into Unicode string:
    try:
        y = unicode(x, "utf-8")
    except Exception:
        # something went wrong, probably the input wasn't UTF-8
        return x
    # asciify Latin-1 lowercase characters:
    y = re_unicode_lowercase_a.sub("a", y)
    y = re_unicode_lowercase_ae.sub("ae", y)
    y = re_unicode_lowercase_oe.sub("oe", y)
    y = re_unicode_lowercase_e.sub("e", y)
    y = re_unicode_lowercase_i.sub("i", y)
    y = re_unicode_lowercase_o.sub("o", y)
    y = re_unicode_lowercase_u.sub("u", y)
    y = re_unicode_lowercase_y.sub("y", y)
    y = re_unicode_lowercase_c.sub("c", y)
    y = re_unicode_lowercase_n.sub("n", y)
    y = re_unicode_lowercase_ss.sub("ss", y)
    # asciify Latin-1 uppercase characters:
    y = re_unicode_uppercase_a.sub("A", y)
    y = re_unicode_uppercase_ae.sub("AE", y)
    y = re_unicode_uppercase_oe.sub("OE", y)
    y = re_unicode_uppercase_e.sub("E", y)
    y = re_unicode_uppercase_i.sub("I", y)
    y = re_unicode_uppercase_o.sub("O", y)
    y = re_unicode_uppercase_u.sub("U", y)
    y = re_unicode_uppercase_y.sub("Y", y)
    y = re_unicode_uppercase_c.sub("C", y)
    y = re_unicode_uppercase_n.sub("N", y)
    # return UTF-8 representation of the Unicode string:
    return y.encode("utf-8")
def get_contained_extras(marker):
    """Collect "extra == ..." operands from a marker.

    Returns a set of str (the docstring previously claimed a list).  Each
    str is a specified extra in this marker.  A falsy *marker* yields an
    empty set.
    """
    if not marker:
        return set()
    # Re-parse via str() so both Marker objects and plain strings work.
    marker = Marker(str(marker))
    extras = set()
    _markers_collect_extras(marker._markers, extras)
    return extras
def show(self, ax: plt.Axes = None, figsize: tuple = (3, 3), title: Optional[str] = None, hide_axis: bool = True, cmap: str = 'tab20', alpha: float = 0.5, **kwargs):
    "Show the `ImageSegment` on `ax`."
    axes = show_image(self, ax=ax, hide_axis=hide_axis, cmap=cmap,
                      figsize=figsize, interpolation='nearest',
                      alpha=alpha, vmin=0)
    if title:
        axes.set_title(title)
def parse_block(self, contents, parent, module, depth):
    """Extracts all executable definitions from the specified string and
    adds them to the specified parent, recursing into embedded
    executables, then parses the documentation decorating the
    non-embedded executables.
    """
    for anexec in self.RE_EXEC.finditer(contents):
        x = self._process_execs(anexec, parent, module)
        parent.executables[x.name.lower()] = x
        # Module-level executables marked public are also registered there.
        if isinstance(parent, Module) and "public" in x.modifiers:
            parent.publics[x.name.lower()] = 1
        # To handle the embedded executables, run this method recursively.
        self.parse_block(x.contents, x, module, depth + 1)
    # Now that we have the executables, we can use them to compile a string
    # that includes only documentation *external* to the executable definitions.
    # Because we enforce adding the name to 'end subroutine' statements etc.
    # all the embedded executables haven't been parsed yet.
    if len(parent.executables) > 0:
        remove = []
        for x in parent.executables:
            remove.append((parent.executables[x].start, parent.executables[x].end))
        remove.sort(key=lambda tup: tup[0])
        retain = []
        cur_end = 0
        for rem in remove:
            # Keep everything up to and including the executable's
            # signature line (the first newline inside its span).
            if "\n" in contents[rem[0] + 1:rem[1]]:
                signature = contents[rem[0] + 1:rem[1]].index("\n") + 2
                keep = contents[cur_end:rem[0] + signature]
                cur_end = rem[1]
                retain.append(keep)
            # NOTE(review): a span with no newline is silently skipped
            # (nothing retained, cur_end not advanced) — confirm intended.
        # Now we have a string of documentation segments and the signatures they
        # decorate that only applies to the non-embedded subroutines
        docsearch = "".join(retain)
        docblocks = self.docparser.parse_docs(docsearch, parent)
        # Process the decorating documentation for the executables including the
        # parameter definitions.
        for x in parent.executables:
            self._process_docs(parent.executables[x], docblocks, parent, module, docsearch)
def node_setup(domain, master, ticket):
    '''Setup the icinga2 node.

    Returns::

        icinga2 node setup --ticket TICKET_ID --endpoint master.domain.tld --zone domain.tld --master_host master.domain.tld --trustedcert /etc/icinga2/pki/trusted-master.crt

    CLI Example:

    .. code-block:: bash

        salt '*' icinga2.node_setup domain.tld master.domain.tld TICKET_ID
    '''
    cmd = [
        "icinga2", "node", "setup",
        "--ticket", ticket,
        "--endpoint", master,
        "--zone", domain,
        "--master_host", master,
        "--trustedcert", "{0}trusted-master.crt".format(get_certs_path()),
    ]
    # python_shell=False: args are passed as a list, no shell interpolation.
    return __salt__['cmd.run_all'](cmd, python_shell=False)
def fetch(self, wait=0):
    """get the task result objects.

    :param int wait: how many milliseconds to wait for a result
    :return: an unsorted list of task objects, or implicitly None when
        this group has not been started yet
    """
    if self.started:
        # NOTE: this calls the *module-level* ``fetch`` function, which
        # this method shadows by name.
        return fetch(self.id, wait=wait, cached=self.cached)
def masked_local_attention_2d(q, k, v, query_shape=(8, 16), memory_flange=(8, 16), name=None):
    """Strided block local self-attention.

    Each position in a query block can attend to all the generated queries in
    the query block, which are generated in raster scan, and positions that are
    generated to the left and top. The shapes are specified by query shape and
    memory flange. Note that if you're using this function, you do not need to
    right shift. Right shifting happens inside this function separately for each
    block.

    Args:
      q: a Tensor with shape [batch, heads, h, w, depth_k]
      k: a Tensor with shape [batch, heads, h, w, depth_k]
      v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
        implementation, depth_v must be equal to depth_k.
      query_shape: an tuple indicating the height and width of each query block.
        query_shape = block_shape
      memory_flange: an integer indicating how much to look in height and width
        from each query block.
        memory shape = query_shape + (block_flange[0], 2*block_flange[1])
      name: an optional string

    Returns:
      a Tensor of shape [batch, heads, h, w, depth_v]
    """
    with tf.variable_scope(name, default_name="local_masked_self_attention_2d", values=[q, k, v]):
        v_shape = common_layers.shape_list(v)
        # Pad query to ensure multiple of corresponding lengths.
        q = pad_to_multiple_2d(q, query_shape)
        # Set up query blocks.
        q_indices = gather_indices_2d(q, query_shape, query_shape)
        q_new = gather_blocks_2d(q, q_indices)
        # Set up key and value blocks.
        k_flange, k_center = get_memory_region(k, query_shape, memory_flange, q_indices)
        v_flange, v_center = get_memory_region(v, query_shape, memory_flange, q_indices)
        if k_flange is not None:
            k_new = tf.concat([k_flange, k_center], axis=3)
            v_new = tf.concat([v_flange, v_center], axis=3)
        else:
            k_new = k_center
            v_new = v_center
        # Set up the masks.
        query_elements = np.prod(query_shape)
        padding_mask = None
        if k_flange is not None:
            # Mask out padded flange positions with a large negative bias.
            padding_mask = tf.expand_dims(embedding_to_padding(k_flange) * -1e9, axis=-2)
            padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1])
        # Causal (lower-triangular) bias within each query block enforces
        # the raster-scan generation order; no external right shift needed.
        center_attention_bias = attention_bias_lower_triangle(np.prod(query_elements))
        center_attention_bias = tf.reshape(center_attention_bias, [1, 1, 1, query_elements, query_elements])
        v_center_shape = common_layers.shape_list(v_center)
        center_attention_bias = tf.tile(center_attention_bias, [v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1])
        if padding_mask is not None:
            # Combine the mask for padding and visible region.
            attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4)
        else:
            attention_bias = center_attention_bias
        output = dot_product_attention(q_new, k_new, v_new, attention_bias, dropout_rate=0., name="masked_local_2d", make_image_summary=False)
        # Put representations back into original shapes.
        padded_q_shape = common_layers.shape_list(q)
        output = scatter_blocks_2d(output, q_indices, padded_q_shape)
        # Remove the padding if introduced.
        output = tf.slice(output, [0, 0, 0, 0, 0], [-1, -1, v_shape[2], v_shape[3], -1])
        return output
def request(self, method, uri, params=None, data=None, headers=None, auth=None, timeout=None, allow_redirects=False):
    """Issue an HTTP request for *uri*, resolved relative to this resource.

    The relative *uri* is first turned into an absolute URL via
    ``relative_uri``; the actual transport is delegated to
    ``self.domain.request`` with all options forwarded unchanged.
    """
    absolute_url = self.relative_uri(uri)
    return self.domain.request(
        method,
        absolute_url,
        params=params,
        data=data,
        headers=headers,
        auth=auth,
        timeout=timeout,
        allow_redirects=allow_redirects,
    )
def send_output(self, value, stdout):
    """Write the expression's value and captured stdout back to the user.

    >>> print('cash rules everything around me')
    cash rules everything around me
    """
    out = self.writer
    # Echo the value the way the interactive interpreter would (repr).
    if value is not None:
        out.write('{!r}\n'.format(value).encode('utf8'))
    # Forward anything the evaluated code printed.
    if stdout:
        out.write(stdout.encode('utf8'))
    # Flush the transport buffer before accepting further input.
    yield from out.drain()
def _parseParams(self):
    """Parse the tag's parameters from their raw HTML string into a dict.

    Walks the raw element stored in :attr:`_element` with a small state
    machine and stores each ``key=value`` pair into :attr:`params`.
    Handles unquoted single-word values as well as values quoted with
    ``'`` or ``"`` (including backslash-escaped quotes inside them).
    A trailing ``/`` (self-closing tag) is removed from the parameters
    and flags the element as a non-pair tag.
    """
    # check if there are any parameters
    if " " not in self._element or "=" not in self._element:
        return
    # remove '<' & '>'
    params = self._element.strip()[1:-1].strip()
    # remove tagname
    offset = params.find(self.getTagName()) + len(self.getTagName())
    params = params[offset:].strip()
    # parser machine -- states:
    #   0: reading a key
    #   1: deciding how the value is delimited
    #   2: reading an unquoted one-word value
    #   3: reading a quoted value
    next_state = 0
    key = ""
    value = ""
    end_quote = ""
    # Two-character lookbehind, used to detect escaped closing quotes.
    buff = ["", ""]
    for c in params:
        if next_state == 0:  # key
            if c.strip() != "":  # safer than listing space, tab and all
                if c == "=":  # possible whitespaces in UTF
                    next_state = 1
                else:
                    key += c
        elif next_state == 1:  # value decisioner
            if c.strip() != "":  # skip whitespaces
                if c == "'" or c == '"':
                    next_state = 3
                    end_quote = c
                else:
                    next_state = 2
                    value += c
        elif next_state == 2:  # one word parameter without quotes
            if c.strip() == "":
                next_state = 0
                self.params[key] = value
                key = ""
                value = ""
            else:
                value += c
        elif next_state == 3:  # quoted string
            # Closing quote only counts when not escaped: "\" escapes it,
            # but a doubled "\\" immediately before does not.
            if c == end_quote and (buff[0] != "\\" or (buff[0]) == "\\" and buff[1] == "\\"):
                next_state = 0
                self.params[key] = unescape(value, end_quote)
                key = ""
                value = ""
                end_quote = ""
            else:
                value += c
        # Shift the lookbehind buffer with the current character.
        buff = _rotate_buff(buff)
        buff[0] = c
    # Flush a parameter left unterminated at end of input.
    if key:
        if end_quote and value.strip():
            self.params[key] = unescape(value, end_quote)
        else:
            self.params[key] = value
    # A loose "/" comes from a self-closing tag, not a real parameter.
    if "/" in self.params.keys():
        del self.params["/"]
        self._isnonpairtag = True
def mark_entities(tokens, positions, markers=(), style="insert"):
    """Adds special markers around tokens at specific positions (e.g., entities).

    Args:
        tokens: A list of tokens (the sentence)
        positions:
            1) A list of inclusive ranges (tuples) corresponding to the
               token ranges of the entities in order. (Assumes each entity
               has only one corresponding mention.)
            OR
            2) A dict of lists with keys corresponding to mention indices and
               values corresponding to one or more inclusive ranges
               corresponding to that mention. (Allows entities to potentially
               have multiple mentions.)
        markers: A sequence of strings (length 2 * the number of entities) to
            use as markers of the entities. Defaults to ``[[BEGINi]]`` /
            ``[[ENDi]]``. (Fixed: the previous default was a mutable ``[]``.)
        style: Where to apply the markers:
            'insert': Insert the markers as new tokens before/after each entity
            'concatenate': Prepend/append the markers to the first/last token
                of each entity
            If the tokens are going to be input to an LSTM, 'insert' is
            usually best; 'concatenate' may be better for viewing.

    Returns:
        toks: An extended list of tokens with markers around the mentions

    WARNING: if the marked token set will be used with pretrained embeddings,
    provide markers that will not result in UNK embeddings!

    Example:
        Input: (['The', 'cat', 'sat'], [(1,1)])
        Output: ['The', '[[BEGIN0]]', 'cat', '[[END0]]', 'sat']
    """
    if markers and len(markers) != 2 * len(positions):
        msg = (
            f"Expected len(markers) == 2 * len(positions), "
            f"but {len(markers)} != {2 * len(positions)}."
        )
        raise ValueError(msg)
    toks = list(tokens)
    # Flatten positions into [(range, entity_idx), ...] regardless of form.
    if isinstance(positions, list):
        markings = [(position, idx) for idx, position in enumerate(positions)]
    elif isinstance(positions, dict):
        markings = [
            (position, idx) for idx, ranges in positions.items() for position in ranges
        ]
    else:
        msg = (
            f"Argument _positions_ must be a list or dict. "
            f"Instead, got {type(positions)}"
        )
        raise ValueError(msg)
    # Process spans left-to-right so the insertion-offset arithmetic holds.
    markings = sorted(markings)
    for i, ((si, ei), idx) in enumerate(markings):
        if markers:
            start_marker = markers[2 * idx]
            end_marker = markers[2 * idx + 1]
        else:
            start_marker = f"[[BEGIN{idx}]]"
            end_marker = f"[[END{idx}]]"
        if style == "insert":
            # Each previously processed span shifted indices by two tokens.
            toks.insert(si + 2 * i, start_marker)
            toks.insert(ei + 2 * (i + 1), end_marker)
        elif style == "concatenate":
            toks[si] = start_marker + toks[si]
            toks[ei] = toks[ei] + end_marker
        else:
            raise NotImplementedError
    return toks
def generate(self, secret, type='totp', account='alex', issuer=None, algo='sha1', digits=6, init_counter=None):
    """Build an ``otpauth://`` provisioning URI.

    Format reference:
    https://github.com/google/google-authenticator/wiki/Key-Uri-Format
    """
    args = {}
    template = 'otpauth://{0}/{1}?{2}'
    # The secret arrives hex-encoded; the URI requires its base32 form.
    try:
        raw = binascii.unhexlify(secret)
        args[SECRET] = base64.b32encode(raw).decode('ascii')
    except binascii.Error as ex:
        raise ValueError(str(ex))
    except Exception as ex:
        print(ex)
        raise ValueError('invalid secret format')
    if type not in (TOTP, HOTP):
        raise ValueError('type should be totp or hotp, got ', type)
    # totp is the implied default, so only a non-default type is spelled out.
    if type != TOTP:
        args['type'] = type
    if algo not in ('sha1', 'sha256', 'sha512'):
        raise ValueError('algo should be sha1, sha256 or sha512, got ', algo)
    if algo != 'sha1':
        args['algorithm'] = algo
    if init_counter is not None:
        # A counter only makes sense for event-based (hotp) tokens.
        if type != HOTP:
            raise ValueError('type should be hotp when ', 'setting init_counter')
        if int(init_counter) < 0:
            raise ValueError('init_counter should be positive')
        args[COUNTER] = int(init_counter)
    digits = int(digits)
    if digits not in (6, 8):
        raise ValueError('digits should be 6 or 8')
    if digits != 6:
        args[DIGITS] = digits
    args[PERIOD] = 30
    # The label is "issuer:account", both URL-quoted.
    label = quote(account)
    if issuer is not None:
        label = quote(issuer) + ':' + label
        args[ISSUER] = issuer
    return template.format(type, label, urlencode(args).replace("+", "%20"))
def osd_tree(conn, cluster):
    """Run ``ceph osd tree --format=json`` over *conn* and return the parsed result.

    The JSON emitted by ceph encodes some booleans as the strings "true" /
    "false" (issue #8108), so top-level string booleans are normalized to
    real booleans before the dictionary is returned.  An unparseable
    response yields an empty dict.
    """
    ceph_bin = system.executable_path(conn, 'ceph')
    command = [
        ceph_bin,
        '--cluster={cluster}'.format(cluster=cluster),
        'osd',
        'tree',
        '--format=json',
    ]
    out, err, code = remoto.process.check(conn, command)
    try:
        tree = json.loads(b''.join(out).decode('utf-8'))
        # Convert boolean strings to actual booleans because
        # --format=json fails to do this properly.
        for key, val in tree.items():
            if val == 'true':
                tree[key] = True
            elif val == 'false':
                tree[key] = False
        return tree
    except ValueError:
        return {}
def send_email_confirmation(request, user, signup=False):
    """E-mail verification mails are sent:
    a) Explicitly: when a user signs up
    b) Implicitly: when a user attempts to log in using an unverified
       e-mail while EMAIL_VERIFICATION is mandatory.

    Especially in case of b), we want to limit the number of mails
    sent (consider a user retrying a few times), which is why there is
    a cooldown period before sending a new mail. This cooldown period
    can be configured in the ACCOUNT_EMAIL_CONFIRMATION_COOLDOWN setting.
    """
    # Local import to avoid a circular import at module load time.
    from .models import EmailAddress, EmailConfirmation
    cooldown_period = timedelta(seconds=app_settings.EMAIL_CONFIRMATION_COOLDOWN)
    email = user_email(user)
    if email:
        try:
            email_address = EmailAddress.objects.get_for_user(user, email)
            if not email_address.verified:
                if app_settings.EMAIL_CONFIRMATION_HMAC:
                    # HMAC confirmations are stateless, so no record exists
                    # to rate-limit against: always send.
                    send_email = True
                else:
                    # Rate-limit: only send when no confirmation was sent
                    # within the cooldown window.
                    send_email = not EmailConfirmation.objects.filter(sent__gt=now() - cooldown_period, email_address=email_address).exists()
                if send_email:
                    email_address.send_confirmation(request, signup=signup)
            else:
                # Already verified -- nothing to send.
                send_email = False
        except EmailAddress.DoesNotExist:
            # First time we see this address: record it and send.
            send_email = True
            email_address = EmailAddress.objects.add_email(request, user, email, signup=signup, confirm=True)
            assert email_address
        # At this point, if we were supposed to send an email we have sent it.
        if send_email:
            get_adapter(request).add_message(request, messages.INFO, 'account/messages/' 'email_confirmation_sent.txt', {'email': email})
    if signup:
        # Remember the user so the confirmation view can log them in later.
        get_adapter(request).stash_user(request, user_pk_to_url_str(user))
def find_one(self, tname, where=None, where_not=None, columns=None, astype=None):
    '''Return a single record from table *tname*.

    When several records match, the first one (by the internal order of the
    records) is returned.  When nothing matches, an empty dictionary,
    string or series is returned depending on `astype`.

    Parameters
    ----------
    tname : str
        Table to search records from.
    where : dict or None (default `None`)
        Mapping of <column, value> where value can be a str for exact
        matching or a compiled regex for more advanced matching.
    where_not : dict or None (default `None`)
        Identical to `where` but for negative-matching.
    columns : list of str, str or None (default `None`)
        Column(s) to return for the found record, if any.
    astype : str, type or None (default `None`)
        Type to cast the output to: `nonetype`, `series`, `str`, `dict` or
        `json`.  `None` falls back to the type provided to the constructor;
        pass "nonetype" to avoid any casting.

    Returns
    -------
    records : str, dict or series
        Output type depends on the `astype` parameter.

    Examples
    --------
    >>> db = PandasDatabase("test")
    >>> db.insert("test", record={"Name": "John"})
    Name                                    John
    __id__  dc876999-1f5b-4262-b6bf-c23b875f3a54
    dtype: object
    >>> db.find_one("test", where={"Name": "John"}, astype="dict")
    {'Name': 'John', '__id__': 'dc876999-1f5b-4262-b6bf-c23b875f3a54'}
    '''
    matches = self.find(tname, where=where, where_not=where_not, columns=columns, astype='dataframe')
    return self._output(matches, single=True, astype=astype)
def pdu_to_function_code_or_raise_error(resp_pdu):
    """Parse response PDU and return its function code, or raise an error.

    :param resp_pdu: PDU of response.
    :return: Function code (int) matching a key of
        ``function_code_to_function_map``.
    :raises ModbusError: When response contains an error code.
    """
    function_code = struct.unpack('>B', resp_pdu[0:1])[0]
    # An unrecognized function code means the server set the error bit;
    # the second byte then carries the Modbus exception code.
    # (Idiom fix: membership test directly on the dict, no ".keys()".)
    if function_code not in function_code_to_function_map:
        error_code = struct.unpack('>B', resp_pdu[1:2])[0]
        raise error_code_to_exception_map[error_code]
    return function_code
def handler(event, context):
    """Historical {{cookiecutter.technology_name}} event poller.

    This poller is run at a set interval in order to ensure that changes do
    not go undetected by historical.  Historical pollers generate `polling
    events` which simulate changes.  These polling events contain
    configuration data such as the account/region defining where the
    collector should attempt to gather data from.
    """
    log.debug('Running poller. Configuration: {}'.format(event))
    # One batch of polling events per configured account; a failing account
    # is logged and skipped so it does not block the others.
    for account in get_historical_accounts():
        try:
            # TODO describe all items
            # Example::
            #   groups = describe_security_groups(
            #       account_number=account['id'],
            #       assume_role=HISTORICAL_ROLE,
            #       region=CURRENT_REGION)
            #   events = [security_group_polling_schema.serialize(account['id'], g) for g in groups['SecurityGroups']]
            events = []
            # Stream name falls back to the cookiecutter-templated default
            # when HISTORICAL_STREAM is not set in the environment.
            produce_events(events, os.environ.get('HISTORICAL_STREAM', 'Historical{{cookiecutter.technology_slug | titlecase }}PollerStream'))
            log.debug('Finished generating polling events. Account: {} Events Created: {}'.format(account['id'], len(events)))
        except ClientError as e:
            log.warning('Unable to generate events for account. AccountId: {account_id} Reason: {reason}'.format(account_id=account['id'], reason=e))
def AddInstanceTags(r, instance, tags, dry_run=False):
    """Adds tags to an instance.

    @type instance: str
    @param instance: instance to add tags to
    @type tags: list of str
    @param tags: tags to add to the instance
    @type dry_run: bool
    @param dry_run: whether to perform a dry run
    @rtype: int
    @return: job id
    """
    return r.request(
        "put",
        "/2/instances/%s/tags" % instance,
        query={"tag": tags, "dry-run": dry_run},
    )
def get_assessment_taken_admin_session(self, proxy):
    """Gets the ``OsidSession`` associated with the assessment taken
    administration service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.AssessmentTakenAdminSession) - an
            ``AssessmentTakenAdminSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_assessment_taken_admin()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_taken_admin()`` is ``true``.*
    """
    if self.supports_assessment_taken_admin():
        # pylint: disable=no-member
        return sessions.AssessmentTakenAdminSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
def ptmsiReallocationComplete():
    """P-TMSI REALLOCATION COMPLETE Section 9.4.8"""
    # Protocol discriminator 0x3, message type 0x11 (0b00010001).
    header = TpPd(pd=0x3)
    msg_type = MessageType(mesType=0x11)
    return header / msg_type
def bm3_big_F(p, v, v0):
    """Calculate big F for the linearized (F-f) form of the
    Birch-Murnaghan equation, from pressure and the small-f strain
    derived from the volumes.

    not fully tested

    :param p: pressure
    :param v: volume at pressure
    :param v0: volume at zero pressure
    :return: big F value computed via ``cal_big_F(p, f)`` where
        ``f = bm3_small_f(v, v0)``
    """
    f = bm3_small_f(v, v0)
    return cal_big_F(p, f)
def get_sec2gos(sortobj):
    """Build an ordered mapping of section name -> set of GO IDs."""
    pairs = [
        (name, {nt.GO for nt in nts})
        for name, nts in sortobj.get_desc2nts_fnc(hdrgo_prt=True)['sections']
    ]
    return cx.OrderedDict(pairs)
def get_cognitive_process_metadata(self):
    """Gets the metadata for a cognitive process.

    return: (osid.Metadata) - metadata for the cognitive process
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceForm.get_group_metadata_template
    metadata = dict(self._mdata['cognitive_process'])
    metadata['existing_id_values'] = self._my_map['cognitiveProcessId']
    return Metadata(**metadata)
def create_module(self, spec):
    """Improve python2 semantics for module creation.

    Delegates to the parent loader.  CAREFUL: a present ``get_filename``
    implies the module has ONE location, which is not true with namespaces.
    """
    return super(NamespaceLoader2, self).create_module(spec)
def get_requirements_information(self, path: Path) -> Tuple[RequirementsOptions, Optional[str]]:
    """Returns the information needed to install requirements for a repository -
    what kind is used and the hash of the contents of the defining file.

    Pipfile-based projects take precedence over requirements.txt.  A repo
    containing only one of Pipfile / Pipfile.lock is rejected, since both
    are required for a reproducible install.

    :param path: Root of the checked-out repository.
    :return: Tuple of the detected :class:`RequirementsOptions` member and
        the defining file's contents hash (``None`` when the repository
        declares no requirements).
    :raises BuildError: When only one of Pipfile / Pipfile.lock is present.
    """
    if self.pipfile_location is not None:
        pipfile = path / self.pipfile_location / "Pipfile"
        pipfile_lock = path / self.pipfile_location / "Pipfile.lock"
        pipfile_exists = pipfile.exists()
        pipfile_lock_exists = pipfile_lock.exists()
        if pipfile_exists and pipfile_lock_exists:
            option = RequirementsOptions.pipfile
            # The lock file pins exact versions, so it is what gets hashed.
            return option, self.hash_file_contents(option, pipfile_lock)
        elif pipfile_exists:
            raise BuildError("Only the Pipfile is included in the repository, Arca does not support that.")
        elif pipfile_lock_exists:
            # Fixed typo in user-facing message: "is include" -> "is included".
            raise BuildError("Only the Pipfile.lock file is included in the repository, Arca does not support that.")
    if self.requirements_location:
        requirements_file = path / self.requirements_location
        if requirements_file.exists():
            option = RequirementsOptions.requirements_txt
            return option, self.hash_file_contents(option, requirements_file)
    return RequirementsOptions.no_requirements, None
def setGridColor(self, color):
    """Sets the color for the grid for this instance to the given color.

    :param      color | <QColor>
    """
    palette = self.palette()
    # NOTE(review): in plain Qt, QWidget.palette() returns a copy, so
    # mutating it without a subsequent self.setPalette(palette) would have
    # no effect -- confirm whether this class's palette() returns a live
    # object (e.g. a custom palette with a GridForeground role).
    palette.setColor(palette.GridForeground, QColor(color))
async def trigger_act(self, addr):
    """Trigger the agent in :attr:`addr` to act and return the result.

    This method is quite inefficient if used repeatedly for a large number
    of agents.

    .. seealso::
        :py:meth:`creamas.mp.MultiEnvironment.trigger_all`
    """
    remote_agent = await self.env.connect(addr, timeout=TIMEOUT)
    return await remote_agent.act()
def close(self):
    """Cleanly shutdown the connection to RabbitMQ.

    :raises: sprockets.mixins.amqp.ConnectionStateError
    """
    # Refuse to close while in a state that cannot be closed (e.g. already
    # closing or closed).
    if not self.closable:
        LOGGER.warning('Closed called while %s', self.state_description)
        raise ConnectionStateError(self.state_description)
    self.state = self.STATE_CLOSING
    LOGGER.info('Closing RabbitMQ connection')
    self.connection.close()
def survey_change_name(request, pk):
    """Rename a survey from a POSTed "name" field.

    Works well with: http://www.appelsiini.net/projects/jeditable
    """
    survey = get_object_or_404(Survey, pk=pk)
    if not request.user.has_perm("formly.change_survey_name", obj=survey):
        raise PermissionDenied()
    new_name = request.POST.get("name")
    survey.name = new_name
    survey.save()
    return JsonResponse({"status": "OK", "name": new_name})
def _need_bib_run(self, old_cite_counter):
    '''Determine if you need to run "bibtex".

    1. Check if *.bib exists.
    2. Check latex output for hints.
    3. Test if the numbers of citations changed during the first latex run.
    4. Examine *.bib for changes.

    Returns True when a bibtex run is needed, False when it is ruled out,
    and (implicitly) None on fall-through -- also falsy, so callers only
    relying on truthiness are unaffected.
    '''
    # Look for a \bibdata entry in the .aux file; without one there is no
    # bibliography to build.
    with open('%s.aux' % self.project_name) as fobj:
        match = BIB_PATTERN.search(fobj.read())
    if not match:
        return False
    else:
        self.bib_file = match.group(1)
    if not os.path.isfile('%s.bib' % self.bib_file):
        self.log.warning('Could not find *.bib file.')
        return False
    # Hints in the latex output: missing .bbl or undefined citations.
    if (re.search('No file %s.bbl.' % self.project_name, self.out) or re.search('LaTeX Warning: Citation .* undefined', self.out)):
        return True
    # Citation count changed since the first latex run.
    if old_cite_counter != self.generate_citation_counter():
        return True
    # Compare the .bib against the saved copy from the previous build.
    if os.path.isfile('%s.bib.old' % self.bib_file):
        new = '%s.bib' % self.bib_file
        old = '%s.bib.old' % self.bib_file
        if not filecmp.cmp(new, old):
            return True
def transformToNative(obj):
    """Turn obj.value into a date or datetime.

    Idempotent: an already-native object is returned unchanged, and an
    empty value is marked native but left as ''.  For DATE-TIME values with
    a TZID parameter, the original TZID is preserved in the
    'X-VOBJ-ORIGINAL-TZID' parameter before the tzid attribute is dropped.

    :param obj: content line whose ``value`` holds a DTSTART-style string.
    :return: *obj*, mutated in place.
    """
    if obj.isNative:
        return obj
    obj.isNative = True
    if obj.value == '':
        return obj
    # (Removed a no-op self-assignment "obj.value = obj.value".)
    obj.value = parseDtstart(obj, allowSignatureMismatch=True)
    if getattr(obj, 'value_param', 'DATE-TIME').upper() == 'DATE-TIME':
        if hasattr(obj, 'tzid_param'):
            # Keep a copy of the original TZID around
            obj.params['X-VOBJ-ORIGINAL-TZID'] = [obj.tzid_param]
            del obj.tzid_param
    return obj
def post(self, request, *args, **kwargs):
    """Method for handling POST requests.

    Deletes the object. Successful deletes are logged.
    Returns a 'render redirect' to the result of the `get_done_url` method.

    If a ProtectedError is raised, the `render` method is called with a
    message explaining the error added to the context as `protected`.

    NOTE: this is Python 2 code (``except ProtectedError, e`` syntax and
    ``u""`` literals); ``transaction.commit_on_success`` is the pre-1.6
    Django transaction API.
    """
    self.object = self.get_object()
    msg = None
    # Only act when the confirmation flag was actually posted.
    if request.POST.get('delete'):
        try:
            # Log and delete atomically: a failed delete rolls back the log.
            with transaction.commit_on_success():
                self.log_action(self.object, CMSLog.DELETE)
                # Build the message before deletion, while str(object) is valid.
                msg = "%s deleted" % self.object
                self.object.delete()
        except ProtectedError, e:
            # Collect a human-readable reason per object blocking the delete.
            protected = []
            for x in e.protected_objects:
                if hasattr(x, 'delete_blocked_message'):
                    protected.append(x.delete_blocked_message())
                else:
                    protected.append(u"%s: %s" % (x._meta.verbose_name, x))
            return self.render(request, obj=self.object, protected=protected)
    return self.render(request, redirect_url=self.get_done_url(), obj=self.object, message=msg, collect_render_data=False)
def getIdentifierForPoint(self, point):
    """Create a unique identifier for and assign it to ``point``.

    If the point already has an identifier, the existing identifier will
    be returned.

        >>> contour.getIdentifierForPoint(point)
        'ILHGJlygfds'

    ``point`` must be a :class:`BasePoint`. The returned value will be a
    :ref:`type-identifier`.
    """
    normalized = normalizers.normalizePoint(point)
    return self._getIdentifierforPoint(normalized)
def to_array(self):
    """Serializes this ChatActionMessage to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(ChatActionMessage, self).to_array()
    array['action'] = u(self.action)  # py2: type unicode, py3: type str
    if self.receiver is not None:
        # Bug fix: the previous code called isinstance(self.receiver, None)
        # and None(self.receiver), both of which raise TypeError at runtime.
        # A None receiver is already excluded by the guard above, so only
        # str and int remain valid.
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: unicode, py3: str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)
        else:
            raise TypeError('Unknown type, must be one of None, str, int.')
    # end if
    return array
def model(self):
    """Model of the spectrum with the instance's redshift applied."""
    # No redshift: return the bare model unchanged.
    if self.z == 0:
        m = self._model
    else:
        # wavelength axis: redshifting divides, so use the inverse model.
        if self._internal_wave_unit.physical_type == 'length':
            rs = self._redshift_model.inverse
        # frequency or wavenumber axis.
        # NOTE: This will never execute as long as the internal wavelength
        # unit remains Angstrom.
        else:  # pragma: no cover
            rs = self._redshift_model
        if self.z_type == 'wavelength_only':
            # Shift the wavelength grid only; flux left untouched.
            m = rs | self._model
        else:  # conserve_flux
            # Also rescale flux so total energy is conserved.
            m = rs | self._model | self._redshift_flux_model
    return m
def main():
    """Command-line entry point for running the view server.

    NOTE: Python 2 code (``except getopt.GetoptError, error`` syntax).
    """
    import getopt
    from . import __version__ as VERSION
    try:
        option_list, argument_list = getopt.gnu_getopt(sys.argv[1:], 'h', ['version', 'help', 'json-module=', 'debug', 'log-file='])
        message = None
        for option, value in option_list:
            # NOTE(review): ('--version') is a plain string, not a 1-tuple
            # (missing trailing comma), so ``in`` does substring matching
            # here and in the single-option branches below.  It happens to
            # work because getopt returns the full option string -- confirm
            # and consider adding the commas.
            if option in ('--version'):
                message = _VERSION % dict(name=os.path.basename(sys.argv[0]), version=VERSION)
            elif option in ('-h', '--help'):
                message = _HELP % dict(name=os.path.basename(sys.argv[0]))
            elif option in ('--json-module'):
                # Select the JSON implementation to use at runtime.
                json.use(module=value)
            elif option in ('--debug'):
                log.setLevel(logging.DEBUG)
            elif option in ('--log-file'):
                # '-' selects stderr with a compact format; anything else is
                # treated as a file path with a timestamped format.
                if value == '-':
                    handler = logging.StreamHandler(sys.stderr)
                    handler.setFormatter(logging.Formatter(' -> [%(levelname)s] %(message)s'))
                else:
                    handler = logging.FileHandler(value)
                    handler.setFormatter(logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s'))
                log.addHandler(handler)
        # --version / --help short-circuit: print and exit successfully.
        if message:
            sys.stdout.write(message)
            sys.stdout.flush()
            sys.exit(0)
    except getopt.GetoptError, error:
        message = '%s\n\nTry `%s --help` for more information.\n' % (str(error), os.path.basename(sys.argv[0]))
        sys.stderr.write(message)
        sys.stderr.flush()
        sys.exit(1)
    sys.exit(run())
def queryAll(self, *args, **kwargs):
    """Returns a :class:`Deferred` object which will have its callback
    invoked with a :class:`BatchedView` when the results are complete.

    Parameters follow conventions of
    :meth:`~couchbase.bucket.Bucket.query`.

    Example::
        d = cb.queryAll("beer", "brewery_beers")
        def on_all_rows(rows):
            for row in rows:
                print("Got row {0}".format(row))
        d.addCallback(on_all_rows)
    """
    # Not connected yet: connect first, then retry this call.
    if not self.connected:
        return self.connect().addCallback(lambda _: self.queryAll(*args, **kwargs))
    kwargs['itercls'] = BatchedView
    view = super(RawBucket, self).query(*args, **kwargs)
    view.start()
    return view._getDeferred()
async def set_position(self, position, wait_for_completion=True):
    """Set window to desired position.

    Parameters:
        * position: Position object containing the target position.
        * wait_for_completion: If set, function will return after device
          has reached target position.
    """
    command = CommandSend(
        pyvlx=self.pyvlx,
        wait_for_completion=wait_for_completion,
        node_id=self.node_id,
        parameter=position,
    )
    await command.do_api_call()
    if not command.success:
        raise PyVLXException("Unable to send command")
    # Notify listeners that the node state may have changed.
    await self.after_update()
def _get_handled_methods(self, actions_map):
    """Get names of HTTP methods that can be used at the requested URI.

    Arguments:
        :actions_map: Map of actions. Must have the same structure as
            self._item_actions and self._collection_actions.
    """
    methods = ('OPTIONS',)
    for action_name, action_methods in actions_map.items():
        handler = getattr(self, action_name, None)
        # Count only actions that exist on the view and were actually
        # overridden (i.e. are not the not-allowed placeholder).
        if handler is not None and handler != self.not_allowed_action:
            methods += action_methods
    return methods
def get_volume_by_id(self, id):
    """Get ScaleIO Volume object by its ID.

    :param id: ID of volume
    :return: ScaleIO Volume object
    :raise KeyError: No Volume with specified ID found
    :rtype: ScaleIO Volume object
    """
    # Linear scan over the cached volume list.
    for volume in self.conn.volumes:
        if volume.id == id:
            return volume
    raise KeyError("Volume with ID " + id + " not found")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.