signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def acquire_discharge(self, cav, payload):
    '''Request a discharge macaroon from the caveat location,
    treated as an HTTP URL.

    @param cav Third party {pymacaroons.Caveat} to be discharged.
    @param payload External caveat data {bytes}.
    @return The acquired macaroon {macaroonbakery.Macaroon}
    '''
    # First attempt: discharge without any interaction token.
    resp = self._acquire_discharge_with_token(cav, payload, None)
    # TODO Fabrice what is the other http response possible??
    if resp.status_code == 200:
        return bakery.Macaroon.from_dict(resp.json().get('Macaroon'))

    cause = Error.from_dict(resp.json())
    if cause.code != ERR_INTERACTION_REQUIRED:
        raise DischargeError(cause.message)
    if cause.info is None:
        raise DischargeError(
            'interaction-required response with no info: {}'.format(resp.json()))

    loc = cav.location if cav.location.endswith('/') else cav.location + '/'
    token, m = self._interact(loc, cause, payload)
    if m is not None:
        # Macaroon acquired directly via legacy interaction.
        return m

    # Retry the discharge, this time carrying the token produced by the
    # interaction method.
    resp = self._acquire_discharge_with_token(cav, payload, token)
    if resp.status_code == 200:
        return bakery.Macaroon.from_dict(resp.json().get('Macaroon'))
    raise DischargeError('discharge failed with code {}'.format(resp.status_code))
|
def query_one(cls, *args, **kwargs):
    """Same as collection.find_one, but return Document then dict"""
    raw = cls._coll.find_one(*args, **kwargs)
    # A falsy result (None / empty) yields None, matching find_one semantics.
    return cls.from_storage(raw) if raw else None
|
def check_platform(self, dataset):
    '''Check the recommended attributes of the platform container variable(s).

    Each platform variable should carry a non-empty long_name; comment,
    ncei_code, wmo_code, imo_code and call_sign should be non-empty when
    present, and at least one identifying attribute (ncei_code, wmo_code,
    imo_code, call_sign) should exist.
    '''
    platforms = util.get_platform_variables(dataset)
    if not platforms:
        return Result(BaseCheck.MEDIUM, False,
                      'A container variable storing information about the platform exists',
                      ['Create a variable to store the platform information'])

    results = []
    for platform in platforms:
        test_ctx = TestCtx(BaseCheck.MEDIUM,
                           'Recommended attributes for platform variable {}'.format(platform))
        pvar = dataset.variables[platform]
        test_ctx.assert_true(getattr(pvar, 'long_name', '') != '',
                             'long_name attribute should exist and not be empty')
        if hasattr(pvar, 'comment'):
            test_ctx.assert_true(getattr(pvar, 'comment', '') != '',
                                 'comment attribute should not be empty if specified')
        # The identifying codes are only recommended if they exist for the
        # platform, so we only check that present ones are non-empty.
        found_identifier = False
        for attr in ('ncei_code', 'wmo_code', 'imo_code'):
            if hasattr(pvar, attr):
                test_ctx.assert_true(getattr(pvar, attr, '') != '',
                                     '{} should not be empty if specified'.format(attr))
                found_identifier = True
        if hasattr(pvar, 'call_sign'):
            test_ctx.assert_true(getattr(pvar, 'call_sign', '') != '',
                                 'call_sign attribute should not be empty if specified')
            found_identifier = True
        test_ctx.assert_true(found_identifier,
                             'At least one attribute should be defined to identify the platform: ncei_code, wmo_code, imo_code, call_sign.')
        results.append(test_ctx.to_result())
    return results
|
def find_distributions(path_item, only=False):
    """Yield distributions accessible via `path_item`"""
    importer = get_importer(path_item)
    # Pick the finder registered for this importer type and delegate to it.
    finder = _find_adapter(_distribution_finders, importer)
    return finder(importer, path_item, only)
|
def _markup ( self , entry ) :
"""Recursively generates HTML for the current entry .
Parameters
entry : object
Object to convert to HTML . Maybe be a single entity or contain multiple and / or nested objects .
Returns
str
String of HTML formatted json ."""
|
if entry is None :
return ""
if isinstance ( entry , list ) :
list_markup = "<ul>"
for item in entry :
list_markup += "<li>{:s}</li>" . format ( self . _markup ( item ) )
list_markup += "</ul>"
return list_markup
if isinstance ( entry , dict ) :
return self . convert ( entry )
# default to stringifying entry
return str ( entry )
|
def connect(self, force=False):
    '''Establish a connection.

    Returns True when a connection is (already or newly) established,
    False when the attempt failed.
    '''
    # Don't re-establish existing connections
    if not force and self.alive():
        return True
    self._reset()
    # Otherwise, try to connect
    with self._socket_lock:
        try:
            logger.info('Creating socket...')
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.settimeout(self._timeout)
            logger.info('Connecting to %s, %s', self.host, self.port)
            self._socket.connect((self.host, self.port))
            # Set our socket's blocking state to whatever ours is
            self._socket.setblocking(self._blocking)
            # Safely write our magic
            self._pending.append(constants.MAGIC_V2)
            while self.pending():
                self.flush()
            # And send our identify command
            self.identify(self._identify_options)
            while self.pending():
                self.flush()
            self._reconnnection_counter.success()
            # Wait until we've gotten a response to IDENTIFY; only spend up
            # to the configured timeout waiting for it.
            limit = time.time() + self._timeout
            responses = self._read(1)
            while (not responses) and (time.time() < limit):
                responses = self._read(1)
            if not responses:
                raise ConnectionTimeoutException(
                    'Read identify response timed out (%ss)' % self._timeout)
            self.identified(responses[0])
            return True
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt / SystemExit; those now propagate.
            logger.exception('Failed to connect')
            if self._socket:
                self._socket.close()
            self._reconnnection_counter.failed()
            self._reset()
            return False
|
def get_ZXY_freqs(Data, zfreq, xfreq, yfreq, bandwidth=5000):
    """Determines the exact z, x and y peak frequencies from approximate
    frequencies by finding the highest peak in the PSD "close to" the
    approximate peak frequency, i.e. within the range
    approxFreq - bandwidth/2 to approxFreq + bandwidth/2.

    Parameters
    ----------
    Data : DataObject
        DataObject containing the data for which you want to determine the
        z, x and y frequencies.
    zfreq, xfreq, yfreq : float
        Approximate frequencies for the z, x and y peaks respectively.
    bandwidth : float, optional
        The bandwidth around the approximate peak to look for the actual
        peak. The default value is 5000.

    Returns
    -------
    trapfreqs : list
        List containing the trap frequencies in the order (z, x, y).
    """
    trapfreqs = []
    half_band = bandwidth / 2
    for approx in [zfreq, xfreq, yfreq]:
        lower_f = take_closest(Data.freqs, approx - half_band)
        upper_f = take_closest(Data.freqs, approx + half_band)
        i_lower = int(_np.where(Data.freqs == lower_f)[0][0])
        i_upper = int(_np.where(Data.freqs == upper_f)[0][0])
        # The highest PSD point inside the window is taken as the true peak.
        peak_index = _np.where(Data.PSD == max(Data.PSD[i_lower:i_upper]))[0][0]
        trapfreqs.append(Data.freqs[peak_index])
    return trapfreqs
|
def _get_model_nodes ( self , model ) :
"""Find all the non - auto created nodes of the model ."""
|
nodes = [ ( name , node ) for name , node in model . _nodes . items ( ) if node . _is_auto_created is False ]
nodes . sort ( key = lambda n : n [ 0 ] )
return nodes
|
def _maintain_dep_graph ( self , p_todo ) :
"""Makes sure that the dependency graph is consistent according to the
given todo ."""
|
dep_id = p_todo . tag_value ( 'id' )
# maintain dependency graph
if dep_id :
self . _parentdict [ dep_id ] = p_todo
self . _depgraph . add_node ( hash ( p_todo ) )
# connect all tasks we have in memory so far that refer to this
# task
for dep in [ dep for dep in self . _todos if dep . has_tag ( 'p' , dep_id ) ] :
self . _add_edge ( p_todo , dep , dep_id )
for dep_id in p_todo . tag_values ( 'p' ) :
try :
parent = self . _parentdict [ dep_id ]
self . _add_edge ( parent , p_todo , dep_id )
except KeyError :
pass
|
def ask_int(msg="Enter an integer", dft=None, vld=None, hlp=None):
    """Prompts the user for an integer."""
    # Default validator accepts any int; casting is delegated to `cast`.
    return ask(msg, dft=dft, vld=vld or [int], fmt=partial(cast, typ=int), hlp=hlp)
|
def pos(self, element=None):
    '''Tries to decide about the part of speech.'''
    tags = []
    if not element:
        # No element given: aggregate the tags over every entry that
        # starts with our word.
        for entry in self.elements:
            if entry.startswith(self.word):
                tags += self.pos(entry)
        return list(set(tags))
    # gender marker ("... m." / "... f.") indicates a noun
    if re.search('[\w|\s]+ [m|f]\.', element, re.U):
        tags.append('NN')
    if '[VERB]' in element:
        tags.append('VB')
    # adjectives are listed as comma-separated forms after "adj."
    if 'adj.' in element and re.search('([\w|\s]+, [\w|\s]+)', element, re.U):
        tags.append('JJ')
    return list(set(tags))
|
def get_order_in_album(self, reversed_ordering=True):
    '''Returns image order number. It is calculated as (number + 1) of images
    attached to the same content_object whose order is greater
    (if 'reversed_ordering' is True) or lesser (if 'reversed_ordering' is
    False) than this image's order.'''
    comparison = 'order__gt' if reversed_ordering else 'order__lt'
    siblings = self.__class__.objects.for_model(self.content_object, self.content_type)
    return siblings.filter(**{comparison: self.order}).count() + 1
|
def calculate_size(name, replica_timestamps, target_replica):
    """Calculates the request payload size"""
    # name string + the int holding the number of timestamp entries
    data_size = calculate_size_str(name) + INT_SIZE_IN_BYTES
    for item in replica_timestamps:
        key = item[0]
        # item[1] is the timestamp value; only its fixed LONG size counts
        data_size += calculate_size_str(key)
        data_size += LONG_SIZE_IN_BYTES
    # the single target replica address is counted once
    data_size += calculate_size_address(target_replica)
    return data_size
|
def PushBack(self, string="", **_):
    """Push the match back on the stream.

    The pushed-back text is prepended to the unprocessed buffer and the
    same amount is removed from the end of the processed buffer.
    """
    precondition.AssertType(string, Text)
    if not string:
        # BUG FIX: with an empty string, processed_buffer[:-len(string)]
        # evaluates to [:0] and wiped the whole processed buffer; an empty
        # push-back must be a no-op.
        return
    self.buffer = string + self.buffer
    self.processed_buffer = self.processed_buffer[:-len(string)]
|
def isUserCert(self, name):
    '''Checks if a user certificate exists.

    Args:
        name (str): The name of the user keypair.

    Examples:
        Check if the user cert "myuser" exists:

            exists = cdir.isUserCert('myuser')

    Returns:
        bool: True if the certificate is present, False otherwise.
    '''
    return os.path.isfile(self._getPathJoin('users', '%s.crt' % name))
|
def fetch_metric(self, cursor, results, tags):
    '''Because we need to query the metrics by matching pairs, we can't query
    all of them together without having to perform some matching based on
    the name afterwards so instead we query instance by instance.
    We cache the list of instance so that we don't have to look it up every time'''
    if self.sql_name not in results:
        self.log.warning("Couldn't find {} in results".format(self.sql_name))
        return
    tags = tags + self.tags
    results_list = results[self.sql_name]
    done_instances = []
    for ndx, row in enumerate(results_list):
        ctype = row[0]
        cval = row[1]
        inst = row[2]
        object_name = row[3]
        if inst in done_instances:
            continue
        if (self.instance != ALL_INSTANCES and inst != self.instance) or \
                (self.object_name and object_name != self.object_name):
            done_instances.append(inst)
            continue
        # Find the next row with the same instance: the pair forms the
        # value/base of the fraction.
        # BUG FIX: the original scanned results_list[:ndx + 1] (backwards,
        # including the current row), so it always rematched the row itself
        # and computed the fraction against its own value.
        cval2 = None
        ctype2 = None
        for second_row in results_list[ndx + 1:]:
            if inst == second_row[2]:
                cval2 = second_row[1]
                ctype2 = second_row[0]
        if cval2 is None:
            self.log.warning("Couldn't find second value for {}".format(self.sql_name))
            continue
        done_instances.append(inst)
        # The row with the smaller counter type holds the value, the other
        # the base.
        if ctype < ctype2:
            value = cval
            base = cval2
        else:
            value = cval2
            base = cval
        metric_tags = list(tags)
        if self.instance == ALL_INSTANCES:
            metric_tags.append('{}:{}'.format(self.tag_by, inst.strip()))
        self.report_fraction(value, base, metric_tags)
|
def getEdges(self, fromVol):
    """Return the edges available from fromVol."""
    raw_edges = self._client.getEdges(self.toArg.vol(fromVol))
    # translate the wire representation back into diff objects
    return [self.toObj.diff(edge) for edge in raw_edges]
|
def render_pdf(template, file_, url_fetcher=staticfiles_url_fetcher, context=None):
    """Writes the PDF data into ``file_``. Note that ``file_`` can actually be
    a Django Response object as well, so this may also be used as a helper to
    save a PDF to a file (or anything else outside of a request/response
    cycle).

    :param str template: Name of the template to render.
    :param file file_: A file like object (or a Response) where to output
        the rendered PDF.
    """
    html = get_template(template).render(context or {})
    HTML(
        string=html,
        base_url='not-used://',
        url_fetcher=url_fetcher,
    ).write_pdf(target=file_)
|
def sanitizeparameters(parameters):
    """Construct a dictionary of parameters, for internal use only"""
    if isinstance(parameters, dict):
        # already in the right shape
        return parameters
    sanitized = {}
    for entry in parameters:
        if isinstance(entry, tuple) and len(entry) == 2:
            # (group, [parameters]) pair: flatten the group members
            for parameter in entry[1]:
                sanitized[parameter.id] = parameter
        elif isinstance(entry, clam.common.parameters.AbstractParameter):
            sanitized[entry.id] = entry
    return sanitized
|
def _gap(src_interval, tar_interval):
    """Refer section 3.1; gap function.

    :param src_interval: first argument or interval 1
    :param tar_interval: second argument or interval 2
    :return: Interval representing gap between two intervals
    """
    assert src_interval.bits == tar_interval.bits, "Number of bits should be same for operands"
    # use the same variable names as in paper
    s = src_interval
    t = tar_interval
    b = s.upper_bound
    c = t.lower_bound
    w = s.bits
    # case 1: neither interval contains the other's facing endpoint
    if (not t._surrounds_member(b)) and (not s._surrounds_member(c)):
        # FIXME: maybe we can do better here and to not fix the stride to 1
        # FIXME: found the first common integer for more precision
        return StridedInterval(lower_bound=c, upper_bound=b, bits=w, stride=1).complement
    # otherwise there is no gap
    return StridedInterval.empty(w)
|
def method_codes_to_geomagia(magic_method_codes, geomagia_table):
    """Looks at the MagIC method code list and returns the correct GEOMAGIA
    code number depending on the method code list and the GEOMAGIA table
    specified. Returns '0', GEOMAGIA's "Not specified" value, if no match.
    When multiple codes are matched they are separated with ":".

    Parameters
    ----------
    magic_method_codes : str or list
        MagIC method codes (membership is tested with ``in``, so both a
        delimited string and a list of codes work).
    geomagia_table : str
        GEOMAGIA table name (case-insensitive), one of:
        alteration_monit_corr, md_checks, anisotropy_correction,
        cooling_rate, dm_methods, dm_analysis, specimen_type_id.

    Returns
    -------
    str
        The GEOMAGIA code ('0' when nothing matched).
    """
    codes = magic_method_codes
    geomagia = geomagia_table.lower()
    geomagia_code = '0'
    if geomagia == 'alteration_monit_corr':
        # BUG FIX: the original used `if "DA-ALT-V" or "LP-PI-ALT-PTRM" ...
        # in codes`, which is always truthy (non-empty string literal), so
        # every input mapped to '1'. Each code is now tested for membership.
        if ("DA-ALT-V" in codes) or ("LP-PI-ALT-PTRM" in codes) or ("LP-PI-ALT-PMRM" in codes):
            geomagia_code = '1'
        elif "LP-PI-ALT-SUSC" in codes:
            geomagia_code = '2'
        elif ("DA-ALT-RS" in codes) or ("LP-PI-ALT-AFARM" in codes):
            geomagia_code = '3'
        elif "LP-PI-ALT-WALTON" in codes:
            geomagia_code = '4'
        elif "LP-PI-ALT-TANGUY" in codes:
            geomagia_code = '5'
        elif "DA-ALT" in codes:
            # generic alteration code; checked late so specific codes win
            # (order preserved from the original: before FABIAN)
            geomagia_code = '6'
        elif "LP-PI-ALT-FABIAN" in codes:
            geomagia_code = '7'
    if geomagia == 'md_checks':
        if ("LT-PTRM-MD" in codes) or ("LT-PMRM-MD" in codes):
            geomagia_code = '1:'
        if ("LP-PI-BT-LT" in codes) or ("LT-LT-Z" in codes):
            if "0" in geomagia_code:
                # only the low-temperature check matched
                geomagia_code = "23:"
            else:
                geomagia_code += '2:'
        # BUG FIX: the original stripped the last character unconditionally,
        # turning the no-match value '0' into the empty string; only the
        # trailing ':' separator is stripped now.
        if geomagia_code.endswith(':'):
            geomagia_code = geomagia_code[:-1]
    if geomagia == 'anisotropy_correction':
        if "DA-AC-AMS" in codes:
            geomagia_code = '1'
        elif "DA-AC-AARM" in codes:
            geomagia_code = '2'
        elif "DA-AC-ATRM" in codes:
            geomagia_code = '3'
        elif "LT-NRM-PAR" in codes:
            geomagia_code = '4'
        elif "DA-AC-AIRM" in codes:
            geomagia_code = '6'
        elif "DA-AC" in codes:  # at end to fill generic if others don't exist
            geomagia_code = '5'
    if geomagia == 'cooling_rate':
        if "DA-CR" in codes:  # all current CR codes but CR-EG are a 1 but may change in the future
            geomagia_code = '1'
        if "DA-CR-EG" in codes:
            geomagia_code = '2'
    if geomagia == 'dm_methods':
        if ("LP-DIR-AF" in codes) or ("LT-AF-D" in codes) or ("LT-AF-G" in codes) or ("LT-AF-Z" in codes):
            geomagia_code = '1'
        elif "LP-DIR-T" in codes:
            geomagia_code = '2'
        # NOTE(review): the original also mapped "LT-AF-Z" to '2' here, but
        # that branch was unreachable (already caught above) -- possibly
        # "LT-T-Z" was intended; behavior preserved, confirm against the
        # GEOMAGIA table.
        elif ("LP-DIR-M" in codes) or ("LT-M-Z" in codes):
            geomagia_code = '5'
    if geomagia == 'dm_analysis':
        if "DE-BFL" in codes:
            geomagia_code = '1'
        elif "DE-BLANKET" in codes:
            geomagia_code = '2'
        elif "DE-FM" in codes:
            geomagia_code = '3'
        elif "DE-NRM" in codes:
            geomagia_code = '6'
    if geomagia == 'specimen_type_id':
        if "SC-TYPE-CYC" in codes:
            geomagia_code = '1'
        elif "SC-TYPE-CUBE" in codes:
            geomagia_code = '2'
        elif "SC-TYPE-MINI" in codes:
            geomagia_code = '3'
        elif "SC-TYPE-SC" in codes:
            geomagia_code = '4'
        elif "SC-TYPE-UC" in codes:
            geomagia_code = '5'
        elif "SC-TYPE-LARGE" in codes:
            geomagia_code = '6'
    return geomagia_code
|
def _set_version ( self , version ) :
'''Set up this object based on the capabilities of the
known versions of Redmine'''
|
# Store the version we are evaluating
self . version = version or None
# To evaluate the version capabilities ,
# assume the best - case if no version is provided .
version_check = version or 9999.0
if version_check < 1.0 :
raise RedmineError ( 'This library will only work with ' 'Redmine version 1.0 and higher.' )
# # SECURITY AUGMENTATION
# All versions support the key in the request
# ( http : / / server / stuff . json ? key = blah )
# But versions 1.1 and higher can put the key in a header field
# for better security .
# If no version was provided ( 0.0 ) then assume we should
# set the key with the request .
self . key_in_header = version >= 1.1
# it puts the key in the header or
# it gets the hose , but not for 1.0.
self . impersonation_supported = version_check >= 2.2
self . has_project_memberships = version_check >= 1.4
self . has_project_versions = version_check >= 1.3
self . has_wiki_pages = version_check >= 2.2
# # ITEM MANAGERS
# Step through all the item managers by version
# and instatiate and item manager for that item .
for manager_version in self . _item_managers_by_version :
if version_check >= manager_version :
managers = self . _item_managers_by_version [ manager_version ]
for attribute_name , item in managers . iteritems ( ) :
setattr ( self , attribute_name , Redmine_Items_Manager ( self , item ) )
|
def verify(self):
    """Ensure all expected calls were called,
    raise AssertionError otherwise.
    You do not need to use this directly. Use fudge.verify()"""
    try:
        for expectation in self.get_expected_calls():
            expectation.assert_called()
            expectation.assert_times_called()
        for fake, order in self.get_expected_call_order().items():
            order.assert_order_met(finalize=True)
    finally:
        # always reset recorded calls, even when an assertion fired
        self.clear_calls()
|
def total_review_average(obj, normalize_to=100):
    """Returns the average for all reviews of the given object."""
    ctype = ContentType.objects.get_for_model(obj)
    reviews = models.Review.objects.filter(content_type=ctype, object_id=obj.id)
    total_average = 0
    for review in reviews:
        total_average += review.get_average_rating(normalize_to)
    if reviews:
        # divide by the number of reviews; 0 is returned when there are none
        total_average /= reviews.count()
    return total_average
|
async def serialize_rctsig_prunable(self, ar, type, inputs, outputs, mixin):
    """Serialize the prunable part of an RCT signature.

    :param ar: archive to read from / write to
    :type ar: x.Archive
    :param type: RctType of the signature
    :param inputs: number of transaction inputs
    :param outputs: number of transaction outputs
    :param mixin: ring size minus one (number of decoys)
    :return: True for a Null signature, otherwise None
    """
    # Null signatures carry no prunable data at all.
    if type == RctType.Null:
        return True
    if type != RctType.Full and type != RctType.Bulletproof and type != RctType.Simple and type != RctType.Bulletproof2:
        raise ValueError('Unknown type')
    # Range proofs: bulletproofs for BP types, Borromean range sigs otherwise.
    if is_rct_bp(type):
        await ar.tag('bp')
        await ar.begin_array()
        # bps holds the bulletproof count; boxed in a list so eref can
        # read/write it by reference.
        bps = [0]
        if ar.writing:
            bps[0] = len(self.bulletproofs)
        # Bulletproof2 stores the count as a varint, older BP as uint32.
        if type == RctType.Bulletproof2:
            await ar.field(elem=eref(bps, 0), elem_type=x.UVarintType)
        else:
            await ar.field(elem=eref(bps, 0), elem_type=x.UInt32)
        await ar.prepare_container(bps[0], eref(self, 'bulletproofs'), elem_type=Bulletproof)
        for i in range(bps[0]):
            await ar.field(elem=eref(self.bulletproofs, i), elem_type=Bulletproof)
        await ar.end_array()
    else:
        await ar.tag('rangeSigs')
        await ar.begin_array()
        # one range signature per output
        await ar.prepare_container(outputs, eref(self, 'rangeSigs'), elem_type=RangeSig)
        if len(self.rangeSigs) != outputs:
            raise ValueError('rangeSigs size mismatch')
        for i in range(len(self.rangeSigs)):
            await ar.field(elem=eref(self.rangeSigs, i), elem_type=RangeSig)
        await ar.end_array()
    await ar.tag('MGs')
    await ar.begin_array()
    # We keep a byte for size of MGs, because we don't know whether this is
    # a simple or full rct signature, and it's starting to annoy the hell out of me
    is_full = type == RctType.Full
    # Full signatures have a single aggregate MG; simple/BP have one per input.
    mg_elements = inputs if not is_full else 1
    await ar.prepare_container(mg_elements, eref(self, 'MGs'), elem_type=MgSig)
    if len(self.MGs) != mg_elements:
        raise ValueError('MGs size mismatch')
    for i in range(mg_elements):
        # We save the MGs contents directly, because we want it to save its
        # arrays and matrices without the size prefixes, and the load can't
        # know what size to expect if it's not in the data
        await ar.begin_object()
        await ar.tag('ss')
        await ar.begin_array()
        # ss is a (mixin+1) x mg_ss2_elements matrix of keys
        await ar.prepare_container(mixin + 1, eref(self.MGs[i], 'ss'), elem_type=KeyM)
        if ar.writing and len(self.MGs[i].ss) != mixin + 1:
            raise ValueError('MGs size mismatch')
        for j in range(mixin + 1):
            await ar.begin_array()
            # 2 columns for simple/BP (key + commitment), 1 + inputs for full
            mg_ss2_elements = 1 + (1 if not is_full else inputs)
            await ar.prepare_container(mg_ss2_elements, eref(self.MGs[i].ss, j), elem_type=KeyM.ELEM_TYPE)
            if ar.writing and len(self.MGs[i].ss[j]) != mg_ss2_elements:
                raise ValueError('MGs size mismatch 2')
            for k in range(mg_ss2_elements):
                await ar.field(eref(self.MGs[i].ss[j], k), elem_type=KeyV.ELEM_TYPE)
            await ar.end_array()
        await ar.tag('cc')
        await ar.field(eref(self.MGs[i], 'cc'), elem_type=ECKey)
        await ar.end_object()
    await ar.end_array()
    # Bulletproof types also carry per-input pseudo output commitments.
    if type in (RctType.Bulletproof, RctType.Bulletproof2):
        await ar.begin_array()
        await ar.prepare_container(inputs, eref(self, 'pseudoOuts'), elem_type=KeyV)
        if ar.writing and len(self.pseudoOuts) != inputs:
            raise ValueError('pseudoOuts size mismatch')
        for i in range(inputs):
            await ar.field(eref(self.pseudoOuts, i), elem_type=KeyV.ELEM_TYPE)
        await ar.end_array()
|
def _stopOnFailure(self, f):
    "utility method to stop the service when a failure occurs"
    if self.running:
        stop_d = defer.maybeDeferred(self.stopService)
        stop_d.addErrback(log.err, 'while stopping broken HgPoller service')
    # pass the failure through so downstream errbacks still see it
    return f
|
def ProcessListDirectory(self, responses):
    """Processes the results of the ListDirectory client action.

    Args:
      responses: a flow Responses object.
    """
    if not responses.success:
        raise flow.FlowError("Unable to list directory.")
    with data_store.DB.GetMutationPool() as pool:
        for response in responses:
            stat_entry = rdf_client_fs.StatEntry(response)
            # persist the entry before replying with it
            filesystem.CreateAFF4Object(stat_entry, self.client_urn, pool, token=self.token)
            self.SendReply(stat_entry)
|
def check_syntax(code):
    """Return the (truthy) compiled code object if the syntax is okay,
    False otherwise."""
    try:
        compiled = compile(code, '<string>', 'exec', dont_inherit=True)
    except (SyntaxError, TypeError, ValueError):
        return False
    return compiled
|
def find_one(self, query=None):
    """Equivalent to ``find(query, limit=1)[0]``"""
    try:
        matches = self.find(query=query, limit=1)
        return matches[0]
    except (sqlite3.OperationalError, IndexError):
        # no match (or the table does not exist yet)
        return None
|
def translate_key_val(val, delimiter='='):
    '''CLI input is a list of key/val pairs, but the API expects a dictionary in
    the format {key: val}'''
    if isinstance(val, dict):
        # already a dict, nothing to translate
        return val
    pairs = translate_stringlist(val)
    result = {}
    for pair in pairs:
        try:
            key, value = split(pair, delimiter, 1)
        except (AttributeError, TypeError, ValueError):
            raise SaltInvocationError(
                '\'{0}\' is not a key{1}value pair'.format(pair, delimiter))
        result[key] = value
    return result
|
def mainline(self):
    """Returns the main line of the game (variation A) as a 'GameTree'."""
    if not self.variations:
        # leaf: the node itself is the end of the main line
        return self
    return GameTree(self.data + self.variations[0].mainline())
|
def get_iex_next_day_ex_date(start=None, **kwargs):
    """MOVED to iexfinance.refdata.get_iex_next_day_ex_date"""
    import warnings
    # deprecation notice pointing at the new location
    warnings.warn(WNG_MSG % ("get_iex_next_day_ex_date",
                             "refdata.get_iex_next_day_ex_date"))
    query = NextDay(start=start, **kwargs)
    return query.fetch()
|
def _main():
    """Usage: tabulate [options] [FILE ...]

    Pretty-print tabular data.
    See also https://bitbucket.org/astanin/python-tabulate

    FILE                      a filename of the file with tabular data;
                              if "-" or missing, read data from stdin.

    Options:

    -h, --help                show this message
    -1, --header              use the first row of data as a table header
    -o FILE, --output FILE    print table to FILE (default: stdout)
    -s REGEXP, --sep REGEXP   use a custom column separator (default: whitespace)
    -F FPFMT, --float FPFMT   floating point number format (default: g)
    -f FMT, --format FMT      set output table format; supported formats:
                              plain, simple, grid, fancy_grid, pipe, orgtbl,
                              rst, mediawiki, html, latex, latex_booktabs, tsv
                              (default: simple)
    """
    import getopt
    import sys
    import textwrap
    usage = textwrap.dedent(_main.__doc__)
    try:
        # BUG FIX: long option "output" was declared without a trailing '=',
        # so `--output FILE` never received its argument (unlike `-o FILE`).
        opts, args = getopt.getopt(
            sys.argv[1:],
            "h1o:s:F:f:",
            ["help", "header", "output=", "sep=", "float=", "format="])
    except getopt.GetoptError as e:
        print(e)
        print(usage)
        sys.exit(2)
    headers = []
    floatfmt = "g"
    tablefmt = "simple"
    sep = r"\s+"
    outfile = "-"
    for opt, value in opts:
        if opt in ["-1", "--header"]:
            headers = "firstrow"
        elif opt in ["-o", "--output"]:
            outfile = value
        elif opt in ["-F", "--float"]:
            floatfmt = value
        elif opt in ["-f", "--format"]:
            if value not in tabulate_formats:
                print("%s is not a supported table format" % value)
                print(usage)
                sys.exit(3)
            tablefmt = value
        elif opt in ["-s", "--sep"]:
            sep = value
        elif opt in ["-h", "--help"]:
            print(usage)
            sys.exit(0)
    files = [sys.stdin] if not args else args
    # NOTE(review): when printing to stdout, the `with` block closes
    # sys.stdout on exit; acceptable here since the program terminates.
    with (sys.stdout if outfile == "-" else open(outfile, "w")) as out:
        for f in files:
            if f == "-":
                f = sys.stdin
            if _is_file(f):
                _pprint_file(f, headers=headers, tablefmt=tablefmt,
                             sep=sep, floatfmt=floatfmt, file=out)
            else:
                with open(f) as fobj:
                    _pprint_file(fobj, headers=headers, tablefmt=tablefmt,
                                 sep=sep, floatfmt=floatfmt, file=out)
|
def read_plugin_config(self):
    """Read plugin-specific configuration values."""
    folders = self.config["pluginfolders"]
    modules = plugins.get_plugin_modules(folders)
    for pluginclass in plugins.get_plugin_classes(modules):
        section = pluginclass.__name__
        # a config section named after the plugin class enables that plugin
        if self.has_section(section):
            self.config["enabledplugins"].append(section)
            self.config[section] = pluginclass.read_config(self)
|
def lstsq(a, b, rcond=None, weighted=False, extrainfo=False):
    """Least-squares solution ``x`` to ``a @ x = b`` for |GVar|\\s.

    Here ``x`` is defined to be the solution that minimizes ``||b - a @ x||``.
    If ``b`` has a covariance matrix, another option is to weight the
    norm with the inverse covariance matrix: i.e., minimize
    ``|| isig @ b - isig @ a @ x||`` where ``isig`` is the square root of the
    inverse of ``b``'s covariance matrix. Set parameter ``weighted=True`` to
    obtain the weighted-least-squares solution.

    Args:
        a: Matrix/array of shape ``(M,N)`` containing numbers and/or |GVar|\\s.
        b: Vector/array of shape ``(M,)`` containing numbers and/or |GVar|\\s.
        rcond (float): Cutoff for singular values of ``a``. Singular values
            smaller than ``rcond`` times the maximum eigenvalue are ignored.
            Default (``rcond=None``) is ``max(M,N)`` times machine precision.
        weighted (bool): If ``True``, use weighted least squares; otherwise
            use unweighted least squares.
        extrainfo (bool): If ``False`` (default) only ``x`` is returned;
            otherwise ``(x, residual, rank, s)`` is returned.

    Returns:
        Array ``x`` of shape ``(N,)`` that minimizes ``||b - a @ x||``
        if ``extrainfo==False`` (default); otherwise returns a tuple
        ``(x, residual, rank, s)`` where ``residual`` is the sum
        of the squares of ``b - a @ x``, ``rank`` is the rank of matrix
        ``a``, and ``s`` is an array containing the singular values.
    """
    a = numpy.asarray(a)
    b = numpy.asarray(b)
    if a.ndim != 2:
        raise ValueError('a must have dimension 2: actual shape = ' + str(a.shape))
    if a.shape[0] != b.shape[0]:
        raise ValueError('a and b shapes mismatched: {} vs {}'.format(a.shape, b.shape))
    if rcond is None:
        # default cutoff: max(M, N) times machine precision
        rcond = numpy.finfo(float).eps * max(a.shape)
    if weighted:
        # weight the normal equations with the inverse covariance of b
        try:
            cov = gvar.evalcov(b)
        except ValueError:
            raise ValueError('b does not have a covariance matrix')
        try:
            icov = numpy.linalg.inv(cov)
        except numpy.linalg.LinAlgError:
            raise ValueError("b's covariance matrix cannot be inverted")
        ata = a.T.dot(icov.dot(a))
        atb = a.T.dot(icov.dot(b))
    else:
        # unweighted normal equations: (a^T a) x = a^T b
        ata = a.T.dot(a)
        atb = a.T.dot(b)
    # solve via the eigendecomposition of a^T a, dropping small eigenmodes
    val, vec = gvar.linalg.eigh(ata)
    maxval = numpy.max(gvar.mean(val))
    # N.B. val > 0 required
    ans = 0
    for i in range(len(val)):
        if gvar.mean(val[i]) < rcond * maxval:
            # eigenvalue below cutoff: treat the mode as null space
            continue
        ans += vec[:, i] * vec[:, i].dot(atb) / val[i]
    if not extrainfo:
        return ans
    # singular values of a are sqrt of the retained eigenvalues of a^T a
    val = val[val >= rcond * maxval] ** 0.5
    d = a.dot(ans) - b
    residual = d.dot(icov.dot(d)) if weighted else d.dot(d)
    k = len(val)
    return ans, residual, k, val
|
def page_template(template, key=PAGE_LABEL):
    """Decorator that switches a view's template on Ajax requests.

    Wrap a view accepting *template* and *extra_context* keyword
    arguments (as generic views do).  When the request is Ajax and the
    querystring key sent by the client equals *key*, the view is
    rendered with *template* instead of its full-page template; this
    allows multiple Ajax paginations in the same page.  The page
    template name is always exposed in the extra context as
    ``page_template``.
    """
    def decorator(view):
        @wraps(view)
        def decorated(request, *args, **kwargs):
            # The wrapped view is trusted to call
            # ``context.update(extra_context)``.
            context = kwargs.setdefault('extra_context', {})
            context['page_template'] = template
            # Which pagination does this request target?
            requested_key = request.GET.get(QS_KEY, request.POST.get(QS_KEY, PAGE_LABEL))
            # Swap templates only for Ajax requests aimed at this key.
            if request.is_ajax() and requested_key == key:
                kwargs[TEMPLATE_VARNAME] = template
            return view(request, *args, **kwargs)
        return decorated
    return decorator
|
def distribute(self, f, n):
    """Fan the computation out over the multiprocessing pool.

    ``f`` is called once for every integer in ``range(n)``.  Without a
    pool the calls run sequentially in-process.

    :param f: function of a single integer argument.
    :param n: number of invocations; arguments are ``0 .. n-1``.
    :return: list of the ``n`` results, in argument order.
    """
    if self.pool is not None:
        return self.pool.map(f, range(n))
    return [f(i) for i in range(n)]
|
def subset(self, subset_id):
    """Return information on the given subset (a SKOS Collection).

    Results are memoised in ``self.subsetcache``; on a miss the RDF
    graph is queried by the subset's URI.

    :param subset_id: identifier of the subset to look up.
    :return: dict with keys ``uri``, ``id``, ``label`` and ``open``.
    :raise DeepValidationError: when no matching collection is found.
    """
    # Fast path: previously resolved subsets are served from the cache.
    if subset_id in self.subsetcache:
        return self.subsetcache[subset_id]
    set_uri = self.get_set_uri(subset_id)
    # Every field except the URI itself is OPTIONAL in the query, so
    # setid/setlabel/setopen may come back unbound.
    for row in self.graph.query("SELECT ?seturi ?setid ?setlabel ?setopen WHERE { ?seturi rdf:type skos:Collection . OPTIONAL { ?seturi skos:notation ?setid } OPTIONAL { ?seturi skos:prefLabel ?setlabel } OPTIONAL { ?seturi fsd:open ?setopen } FILTER (?seturi = <" + str(set_uri) + ">) }"):
        # NOTE(review): the cache is keyed on the *queried* setid
        # (skos:notation), which is assumed to round-trip to subset_id
        # -- confirm; if skos:notation is absent this key becomes the
        # string 'None' and the fast path above will never hit it.
        self.subsetcache[str(row.setid)] = {'uri': str(row.seturi), 'id': str(row.setid), 'label': str(row.setlabel) if row.setlabel else "", 'open': bool(row.setopen)}
        # Only the first result row is used.
        return self.subsetcache[str(row.setid)]
    raise DeepValidationError("Unable to find subset (set_uri=" + str(set_uri) + ")")
|
def _list_items(action, key, profile=None, subdomain=None, api_key=None):
    '''Return the items of a PagerDuty API listing, keyed by ``key``.

    Queries ``action`` through :func:`_query` and maps each returned
    item's ``key`` field to the item itself.

    (This method should be in utils.pagerduty.)
    '''
    response = _query(profile=profile, subdomain=subdomain, api_key=api_key, action=action)
    return {entry[key]: entry for entry in response[action]}
|
def setup_left_panel(self):
    """Set up the UI for the left panel.

    Builds one row per supported exposure: a label, a combo box listing
    the hazard classifications applicable to that exposure, and an Edit
    button.  Exposures disabled for the current hazard are skipped (and
    purged from stored thresholds/value maps); the raster earthquake on
    population combination is locked to the default MMI classification.

    Side effects: appends to ``self.exposures``,
    ``self.exposure_combo_boxes``, ``self.exposure_edit_buttons`` and
    ``self.exposure_labels``; may set ``self.special_case_index`` and
    ``self.use_default_thresholds``.
    """
    hazard = self.parent.step_kw_subcategory.selected_subcategory()
    left_panel_heading = QLabel(tr('Classifications'))
    left_panel_heading.setFont(big_font)
    self.left_layout.addWidget(left_panel_heading)
    inner_left_layout = QGridLayout()
    row = 0
    for exposure in exposure_all:
        special_case = False
        if not setting('developer_mode'):
            # Filter out unsupported exposure for the hazard
            if exposure in hazard['disabled_exposures']:
                # Remove from the storage if the exposure is disabled
                if self.layer_mode == layer_mode_continuous:
                    if exposure['key'] in self.thresholds:
                        self.thresholds.pop(exposure['key'])
                else:
                    if exposure['key'] in self.value_maps:
                        self.value_maps.pop(exposure['key'])
                continue
        # Trick for EQ raster for population #3853
        if exposure == exposure_population and hazard == hazard_earthquake:
            if is_raster_layer(self.parent.layer):
                if self.layer_mode == layer_mode_continuous:
                    self.use_default_thresholds = True
                    special_case = True
                    # Set classification for EQ Raster for Population
                    self.thresholds[exposure_population['key']] = {
                        earthquake_mmi_scale['key']: {
                            'classes': default_classification_thresholds(
                                earthquake_mmi_scale),
                            'active': True
                        }
                    }
        # Add label: "<hazard> on <exposure> Classifications"
        label = tr('{hazard_name} on {exposure_name} Classifications').format(
            hazard_name=hazard['name'], exposure_name=exposure['name'])
        exposure_label = QLabel(label)
        # Add combo box; index 0 is always "No classifications".
        exposure_combo_box = QComboBox()
        hazard_classifications = hazard.get('classifications')
        exposure_combo_box.addItem(tr('No classifications'))
        exposure_combo_box.setItemData(0, None, Qt.UserRole)
        current_index = 0
        i = 0
        # Iterate through all available hazard classifications
        for hazard_classification in hazard_classifications:
            # Skip if the classification is not for the exposure
            if 'exposures' in hazard_classification:
                if exposure not in hazard_classification['exposures']:
                    continue
            exposure_combo_box.addItem(hazard_classification['name'])
            exposure_combo_box.setItemData(i + 1, hazard_classification, Qt.UserRole)
            # Reselect the previously active classification, if any,
            # from the stored thresholds / value maps.
            if self.layer_mode == layer_mode_continuous:
                current_hazard_classifications = self.thresholds.get(exposure['key'])
            else:
                current_hazard_classifications = self.value_maps.get(exposure['key'])
            if current_hazard_classifications:
                current_hazard_classification = current_hazard_classifications.get(
                    hazard_classification['key'])
                if current_hazard_classification:
                    is_active = current_hazard_classification.get('active')
                    if is_active:
                        current_index = i + 1
            i += 1
        # Set current classification
        exposure_combo_box.setCurrentIndex(current_index)
        # Add edit button
        exposure_edit_button = QPushButton(tr('Edit'))
        # For special case. Raster EQ on Population.
        if special_case:
            mmi_index = exposure_combo_box.findText(earthquake_mmi_scale['name'])
            exposure_combo_box.setCurrentIndex(mmi_index)
            # Classification is fixed for this combination; lock the UI.
            exposure_combo_box.setEnabled(False)
            exposure_edit_button.setEnabled(False)
            tool_tip_message = tr(
                'InaSAFE use default classification for Raster Earthquake '
                'hazard on population.')
            exposure_label.setToolTip(tool_tip_message)
            exposure_combo_box.setToolTip(tool_tip_message)
            exposure_edit_button.setToolTip(tool_tip_message)
        else:
            if current_index == 0:
                # Disable if there is no classification chosen.
                exposure_edit_button.setEnabled(False)
        exposure_edit_button.clicked.connect(partial(
            self.edit_button_clicked,
            edit_button=exposure_edit_button,
            exposure_combo_box=exposure_combo_box,
            exposure=exposure))
        exposure_combo_box.currentIndexChanged.connect(partial(
            self.classifications_combo_box_changed,
            exposure=exposure,
            exposure_combo_box=exposure_combo_box,
            edit_button=exposure_edit_button))
        # Arrange in layout
        inner_left_layout.addWidget(exposure_label, row, 0)
        inner_left_layout.addWidget(exposure_combo_box, row, 1)
        inner_left_layout.addWidget(exposure_edit_button, row, 2)
        # Adding to step's attribute
        self.exposures.append(exposure)
        self.exposure_combo_boxes.append(exposure_combo_box)
        self.exposure_edit_buttons.append(exposure_edit_button)
        self.exposure_labels.append(label)
        if special_case:
            self.special_case_index = len(self.exposures) - 1
        row += 1
    self.left_layout.addLayout(inner_left_layout)
    # To push the inner_left_layout up
    self.left_layout.addStretch(1)
|
def _get_matplot_dict ( self , option , prop , defdict ) :
"""Returns a copy of the settings dictionary for the specified option in
curargs with update values where the value is replaced by the key from
the relevant default dictionary .
: arg option : the key in self . curargs to update .
: arg defdict : the default dictionary whose keys should be used when values match ."""
|
cargs = self . curargs [ option ]
result = cargs . copy ( )
for varname in cargs :
if prop in cargs [ varname ] :
name = cargs [ varname ] [ prop ]
for key , val in list ( defdict . items ( ) ) :
if val == name :
cargs [ varname ] [ prop ] = key
break
return result
|
def setup_storage(self):
    """Save existing FileField storages and patch them with test instance(s).

    When ``storage_per_field`` is False (the default) a single storage
    instance is created here and kept on ``self.storage`` for use by all
    file fields.  When it is True, each FileField later receives its own
    independent storage instance instead.
    """
    use_shared_instance = (self.storage_callable is not None
                           and not self.storage_per_field)
    if use_shared_instance:
        self.storage = self.get_storage_from_callable(field=None)
    super(override_storage, self).setup_storage()
|
def entries(self):
    """Provide access to entry management methods for this content type.

    API reference:
    https://www.contentful.com/developers/docs/references/content-management-api/#/reference/entries

    :return: :class:`ContentTypeEntriesProxy <contentful_management.content_type_entries_proxy.ContentTypeEntriesProxy>` object.
    :rtype: contentful.content_type_entries_proxy.ContentTypeEntriesProxy

    Usage:

        >>> content_type_entries_proxy = content_type.entries()
        <ContentTypeEntriesProxy space_id="cfexampleapi" environment_id="master" content_type_id="cat">
    """
    proxy_args = (self._client, self.space.id, self._environment_id, self.id)
    return ContentTypeEntriesProxy(*proxy_args)
|
def toList(self):
    """Return the date as a signed list: a '+'/'-' marker followed by
    the date components, with the first component made non-negative."""
    values = self.date()
    marker = '-' if values[0] < 0 else '+'
    values[0] = abs(values[0])
    return [marker] + values
|
def _syncronous_batch_evaluation(self, x):
    """Evaluate the function at ``x`` (a single location or a batch) in
    parallel across ``self.n_procs`` worker processes.

    :param x: 2-d array of locations, one row per evaluation point.
    :return: tuple ``(f_evals, cost_evals)``; ``f_evals`` has shape
        ``(len(x), 1)`` and ``cost_evals`` is all ones, since
        synchronous batch methods assume constant (unit) evaluation
        time.
    """
    from multiprocessing import Process, Pipe
    # --- parallel evaluation of the function
    # Round-robin split: worker i gets rows i, i+n_procs, i+2*n_procs, ...
    divided_samples = [x[i::self.n_procs] for i in range(self.n_procs)]
    pipe = [Pipe() for i in range(self.n_procs)]
    # Each worker evaluates its slice and writes the result to the child
    # end of its pipe (spawn wraps self._eval_func for the subprocess).
    proc = [Process(target=spawn(self._eval_func), args=(c, k)) for k, (p, c) in zip(divided_samples, pipe)]
    [p.start() for p in proc]
    [p.join() for p in proc]
    # --- time of evaluation is set to constant (=1). This is one of the
    # hypotheses of synchronous batch methods.
    f_evals = np.zeros((x.shape[0], 1))
    cost_evals = np.ones((x.shape[0], 1))
    i = 0
    # Reassemble the results in the same round-robin order used for the
    # split, reading each worker's parent pipe end.
    for (p, c) in pipe:
        f_evals[i::self.n_procs] = p.recv()[0]
        # throw away costs
        i += 1
    return f_evals, cost_evals
|
def make_image(location, size, fmt):
    '''Create a blank virtual machine image file of the specified size in
    megabytes. The image can be created in any format supported by qemu.

    Returns the image path on success, or an empty string on any
    failure (relative path, missing parent directory, qemu-img error).

    CLI Example:

    .. code-block:: bash

        salt '*' qemu_img.make_image /tmp/image.qcow 2048 qcow2
        salt '*' qemu_img.make_image /tmp/image.raw 10240 raw
    '''
    # Refuse relative paths and parent directories that do not exist.
    if not os.path.isabs(location):
        return ''
    if not os.path.isdir(os.path.dirname(location)):
        return ''
    cmd = 'qemu-img create -f {0} {1} {2}M'.format(fmt, location, size)
    # cmd.retcode returns 0 (falsy) on success.
    if __salt__['cmd.retcode'](cmd, python_shell=False) == 0:
        return location
    return ''
|
def _spec_trace(trace, cmap=None, wlen=0.4, log=False, trc='k', tralpha=0.9, size=(10, 2.5), axes=None, title=None):
    """Plot a trace over that trace's spectrogram.

    Uses obspy's spectrogram routine.

    :type trace: obspy.core.trace.Trace
    :param trace: trace to plot
    :type cmap: str
    :param cmap: [Matplotlib colormap](http://matplotlib.org/examples/color/colormaps_reference.html)
    :type wlen: float
    :param wlen: Window length for fft in seconds
    :type log: bool
    :param log: Use a log frequency scale
    :type trc: str
    :param trc: Color for the trace.
    :type tralpha: float
    :param tralpha: Opacity level for the seismogram, from transparent
        (0.0) to opaque (1.0).
    :type size: tuple
    :param size: Plot size, tuple of floats, inches
    :type axes: matplotlib axes
    :param axes: Axes to plot onto, defaults to self generating.
    :type title: str
    :param title: Title for the plot.

    :returns: ``(ax1, ax2)`` when *axes* was supplied; otherwise the
        figure is shown and nothing is returned.
    """
    import matplotlib.pyplot as plt
    if not axes:
        fig = plt.figure(figsize=size)
        ax1 = fig.add_subplot(111)
    else:
        ax1 = axes
    trace.spectrogram(wlen=wlen, log=log, show=False, cmap=cmap, axes=ax1)
    # NOTE(review): when external axes are passed, this grabs the
    # *current* figure, which may not be the figure owning `axes` --
    # confirm against callers.
    fig = plt.gcf()
    # Overlay the waveform on a twinned y-axis so both share the x-axis.
    ax2 = ax1.twinx()
    y = trace.data
    # Time vector in seconds for the waveform x-axis.
    x = np.linspace(0, len(y) / trace.stats.sampling_rate, len(y))
    ax2.plot(x, y, color=trc, linewidth=2.0, alpha=tralpha)
    ax2.set_xlim(min(x), max(x))
    # Double the y-limits so the waveform sits clear of the spectrogram.
    ax2.set_ylim(min(y) * 2, max(y) * 2)
    if title:
        ax1.set_title(' '.join([trace.stats.station, trace.stats.channel, trace.stats.starttime.datetime.strftime('%Y/%m/%d %H:%M:%S')]))
    if not axes:
        fig.set_size_inches(size)
        fig.show()
    else:
        return ax1, ax2
|
def comments(self, limit=None):
    """GET the newest comments from this subreddit.

    Delegates to :meth:`narwal.Reddit.comments`.

    :param limit: max number of comments to return.
    """
    reddit = self._reddit
    return reddit.comments(self.display_name, limit=limit)
|
def save_items(self, rows=None, verbose=False):
    """Collect grid data for the selected rows.

    Returns ``{row_number: {column_label: cell_value, ...}, ...}``.
    When *rows* is not provided (or empty), data for every row is
    returned.

    :param rows: iterable of row numbers to read, or None for all rows.
    :param verbose: when True, print each value as it is read.
    """
    selected_rows = rows if rows else list(range(self.GetNumberRows()))
    column_indices = list(range(self.GetNumberCols()))
    data = {}
    for row_number in selected_rows:
        row_data = {}
        for column in column_indices:
            label = self.GetColLabelValue(column)
            value = self.GetCellValue(row_number, column)
            if verbose:
                print(label, ":", value)
            row_data[label] = value
        data[row_number] = row_data
    return data
|
def tolist(self) -> List[bool]:
    """Convert the set to a list of 64 bools, True at occupied squares."""
    occupied = set(self)
    return [index in occupied for index in range(64)]
|
def parse_single_report(f):
    """Parse a GATK VariantEval report.

    Scans *f* line by line for the ``CompOverlap``, ``CountVariants``
    and ``TiTvVariantEvaluator`` GATKTable sections and extracts summary
    metrics from each into a flat dict.

    :param f: iterable file object positioned at the start of a report.
    :return: dict of metric name -> value.
    """
    # Fixme: Separate GATKReport parsing and data subsetting. A
    # GATKReport parser is now available from the GATK MultiqcModule.
    data = dict()
    in_CompOverlap = False
    in_CountVariants = False
    in_TiTv = False
    for l in f:
        # Detect section headers
        if '#:GATKTable:CompOverlap' in l:
            in_CompOverlap = True
        elif '#:GATKTable:CountVariants' in l:
            in_CountVariants = True
        elif '#:GATKTable:TiTvVariantEvaluator' in l:
            in_TiTv = True
        else:
            # Parse contents using nested loops. In each section the
            # first line after the marker holds the column names; data
            # rows are then consumed with f.readline() until a short or
            # blank line raises KeyError/IndexError-style lookup failure
            # in d, which is used as the end-of-section sentinel.
            if in_CompOverlap:
                headers = l.split()
                while in_CompOverlap:
                    l = f.readline().strip("\n")
                    d = dict()
                    try:
                        # Zip the row's fields with the column headers.
                        for i, s in enumerate(l.split()):
                            d[headers[i]] = s
                        if d['Novelty'] == 'all':
                            data['reference'] = d['CompRod']
                            data['comp_rate'] = float(d['compRate'])
                            data['concordant_rate'] = float(d['concordantRate'])
                            data['eval_variants'] = int(d['nEvalVariants'])
                            data['novel_sites'] = int(d['novelSites'])
                        elif d['Novelty'] == 'known':
                            data['known_sites'] = int(d['nEvalVariants'])
                    except KeyError:
                        # Missing 'Novelty' column => end of the table.
                        in_CompOverlap = False
            elif in_CountVariants:
                headers = l.split()
                while in_CountVariants:
                    l = f.readline().strip("\n")
                    d = dict()
                    try:
                        for i, s in enumerate(l.split()):
                            d[headers[i]] = s
                        if d['Novelty'] == 'all':
                            data['snps'] = int(d['nSNPs'])
                            data['mnps'] = int(d['nMNPs'])
                            data['insertions'] = int(d['nInsertions'])
                            data['deletions'] = int(d['nDeletions'])
                            data['complex'] = int(d['nComplex'])
                            data['symbolic'] = int(d['nSymbolic'])
                            data['mixed'] = int(d['nMixed'])
                            data['nocalls'] = int(d['nNoCalls'])
                    except KeyError:
                        # Missing 'Novelty' column => end of the table.
                        in_CountVariants = False
            elif in_TiTv:
                headers = l.split()
                # Default in case no 'known' row is present.
                data['titv_reference'] = 'unknown'
                while in_TiTv:
                    l = f.readline().strip("\n")
                    d = dict()
                    try:
                        for i, s in enumerate(l.split()):
                            d[headers[i]] = s
                        if d['Novelty'] == 'known':
                            data['titv_reference'] = d['CompRod']
                            data['known_titv'] = float(d['tiTvRatio'])
                        elif d['Novelty'] == 'novel':
                            data['novel_titv'] = float(d['tiTvRatio'])
                    except KeyError:
                        # Missing 'Novelty' column => end of the table.
                        in_TiTv = False
    return data
|
def update(self, friendly_name=None, description=None, query=None):
    """Selectively update the View's information.

    Parameters left as None (the default) are not applied.

    Args:
      friendly_name: if not None, the new friendly name.
      description: if not None, the new description.
      query: if not None, a new query (SQL string or Query object) for
        the View.
    """
    self._table._load_info()
    if query is not None:
        # Accept either a raw SQL string or a Query object.
        sql = query.sql if isinstance(query, _query.Query) else query
        self._table._info['view'] = {'query': sql}
    self._table.update(friendly_name=friendly_name, description=description)
|
def getclientloansurl(idclient, *args, **kwargs):
    """Request Client loans URL.

    How to use it? By default MambuLoan uses getloansurl as the urlfunc.
    Override that behaviour by sending getclientloansurl (this function)
    as the urlfunc to the constructor of MambuLoans (note the final 's')
    and voila! you get the Loans just for a certain client.

    If idclient is set, you'll get a response adequate for a MambuLoans
    object.  If not set, you'll get a Jar Jar Binks object, or something
    quite strange and useless as JarJar.  A MambuError most likely,
    since I haven't needed it for anything but for loans of one and just
    one client.

    See mambuloan module and pydoc for further information.

    Currently implemented filter parameters:
    * fullDetails
    * accountState

    See Mambu official developer documentation for further details, and
    info on parameters that may be implemented here in the future.
    """
    getparams = []
    if kwargs:
        # Explicit membership checks instead of the previous broad
        # `except Exception: pass`, which silently hid real errors; a
        # missing key still simply skips the parameter.
        if "fullDetails" in kwargs:
            # Mambu expects a literal true/false flag.
            if kwargs["fullDetails"] == True:
                getparams.append("fullDetails=true")
            else:
                getparams.append("fullDetails=false")
        if "accountState" in kwargs:
            getparams.append("accountState=%s" % kwargs["accountState"])
    clientidparam = "/" + idclient
    url = (getmambuurl(*args, **kwargs) + "clients" + clientidparam + "/loans"
           + ("" if len(getparams) == 0 else "?" + "&".join(getparams)))
    return url
|
def nm_to_rgb(nm):
    """Convert a wavelength to corresponding RGB values [0.0-1.0].

    Parameters
    ----------
    nm : int or float
        The wavelength of light.

    Returns
    -------
    list
        [R, G, B] values between 0 and 1.

    `original code`__

    __ http://www.physics.sfasu.edu/astro/color/spectra.html
    """
    w = int(nm)
    # Base colour by spectral band -----
    if 380 <= w < 440:
        red, green, blue = -(w - 440.) / (440. - 350.), 0.0, 1.0
    elif 440 <= w < 490:
        red, green, blue = 0.0, (w - 440.) / (490. - 440.), 1.0
    elif 490 <= w < 510:
        red, green, blue = 0.0, 1.0, -(w - 510.) / (510. - 490.)
    elif 510 <= w < 580:
        red, green, blue = (w - 510.) / (580. - 510.), 1.0, 0.0
    elif 580 <= w < 645:
        red, green, blue = 1.0, -(w - 645.) / (645. - 580.), 0.0
    elif 645 <= w <= 780:
        red, green, blue = 1.0, 0.0, 0.0
    else:
        # Outside the visible range: black.
        red, green, blue = 0.0, 0.0, 0.0
    # Intensity correction: brightness falls off near the limits of
    # vision -----
    if 380 <= w < 420:
        intensity = 0.3 + 0.7 * (w - 350) / (420 - 350)
    elif 420 <= w <= 700:
        intensity = 1.0
    elif 700 < w <= 780:
        intensity = 0.3 + 0.7 * (780 - w) / (780 - 700)
    else:
        intensity = 0.0
    intensity *= 255
    # Quantize to 8-bit steps, then renormalize onto [0, 1).
    return [float(int(intensity * channel) / 256.) for channel in (red, green, blue)]
|
def on_epoch_end(self, last_metrics, **kwargs):
    "Append the accumulated running average to `last_metrics`."
    average = self.val / self.count
    return add_metrics(last_metrics, average)
|
def _submit_rate ( self , metric_name , val , metric , custom_tags = None , hostname = None ) :
"""Submit a metric as a rate , additional tags provided will be added to
the ones from the label provided via the metrics object .
` custom _ tags ` is an array of ' tag : value ' that will be added to the
metric when sending the rate to Datadog ."""
|
_tags = self . _metric_tags ( metric_name , val , metric , custom_tags , hostname )
self . rate ( '{}.{}' . format ( self . NAMESPACE , metric_name ) , val , _tags , hostname = hostname )
|
def extract_file_from_tar(bytes_io, expected_file):
    """Extract a single member from an in-memory gzipped tar archive.

    :param bytes_io: seekable binary file-like object holding a .tar.gz.
    :param expected_file: name of the archive member to extract.
    :return: the member's contents as bytes.
    """
    # Read straight from the in-memory buffer instead of round-tripping
    # through a literal 'temp' file in the CWD (race- and
    # collision-prone), and close the TarFile deterministically.
    bytes_io.seek(0)
    with tarfile.open(fileobj=bytes_io, mode='r:gz') as tar:
        return tar.extractfile(expected_file).read()
|
def _get_representative ( self , obj ) :
"""Finds and returns the root of the set containing ` obj ` ."""
|
if obj not in self . _parents :
self . _parents [ obj ] = obj
self . _weights [ obj ] = 1
self . _prev_next [ obj ] = [ obj , obj ]
self . _min_values [ obj ] = obj
return obj
path = [ obj ]
root = self . _parents [ obj ]
while root != path [ - 1 ] :
path . append ( root )
root = self . _parents [ root ]
# compress the path and return
for ancestor in path :
self . _parents [ ancestor ] = root
return root
|
def display_system(sys, style='vdw'):
    '''Display the system *sys* with the default viewer.

    :param sys: the System to render (needs ``r_array``, ``type_array``,
        ``bonds`` and ``box_vectors`` attributes).
    :param style: 'vdw' for space-filling impostor spheres or
        'ball-and-stick'.  NOTE(review): any other value renders
        nothing -- confirm whether that is intended.

    Blocks in the Qt event loop until the viewer window is closed.
    '''
    v = QtViewer()
    # v.add_post_processing(FXAAEffect)
    v.add_post_processing(SSAOEffect)
    if style == 'vdw':
        sr = v.add_renderer(AtomRenderer, sys.r_array, sys.type_array, backend='impostors')
    if style == 'ball-and-stick':
        sr = v.add_renderer(BallAndStickRenderer, sys.r_array, sys.type_array, sys.bonds)
    if sys.box_vectors is not None:
        v.add_renderer(BoxRenderer, sys.box_vectors)
        # We autozoom on the box: frame all eight corners of the
        # parallelepiped spanned by the box vectors.
        a, b, c = sys.box_vectors
        box_vertices = np.array([[0.0, 0.0, 0.0], a, b, c, a + b, a + c, b + c, a + b + c])
        v.widget.camera.autozoom(box_vertices)
    else:
        # No box: frame the atom positions instead.
        v.widget.camera.autozoom(sys.r_array)
    v.run()
|
def _print_topics(self, header: str, cmds: List[str], verbose: bool) -> None:
    """Customized version of print_topics that can switch between verbose
    or traditional output.

    In traditional mode this defers to cmd.Cmd's columnar listing; in
    verbose mode each command is printed padded to a common width and
    followed by the first block of its documentation.

    :param header: heading printed above the listing.
    :param cmds: command names to list.
    :param verbose: when True, include per-command documentation.
    """
    import io
    if cmds:
        if not verbose:
            # Traditional output: cmd.Cmd's column formatter.
            self.print_topics(header, cmds, 15, 80)
        else:
            self.stdout.write('{}\n'.format(str(header)))
            widest = 0
            # measure the commands
            for command in cmds:
                width = utils.ansi_safe_wcswidth(command)
                if width > widest:
                    widest = width
            # add a 4-space pad
            widest += 4
            if widest < 20:
                widest = 20
            if self.ruler:
                self.stdout.write('{:{ruler}<{width}}\n'.format('', ruler=self.ruler, width=80))
            # Try to get the documentation string for each command
            topics = self.get_help_topics()
            for command in cmds:
                cmd_func = self.cmd_func(command)
                # Non-argparse commands can have help_functions for their documentation
                if not hasattr(cmd_func, 'argparser') and command in topics:
                    help_func = getattr(self, HELP_FUNC_PREFIX + command)
                    result = io.StringIO()
                    # Capture the help function's output: redirect both
                    # system stdout and our internal stdout into the
                    # StringIO buffer.
                    with redirect_stdout(result):
                        # save our internal stdout
                        stdout_orig = self.stdout
                        try:
                            # redirect our internal stdout
                            self.stdout = result
                            help_func()
                        finally:
                            # restore internal stdout
                            self.stdout = stdout_orig
                    doc = result.getvalue()
                else:
                    doc = cmd_func.__doc__
                # Attempt to locate the first documentation block
                if not doc:
                    doc_block = ['']
                else:
                    doc_block = []
                    found_first = False
                    for doc_line in doc.splitlines():
                        stripped_line = doc_line.strip()
                        # Don't include :param type lines
                        if stripped_line.startswith(':'):
                            if found_first:
                                break
                        elif stripped_line:
                            doc_block.append(stripped_line)
                            found_first = True
                        elif found_first:
                            # Blank line after the first block ends it.
                            break
                # Print the command once, then blank padding for any
                # continuation lines of its documentation.
                for doc_line in doc_block:
                    self.stdout.write('{: <{col_width}}{doc}\n'.format(command, col_width=widest, doc=doc_line))
                    command = ''
            self.stdout.write("\n")
|
def plot2d(points, cells, mesh_color="k", show_axes=False):
    """Plot a 2D mesh using matplotlib.

    :param points: node coordinates, one row per node (only x, y used).
    :param cells: cell connectivity, passed to ``create_edges``.
    :param mesh_color: matplotlib colour for the mesh edges.
    :param show_axes: whether to draw the coordinate axes.
    :return: the matplotlib figure.
    """
    import matplotlib.pyplot as plt
    from matplotlib.collections import LineCollection

    fig = plt.figure()
    ax = fig.gca()
    plt.axis("equal")
    if not show_axes:
        ax.set_axis_off()

    # Expand the view by 10% of the mesh extent on every side.
    xmin = numpy.amin(points[:, 0])
    xmax = numpy.amax(points[:, 0])
    ymin = numpy.amin(points[:, 1])
    ymax = numpy.amax(points[:, 1])
    pad_x = 0.1 * (xmax - xmin)
    pad_y = 0.1 * (ymax - ymin)
    ax.set_xlim(xmin - pad_x, xmax + pad_x)
    ax.set_ylim(ymin - pad_y, ymax + pad_y)

    # Draw every edge as a line segment, cutting off any z-component.
    edge_nodes, _ = create_edges(cells)
    segments = points[edge_nodes][:, :, :2]
    ax.add_collection(LineCollection(segments, color=mesh_color))
    return fig
|
def drape(raster, feature):
    """Convert a 2D feature to a 3D feature by sampling a raster.

    Parameters:
        raster (rasterio): raster to provide the z coordinate
        feature (dict): fiona feature record to convert

    Returns:
        result (Point or LineString): shapely Point or LineString of xyz
        coordinate triples, or None when the geometry type is
        unsupported (an error is logged).
    """
    coords = feature['geometry']['coordinates']
    geom_type = feature['geometry']['type']
    if geom_type == 'Point':
        xyz = sample(raster, [coords])
        result = Point(xyz[0])
    elif geom_type == 'LineString':
        xyz = sample(raster, coords)
        points = [Point(x, y, z) for x, y, z in xyz]
        result = LineString(points)
    else:
        # Previously `result` was left unbound on this branch, so the
        # log line was immediately followed by an UnboundLocalError;
        # return None explicitly instead.
        logging.error('drape not implemented for {}'.format(geom_type))
        result = None
    return result
|
def get_asset_contents_by_genus_type_for_asset(self, asset_content_genus_type, asset_id):
    """Get an ``AssetContentList`` for the given genus type and asset Id.

    In plenary mode the returned list contains all known asset contents
    or an error results; otherwise it may contain only those asset
    contents that are accessible through this session.

    :param asset_content_genus_type: an asset content genus type
    :type asset_content_genus_type: ``osid.type.Type``
    :param asset_id: an asset ``Id``
    :type asset_id: ``osid.id.Id``
    :return: the returned ``AssetContentList``
    :rtype: ``osid.repository.AssetContentList``
    :raise: ``NullArgument`` -- ``asset_content_genus_type`` or ``asset_id`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    provider_list = self._provider_session.get_asset_contents_by_genus_type_for_asset(
        asset_content_genus_type, asset_id)
    return AssetContentList(provider_list, self._config_map)
|
def update(self, password=values.unset):
    """Update the CredentialInstance.

    :param unicode password: The password (it is never returned in the
        response)
    :returns: Updated CredentialInstance
    :rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialInstance
    """
    proxy = self._proxy
    return proxy.update(password=password)
|
def lookup(self, dotted_path, lineno=None):
    """Perform an alias lookup for a dotted path of the form
    ``class_name`` or ``class_name:method_name``.

    For methods the line number must be supplied or the result is
    unreliable.

    :param dotted_path: the obfuscated dotted path to resolve.
    :param lineno: source line number (required for method lookups;
        forwarded as 0 when omitted).
    :return: the de-obfuscated path as a unicode string.
    """
    rv = None
    try:
        # rustcall raises on failure, in which case rv stays None and
        # the finally block frees nothing.
        rv = rustcall(_lib.lsm_proguard_mapping_convert_dotted_path, self._get_ptr(), dotted_path.encode('utf-8'), lineno or 0)
        return _ffi.string(rv).decode('utf-8', 'replace')
    finally:
        # The return expression above copies the string out before the
        # Rust-owned buffer is released here.
        if rv is not None:
            _lib.lsm_buffer_free(rv)
|
def encrypt(byte_data, secret_key=''):
    '''uses cryptography module to encrypt byte data

        cipher:      AES (128 bit block_size)
        hash:        sha512
        key size:    256 bit (first 32 bytes of secret key hash)
        vector size: 128 bit (next 16 bytes of secret key hash)
        padding:     PKCS7
        cipher mode: CBC
        backend:     openssl 1.0.2a

    NOTE: if secret_key is left blank,
          method generates a 32 byte hexadecimal string

    :param byte_data: bytes with data to encrypt
    :param secret_key: [optional] string used to encrypt data
    :return: encrypted byte data, secret key hex string
    '''
    # validate input
    if not isinstance(byte_data, bytes):
        raise TypeError('\nbyte data input must be a byte datatype.')
    # validate secret key or create secret key
    if secret_key:
        if not isinstance(secret_key, str):
            raise TypeError('\nsecret key input must be a utf-8 encoded string.')
    else:
        from os import urandom
        from binascii import hexlify
        # 32 random bytes, hex-encoded -> 64-character key string.
        secret_key = hexlify(urandom(32)).decode()
    # retrieve cipher key and initialization vector from sha512 hash of
    # secret key (sha512 digest is 64 bytes: 32 for key + 16 for IV)
    key_bytes = hashlib.sha512(secret_key.encode('utf-8')).digest()
    cipher_key = key_bytes[0:32]
    cipher_vector = key_bytes[32:48]
    # construct encryptor
    cipher_kwargs = {'algorithm': algorithms.AES(cipher_key), 'mode': modes.CBC(cipher_vector), 'backend': openssl.backend}
    cipher = Cipher(**cipher_kwargs)
    encryptor = cipher.encryptor()
    # encrypt and add padding (PKCS7 pads up to the 128-bit AES block)
    padder = padding.PKCS7(128).padder()
    padded_data = padder.update(byte_data)
    padded_data += padder.finalize()
    encrypted_data = encryptor.update(padded_data) + encryptor.finalize()
    return encrypted_data, secret_key
|
def command(self, dbname, spec, slave_ok=False, read_preference=ReadPreference.PRIMARY, codec_options=DEFAULT_CODEC_OPTIONS, check=True, allowable_errors=None, check_keys=False, read_concern=DEFAULT_READ_CONCERN, write_concern=None, parse_write_concern_error=False, collation=None):
    """Execute a command or raise ConnectionFailure or OperationFailure.

    :Parameters:
      - `dbname`: name of the database on which to run the command
      - `spec`: a command document as a dict, SON, or mapping object
      - `slave_ok`: whether to set the SlaveOkay wire protocol bit
      - `read_preference`: a read preference
      - `codec_options`: a CodecOptions instance
      - `check`: raise OperationFailure if there are errors
      - `allowable_errors`: errors to ignore if `check` is True
      - `check_keys`: if True, check `spec` for invalid keys
      - `read_concern`: The read concern for this command.
      - `write_concern`: The write concern for this command.
      - `parse_write_concern_error`: Whether to parse the
        ``writeConcernError`` field in the command response.
      - `collation`: The collation for this command.
    """
    # readConcern requires wire version 4 (MongoDB 3.2+).
    if self.max_wire_version < 4 and not read_concern.ok_for_legacy:
        raise ConfigurationError('read concern level of %s is not valid ' 'with a max wire version of %d.' % (read_concern.level, self.max_wire_version))
    # Collation cannot be combined with unacknowledged writes.
    if not (write_concern is None or write_concern.acknowledged or collation is None):
        raise ConfigurationError('Collation is unsupported for unacknowledged writes.')
    # Wire version 5+ (MongoDB 3.4+) accepts writeConcern inside the
    # command document; collation likewise requires 3.4+.
    if self.max_wire_version >= 5 and write_concern:
        spec['writeConcern'] = write_concern.document
    elif self.max_wire_version < 5 and collation is not None:
        raise ConfigurationError('Must be connected to MongoDB 3.4+ to use a collation.')
    try:
        # Delegate to the module-level command() with this connection's
        # socket and server metadata.
        return command(self.sock, dbname, spec, slave_ok, self.is_mongos, read_preference, codec_options, check, allowable_errors, self.address, check_keys, self.listeners, self.max_bson_size, read_concern, parse_write_concern_error=parse_write_concern_error, collation=collation)
    except OperationFailure:
        raise
    # Catch socket.error, KeyboardInterrupt, etc. and close ourselves.
    except BaseException as error:
        self._raise_connection_failure(error)
|
def next_week_day(base_date, weekday):
    """Find the given weekday in the week after ``base_date``.

    :param base_date: a date (or datetime) to start from.
    :param weekday: target weekday as an int, Monday == 0 .. Sunday == 6.
    :return: the date of that weekday in the following week.
    """
    # Jump to Monday of the following week, then walk forward until the
    # requested weekday is reached.
    days_until_next_monday = 7 - base_date.weekday()
    candidate = base_date + timedelta(days=days_until_next_monday)
    while candidate.weekday() != weekday:
        candidate += timedelta(days=1)
    return candidate
|
def get_updates(self, offset=None, limit=100, poll_timeout=0, allowed_updates=None, request_timeout=None, delta=timedelta(milliseconds=100), error_as_empty=False):
    """Use this method to receive incoming updates using long polling.

    If `error_as_empty` is set to `True`, any `requests.RequestException` is
    logged as a warning and an empty result is faked, intended for use in
    for loops; the returned object then contains an "exception" field.
    If set to `False`, such exceptions are raised normally.

    :keyword offset: (Optional) Identifier of the first update to be returned.
        Must be greater by one than the highest previously received update id.
    :type offset: int
    :param limit: Limits the number of updates to be retrieved (1-100). Defaults to 100.
    :type limit: int
    :param poll_timeout: Timeout in seconds for long polling. Defaults to 0 (short polling).
    :type poll_timeout: int
    :param allowed_updates: List of update types to receive; empty list means all types.
    :type allowed_updates: list of str
    :param request_timeout: Timeout of the HTTP request itself (not the server-side
        long-poll timeout). If not specified, it is set to `poll_timeout` + 2.
    :type request_timeout: int
    :param delta: Wait a minimum of 'delta' between requests. Useful in a loop.
    :type delta: datetime.timedelta
    :param error_as_empty: See above.
    :type error_as_empty: bool

    :return: An array of Update objects, or an empty array if there was a
        requests.RequestException and error_as_empty is True.
    :rtype: list of pytgbot.api_types.receivable.updates.Update
    """
    from datetime import datetime
    assert (offset is None or isinstance(offset, int))
    assert (limit is None or isinstance(limit, int))
    assert (poll_timeout is None or isinstance(poll_timeout, int))
    assert (allowed_updates is None or isinstance(allowed_updates, list))
    if poll_timeout and request_timeout is None:
        # Documented default: request timeout is the long-poll timeout plus a
        # grace period. (Previous condition was inverted and never applied it.)
        request_timeout = poll_timeout + 2
    # end if
    if delta.total_seconds() > poll_timeout:
        # Short polling: enforce a minimal pause of `delta` between requests.
        now = datetime.now()
        elapsed = now - self._last_update
        if elapsed < delta:
            wait = (delta - elapsed).total_seconds()  # e.g. 0.2
            if wait > 0:
                logger.debug("Sleeping {i} seconds.".format(i=wait))
                sleep(wait)
            # end if
        # end if
    # end if
    self._last_update = datetime.now()
    try:
        result = self.do("getUpdates", offset=offset, limit=limit, timeout=poll_timeout, allowed_updates=allowed_updates, use_long_polling=poll_timeout != 0, request_timeout=request_timeout)
        if self.return_python_objects:
            logger.debug("Trying to parse {data}".format(data=repr(result)))
            from pytgbot.api_types.receivable.updates import Update
            try:
                return Update.from_array_list(result, list_level=1)
            except TgApiParseException:
                logger.debug("Failed parsing as api_type Update", exc_info=True)
            # end try
            # no valid parsing so far
            raise TgApiParseException("Could not parse result.")  # See debug log for details!
        # end if return_python_objects
        return result
    except (requests.RequestException, TgApiException) as e:
        if error_as_empty:
            # `warn` is deprecated; `warning` is the supported spelling.
            logger.warning("Network related error happened in get_updates(), but will be ignored: " + str(e), exc_info=True)
            self._last_update = datetime.now()
            return DictObject(result=[], exception=e)
        else:
            raise
|
def wait(self, auth, resource, options, defer=False):
    """HTTP long-polling call that blocks until the given resource is updated.

    Args:
        auth: <cik> for authentication
        resource: <ResourceID> identifying the resource to wait on.
        options: Wait options, including a timeout in ms (max 5 min) and a
            start time (null means "when the request is received").
    """
    # The server controls the timeout, so disable the client-side one.
    call_args = [resource, options]
    return self._call('wait', auth, call_args, defer, notimeout=True)
|
def serve_coll_page(self, environ, coll='$root'):
    """Render and serve a collection's search page (search.html).

    :param dict environ: The WSGI environment dictionary for the request
    :param str coll: The collection to serve the search page for
    :return: The WbResponse containing the rendered page
    :rtype: WbResponse
    """
    if not self.is_valid_coll(coll):
        self.raise_not_found(environ, 'No handler for "/{0}"'.format(coll))
    self.setup_paths(environ, coll)
    coll_metadata = self.get_metadata(coll)
    search_view = BaseInsertView(self.rewriterapp.jinja_env, 'search.html')
    prefix = environ.get('SCRIPT_NAME')
    if prefix:
        # Template expects a trailing slash on the mount prefix.
        prefix += '/'
    html = search_view.render_to_string(environ, wb_prefix=prefix, metadata=coll_metadata, coll=coll)
    return WbResponse.text_response(html, content_type='text/html; charset="utf-8"')
|
def sync_one ( self , aws_syncr , amazon , route ) :
    """Make sure this Route53 record exists and points at the target we want.

    Creates the record if it is missing, otherwise modifies it in place.
    (Previous docstring mentioned roles/policies - a copy-paste error.)
    """

    route_info = amazon . route53 . route_info ( route . name , route . zone )
    target = route . record_target
    # The target may be a factory callable that needs the amazon session.
    if callable ( target ) :
        target = target ( amazon )
    if not route_info :
        amazon . route53 . create_route ( route . name , route . zone , route . record_type , target )
    else :
        amazon . route53 . modify_route ( route_info , route . name , route . zone , route . record_type , target )
|
def feed(self, token, test_newline=True):
    """Consume *token* and update the tracked line, column and char position.

    As an optional optimization, pass ``test_newline=False`` when the token
    is known not to contain a newline.
    """
    if test_newline:
        nl_count = token.count(self.newline_char)
        if nl_count:
            self.line += nl_count
            # The column counter restarts right after the token's last newline.
            self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1
    self.char_pos += len(token)
    self.column = self.char_pos - self.line_start_pos + 1
|
def read_asc_grid ( filename , footer = 0 ) :
    """Reads an ASCII grid file (*.asc).

    Parameters
    ----------
    filename : str
        Name of *.asc file.
    footer : int, optional
        Number of lines at the bottom of the *.asc file to skip.

    Returns
    -------
    grid_array : numpy array, shape (M, N)
        (M, N) array of grid values, where M is the number of Y-coordinates
        and N the number of X-coordinates. The entry for the lower-left
        coordinates is at index [M, 0], so the array is oriented as it
        would be in X-Y space.
    x : numpy array, shape (N,)
        1D array of N X-coordinates.
    y : numpy array, shape (M,)
        1D array of M Y-coordinates.
    CELLSIZE : tuple or float
        Either a two-tuple of (x-cell size, y-cell size), or a float for a
        uniform cell size.
    NODATA : float
        Value that marks entries that are not actual data.
    """

    # Header fields, filled in as the header lines are parsed below.
    ncols = None
    nrows = None
    xllcorner = None
    xllcenter = None
    yllcorner = None
    yllcenter = None
    cellsize = None
    dx = None
    dy = None
    no_data = None
    header_lines = 0
    with io . open ( filename , 'r' ) as f :
        while True :
            # Each header line has the form "<keyword> <value>".
            string , value = f . readline ( ) . split ( )
            header_lines += 1
            if string . lower ( ) == 'ncols' :
                ncols = int ( value )
            elif string . lower ( ) == 'nrows' :
                nrows = int ( value )
            elif string . lower ( ) == 'xllcorner' :
                xllcorner = float ( value )
            elif string . lower ( ) == 'xllcenter' :
                xllcenter = float ( value )
            elif string . lower ( ) == 'yllcorner' :
                yllcorner = float ( value )
            elif string . lower ( ) == 'yllcenter' :
                yllcenter = float ( value )
            elif string . lower ( ) == 'cellsize' :
                cellsize = float ( value )
            elif string . lower ( ) == 'cell_size' :
                cellsize = float ( value )
            elif string . lower ( ) == 'dx' :
                dx = float ( value )
            elif string . lower ( ) == 'dy' :
                dy = float ( value )
            elif string . lower ( ) == 'nodata_value' :
                no_data = float ( value )
            elif string . lower ( ) == 'nodatavalue' :
                no_data = float ( value )
            else :
                raise IOError ( "could not read *.asc file. Error in header." )
            # Stop once a complete, consistent header has been read: grid
            # shape, a lower-left anchor (corner or center), a cell size
            # (uniform or dx/dy pair), and the no-data marker.
            if ( ncols is not None ) and ( nrows is not None ) and ( ( ( xllcorner is not None ) and ( yllcorner is not None ) ) or ( ( xllcenter is not None ) and ( yllcenter is not None ) ) ) and ( ( cellsize is not None ) or ( ( dx is not None ) and ( dy is not None ) ) ) and ( no_data is not None ) :
                break
    raw_grid_array = np . genfromtxt ( filename , skip_header = header_lines , skip_footer = footer )
    # Flip rows so index 0 corresponds to the smallest Y (X-Y orientation).
    grid_array = np . flipud ( raw_grid_array )
    if nrows != grid_array . shape [ 0 ] or ncols != grid_array . shape [ 1 ] :
        raise IOError ( "Error reading *.asc file. Encountered problem " "with header: NCOLS and/or NROWS does not match " "number of columns/rows in data file body." )
    # Convert a corner anchor to the cell-center coordinates used below.
    if xllcorner is not None and yllcorner is not None :
        if dx is not None and dy is not None :
            xllcenter = xllcorner + dx / 2.0
            yllcenter = yllcorner + dy / 2.0
        else :
            xllcenter = xllcorner + cellsize / 2.0
            yllcenter = yllcorner + cellsize / 2.0
    if dx is not None and dy is not None :
        x = np . arange ( xllcenter , xllcenter + ncols * dx , dx )
        y = np . arange ( yllcenter , yllcenter + nrows * dy , dy )
    else :
        x = np . arange ( xllcenter , xllcenter + ncols * cellsize , cellsize )
        y = np . arange ( yllcenter , yllcenter + nrows * cellsize , cellsize )
    # Sometimes x and y can be an entry too long due to imprecision
    # in calculating the upper cutoff for np.arange(); this bit takes care
    # of that potential problem.
    if x . size == ncols + 1 :
        x = x [ : - 1 ]
    if y . size == nrows + 1 :
        y = y [ : - 1 ]
    if cellsize is None :
        cellsize = ( dx , dy )
    return grid_array , x , y , cellsize , no_data
|
def Q_weir_rectangular_full_SIA(h1, h2, b):
    r'''Flow rate over a full-channel rectangular weir (SIA 1924 model).

    .. math::
        Q = \frac{2}{3}\sqrt{2}\left(0.615 + \frac{0.000615}{h_1+0.0016}\right)
        b\sqrt{g}h_1 + 0.5\left(\frac{h_1}{h_1+h_2}\right)^2 b\sqrt{g}h_1^{1.5}

    Parameters
    ----------
    h1 : float
        Height of the fluid above the crest of the weir [m]
    h2 : float
        Height of the fluid below the crest of the weir [m]
    b : float
        Width of the channel section [m]

    Returns
    -------
    Q : float
        Volumetric flow rate across the weir [m^3/s]

    Notes
    -----
    Valid ranges: 0.025 < h < 0.8 m; b > 0.3 m; h2 > 0.3 m; h1/h2 < 1.
    The example compares terribly with the Ackers expression - probable
    error in the secondary source. DO NOT USE.

    >>> Q_weir_rectangular_full_SIA(h1=0.3, h2=0.4, b=2)
    1.1875825055400384

    References
    ----------
    .. [1] Normen für Wassermessungen: bei Durchführung von Abnahmeversuchen
       an Wasserkraftmaschinen. SIA, 1924.
    .. [2] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York,
       N.Y.: Van Nostrand Reinhold Co., 1984.
    '''
    root_g = g ** 0.5
    discharge_coeff = 0.615 + 0.000615 / (h1 + 0.0016)
    weir_term = 2 / 3. * 2 ** 0.5 * discharge_coeff * b * root_g * h1
    approach_term = 0.5 * (h1 / (h1 + h2)) ** 2 * b * root_g * h1 ** 1.5
    return weir_term + approach_term
|
def load_tasks_from_file(self, file_path):
    """Import the python module at *file_path* and collect its BaseTask subclasses.

    :param file_path: a fully qualified file path for a python module to import CustomTasks from
    :type file_path: `str`
    :return: a dict of CustomTasks, keyed by each CustomTask's `name` class field
    :rtype: `dict`
    :raises GOSTaskException: if a concrete subclass does not override the
        `name` class field (it is used as the lookup key).
    """
    file_name, module_path, objects = Loader.import_custom_python_file(file_path)
    result = {}
    for entry in objects:
        try:
            if issubclass(entry, BaseTask):
                # A concrete task must define its own `name`; detect classes
                # that still inherit BaseTask's value.
                if entry.__name__ != BaseTask.__name__ and entry.name == BaseTask.name:
                    # Report the class name (entry.__name__); entry.name would
                    # just echo BaseTask's default here.
                    raise GOSTaskException(
                        "Class {class_name} from file {file_name} does not have a unique `name` class field. "
                        "All custom tasks must have a unique `name` class field for them, that is used for future reference"
                        "".format(class_name=entry.__name__, file_name=os.path.join(module_path, file_name)))
                result[entry.name] = entry
        except TypeError:
            # `entry` was not a class at all; issubclass() raises TypeError.
            continue
    return result
|
def get_ugali_dir():
    """Return the ugali data directory from the environment, creating it if needed."""
    dirname = os.getenv('UGALIDIR')
    if not dirname:
        # Fall back to a hidden directory under $HOME.
        dirname = os.path.join(os.getenv('HOME'), '.ugali')
    if not os.path.exists(dirname):
        from ugali.utils.logger import logger
        logger.warning("Creating UGALIDIR:\n%s" % dirname)
    return mkdir(dirname)
|
def forall ( self , method ) :
    """Apply *method* to every element of the cube.

    ``method`` is called as ``method(value, parts, cube)`` where:
      value - the value found at the element
      parts - the one domain partition corresponding to each edge
      cube  - the whole cube, for use in window functions

    TODO (from original author): ``parts`` gives no indication of
    next/previous items the way ``rownum`` does, so this does not play well
    with window functions; the issue is ``parts`` - it should be ``coord``
    instead (MAR 2015).
    """

    if not self . is_value :
        Log . error ( "Not dealing with this case yet" )
    # NOTE(review): `.values()[0]` is Python-2 style; on Python 3 this would
    # need list(self.data.values())[0] - confirm the target version.
    matrix = self . data . values ( ) [ 0 ]
    parts = [ e . domain . partitions for e in self . edges ]
    # Visit every coordinate combination of the underlying matrix.
    for c in matrix . _all_combos ( ) :
        method ( matrix [ c ] , [ parts [ i ] [ cc ] for i , cc in enumerate ( c ) ] , self )
|
def apply_panes_settings(self):
    """Re-apply dockwidget feature settings to every registered plugin."""
    all_plugins = self.widgetlist + self.thirdparty_plugins
    for plugin in all_plugins:
        features = plugin.FEATURES
        # Optionally rotate the dockwidget title bars to vertical.
        if CONF.get('main', 'vertical_dockwidget_titlebars'):
            features |= QDockWidget.DockWidgetVerticalTitleBar
        plugin.dockwidget.setFeatures(features)
        plugin.update_margins()
|
def set(self, request_url, response_json):
    """Cache *response_json* under *request_url*, evicting the LRU entry when full.

    If the cache is at capacity, the least recently used item is discarded
    from both 'usage_recency' and 'table'. New entries are stored keyed by
    the request URL; existing entries get a refreshed insertion time and are
    promoted to the front of 'usage_recency'.

    :param request_url: the request URL that uniquely identifies the request
        whose response is to be cached
    :type request_url: str
    :param response_json: the response JSON to be cached
    :type response_json: str
    """
    if self.size() == self._max_size:
        # At capacity: drop the least recently used key.
        evicted_key = self._usage_recency.pop()
        del self._table[evicted_key]
    now = timeutils.now('unix')
    if request_url in self._table:
        self._table[request_url]['insertion_time'] = now
        self._promote(request_url)
    else:
        self._table[request_url] = {'data': response_json, 'insertion_time': now}
        self._usage_recency.add(request_url)
|
def union(graphs, use_tqdm: bool = False):
    """Take the union over a collection of BEL graphs into a new graph.

    :param iter[BELGraph] graphs: An iterator over BEL graphs. Can't be infinite.
    :param use_tqdm: Should a progress bar be displayed?
    :return: A merged graph
    :rtype: BELGraph

    Example usage:

    >>> import pybel
    >>> g = pybel.from_path('...')
    >>> h = pybel.from_path('...')
    >>> k = pybel.from_path('...')
    >>> merged = union([g, h, k])
    """
    iterator = iter(graphs)
    if use_tqdm:
        iterator = tqdm(iterator, desc='taking union')
    try:
        first = next(iterator)
    except StopIteration as e:
        raise ValueError('no graphs given') from e
    try:
        second = next(iterator)
    except StopIteration:
        # Exactly one graph: return it as-is (no copy), matching prior behavior.
        return first
    # Two or more graphs: fold them into a copy of the first.
    result = first.copy()
    left_full_join(result, second)
    for graph in iterator:
        left_full_join(result, graph)
    return result
|
def gen_rand_str(*size, use=None, keyspace=None):
    """Generate a random string from @keyspace using the random module @use.

    @*size: int length bounds for the string - none for the default (10, 12),
        one value for an exact length, two for an inclusive range
    @use: the random module to use (defaults to the module-level _random)
    @keyspace: str of characters allowed in the result (defaults to
        ASCII letters + digits)

    >>> gen_rand_str()
    'PRCpAq'
    >>> gen_rand_str(12, keyspace="abcdefg")
    'gaaacffbedf'
    """
    chars = list(keyspace or (string.ascii_letters + string.digits))
    rng = use or _random
    rng.seed()
    if not size:
        bounds = (10, 12)
    elif len(size) == 2:
        bounds = size
    else:
        # A single value means an exact length.
        bounds = (size[0], size[0])
    length = rng.randint(*bounds)
    return ''.join(rng.choice(chars) for _ in range(length))
|
def _parse_title(dom, details):
    """Parse the title/name of the book.

    Args:
        dom (obj): HTMLElement containing the whole HTML page.
        details (obj): HTMLElement containing the details slice of the page.

    Returns:
        str: Book's title.

    Raises:
        AssertionError: If no title could be found.
    """
    headers = details.find("h1")
    if headers:
        return headers[0].getContent().strip()
    # Header missing - fall back to the page <title>, which looks like
    # "Book name | site name".
    titles = dom.find("title")
    assert titles, "Can't find <title> tag!"
    return titles[0].getContent().split("|")[0].strip()
|
def insert(self, dct, toa=None, comment=""):
    """Create a document.

    :param dict dct: the document to insert
    :param toa: optional time of action; marks this as a future insert
        action for a new document
    :param str comment: a comment
    :rtype: str
    :returns: the string form of the new document's bson id
    """
    # Validate against the collection schema, if one was configured.
    if self.schema:
        jsonschema.validate(dct, self.schema)
    inserted_id = yield self.collection.insert(dct)
    # Tornado-style coroutine: deliver the result via Return.
    raise Return(str(inserted_id))
|
def expand_by_device(original_parallelism, device_parallelism, data):
    """Opposite of reduce_by_device().

    Args:
        original_parallelism: an expert_utils.Parallelism object.
        device_parallelism: an expert_utils.Parallelism object.
        data: a list of tensors with length device_parallelism.n

    Returns:
        a list of Tensors with length original_parallelism.n
    """
    # Map each device to its datum, then fan back out in the original order.
    datum_by_device = dict(zip(device_parallelism.devices, data))
    return [datum_by_device[device] for device in original_parallelism.devices]
|
def _model_predict_is(self, h, recalculate=False, fit_once=True):
    """Return ensemble model in-sample predictions for the end-of-period data.

    Parameters
    ----------
    h : int
        How many steps at the end of the series to run the ensemble on
    recalculate : boolean
        Whether to recalculate the predictions or not
    fit_once : boolean
        Whether to fit each model once at the beginning, or on every iteration

    Returns
    -------
    pd.DataFrame of the model predictions, indexed by date
    """
    have_cached = len(self.model_predictions_is) != 0 and h == self.h and recalculate is not True
    if have_cached:
        return self.model_predictions_is
    # One prediction frame per model, each labelled with the model's name.
    frames = []
    for model in self.model_list:
        frame = model.predict_is(h, fit_once=fit_once)
        frame.columns = [model.model_name]
        frames.append(frame)
    result = pd.concat(frames, axis=1)
    self.model_predictions_is = result
    self.h = h
    return result
|
def _get_rho(self, v):
    """Convert a unit-cell volume in A^3 to density in g/cm^3.

    :param v: unit cell volume in A^3
    :return: density in g/cm^3
    :note: internal function
    """
    molar_volume = vol_uc2mol(v, self.z)  # in m^3
    # mass [g/mol] / volume [m^3/mol], scaled by 1e-6 to land in g/cm^3.
    return self.mass / molar_volume * 1.e-6
|
def get_sampling_strategy(self, sensor_name):
    """Get the current sampling strategy for the named sensor.

    Parameters
    ----------
    sensor_name : str
        Name of the sensor (normal or escaped form)

    Returns
    -------
    strategy : tuple of str
        contains (<strat_name>, [<strat_parm1>, ...]) where the strategy
        names and parameters are as defined by the KATCP spec
    """
    cache_key = self._get_strategy_cache_key(sensor_name)
    cached_strategy = self._strategy_cache.get(cache_key)
    if cached_strategy:
        return cached_strategy
    # Nothing cached for this sensor: it uses the default 'none' strategy.
    return resource.normalize_strategy_parameters('none')
|
def _replace_nans ( self , data ) : # return data
"""Checks floating point data columns for nans , and replaces these with
the generic Stata for missing value ( . )"""
|
for c in data :
dtype = data [ c ] . dtype
if dtype in ( np . float32 , np . float64 ) :
if dtype == np . float32 :
replacement = self . MISSING_VALUES [ 'f' ]
else :
replacement = self . MISSING_VALUES [ 'd' ]
data [ c ] = data [ c ] . fillna ( replacement )
return data
|
def _updateNonDefaultsForInspector(self, inspectorRegItem, inspector):
    """Store the (non-default) config values for the current inspector locally.

    The dictionary is later used to persist the values. Must be called after
    the inspector was drawn, because drawing may update some derived config
    values (e.g. ranges).
    """
    if not (inspectorRegItem and inspector):
        logger.debug("_updateNonDefaultsForInspector: no inspector")
        return
    key = inspectorRegItem.identifier
    logger.debug("_updateNonDefaultsForInspector: {} {}".format(key, type(inspector)))
    self._inspectorsNonDefaults[key] = inspector.config.getNonDefaultsDict()
|
def _get_sqs_conn(profile, region=None, key=None, keyid=None):
    '''Get a boto connection to SQS.'''
    if profile:
        # A profile can be a name looked up in __opts__ or a literal dict.
        if isinstance(profile, six.string_types):
            _profile = __opts__[profile]
        elif isinstance(profile, dict):
            _profile = profile
        key = _profile.get('key', None)
        keyid = _profile.get('keyid', None)
        region = _profile.get('region', None)
    # Fall back to the global salt options for anything still unset.
    region = region or __opts__.get('sqs.region', 'us-east-1')
    key = key or __opts__.get('sqs.key', None)
    keyid = keyid or __opts__.get('sqs.keyid', None)
    try:
        return boto.sqs.connect_to_region(region, aws_access_key_id=keyid, aws_secret_access_key=key)
    except boto.exception.NoAuthHandlerFound:
        log.error('No authentication credentials found when attempting to' ' make sqs_event engine connection to AWS.')
        return None
|
def add_input_distortions ( flip_left_right , random_crop , random_scale , random_brightness , module_spec ) :
    """Creates the operations to apply the specified distortions.

    During training it can help to improve the results if we run the images
    through simple distortions like crops, scales, and flips. These reflect
    the kind of variations we expect in the real world, and so can help train
    the model to cope with natural data more effectively. Here we take the
    supplied parameters and construct a network of operations to apply them
    to an image.

    Cropping: done by placing a bounding box at a random position in the full
    image. The `random_crop` parameter controls the size of that box relative
    to the input image. Zero means the box is the same size as the input and
    no cropping is performed; 50 means the crop box is half the width and
    height of the input.

    Scaling: like cropping, except the bounding box is always centered and
    its size varies randomly within the given range. Zero means the bounding
    box equals the input and no scaling is applied; 50 gives a random size
    between half and full input size.

    Args:
      flip_left_right: Boolean whether to randomly mirror images horizontally.
      random_crop: Integer percentage setting the total margin used around the
        crop box.
      random_scale: Integer percentage of how much to vary the scale by.
      random_brightness: Integer range to randomly multiply the pixel values by.
      module_spec: The hub.ModuleSpec for the image module being used.

    Returns:
      The jpeg input layer and the distorted result tensor.
    """

    input_height , input_width = hub . get_expected_image_size ( module_spec )
    input_depth = hub . get_num_image_channels ( module_spec )
    jpeg_data = tf . placeholder ( tf . string , name = 'DistortJPGInput' )
    decoded_image = tf . image . decode_jpeg ( jpeg_data , channels = input_depth )
    # Convert from full range of uint8 to range [0, 1] of float32.
    decoded_image_as_float = tf . image . convert_image_dtype ( decoded_image , tf . float32 )
    decoded_image_4d = tf . expand_dims ( decoded_image_as_float , 0 )
    # Combined pre-crop scale: crop margin plus a random resize factor.
    margin_scale = 1.0 + ( random_crop / 100.0 )
    resize_scale = 1.0 + ( random_scale / 100.0 )
    margin_scale_value = tf . constant ( margin_scale )
    resize_scale_value = tf . random_uniform ( shape = [ ] , minval = 1.0 , maxval = resize_scale )
    scale_value = tf . multiply ( margin_scale_value , resize_scale_value )
    precrop_width = tf . multiply ( scale_value , input_width )
    precrop_height = tf . multiply ( scale_value , input_height )
    precrop_shape = tf . stack ( [ precrop_height , precrop_width ] )
    precrop_shape_as_int = tf . cast ( precrop_shape , dtype = tf . int32 )
    # Resize up, then crop a random window back down to the model input size.
    precropped_image = tf . image . resize_bilinear ( decoded_image_4d , precrop_shape_as_int )
    precropped_image_3d = tf . squeeze ( precropped_image , axis = [ 0 ] )
    cropped_image = tf . random_crop ( precropped_image_3d , [ input_height , input_width , input_depth ] )
    if flip_left_right :
        flipped_image = tf . image . random_flip_left_right ( cropped_image )
    else :
        flipped_image = cropped_image
    # Random brightness: multiply all pixels by a factor in [1-b%, 1+b%].
    brightness_min = 1.0 - ( random_brightness / 100.0 )
    brightness_max = 1.0 + ( random_brightness / 100.0 )
    brightness_value = tf . random_uniform ( shape = [ ] , minval = brightness_min , maxval = brightness_max )
    brightened_image = tf . multiply ( flipped_image , brightness_value )
    distort_result = tf . expand_dims ( brightened_image , 0 , name = 'DistortResult' )
    return jpeg_data , distort_result
|
def set(self, e, k, v, real_k=None, check_kw_name=False):
    """Override base to handle the escape case: replace \\" with " in the value."""
    if self.escape:
        # Strip whitespace, then unescape any backslash-quoted quote chars.
        unescaped = v.strip().replace("\\" + self.quote, self.quote)
        v = unescaped
    return super(kv_transformer, self).set(e, k, v, real_k=real_k, check_kw_name=check_kw_name)
|
def sync_luigi_config(self, push=True, pull=True, expand=True):
    """Synchronize sections starting with ``"luigi_"`` with the luigi config parser.

    First, when *push* is *True*, options that exist in law but **not** in
    luigi are stored as defaults in the luigi config. Then, when *pull* is
    *True*, all luigi-related options in the law config are overwritten with
    those from luigi, so options set via luigi defaults (environment
    variables, global configuration files, ``LUIGI_CONFIG_PATH``) always take
    precedence. When *expand* is *True*, environment variables are expanded
    before pushing them to the luigi config.
    """
    prefix = "luigi_"
    lparser = luigi.configuration.LuigiConfigParser.instance()
    if push:
        # law -> luigi: only fill in options luigi does not define yet.
        for section in self.sections():
            if not section.startswith(prefix):
                continue
            lsection = section[len(prefix):]
            if not lparser.has_section(lsection):
                lparser.add_section(lsection)
            for option in self.options(section):
                if lparser.has_option(lsection, option):
                    continue
                getter = self.get_expanded if expand else self.get
                lparser.set(lsection, option, getter(section, option))
    if pull:
        # luigi -> law: luigi values overwrite the law config.
        for lsection in lparser.sections():
            section = prefix + lsection
            if not self.has_section(section):
                self.add_section(section)
            for option, value in lparser.items(lsection):
                self.set(section, option, value)
|
def FinalizeConfigInit ( config , token , admin_password = None , redownload_templates = False , repack_templates = True , prompt = True ) :
    """Performs the final steps of config initialization.

    Writes the configuration, initializes the datastore, creates (or resets)
    the 'admin' user, and optionally re-downloads and repacks the client
    templates.

    Args:
      config: The GRR config object being finalized.
      token: Access token forwarded to the template repacker.
      admin_password: Password for the 'admin' user; may be None.
      redownload_templates: Default for re-downloading client templates
        (overridden interactively when `prompt` is True).
      repack_templates: Whether to repack client templates.
      prompt: If True, ask interactive questions instead of relying solely on
        the boolean arguments.
    """

    config . Set ( "Server.initialized" , True )
    print ( "\nWriting configuration to %s." % config [ "Config.writeback" ] )
    config . Write ( )
    print ( "Initializing the datastore." )
    # Reload the config and initialize the GRR database.
    server_startup . Init ( )
    print ( "\nStep 3: Adding GRR Admin User" )
    try :
        CreateUser ( "admin" , password = admin_password , is_admin = True )
    except UserAlreadyExistsError :
        if prompt : # pytype: disable=wrong-arg-count
            # Ask before clobbering an existing admin password.
            if ( ( builtins . input ( "User 'admin' already exists, do you want to " "reset the password? [yN]: " ) . upper ( ) or "N" ) == "Y" ) :
                UpdateUser ( "admin" , password = admin_password , is_admin = True )
            # pytype: enable=wrong-arg-count
        else :
            UpdateUser ( "admin" , password = admin_password , is_admin = True )
    print ( "\nStep 4: Repackaging clients with new configuration." )
    # Interactive mode overrides the template flags passed in.
    if prompt :
        redownload_templates = RetryBoolQuestion ( "Server debs include client templates. Re-download templates?" , False )
        repack_templates = RetryBoolQuestion ( "Repack client templates?" , True )
    if redownload_templates :
        InstallTemplatePackage ( )
    # Build debug binaries, then build release binaries.
    if repack_templates :
        repacking . TemplateRepacker ( ) . RepackAllTemplates ( upload = True , token = token )
    print ( "\nGRR Initialization complete! You can edit the new configuration " "in %s.\n" % config [ "Config.writeback" ] )
    print ( "Please restart the service for the new configuration to take " "effect.\n" )
|
def go_to_index(self, index):
    """Create a new :class:`.CompletionState` positioned at *index*."""
    new_state = CompletionState(
        self.original_document,
        self.current_completions,
        complete_index=index,
    )
    return new_state
|
def wormhole(context, dump_timing, transit_helper, relay_url, appid):
    """Create a Magic Wormhole and communicate through it.

    Wormholes are created by speaking the same magic CODE in two
    different places at the same time. Wormholes are secure against
    anyone who doesn't use the same code.
    """
    # Build the shared Config and hang it off the click context.
    cfg = Config()
    cfg.appid = appid
    cfg.relay_url = relay_url
    cfg.transit_helper = transit_helper
    cfg.dump_timing = dump_timing
    context.obj = cfg
|
def of_text(self, text, encoding="utf-8"):
    """Return the configured hash of a piece of text (utf-8 by default).

    The result is a hex digest string, or its integer value when
    ``self.return_int`` is set.
    """
    hasher = self.hash_algo()
    hasher.update(text.encode(encoding))
    digest = hasher.hexdigest()
    return int(digest, 16) if self.return_int else digest
|
def p_definition_list(p):
    """definition_list : definition definition_list
                       | definition"""
    # NOTE: the docstring above is a PLY grammar rule - do not edit casually.
    if len(p) == 3:
        # Two symbols matched: concatenate the definition with the rest.
        p[0] = p[1] + p[2]
    elif len(p) == 2:
        p[0] = p[1]
    else:
        # Fixed: message previously named 'p_action_list' (copy-paste error).
        raise RuntimeError("Invalid production rules 'p_definition_list'")
|
def visualize_saliency_with_losses(input_tensor, losses, seed_input, wrt_tensor=None, grad_modifier='absolute', keepdims=False):
    """Generate a saliency heatmap for `seed_input` from gradients of weighted `losses`.

    Intended for advanced use with a custom loss; for common cases see
    `visualize_class_saliency` / `visualize_regression_saliency`.  Based on
    "Deep Inside Convolutional Networks" (https://arxiv.org/pdf/1312.6034v2.pdf).

    Args:
        input_tensor: Input tensor shaped `(samples, channels, image_dims...)`
            for `channels_first`, or `(samples, image_dims..., channels)` for
            `channels_last`.
        losses: List of ([Loss](vis.losses#Loss), weight) tuples.
        wrt_tensor: Tensor the loss gradients are taken with respect to;
            defaults to `input_tensor` when None.
        grad_modifier: Gradient modifier name; 'absolute' by default, use
            'relu' / 'negate' for positive / negative gradients only.
        keepdims: When False the channel axis is collapsed; when True the
            gradients keep the input tensor's shape.

    Returns:
        The normalized gradients of `seed_input` w.r.t. the weighted losses.
    """
    optimizer = Optimizer(input_tensor, losses, wrt_tensor=wrt_tensor, norm_grads=False)
    # One optimizer step is enough: we only want the (modified) gradients,
    # which minimize() returns as the second element of its result tuple.
    result = optimizer.minimize(seed_input=seed_input, max_iter=1,
                                grad_modifier=grad_modifier, verbose=False)
    grads = result[1]
    if not keepdims:
        # Collapse the channel axis to one saliency value per location.
        if K.image_data_format() == 'channels_first':
            grads = np.max(grads, axis=1)
        else:
            grads = np.max(grads, axis=-1)
    return utils.normalize(grads)[0]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.