signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def energy_at_conditions(self, pH, V):
    """Get free energy for a given pH and V.

    Args:
        pH (float): pH at which to evaluate free energy
        V (float): voltage at which to evaluate free energy

    Returns:
        free energy at conditions
    """
    # Pourbaix-style correction: a pH-dependent term and a potential-dependent term.
    ph_term = self.npH * PREFAC * pH
    potential_term = self.nPhi * V
    return self.energy + ph_term + potential_term
def _init_map ( self ) :
"""stub""" | self . my_osid_object_form . _my_map [ 'attempts' ] = int ( self . _attempts_metadata [ 'default_object_values' ] [ 0 ] )
self . my_osid_object_form . _my_map [ 'weight' ] = float ( self . _weight_metadata [ 'default_object_values' ] [ 0 ] )
# self . my _ osid _ object _ form . _ my _ map [ ' rerandomize ' ] = \
# self . _ rerandomize _ metadata [ ' default _ object _ values ' ] [ 0]
self . my_osid_object_form . _my_map [ 'showanswer' ] = str ( self . _showanswer_metadata [ 'default_object_values' ] [ 0 ] )
self . my_osid_object_form . _my_map [ 'markdown' ] = str ( self . _markdown_metadata [ 'default_object_values' ] [ 0 ] ) |
def last_datetime(self):
    """Return the time of the last operation on the bundle as a datetime object."""
    from datetime import datetime
    try:
        raw = self.state.lasttime
        return datetime.fromtimestamp(raw)
    except TypeError:
        # lasttime was None (or otherwise non-numeric): no operation recorded.
        return None
def last_line(text):
    """Get the last meaningful line of the text, that is the last non-empty line.

    :param text: Text to search the last line
    :type text: str
    :return: the last non-empty line, stripped of spaces and line endings
    :rtype: str
    """
    # Walk the lines from the end and return the first one with content.
    for candidate in reversed(text.split("\n")):
        stripped = candidate.strip("\r\n ")
        if stripped:
            return stripped
    return ""
def imshow_item(self):
    """Show the currently selected item as an image plot."""
    index = self.currentIndex()
    if not self.__prepare_plot():
        return
    key = self.model.get_key(index)
    try:
        # Dispatch on the kind of data: real image files vs. array data.
        handler = self.show_image if self.is_image(key) else self.imshow
        handler(key)
    except (ValueError, TypeError) as error:
        QMessageBox.critical(
            self, _("Plot"),
            _("<b>Unable to show image.</b>"
              "<br><br>Error message:<br>%s") % str(error))
def runGetDataset(self, id_):
    """Runs a getDataset request for the specified ID."""
    repository = self.getDataRepository()
    return self.runGetRequest(repository.getDataset(id_))
def install_antivirus(version=None, latest=False, synch=False, skip_commit=False):
    '''Install anti-virus packages.

    Args:
        version (str): The version of the PANOS file to install.

        latest (bool): If true, the latest anti-virus file will be installed.
            The specified version option will be ignored.

        synch (bool): If true, the anti-virus will synch to the peer unit.

        skip_commit (bool): If true, the install will skip committing to the device.

    CLI Example:

    .. code-block:: bash

        salt '*' panos.install_antivirus 8.0.0
    '''
    if not version and latest is False:
        raise CommandExecutionError("Version option must not be none.")
    # The API takes yes/no flags rather than booleans.
    s = "yes" if synch is True else "no"
    c = "yes" if skip_commit is True else "no"
    target_version = 'latest' if latest is True else version
    query = {
        'type': 'op',
        'cmd': ('<request><anti-virus><upgrade><install>'
                '<commit>{0}</commit><sync-to-peer>{1}</sync-to-peer>'
                '<version>{2}</version></install></upgrade></anti-virus>'
                '</request>'.format(c, s, target_version)),
    }
    return _get_job_results(query)
def coalesce_headers(cls, header_lines):
    """Collects headers that are spread across multiple lines into a single row."""
    rows = [list(row) for row in header_lines if bool(row)]
    if not rows:
        return []
    if len(rows) == 1:
        return rows[0]
    # If there are gaps in the values of a line, copy them forward, so there
    # is some value in every position.
    for row in rows:
        filler = None
        for idx, cell in enumerate(row):
            cell_text = text_type(cell)
            if cell_text.strip():
                filler = cell_text
            else:
                row[idx] = filler
    joined = [
        ' '.join(text_type(cell).strip() if cell else '' for cell in column)
        for column in zip(*rows)
    ]
    return [slugify(h.strip()) for h in joined]
def list(self, **params):
    """Retrieve all leads.

    Returns all leads available to the user, according to the parameters provided.

    :calls: ``get /leads``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which
        represent collection of Leads.
    :rtype: list
    """
    status, headers, leads = self.http_client.get("/leads", params=params)
    return leads
def slice(cls, *args, **kwargs):
    """Take a slice of a DataSetFamily to produce a dataset
    indexed by asset and date.

    Parameters
    ----------
    *args
    **kwargs
        The coordinates to fix along each extra dimension.

    Returns
    -------
    dataset : DataSet
        A regular pipeline dataset indexed by asset and date.

    Notes
    -----
    The extra dimensions coords used to produce the result are available
    under the ``extra_coords`` attribute.
    """
    coords, hash_key = cls._canonical_key(args, kwargs)
    try:
        # Memoized: the same coordinates always yield the same dataset class.
        return cls._slice_cache[hash_key]
    except KeyError:
        new_slice = cls._make_dataset(coords)
        cls._slice_cache[hash_key] = new_slice
        return new_slice
def weighted_random(sequence):
    """Given a sequence of pairs (element, weight) where weight is an
    addable/total-order-comparable (e.g. a number), it returns a random element
    (first item in each pair) given in a non-uniform way given by the weight of
    the element (second item in each pair).

    :param sequence: sequence/iterator of pairs (element, weight)
    :return: any value in the first element of each pair
    """
    if isinstance(sequence, dict):
        sequence = sequence.items()
    cumulative = list(labeled_accumulate(sequence))
    # Draw a point uniformly in [0, total weight) and find the bucket it lands in.
    threshold = random.random() * cumulative[-1][1]
    for element, running_total in cumulative:
        if threshold < running_total:
            return element
    # Unreachable in practice: the threshold is strictly below the final total.
    return None
def all_pairs_normalized_distances_reference(X):
    """Reference implementation of normalized all-pairs distance, used
    for testing the more efficient implementation above for equivalence."""
    n_samples, n_cols = X.shape
    # Mean squared difference between samples; inf marks pairs that share
    # no observed (non-NaN) columns.
    D = np.full((n_samples, n_samples), np.inf, dtype="float32")
    for i in range(n_samples):
        diffs = X - X[i, :].reshape((1, n_cols))
        missing = np.isnan(diffs)
        valid = missing.sum(axis=1) < n_cols
        D[i, valid] = np.nanmean(diffs[valid, :] ** 2, axis=1)
    return D
def create_gemini_db_orig(gemini_vcf, data, gemini_db=None, ped_file=None):
    """Original GEMINI specific data loader, only works with hg19/GRCh37.

    :param gemini_vcf: path to the input VCF to load
    :param data: bcbio sample data dictionary (provides config, tools, resources)
    :param gemini_db: optional output database path; defaults to <vcf basename>.db
    :param ped_file: optional PED pedigree file to amend into the database
    :returns: path to the gemini database, or None when the VCF has no variants
    """
    if not gemini_db:
        gemini_db = "%s.db" % utils.splitext_plus(gemini_vcf)[0]
    # Skip all work when the database already exists.
    if not utils.file_exists(gemini_db):
        # An empty VCF produces no database at all.
        if not vcfutils.vcf_has_variants(gemini_vcf):
            return None
        with file_transaction(data, gemini_db) as tx_gemini_db:
            gemini = config_utils.get_program("gemini", data["config"])
            load_opts = ""
            if "gemini_allvariants" not in dd.get_tools_on(data):
                load_opts += " --passonly"
            # For small test files, skip gene table loading which takes a long time
            if _is_small_vcf(gemini_vcf):
                load_opts += " --skip-gene-tables"
            if "/test_automated_output/" in gemini_vcf:
                load_opts += " --test-mode"
            # Skip CADD or gerp-bp if neither are loaded
            gemini_dir = install.get_gemini_dir(data)
            for skip_cmd, check_file in [("--skip-cadd", "whole_genome_SNVs.tsv.compressed.gz")]:
                if not os.path.exists(os.path.join(gemini_dir, check_file)):
                    load_opts += " %s" % skip_cmd
            # skip gerp-bp which slows down loading
            load_opts += " --skip-gerp-bp "
            num_cores = data["config"]["algorithm"].get("num_cores", 1)
            tmpdir = os.path.dirname(tx_gemini_db)
            eanns = _get_effects_flag(data)
            # Apply custom resource specifications, allowing use of alternative annotation_dir
            resources = config_utils.get_resources("gemini", data["config"])
            gemini_opts = " ".join([str(x) for x in resources["options"]]) if resources.get("options") else ""
            exports = utils.local_path_export()
            # NOTE(review): the command template below is filled via locals(),
            # so renaming any of the local variables above silently breaks it.
            cmd = ("{exports} {gemini} {gemini_opts} load {load_opts} "
                   "-v {gemini_vcf} {eanns} --cores {num_cores} "
                   "--tempdir {tmpdir} {tx_gemini_db}")
            cmd = cmd.format(**locals())
            do.run(cmd, "Create gemini database for %s" % gemini_vcf, data)
            if ped_file:
                # Attach pedigree/sample metadata after the initial load.
                cmd = [gemini, "amend", "--sample", ped_file, tx_gemini_db]
                do.run(cmd, "Add PED file to gemini database", data)
    return gemini_db
def get_device_index_based_on_prompt(self, prompt):
    """Return the device index in the chain based on prompt.

    :param prompt: prompt string to match against each device's ``prompt``
    :returns: index of the first device whose prompt matches, or None when
        no device in the chain matches
    """
    conn_info = ""
    # enumerate avoids the original's redundant O(n) ``self.devices.index``
    # re-scan and removes the ambiguous trailing ``else`` on the loop.
    for index, device in enumerate(self.devices):
        # Build up the connection-chain description as we walk the devices.
        conn_info += str(device) + "->"
        if device.prompt == prompt:
            self.connection.log("Connected: {}".format(conn_info))
            return index
    # No device in the chain advertised the requested prompt.
    return None
def add_blacklisted_plugins(self, plugins):
    """add blacklisted plugins.

    `plugins` may be a single object or iterable.
    """
    # Normalize the input to a list before extending the blacklist.
    normalized = util.return_list(plugins)
    self.blacklisted_plugins.extend(normalized)
def _worker_queue_scheduled_tasks(self):
    """Helper method that takes due tasks from the SCHEDULED queue and puts
    them in the QUEUED queue for execution. This should be called
    periodically.
    """
    # Only consider queues this worker is configured to process.
    queues = set(self._filter_queues(self.connection.smembers(self._key(SCHEDULED))))
    now = time.time()
    for queue in queues:
        # Move due items from the SCHEDULED queue to the QUEUED queue. If
        # items were moved, remove the queue from the scheduled set if it
        # is empty, and add it to the queued set so the task gets picked
        # up. If any unique tasks are already queued, don't update their
        # queue time (because the new queue time would be later).
        result = self.scripts.zpoppush(
            self._key(SCHEDULED, queue),
            self._key(QUEUED, queue),
            self.config['SCHEDULED_TASK_BATCH_SIZE'],
            now,
            now,
            if_exists=('noupdate',),
            on_success=('update_sets', queue, self._key(SCHEDULED), self._key(QUEUED)),
        )
        self.log.debug('scheduled tasks', queue=queue, qty=len(result))
        # XXX: ideally this would be in the same pipeline, but we only want
        # to announce if there was a result.
        if result:
            self.connection.publish(self._key('activity'), queue)
            self._did_work = True
def _ltu32(ins):
    """Compares & pops top 2 operands out of the stack, and checks
    if the 1st operand < 2nd operand (top of the stack).
    Pushes 0 if False, 1 if True.

    32 bit unsigned version
    """
    op1, op2 = tuple(ins.quad[2:])
    # Swap operand order when only the second operand is a temporary.
    swap_operands = op1[0] != 't' and not is_int(op1) and op2[0] == 't'
    output = _32bit_oper(op1, op2, swap_operands)
    output.extend([
        'call __SUB32',
        'sbc a, a',
        'push af',
    ])
    REQUIRES.add('sub32.asm')
    return output
def only(self, *args, **kwargs):
    """Override default implementation to ensure that we *always* include the
    `publishing_is_draft` field when `only` is invoked, to avoid eternal
    recursion errors if `only` is called then we check for this item
    attribute in our custom `iterator`.

    Discovered the need for this by tracking down an eternal recursion
    error in the `only` query performed in
    fluent_pages.urlresolvers._get_pages_of_type
    """
    if 'publishing_is_draft' in args:
        fields = args
    else:
        fields = args + ('publishing_is_draft',)
    return super(PublishingQuerySet, self).only(*fields, **kwargs)
def get_vulnerability_functions_04(fname):
    """Parse the vulnerability model in NRML 0.4 format.

    :param fname:
        path of the vulnerability file
    :returns:
        a dictionary imt, taxonomy -> vulnerability function + vset
    """
    categories = dict(assetCategory=set(), lossCategory=set(),
                      vulnerabilitySetID=set())
    imts = set()
    taxonomies = set()
    vf_dict = {}  # imt, taxonomy -> vulnerability function
    for vset in nrml.read(fname).vulnerabilityModel:
        categories['assetCategory'].add(vset['assetCategory'])
        categories['lossCategory'].add(vset['lossCategory'])
        categories['vulnerabilitySetID'].add(vset['vulnerabilitySetID'])
        IML = vset.IML
        imt_str = IML['IMT']
        # NOTE(review): '~' appears to be the node-text extraction operator of
        # the nrml node objects -- confirm against openquake's node API.
        imls = ~IML
        imts.add(imt_str)
        for vfun in vset.getnodes('discreteVulnerability'):
            taxonomy = vfun['vulnerabilityFunctionID']
            # Each taxonomy may appear at most once across all vsets.
            if taxonomy in taxonomies:
                raise InvalidFile(
                    'Duplicated vulnerabilityFunctionID: %s: %s, line %d' %
                    (taxonomy, fname, vfun.lineno))
            taxonomies.add(taxonomy)
            with context(fname, vfun):
                loss_ratios = ~vfun.lossRatio
                coefficients = ~vfun.coefficientsVariation
            # Loss ratios and coefficients must align 1:1 with the IMLs.
            if len(loss_ratios) != len(imls):
                raise InvalidFile(
                    'There are %d loss ratios, but %d imls: %s, line %d' %
                    (len(loss_ratios), len(imls), fname,
                     vfun.lossRatio.lineno))
            if len(coefficients) != len(imls):
                raise InvalidFile(
                    'There are %d coefficients, but %d imls: %s, line %d' %
                    (len(coefficients), len(imls), fname,
                     vfun.coefficientsVariation.lineno))
            with context(fname, vfun):
                vf_dict[imt_str, taxonomy] = scientific.VulnerabilityFunction(
                    taxonomy, imt_str, imls, loss_ratios, coefficients,
                    vfun['probabilisticDistribution'])
    # Collapse the set IDs into a single identifier for the whole model.
    categories['id'] = '_'.join(sorted(categories['vulnerabilitySetID']))
    del categories['vulnerabilitySetID']
    return vf_dict, categories
def decimate(x, q=10, n=4, k=0.8, filterfun=ss.cheby1):
    """scipy.signal.decimate like downsampling using filtfilt instead of lfilter,
    and filter coeffs from butterworth or chebyshev type 1.

    Parameters
    ----------
    x : numpy.ndarray
        Array to be downsampled along last axis.
    q : int
        Downsampling factor.
    n : int
        Filter order.
    k : float
        Aliasing filter critical frequency Wn will be set as Wn = k / q.
    filterfun : function
        `scipy.signal.filter_design.cheby1` or
        `scipy.signal.filter_design.butter` function

    Returns
    -------
    numpy.ndarray
        Array of downsampled signal.

    Raises
    ------
    TypeError
        If `q` is not an integer.
    Exception
        If `filterfun` is neither ``ss.butter`` nor ``ss.cheby1``.
    """
    if not isinstance(q, int):
        raise TypeError("q must be an integer")
    if n is None:
        n = 1
    if filterfun == ss.butter:
        b, a = filterfun(n, k / q)
    elif filterfun == ss.cheby1:
        # 0.05 dB of passband ripple for the Chebyshev type 1 design.
        b, a = filterfun(n, 0.05, k / q)
    else:
        raise Exception('only ss.butter or ss.cheby1 supported')
    try:
        y = ss.filtfilt(b, a, x)
    except ValueError:
        # Old scipy (< 0.9.0) could not filter a multidimensional array in a
        # single call; fall back to filtering row by row.  (The original used
        # a bare ``except:`` here, which also swallowed unrelated errors.)
        y = np.array([ss.filtfilt(b, a, row) for row in x])
    # Downsample along the last axis for any dimensionality.  The original
    # tried ``y[:, ::q]`` first (axis 1), which contradicted the documented
    # "last axis" contract for inputs with more than two dimensions.
    return y[..., ::q]
def vectors_from_fn(self, fn: str):
    """Run through a single background audio file, overlaying with wake words.

    Generates (mfccs, target) where mfccs is a series of mfcc values and
    target is a single integer classification of the target network output
    for that chunk.
    """
    audio = load_audio(fn)
    audio_volume = self.calc_volume(audio)
    # Randomly attenuate the background to 0.4x--0.9x of its volume.
    audio_volume *= 0.4 + 0.5 * random()
    audio = self.normalize_volume_to(audio, audio_volume)
    self.listener.clear()
    chunked_bg = chunk_audio(audio, self.args.chunk_size)
    chunked_ww = self.chunk_audio_pieces(
        self.generate_wakeword_pieces(audio_volume), self.args.chunk_size)
    for i, (chunk_bg, (chunk_ww, targets)) in enumerate(zip(chunked_bg, chunked_ww)):
        # Mix wake-word audio over the background chunk.
        chunk = self.merge(chunk_bg, chunk_ww, 0.6)
        # Slide the rolling label and audio buffers forward by one chunk.
        self.vals_buffer = np.concatenate((self.vals_buffer[len(targets):], targets))
        self.audio_buffer = np.concatenate((self.audio_buffer[len(chunk):], chunk))
        mfccs = self.listener.update_vectors(chunk)
        percent_overlapping = self.max_run_length(self.vals_buffer, 1) / len(self.vals_buffer)
        if self.vals_buffer[-1] == 0 and percent_overlapping > 0.8:
            # A wake word just ended and dominated the window: positive sample.
            target = 1
        elif percent_overlapping < 0.5:
            target = 0
        else:
            # Ambiguous overlap: skip this chunk entirely.
            continue
        if random() > 1.0 - self.args.save_prob:
            # Occasionally dump the labelled window to disk for debugging.
            name = splitext(basename(fn))[0]
            wav_file = join('debug', 'ww' if target == 1 else 'nww',
                            '{} - {}.wav'.format(name, i))
            save_audio(wav_file, self.audio_buffer)
        yield mfccs, target
def values_for_column(self, column_name, limit=10000):
    """Retrieve some values for the given column.

    Runs a Druid ``topn`` query on *column_name*, ordered by row count,
    and returns up to *limit* distinct values.
    """
    logging.info('Getting values for columns [{}] limited to [{}]'.format(column_name, limit))
    # TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
    if self.fetch_values_from:
        from_dttm = utils.parse_human_datetime(self.fetch_values_from)
    else:
        # No configured start: query from the Unix epoch.
        from_dttm = datetime(1970, 1, 1)
    qry = dict(
        datasource=self.datasource_name,
        granularity='all',
        intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
        aggregations=dict(count=count('count')),
        dimension=column_name,
        metric='count',
        threshold=limit,
    )
    client = self.cluster.get_pydruid_client()
    client.topn(**qry)
    df = client.export_pandas()
    return [row[column_name] for row in df.to_records(index=False)]
def sql_key(self, generation, sql, params, order, result_type, using='default'):
    """Return the specific cache key for the sql query described by the
    pieces of the query and the generation key.
    """
    # These keys will always look pretty opaque.
    suffix = self.keygen.gen_key(sql, params, order, result_type)
    db_alias = settings.DB_CACHE_KEYS[using]
    return '%s_%s_query_%s.%s' % (self.prefix, db_alias, generation, suffix)
def p_lvalue_partselect(self, p):
    # NOTE: the string below is the PLY grammar production (read via
    # __doc__) and must not be changed or reformatted.
    'lvalue : lpartselect'
    # Wrap the part-select in an Lvalue node, propagating the source line number.
    p[0] = Lvalue(p[1], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def end_profiling(profiler, filename, sorting=None):
    """Helper function to stop the profiling process and write out the profiled
    data into the given filename. Before this, sort the stats by the passed sorting.

    :param profiler: An already started profiler (probably by start_profiling).
    :type profiler: cProfile.Profile
    :param filename: The name of the output file to save the profile.
    :type filename: basestring
    :param sorting: The sorting of the statistics passed to the sort_stats function.
    :type sorting: basestring
    :return: None
    :rtype: None

    Start and stop the profiler with:

    >>> profiler = start_profiling()
    >>> # Do something you want to profile
    >>> end_profiling(profiler, "out.txt", "cumulative")
    """
    profiler.disable()
    s = six.StringIO()
    ps = pstats.Stats(profiler, stream=s)
    if sorting is not None:
        # BUG FIX: the original passed sorting straight through, so calling
        # end_profiling() with the default sorting=None handed None to
        # sort_stats and crashed; only sort when a key was actually given.
        ps = ps.sort_stats(sorting)
    ps.print_stats()
    with open(filename, "w+") as f:
        _logger.info("[calculate_ts_features] Finished profiling of time series feature extraction")
        f.write(s.getvalue())
def edit(self, title=github.GithubObject.NotSet, body=github.GithubObject.NotSet, state=github.GithubObject.NotSet, base=github.GithubObject.NotSet):
    """:calls: `PATCH /repos/:owner/:repo/pulls/:number <http://developer.github.com/v3/pulls>`_

    :param title: string
    :param body: string
    :param state: string
    :param base: string
    :rtype: None
    """
    # NotSet is a sentinel distinguishing "leave unchanged" from real values.
    # NOTE(review): the bare ``unicode`` name implies Python 2 (or a compat
    # alias defined elsewhere in this module) -- confirm before porting.
    assert title is github.GithubObject.NotSet or isinstance(title, (str, unicode)), title
    assert body is github.GithubObject.NotSet or isinstance(body, (str, unicode)), body
    assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
    assert base is github.GithubObject.NotSet or isinstance(base, (str, unicode)), base
    post_parameters = dict()
    # Only include fields the caller actually supplied.
    if title is not github.GithubObject.NotSet:
        post_parameters["title"] = title
    if body is not github.GithubObject.NotSet:
        post_parameters["body"] = body
    if state is not github.GithubObject.NotSet:
        post_parameters["state"] = state
    if base is not github.GithubObject.NotSet:
        post_parameters["base"] = base
    headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
    # Refresh this object's attributes from the server's response.
    self._useAttributes(data)
def error_response(self, e):
    """Make response for an IIIFError e.

    Also add compliance header.
    """
    self.add_compliance_header()
    response_args = e.image_server_response(self.api_version)
    return self.make_response(*response_args)
def new(cls, package, slide_part):
    """Create and return a new notes slide part based on the notes master
    and related to both the notes master part and *slide_part*. If no
    notes master is present, create one based on the default template.
    """
    master_part = package.presentation_part.notes_master_part
    part = cls._add_notes_slide_part(package, slide_part, master_part)
    # Populate the new notes slide with placeholders cloned from the master.
    part.notes_slide.clone_master_placeholders(master_part.notes_master)
    return part
def _split_generators(self, dl_manager):
    """Return the test split of Cifar10.

    Args:
        dl_manager: download manager object.

    Returns:
        test split.
    """
    path = dl_manager.download_and_extract(_DOWNLOAD_URL)
    # Single-shard TEST split; the extracted data lives under _DIRNAME.
    return [tfds.core.SplitGenerator(name=tfds.Split.TEST, num_shards=1, gen_kwargs={'data_dir': os.path.join(path, _DIRNAME)})]
def boundary_interaction(self, **kwargs):
    """Apply shoreline, bathymetry and sea-surface interactions to a particle.

    NOTE(review): despite the original docstring ("Returns a list of
    Location4D objects") this function returns None and mutates *particle*
    and *ending* in place -- confirm the intended contract.

    Required kwargs: ``particle``, ``starting``, ``ending``.
    Optional kwargs (forwarded to the shoreline reaction): ``distance``,
    ``angle``, ``azimuth``, ``reverse_azimuth``.
    """
    particle = kwargs.pop('particle')
    starting = kwargs.pop('starting')
    ending = kwargs.pop('ending')
    # shoreline
    if self.useshore:
        intersection_point = self._shoreline.intersect(start_point=starting.point, end_point=ending.point)
        if intersection_point:
            # Set the intersection point.
            # NOTE(review): starting.time + (ending.time - starting.time)
            # simplifies to ending.time -- possibly a leftover from an
            # interpolated-time computation; confirm before changing.
            hitpoint = Location4D(point=intersection_point['point'], time=starting.time + (ending.time - starting.time))
            particle.location = hitpoint
            # This relies on the shoreline to put the particle in water and not on shore.
            resulting_point = self._shoreline.react(start_point=starting, end_point=ending, hit_point=hitpoint, reverse_distance=self.reverse_distance, feature=intersection_point['feature'], distance=kwargs.get('distance'), angle=kwargs.get('angle'), azimuth=kwargs.get('azimuth'), reverse_azimuth=kwargs.get('reverse_azimuth'))
            ending.latitude = resulting_point.latitude
            ending.longitude = resulting_point.longitude
            ending.depth = resulting_point.depth
            logger.debug("%s - hit the shoreline at %s. Setting location to %s." % (particle.logstring(), hitpoint.logstring(), ending.logstring()))
    # bathymetry
    if self.usebathy:
        # Settled particles stay on the bottom; no further bathymetry checks.
        if not particle.settled:
            bintersect = self._bathymetry.intersect(start_point=starting, end_point=ending)
            if bintersect:
                pt = self._bathymetry.react(type='reverse', start_point=starting, end_point=ending)
                logger.debug("%s - hit the bottom at %s. Setting location to %s." % (particle.logstring(), ending.logstring(), pt.logstring()))
                ending.latitude = pt.latitude
                ending.longitude = pt.longitude
                ending.depth = pt.depth
    # sea-surface
    if self.usesurface:
        # Positive depth means above the surface; clamp back to the surface.
        if ending.depth > 0:
            logger.debug("%s - rose out of the water. Setting depth to 0." % particle.logstring())
            ending.depth = 0
    particle.location = ending
    return
def commit(self, client=None):
    """Send saved log entries as a single API call.

    :type client: :class:`~google.cloud.logging.client.Client` or ``NoneType``
    :param client: the client to use. If not passed, falls back to the
        ``client`` stored on the current batch.
    """
    target_client = client if client is not None else self.client
    kwargs = {"logger_name": self.logger.full_name}
    if self.resource is not None:
        kwargs["resource"] = self.resource._to_dict()
    if self.logger.labels is not None:
        kwargs["labels"] = self.logger.labels
    payload = [entry.to_api_repr() for entry in self.entries]
    target_client.logging_api.write_entries(payload, **kwargs)
    # Drain in place so existing references to the entry list stay valid.
    del self.entries[:]
def trim_prefix(text, nchr):
    """Trim characters off of the beginnings of text lines.

    Parameters
    ----------
    text : str
        The text to be trimmed, with newlines (\\n) separating lines
    nchr : int
        The number of spaces to trim off the beginning of a line if
        it starts with that many spaces

    Returns
    -------
    text : str
        The trimmed text
    """
    prefix = ' ' * nchr
    trimmed = [
        line[nchr:] if line.startswith(prefix) else line
        for line in text.split('\n')
    ]
    return '\n'.join(trimmed)
def mount_http_adapter(self, protocol=None, max_retries=None, status_forcelist=None, host=None):
    """Mount an HTTP adapter to the
    :class:`ArchiveSession <ArchiveSession>` object.

    :type protocol: str
    :param protocol: HTTP protocol to mount your adapter to (e.g. 'https://').

    :type max_retries: int, object
    :param max_retries: The number of times to retry a failed request.
        This can also be an `urllib3.Retry` object.

    :type status_forcelist: list
    :param status_forcelist: A list of status codes (as int's) to retry on.

    :type host: str
    :param host: The host to mount your adapter to.
    """
    protocol = protocol if protocol else self.protocol
    host = host if host else 'archive.org'
    if max_retries is None:
        # Fall back to any previously-configured value, defaulting to 3.
        max_retries = self.http_adapter_kwargs.get('max_retries', 3)
    if not status_forcelist:
        status_forcelist = [500, 501, 502, 503, 504]
    if max_retries and isinstance(max_retries, (int, float)):
        # Promote a plain retry count to a full Retry policy; redirects are
        # deliberately never retried.
        max_retries = Retry(total=max_retries, connect=max_retries, read=max_retries, redirect=False, method_whitelist=Retry.DEFAULT_METHOD_WHITELIST, status_forcelist=status_forcelist, backoff_factor=1)
    self.http_adapter_kwargs['max_retries'] = max_retries
    max_retries_adapter = HTTPAdapter(**self.http_adapter_kwargs)
    # Don't mount on s3.us.archive.org, only archive.org!
    # IA-S3 requires a more complicated retry workflow.
    self.mount('{0}//{1}'.format(protocol, host), max_retries_adapter)
def rtt_get_num_up_buffers(self):
    """After starting RTT, get the current number of up buffers.

    Args:
        self (JLink): the ``JLink`` instance

    Returns:
        The number of configured up buffers on the target.

    Raises:
        JLinkRTTException if the underlying JLINK_RTTERMINAL_Control call fails.
    """
    cmd = enums.JLinkRTTCommand.GETNUMBUF
    # Renamed from ``dir`` to avoid shadowing the ``dir`` builtin.
    direction = ctypes.c_int(enums.JLinkRTTDirection.UP)
    return self.rtt_control(cmd, direction)
def detect_index_renamings(self, table_differences):
    """Try to find indexes that only changed their name.

    Rename operations may be cheaper than add/drop; however, ambiguities
    between different possibilities should not lead to renaming at all.

    :type table_differences: TableDiff
    """
    rename_candidates = OrderedDict()
    # Gather possible rename candidates by comparing each added and removed
    # index based on semantics (diff_index returning falsy means "equal").
    for added_index_name, added_index in table_differences.added_indexes.items():
        for removed_index in table_differences.removed_indexes.values():
            if not self.diff_index(added_index, removed_index):
                candidates = rename_candidates.setdefault(added_index.get_name(), [])
                candidates.append((removed_index, added_index, added_index_name))
    for candidate_indexes in rename_candidates.values():
        # If the current rename candidate contains exactly one semantically
        # equal index, we can safely rename it.  Otherwise it is unclear if a
        # rename action is really intended, therefore we let those ambiguous
        # indexes be added/dropped.
        if len(candidate_indexes) == 1:
            removed_index, added_index, _ = candidate_indexes[0]
            removed_index_name = removed_index.get_name().lower()
            added_index_name = added_index.get_name().lower()
            # Fixed idiom: the original used ``not x in y``.
            if removed_index_name not in table_differences.renamed_indexes:
                table_differences.renamed_indexes[removed_index_name] = added_index
                del table_differences.added_indexes[added_index_name]
                del table_differences.removed_indexes[removed_index_name]
def aggregate(self, rankings, epsilon, max_iters):
    """Minorization-Maximization algorithm which returns an estimate of the
    ground-truth parameters, gamma, for the given data.

    Parameters:
        rankings: set of rankings to aggregate
        epsilon: convergence condition value, set to None for iteration only
        max_iters: maximum number of iterations of MM algorithm
    """
    # Compute the matrix w of pairwise wins: w[i][j] counts the rankings
    # that place alternative i above alternative j.
    w = np.zeros((self.m, self.m))
    for ranking in rankings:
        localw = np.zeros((self.m, self.m))
        for ind1, alt1 in enumerate(self.alts):
            for ind2, alt2 in enumerate(self.alts):
                if ind1 == ind2:
                    continue
                alt1_rank = util.get_index_nested(ranking, alt1)
                alt2_rank = util.get_index_nested(ranking, alt2)
                if alt1_rank < alt2_rank:  # alt 1 is ranked higher
                    localw[ind1][ind2] = 1
        w += localw
    W = w.sum(axis=1)
    # gamma_t is the value of gamma at time = t;
    # gamma_t1 is the value of gamma at time t = t + 1 (the next iteration).
    # Initial arbitrary value for gamma:
    gamma_t = np.ones(self.m) / self.m
    gamma_t1 = np.empty(self.m)
    for f in range(max_iters):
        for i in range(self.m):
            # Denominator of the MM update for alternative i.
            s = 0
            for j in range(self.m):
                if j != i:
                    s += (w[j][i] + w[i][j]) / (gamma_t[i] + gamma_t[j])
            gamma_t1[i] = W[i] / s
        gamma_t1 /= np.sum(gamma_t1)
        if epsilon is not None and np.all(np.absolute(gamma_t1 - gamma_t) < epsilon):
            # Convergence reached before max_iters.
            alt_scores = {cand: gamma_t1[ind] for ind, cand in enumerate(self.alts)}
            self.create_rank_dicts(alt_scores)
            return gamma_t1
        # BUG FIX: the original did ``gamma_t = gamma_t1``, aliasing the two
        # arrays.  Every later in-place write to gamma_t1 then also mutated
        # gamma_t, corrupting the update and making the convergence test
        # compare an array with itself (always "converged").  Copy instead.
        gamma_t = gamma_t1.copy()
    alt_scores = {cand: gamma_t1[ind] for ind, cand in enumerate(self.alts)}
    self.create_rank_dicts(alt_scores)
    return gamma_t1
def download_file(self, project, path):
    """Read file of a project and download it.

    :param project: A project object
    :param path: The path of the file in the project
    :returns: A file stream
    :raises aiohttp.web.HTTPNotFound: when the compute node reports 404
    """
    url = self._getUrl("/projects/{}/files/{}".format(project.id, path))
    # NOTE(review): old-style (yield from) coroutine; callers must drive it
    # with ``yield from`` / ``await``.
    response = yield from self._session().request("GET", url, auth=self._auth)
    if response.status == 404:
        raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path))
    return response
def process_json_response(self, response):
    """For a json response, check if there was any error and throw exception.
    Otherwise, create a housecanary.response.Response.
    """
    response_json = response.json()
    # handle errors
    code_key = "code"
    if code_key in response_json and response_json[code_key] != constants.HTTP_CODE_OK:
        code = response_json[code_key]
        # Prefer a human-readable message field; fall back to the whole body.
        message = response_json
        if "message" in response_json:
            message = response_json["message"]
        elif "code_description" in response_json:
            message = response_json["code_description"]
        # Map HTTP-style codes onto specific exception types.
        if code == constants.HTTP_FORBIDDEN:
            raise housecanary.exceptions.UnauthorizedException(code, message)
        if code == constants.HTTP_TOO_MANY_REQUESTS:
            raise housecanary.exceptions.RateLimitException(code, message, response)
        else:
            raise housecanary.exceptions.RequestException(code, message)
    request_url = response.request.url
    # Endpoint name is derived from the request URL for the Response wrapper.
    endpoint_name = self._parse_endpoint_name_from_url(request_url)
    return Response.create(endpoint_name, response_json, response)
def _get_seq2c_options(data):
    """Get adjustable, through resources, or default options for seq2c.

    Options configured under the ``seq2c`` resources entry are split between
    the cov2lr and lr2gene sub-commands based on which flags each accepts.

    :param data: bcbio sample dictionary containing a ``config`` entry
    :returns: tuple of (cov2lr options, lr2gene options) as flat string lists
    """
    cov2lr_possible_opts = ["-F"]
    defaults = {}
    ropts = config_utils.get_resources("seq2c", data["config"]).get("options", [])
    # Fix: the message previously applied "%" to a format string with no
    # placeholder, which raised TypeError instead of showing the options.
    assert len(ropts) % 2 == 0, "Expect even number of options for seq2c: %s" % ropts
    defaults.update(dict(tz.partition(2, ropts)))
    cov2lr_out, lr2gene_out = [], []
    for k, v in defaults.items():
        if k in cov2lr_possible_opts:
            cov2lr_out += [str(k), str(v)]
        else:
            lr2gene_out += [str(k), str(v)]
    return cov2lr_out, lr2gene_out
def set_stream_class_lists(self, session_id, payload):
    """Change layout classes for OpenTok streams.

    The layout classes define how the streams are displayed in the layout
    of a composed OpenTok archive.

    :param String session_id: The ID of the session whose streams are updated
    :param List payload: A list of dicts, each with an 'id' entry (stream ID
        string) and a 'layoutClassList' entry (list of class-name strings).
    :raises SetStreamClassError: on a 400 response
    :raises AuthError: on a 403 response
    :raises RequestError: on any other non-200 response
    """
    endpoint = self.endpoints.set_stream_class_lists_url(session_id)
    body = json.dumps({'items': payload})
    response = requests.put(
        endpoint,
        data=body,
        headers=self.json_headers(),
        proxies=self.proxies,
        timeout=self.timeout,
    )
    status = response.status_code
    if status == 200:
        return
    if status == 400:
        raise SetStreamClassError(
            'Invalid request. This response may indicate that data in your request data '
            'is invalid JSON. It may also indicate that you passed in invalid layout options.'
        )
    if status == 403:
        raise AuthError('Authentication error.')
    raise RequestError('OpenTok server error.', status)
def evaluate_logical_form(self, logical_form: str, target_list: List[str]) -> bool:
    """Evaluate a logical form against the target values using the official
    WikiTableQuestions evaluation script.

    Returns True iff executing ``logical_form`` produces the target list.
    """
    normalized_targets = [
        TableQuestionContext.normalize_string(value) for value in target_list
    ]
    target_values = evaluator.to_value_list(normalized_targets)
    try:
        denotation = self.execute(logical_form)
    except ExecutionError:
        logger.warning(f'Failed to execute: {logical_form}')
        return False
    # The evaluator expects a list of string values.
    if not isinstance(denotation, list):
        denotation = [denotation]
    denotation_values = evaluator.to_value_list([str(item) for item in denotation])
    return evaluator.check_denotation(target_values, denotation_values)
def find_tasks(self, overrides):
    """Find the custom tasks and record the associated image with each task."""
    found = self.default_tasks()
    configuration = self.collector.configuration
    # Layer in per-image task definitions from the configuration.
    for image in list(configuration["images"].keys()):
        joined = "images.{0}.tasks".format(image)
        path = configuration.path(["images", image, "tasks"], joined=joined)
        found.update(configuration.get(path, {}))
    # Explicit overrides win over everything else.
    if overrides:
        found.update(overrides)
    self.tasks = found
    return found
def find(cls, db, *args, **kwargs):
    """Return a :class:`MongoResultSet` over the matching documents.

    Example::

        items = Item.find(db, {'title': u'Hello'})

    .. note::
        The arguments are those of pymongo collection's `find` method.
        Pass the query spec as a dictionary in the first positional
        argument — not as keyword arguments.
    """
    cls._ensure_indexes(db)
    cursor = db[cls.collection].find(*args, **kwargs)
    wrap = partial(cls.wrap_incoming, db=db)
    return MongoResultSet(cursor, wrap)
def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):
    """Mark ``pid`` as obsoleted by ``obsoletedByPid``.

    See Also: setObsoletedByResponse()

    Args:
        pid: identifier of the object being obsoleted
        obsoletedByPid: identifier of the superseding object
        serialVersion: serial version of the system metadata being updated
        vendorSpecific: optional vendor-specific headers

    Returns:
        bool: result of parsing the boolean response
    """
    return self._read_boolean_response(
        self.setObsoletedByResponse(pid, obsoletedByPid, serialVersion, vendorSpecific)
    )
def set_ortho(self, l, r, b, t, n, f):
    """Set an orthographic projection transform.

    Parameters
    ----------
    l : float
        Left clipping-plane coordinate.
    r : float
        Right clipping-plane coordinate.
    b : float
        Bottom clipping-plane coordinate.
    t : float
        Top clipping-plane coordinate.
    n : float
        Near clipping-plane distance.
    f : float
        Far clipping-plane distance.
    """
    # Delegate matrix construction to the transforms helper and store it.
    self.matrix = transforms.ortho(l, r, b, t, n, f)
def value(self):
    """Utility method to retrieve Response Object information."""
    # Unwrap Status enum members down to their raw value.
    code = self.code.value if isinstance(self.code, Status) else self.code
    return {'code': code, 'errors': self.errors}
def add_component(self, component):
    '''Adds a Component to an Entity.'''
    try:
        # An equal component already exists: swap it for the new instance.
        existing = self._components.index(component)
        self._components[existing] = component
    except ValueError:
        self._components.append(component)
def has_channel(val: Any) -> bool:
    """Returns whether the value has a channel representation.

    Returns:
        The result of ``val._has_channel_()`` if defined and not
        NotImplemented.  Otherwise the result of ``has_mixture_channel`` if
        it is not NotImplemented and truthy.  Otherwise, whether a
        ``_channel_`` (or delegate) produces a non-default value.  False
        when none of these applies.
    """
    getter = getattr(val, '_has_channel_', None)
    if getter is not None:
        outcome = getter()
        if outcome is not NotImplemented:
            return outcome
    # NOTE: a falsy (but implemented) mixture result intentionally falls
    # through to the _channel_ check below.
    via_mixture = has_mixture_channel(val)
    if via_mixture is not NotImplemented and via_mixture:
        return via_mixture
    return channel(val, None) is not None
def setup(self, analysis_project_name, remote_project_name, incident_id, zone,
          boot_disk_size, cpu_cores, remote_instance_name=None, disk_names=None,
          all_disks=False, image_project="ubuntu-os-cloud",
          image_family="ubuntu-1604-lts"):
    """Sets up a Google cloud collector.

    Creates and starts an analysis VM in the analysis project and selects
    disks to copy from the remote project.

    If disk_names is specified, the corresponding disks are copied from the
    project, ignoring disks belonging to any specific instances.

    If remote_instance_name is specified, two behaviors are possible:
    - with no other parameters, the instance's boot disk is selected;
    - with all_disks set to True, all disks attached to the instance are
      selected.

    disk_names takes precedence over remote_instance_name.

    Args:
        analysis_project_name: Name of the project that contains the
            analysis VM (string).
        remote_project_name: Name of the remote project the disks are
            copied from (string).
        incident_id: Incident ID used to name the analysis VM (string).
        zone: Zone in which new resources are created (string).
        boot_disk_size: Size of the analysis VM boot disk in GB (float).
        cpu_cores: Number of CPU cores for the analysis VM.
        remote_instance_name: Name of the instance in the remote project
            holding the disks to copy (string).
        disk_names: Comma-separated disk names to copy (string).
        all_disks: Copy all disks attached to the source instance (bool).
        image_project: Project hosting the analysis VM image.
        image_family: Image family used to create the analysis VM.
    """
    disk_names = disk_names.split(",") if disk_names else []
    self.analysis_project = libcloudforensics.GoogleCloudProject(
        analysis_project_name, default_zone=zone)
    remote_project = libcloudforensics.GoogleCloudProject(remote_project_name)
    # At least one disk-selection mechanism must be supplied.
    if not (remote_instance_name or disk_names):
        self.state.add_error(
            "You need to specify at least an instance name or disks to copy",
            critical=True)
        return
    self.incident_id = incident_id
    analysis_vm_name = "gcp-forensics-vm-{0:s}".format(incident_id)
    print("Your analysis VM will be: {0:s}".format(analysis_vm_name))
    print("Complimentary gcloud command:")
    print("gcloud compute ssh --project {0:s} {1:s} --zone {2:s}".format(
        analysis_project_name, analysis_vm_name, zone))
    try:
        # TODO: Make creating an analysis VM optional
        # pylint: disable=too-many-function-args
        self.analysis_vm, _ = libcloudforensics.start_analysis_vm(
            self.analysis_project.project_id, analysis_vm_name, zone,
            boot_disk_size, int(cpu_cores), attach_disk=None,
            image_project=image_project, image_family=image_family)
        # disk_names takes precedence over remote_instance_name.
        if disk_names:
            for name in disk_names:
                try:
                    self.disks_to_copy.append(remote_project.get_disk(name))
                except RuntimeError:
                    self.state.add_error(
                        "Disk '{0:s}' was not found in project {1:s}".format(
                            name, remote_project_name), critical=True)
                    break
        elif remote_instance_name:
            remote_instance = remote_project.get_instance(remote_instance_name)
            if all_disks:
                self.disks_to_copy = [
                    remote_project.get_disk(disk_name)
                    for disk_name in remote_instance.list_disks()]
            else:
                self.disks_to_copy = [remote_instance.get_boot_disk()]
        if not self.disks_to_copy:
            self.state.add_error("Could not find any disks to copy",
                                 critical=True)
    except AccessTokenRefreshError as err:
        self.state.add_error("Something is wrong with your gcloud access token.")
        self.state.add_error(err, critical=True)
    except ApplicationDefaultCredentialsError as err:
        self.state.add_error(
            "Something is wrong with your Application Default "
            "Credentials. Try running:\n"
            " $ gcloud auth application-default login")
        self.state.add_error(err, critical=True)
    except HttpError as err:
        # Translate the most common HTTP failures into actionable hints.
        if err.resp.status == 403:
            self.state.add_error(
                "Make sure you have the appropriate permissions on the project")
        if err.resp.status == 404:
            self.state.add_error(
                "GCP resource not found. Maybe a typo in the project / instance / "
                "disk name?")
        self.state.add_error(err, critical=True)
def find_mature(x, y, win=10):
    """Window approach to find hills (peaks) in the expression profile.

    Scans positions from ``x`` in steps of ``win`` and records a peak
    whenever the expression jumps more than 10x over the previously seen
    level.

    :param x: starting position of the scan (int index)
    :param y: expression values, indexable by position
    :param win: window step size
    :returns: summarized peak positions from ``_summarize_peaks``
    """
    previous = min(y)
    peaks = []
    # Fix: range(x, y, win) required y to be an int, but y is indexed below;
    # the scan bound is the length of the profile.
    for pos in range(x, len(y), win):
        if y[pos] > previous * 10:
            previous = y[pos]
            # Fix: lists have no .add(); use append.
            peaks.append(pos)
    # Fix: return the summarized peaks instead of discarding them.
    return _summarize_peaks(peaks)
def set_trim_user(self, trim):
    """Sets the 'trim_user' parameter.  When True, each tweet returned in a
    timeline includes a user object with only the author's numerical ID.

    :param trim: Boolean triggering the usage of the parameter
    :raises: TwitterSearchException
    """
    if isinstance(trim, bool):
        self.arguments.update({'trim_user': 'true' if trim else 'false'})
        return
    raise TwitterSearchException(1008)
def add(self, **kwargs):
    """Adds a new element at the end of the list and returns it.  Keyword
    arguments may be used to initialize the element."""
    element = self._message_descriptor._concrete_class(**kwargs)
    element._SetListener(self._message_listener)
    self._values.append(element)
    listener = self._message_listener
    # Only notify the listener when it is not already marked dirty.
    if not listener.dirty:
        listener.Modified()
    return element
def _GetServerCipher(self):
    """Returns the cipher for self.server_name."""
    if self.server_cipher is not None:
        # Reuse the cached cipher while it is younger than one day.
        expiry = self.server_cipher_age + rdfvalue.Duration("1d")
        if rdfvalue.RDFDatetime.Now() < expiry:
            return self.server_cipher
    remote_public_key = self._GetRemotePublicKey(self.server_name)
    self.server_cipher = Cipher(self.common_name, self.private_key,
                                remote_public_key)
    self.server_cipher_age = rdfvalue.RDFDatetime.Now()
    return self.server_cipher
def transform_polygon(polygon, matrix):
    """Transform a polygon by a 2D homogenous transform.

    Parameters
    ----------
    polygon : shapely.geometry.Polygon
        2D polygon to be transformed.
    matrix : (3, 3) float
        2D homogenous transformation.

    Returns
    -------
    result : shapely.geometry.Polygon
        Polygon transformed by matrix.
    """
    matrix = np.asanyarray(matrix, dtype=np.float64)
    # A sequence of polygons is transformed element-wise with the matching
    # sequence of transforms.
    if util.is_sequence(polygon):
        return [transform_polygon(p, t) for p, t in zip(polygon, matrix)]
    # Transform the outer boundary.
    exterior = transform_points(np.array(polygon.exterior.coords), matrix)[:, :2]
    # Transform each interior ring (hole).
    interiors = [
        transform_points(np.array(ring.coords), matrix)[:, :2]
        for ring in polygon.interiors
    ]
    return Polygon(shell=exterior, holes=interiors)
def main():
    """Get arguments from get_args and create/send the number of indications
    defined.  Each indication is created from a template."""
    opts, argparser = get_args()
    start_time = time()
    url = opts.url
    # A port embedded in the url conflicts with an explicit -p option.
    if re.search(r":([0-9]+)$", opts.url):
        if opts.listenerPort is not None:
            argparser.error('Simultaneous url with port and -p port option '
                            'invalid')
    else:
        # Default to port 5000 when neither the url nor -p supplies one.
        if opts.listenerPort is None:
            url = '%s:%s' % (opts.url, 5000)
        else:
            url = '%s:%s' % (opts.url, opts.listenerPort)
    if opts.verbose:
        print('url=%s' % url)
    cim_protocol_version = '1.4'
    # requests module combines the verification flag and certfile attribute.
    # If verify=False, there is no verification of the server cert.  If
    # verify=<file_name or dir name> it is the directory of the cert to
    # use for verification.
    verification = False if opts.cert_file is None else opts.cert_file
    headers = {'content-type': 'application/xml; charset=utf-8',
               'CIMExport': 'MethodRequest',
               'CIMExportMethod': 'ExportIndication',
               'Accept-Encoding': 'Identity',
               'CIMProtocolVersion': cim_protocol_version}
    # Includes accept-encoding because of requests issue; it supplies one if
    # we don't.  TODO try None
    delta_time = time() - start_time
    rand_base = randint(1, 1000)
    timer = ElapsedTimer()
    source_id = 'send_indications.py'
    for i in range(opts.deliver):
        msg_id = '%s' % (i + rand_base)
        payload = create_indication_data(msg_id, i, source_id, delta_time,
                                         cim_protocol_version)
        if opts.verbose:
            print('headers=%s\n\npayload=%s' % (headers, payload))
        success = send_indication(url, headers, payload, opts.verbose,
                                  verify=verification)
        if success:
            if opts.verbose:
                print('sent # %s' % i)
            else:
                # Quiet mode: emit a progress dot every 100 indications.
                if i % 100 == 0:
                    sys.stdout.write('.')
                    sys.stdout.flush()
        else:
            print('Error return from send. Terminating.')
            return
    endtime = timer.elapsed_sec()
    print('Sent %s in %s sec or %.2f ind/sec' % (opts.deliver, endtime,
                                                 (opts.deliver / endtime)))
def write_input_files(pst):
    """Write parameter values to model input files using template files with
    current parameter values (stored in Pst.parameter_data.parval1).

    This is a simple implementation of what PEST does; it does not handle
    all the special cases — user beware.

    Parameters
    ----------
    pst : (pyemu.Pst)
        a Pst instance
    """
    par = pst.parameter_data
    # Apply scale and offset to obtain the values actually written out.
    par.loc[:, "parval1_trans"] = (par.parval1 * par.scale) + par.offset
    for tpl_file, in_file in zip(pst.template_files, pst.input_files):
        write_to_template(par.parval1_trans, tpl_file, in_file)
def _proxy(self):
    """Generate an instance context for the instance; all instance actions
    are proxied to the context.

    :returns: PhoneNumberContext for this PhoneNumberInstance
    :rtype: twilio.rest.trunking.v1.trunk.phone_number.PhoneNumberContext
    """
    # Lazily build and cache the context on first use.
    if self._context is None:
        solution = self._solution
        self._context = PhoneNumberContext(
            self._version,
            trunk_sid=solution['trunk_sid'],
            sid=solution['sid'],
        )
    return self._context
def is_callable_type(tp):
    """Test if the type is a generic callable type, including subclasses,
    excluding non-generic types and plain callables.

    Examples::

        is_callable_type(int) == False
        is_callable_type(type) == False
        is_callable_type(Callable) == True
        is_callable_type(Callable[..., int]) == True
        is_callable_type(Callable[[int, int], Iterable[str]]) == True
        class MyClass(Callable[[int], int]): ...
        is_callable_type(MyClass) == True

    For more general tests use callable(); for a more precise test
    (excluding subclasses) use::

        get_origin(tp) is collections.abc.Callable  # Callable prior to Python 3.7
    """
    if not NEW_TYPING:
        return type(tp) is CallableMeta
    if tp is Callable:
        return True
    if isinstance(tp, _GenericAlias) and tp.__origin__ is collections.abc.Callable:
        return True
    return (
        isinstance(tp, type)
        and issubclass(tp, Generic)
        and issubclass(tp, collections.abc.Callable)
    )
def echo_detected_environment(env_name, env_vars):
    """Print a helper note about how the environment was determined."""
    override_var = 'DEPLOY_ENVIRONMENT'
    LOGGER.info("")
    if override_var not in env_vars:
        LOGGER.info("Environment \"%s\" was determined from the current "
                    "git branch or parent directory.", env_name)
        LOGGER.info("If this is not the environment name, update the branch/folder name or "
                    "set an override value via the %s environment variable", override_var)
    else:
        LOGGER.info("Environment \"%s\" was determined from the %s environment variable.",
                    env_name, override_var)
        LOGGER.info("If this is not correct, update "
                    "the value (or unset it to fall back to the name of "
                    "the current git branch or parent directory).")
    LOGGER.info("")
def calc_plateaus(beta, edges, rel_tol=1e-4, verbose=0):
    '''Calculate the plateaus (degrees of freedom) of a graph of beta values
    in linear time.

    A plateau is a connected component of nodes whose beta values all lie
    within ``rel_tol`` of the value of the component's seed node.

    Parameters
    ----------
    beta : numpy array of per-node values; NaN entries are skipped.
    edges : dict mapping each node index to a list of neighbor indices.
    rel_tol : absolute tolerance around the seed value for membership.
    verbose : verbosity level (0 = silent).

    Returns
    -------
    list of (value, set_of_indices) tuples, one per plateau.
    '''
    if not isinstance(edges, dict):
        raise Exception('Edges must be a map from each node to a list of neighbors.')
    to_check = deque(range(len(beta)))
    check_map = np.zeros(beta.shape, dtype=bool)
    # NaN nodes are treated as already checked so they never join a plateau.
    check_map[np.isnan(beta)] = True
    plateaus = []
    if verbose:
        print('\tCalculating plateaus...')
    if verbose > 1:
        print('\tIndices to check {0} {1}'.format(len(to_check), check_map.shape))
    # Loop until every beta index has been checked.
    while to_check:
        if verbose > 1:
            print('\t\tPlateau #{0}'.format(len(plateaus) + 1))
        # Get the next unchecked point on the grid.
        idx = to_check.popleft()
        # Skip indices already absorbed into an earlier plateau.
        while to_check and check_map[idx]:
            try:
                idx = to_check.popleft()
            except IndexError:
                # Fix: narrowed from a bare except; only an exhausted deque
                # is expected here.
                break
        # Edge case -- every remaining index was already checked.
        if check_map[idx]:
            break
        # Create the plateau and calculate the inclusion conditions.
        cur_plateau = set([idx])
        cur_unchecked = deque([idx])
        val = beta[idx]
        min_member = val - rel_tol
        max_member = val + rel_tol
        # Breadth-first expansion across every boundary of the plateau.
        while cur_unchecked:
            idx = cur_unchecked.popleft()
            # Generic graph case: all neighbors of this node are candidates.
            for local_idx in edges[idx]:
                if (not check_map[local_idx]
                        and min_member <= beta[local_idx] <= max_member):
                    # Mark as checked so it is not re-checked unnecessarily.
                    check_map[local_idx] = True
                    # Add it to the plateau and the unchecked frontier.
                    cur_unchecked.append(local_idx)
                    cur_plateau.add(local_idx)
        # Track each plateau's indices.
        plateaus.append((val, cur_plateau))
    # Returns the list of plateaus and their values.
    return plateaus
def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id):
    """Gets the ``OsidSession`` associated with the gradebook column admin
    service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradebookColumnAdminSession) - ``a
            GradebookColumnAdminSession``
    raise:  NotFound - ``gradebook_id`` not found
    raise:  NullArgument - ``gradebook_id`` is ``null``
    raise:  OperationFailed - ``unable to complete request``
    raise:  Unimplemented - ``supports_gradebook_column_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_admin()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    supported = self.supports_gradebook_column_admin()
    if not supported:
        raise errors.Unimplemented()
    # Also include check to see if the catalog Id is found otherwise raise
    # errors.NotFound
    # pylint: disable=no-member
    return sessions.GradebookColumnAdminSession(gradebook_id, runtime=self._runtime)
def delaunay_2d(self, tol=1e-05, alpha=0.0, offset=1.0, bound=False):
    """Apply a delaunay 2D filter along the best fitting plane.

    The grid's points are extracted and the triangulation is performed on
    those alone.
    """
    point_cloud = PolyData(self.points)
    return point_cloud.delaunay_2d(tol=tol, alpha=alpha, offset=offset, bound=bound)
def _get_contigs_to_keep(self, filename):
    '''Returns a set of names read from the file called filename.
    If filename is None, returns an empty set.'''
    names = set()
    if filename is not None:
        with open(filename) as handle:
            for line in handle:
                names.add(line.rstrip())
    return names
def find_solution_ranks(sdp, xmat=None, baselevel=0):
    """Helper function to detect rank loop in the solution matrix.

    :param sdp: The SDP relaxation.
    :type sdp: :class:`ncpol2sdpa.sdp`.
    :param xmat: Optional parameter providing the primal solution of the
                 moment matrix.  If not provided, the solution is extracted
                 from the sdp object.
    :type xmat: :class:`numpy.array`.
    :param baselevel: Optional parameter for specifying the lower level
                      relaxation for which the rank loop should be tested
                      against.
    :type baselevel: int.
    :returns: list of int -- the ranks of the solution matrix in the
              order of increasing degree.
    """
    # Fix: the original unconditionally overwrote a caller-supplied xmat
    # with sdp.x_mat[0], contradicting the documented behavior and raising
    # even when a primal solution was explicitly provided.
    if xmat is None:
        if sdp.status == "unsolved":
            raise Exception("The SDP relaxation is unsolved and no primal " +
                            "solution is provided!")
        xmat = sdp.x_mat[0]
    ranks = []
    from numpy.linalg import matrix_rank
    if baselevel == 0:
        levels = range(1, sdp.level + 1)
    else:
        levels = [baselevel]
    for level in levels:
        base_monomials = pick_monomials_up_to_degree(sdp.monomial_sets[0], level)
        ranks.append(matrix_rank(xmat[:len(base_monomials), :len(base_monomials)]))
        # When the tested block is smaller than the full matrix, also record
        # the rank of the complete solution matrix.
        if xmat.shape != (len(base_monomials), len(base_monomials)):
            ranks.append(matrix_rank(xmat))
    return ranks
def restore_logging(lines, min_level_value, max_level_value):
    """Re-enable logging statements in these lines whose logging level falls
    between the specified minimum and maximum levels and which were disabled
    by disable_logging() before."""
    chunks = []
    remaining = lines
    while remaining:
        first = remaining[0]
        if first.lstrip() != PASS_LINE_CONTENTS:
            # Not our pass statement: keep the line as-is and move on.
            chunks.append(first)
            remaining = remaining[1:]
            continue
        # A logging call starts on the next line: split it from the rest.
        call_lines, rest = split_call(remaining[1:])
        remaining = rest
        stmt = ''.join(call_lines)
        original = first + stmt
        # Only restore the statement if its level falls between min and max.
        if not check_level(stmt, True, min_level_value, max_level_value):
            chunks.append(stmt)
        else:
            # Uncomment the logging statement and drop the pass line.
            restored = uncomment_lines(call_lines)
            logging.info('replacing:\n%s\nwith this:\n%s' % (original.rstrip(),
                                                             restored.rstrip()))
            chunks.append(restored)
    return ''.join(chunks)
def get_numpy_type(dicom_header):
    """Build a NumPy format code, e.g. "uint16", "int32", from two pieces
    of header info: PixelRepresentation (0 for unsigned, 1 for signed) and
    BitsAllocated (8, 16 or 32).

    :param dicom_header: the read dicom file/headers
    :returns: numpy format string
    :raises TypeError: when the resulting format is not a valid NumPy dtype
    """
    signed = dicom_header.PixelRepresentation
    bits = dicom_header.BitsAllocated
    format_string = '%sint%d' % ('' if signed else 'u', bits)
    try:
        numpy.dtype(format_string)
    except TypeError:
        raise TypeError(
            "Data type not understood by NumPy: "
            "format='%s', PixelRepresentation=%d, BitsAllocated=%d"
            % (format_string, signed, bits))
    return format_string
def fit(self, range, function=None):
    """Fits a function to the active display's data trace within a
    specified range of the time window.

    E.g.::

        # Fits a gaussian to the first 30% of the time window.
        lockin.fit(range=(0, 30), function='gauss')

    :param range: Tuple of (start, stop): the left and right limits of the
        time window in percent.
    :param function: The function used to fit the data, either 'line',
        'exp', 'gauss' or None, the default. The configured fit function is
        left unchanged if function is None.

    .. note::
        Fitting takes some time. Check the status byte to see when the
        operation is done. A running scan will be paused until the
        fitting is complete.

    .. warning::
        The SR850 will generate an error if the active display trace is not
        stored when the fit command is executed.
    """
    if function is not None:
        self.fit_function = function
    # Fix: the body referenced undefined names `start`/`stop`; they are the
    # two elements of the `range` argument documented above.
    start, stop = range
    cmd = 'FITT', Integer(min=0, max=100), Integer(min=0, max=100)
    self._write(cmd, start, stop)
def refresh(self):
    """Refreshes the internal lookup table if necessary."""
    try:
        self._private_to_public = self.cloud_discovery.discover_nodes()
    except Exception as ex:
        # Best-effort refresh: log and keep the previous table.
        message = "Failed to load addresses from Hazelcast.cloud: {}".format(ex.args[0])
        self.logger.warning(message, extra=self._logger_extras)
def load(cls, filename, store_password, try_decrypt_keys=True):
    """Convenience wrapper function; reads the contents of the given file
    and passes it through to :func:`loads`.  See :func:`loads`."""
    with open(filename, 'rb') as keystore_file:
        raw = keystore_file.read()
    return cls.loads(raw, store_password, try_decrypt_keys=try_decrypt_keys)
def remote_getWorkerInfo(self):
    """Retrieve data from the files in WORKERDIR/info/* and send the
    contents to the buildmaster.  These describe the worker and its
    configuration and are created and maintained by the worker
    administrator; they are retrieved each time the master-worker
    connection is established."""
    files = {}
    info_dir = os.path.join(self.basedir, "info")
    if os.path.isdir(info_dir):
        for name in os.listdir(info_dir):
            full_path = os.path.join(info_dir, name)
            if os.path.isfile(full_path):
                with open(full_path, "r") as handle:
                    files[name] = handle.read()
    # Detect the CPU count once; fall back to 1 when undetectable.
    if not self.numcpus:
        try:
            self.numcpus = multiprocessing.cpu_count()
        except NotImplementedError:
            log.msg("warning: could not detect the number of CPUs for "
                    "this worker. Assuming 1 CPU.")
            self.numcpus = 1
    files['environ'] = os.environ.copy()
    files['system'] = os.name
    files['basedir'] = self.basedir
    files['numcpus'] = self.numcpus
    files['version'] = self.remote_getVersion()
    files['worker_commands'] = self.remote_getCommands()
    return files
def event_key_pressed(self, event):
    """Do an "invert shift" for user inputs:
    convert all lowercase letters to uppercase and vice versa."""
    pressed = event.char
    if not pressed:
        return
    queued = invert_shift(pressed) if pressed in string.ascii_letters else pressed
    self.user_input_queue.put(queued)
    # Swallow the event: the char must not be inserted into the text widget
    # because it will be echoed back from the machine.
    return "break"
def GetElementNSdict(self, elt):
    '''Get a dictionary of all the namespace attributes for the indicated
    element.  The dictionaries are cached, and we recurse up the tree
    as necessary.'''
    d = self.ns_cache.get(id(elt))
    if not d:
        # Fix: start from an empty dict so that the document root (which has
        # no parent to recurse into) does not leave d as None and crash on
        # item assignment / d.copy() below.
        d = {}
        if elt != self.dom:
            # Inherit the parent's namespace map (returned as a copy).
            d = self.GetElementNSdict(elt.parentNode)
        for a in _attrs(elt):
            if a.namespaceURI == XMLNS.BASE:
                # 'xmlns' declares the default namespace; 'xmlns:prefix'
                # declares a prefixed one.
                if a.localName == "xmlns":
                    d[''] = a.nodeValue
                else:
                    d[a.localName] = a.nodeValue
        self.ns_cache[id(elt)] = d
    # Return a copy so callers cannot mutate the cached dict.
    return d.copy()
def __FinalizeRequest(self, http_request, url_builder):
    """Make any final general adjustments to the request."""
    too_long = (http_request.http_method == 'GET'
                and len(http_request.url) > _MAX_URL_LENGTH)
    if too_long:
        # Tunnel the oversized GET through a POST with a method override,
        # moving the query string into the request body.
        http_request.http_method = 'POST'
        http_request.headers['x-http-method-override'] = 'GET'
        http_request.headers['content-type'] = 'application/x-www-form-urlencoded'
        http_request.body = url_builder.query
        url_builder.query_params = {}
    http_request.url = url_builder.url
def complete_variable(text):
    '''complete a MAVLink variable'''
    if '.' not in text:
        # No field part yet: offer all message names.
        return rline_mpstate.status.msgs.keys()
    prefix = text.split('.')[0]
    if prefix not in rline_mpstate.status.msgs:
        return []
    return [prefix + '.' + field
            for field in rline_mpstate.status.msgs[prefix].get_fieldnames()]
def get_data_times_for_job_legacy(self, num_job):
    """Get the data segment that this job will need to read in."""
    # All values should be integers, so no rounding is needed.
    offset = self.curr_seg[0] + int(self.job_time_shift * num_job)
    job_data_seg = self.data_chunk.shift(offset)
    is_last_job = num_job == (self.num_jobs - 1)
    if is_last_job:
        # Pull the final job back so it ends exactly at the segment end.
        overshoot = job_data_seg[1] - self.curr_seg[1]
        assert overshoot >= 0
        job_data_seg = segments.segment(job_data_seg[0] - overshoot,
                                        self.curr_seg[1])
    assert abs(job_data_seg) == self.data_length
    return job_data_seg
def abspath(raw):
    """Return what is hopefully an OS-independent absolute path."""
    # Split on whichever separator the raw path actually uses.
    if '/' in raw:
        parts = raw.split('/')
    elif '\\' in raw:
        parts = raw.split('\\')
    else:
        parts = [raw]
    return os.path.abspath(os.sep.join(parts))
def geo2qd(self, glat, glon, height):
    """Converts geodetic to quasi-dipole coordinates.

    Parameters
    ----------
    glat : array_like
        Geodetic latitude
    glon : array_like
        Geodetic longitude
    height : array_like
        Altitude in km

    Returns
    -------
    qlat : ndarray or float
        Quasi-dipole latitude
    qlon : ndarray or float
        Quasi-dipole longitude
    """
    checked_glat = helpers.checklat(glat, name='glat')
    qlat, qlon = self._geo2qd(checked_glat, glon, height)
    # An array result comes back with dtype object, so coerce to float64.
    return np.float64(qlat), np.float64(qlon)
def from_text(name, ttl, rdclass, rdtype, *text_rdatas):
    """Create an RRset with the specified name, TTL, class, and type and with
    the specified rdatas in text format.

    @rtype: dns.rrset.RRset object
    """
    # Varargs convenience wrapper: forward the collected rdatas as one
    # sequence to the list-based constructor.
    return from_text_list(name, ttl, rdclass, rdtype, text_rdatas)
def ok(self, data, schema=None, envelope=None):
    """Build a 200 response carrying the given content.

    :param data: The content value.
    :param schema: The schema to serialize the data.
    :param envelope: The key used to envelope the data.
    :return: A Flask response object.
    """
    serialized = marshal(data, schema, envelope)
    return self.__make_response(serialized)
def description(filename):
    """Provide a short description.

    This ends up in the Summary header for PKG-INFO and should be a
    one-liner.  It gets rendered on the package page just below the package
    version header but above the long_description (which ironically feeds
    the Description header).  It should not include reST, so we pick out the
    first non-empty line after the double header (title + underline).

    Returns None when no such line exists.
    """
    with open(filename) as fp:
        for lineno, line in enumerate(fp):
            # Skip the title, its underline, and the blank line after them.
            if lineno < 3:
                continue
            line = line.strip()
            if line:  # truthiness instead of len(line) > 0
                return line
    # Make the no-match fall-through explicit.
    return None
def _log_info(self):
    """Output test run information to top of log file."""
    # Key insertion order matters: it determines the order of the lines
    # written to the log below.
    if self.cloud == 'ssh':
        info = {
            'platform': self.cloud,
            'distro': self.distro_name,
            'image': self.instance_ip,
            'timestamp': self.time_stamp,
            'log_file': self.log_file,
            'results_file': self.results_file,
        }
    else:
        info = {
            'platform': self.cloud,
            'region': self.region,
            'distro': self.distro_name,
            'image': self.image_id,
            'instance': self.running_instance_id,
            'timestamp': self.time_stamp,
            'log_file': self.log_file,
            'results_file': self.results_file,
        }
    self.results['info'] = info
    lines = ['%s: %s' % (key, val) for key, val in info.items()]
    self._write_to_log('\n'.join(lines))
def update_changes(changes, newtext, change):
    """Decide whether to compact the newest change into the old last; return
    new change list.  Assumes changes is safe to mutate.

    note: newtext MUST be the result of applying change to changes, and is
    only passed to save doing the computation again.
    """
    # the criteria for a new version are:
    # 1. mode change (modes are adding to end, deleting from end, internal edits)
    # 2. length changed by more than 256 chars (why power of 2? why not)
    # 3. time delta > COMPACTION_TIME_THRESH
    if not changes:
        return [change]
    # todo(awinter): needs test case
    if change.utc - changes[-1].utc > COMPACTION_TIME_THRESH:
        changes.append(change)
        return changes
    base = reduce(apply_change, changes[:-1], '')
    final = apply_change(base, changes[-1])
    prev_mode = detect_change_mode(base, changes[-1])
    cur_mode = detect_change_mode(final, change)
    # BUG FIX: abs() previously wrapped the whole comparison, i.e.
    # `abs(len(newtext) - len(final) < COMPACTION_LEN_THRESH)`, taking the
    # absolute value of a boolean and effectively disabling criterion 2.
    # Apply abs() to the length delta, then compare against the threshold.
    if prev_mode == cur_mode and abs(len(newtext) - len(final)) < COMPACTION_LEN_THRESH:
        changes[-1] = mkchange(base, newtext, change.version, change.utc)
    else:
        changes.append(change)
    return changes
def rest_delete(url, timeout):
    '''Call rest delete method'''
    try:
        return requests.delete(url, timeout=timeout)
    except Exception as e:
        # Best-effort: report the failure and signal it with None.
        print('Get exception {0} when sending http delete to url {1}'.format(str(e), url))
        return None
def duration(self):
    """Calculate how long the stage took.

    Returns:
        float: (current) duration of the stage; 0.0 when no events exist
    """
    if not self.events:
        return 0.0
    # Span between the first and last recorded event timestamps.
    start = datetime.fromtimestamp(self.events[0]['timestamp'])
    end = datetime.fromtimestamp(self.events[-1]['timestamp'])
    return (end - start).total_seconds()
def validate_args(self, qubits: Sequence[Qid]) -> None:
    """Checks if this gate can be applied to the given qubits.

    By default checks if input is of type Qid and qubit count.
    Child classes can override.

    Args:
        qubits: The collection of qubits to potentially apply the gate to.

    Throws:
        ValueError: The gate can't be applied to the qubits.
    """
    if len(qubits) == 0:
        raise ValueError(
            "Applied a gate to an empty set of qubits. Gate: {}".format(repr(self)))
    if len(qubits) != self.num_qubits():
        raise ValueError(
            'Wrong number of qubits for <{!r}>. '
            'Expected {} qubits but got <{!r}>.'.format(
                self, self.num_qubits(), qubits))
    # Generator expression short-circuits and avoids materializing an
    # intermediate list (was any([...])).
    if any(not isinstance(qubit, Qid) for qubit in qubits):
        raise ValueError('Gate was called with type different than Qid.')
def isentropic_interpolation(theta_levels, pressure, temperature, *args, **kwargs):
    r"""Interpolate data in isobaric coordinates to isentropic coordinates.

    Parameters
    ----------
    theta_levels : array
        One-dimensional array of desired theta surfaces
    pressure : array
        One-dimensional array of pressure levels
    temperature : array
        Array of temperature
    args : array, optional
        Any additional variables will be interpolated to each isentropic level.

    Returns
    -------
    list
        List with pressure at each isentropic level, followed by each additional
        argument interpolated to isentropic coordinates.

    Other Parameters
    ----------------
    axis : int, optional
        The axis corresponding to the vertical in the temperature array, defaults to 0.
    tmpk_out : bool, optional
        If true, will calculate temperature and output as the last item in the output
        list.  Defaults to False.
    max_iters : int, optional
        The maximum number of iterations to use in calculation, defaults to 50.
    eps : float, optional
        The desired absolute error in the calculated value, defaults to 1e-6.
    bottom_up_search : bool, optional
        Controls whether to search for theta levels bottom-up, or top-down.  Defaults to
        True, which is bottom-up search.

    Notes
    -----
    Input variable arrays must have the same number of vertical levels as the pressure
    levels array.  Pressure is calculated on isentropic surfaces by assuming that
    temperature varies linearly with the natural log of pressure.  Linear interpolation
    is then used in the vertical to find the pressure at each isentropic level.
    Interpolation method from [Ziv1994]_.  Any additional arguments are assumed to vary
    linearly with temperature and will be linearly interpolated to the new isentropic
    levels.

    See Also
    --------
    potential_temperature
    """
    # iteration function to be used later
    # Calculates theta from linearly interpolated temperature and solves for pressure
    def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
        exner = pok * np.exp(-ka * iter_log_p)
        t = a * iter_log_p + b
        # Newton-Raphson iteration: f is the theta residual, fp its
        # derivative with respect to log(p)
        f = isentlevs_nd - t * exner
        fp = exner * (ka * t - a)
        return iter_log_p - (f / fp)

    # Change when Python 2.7 no longer supported
    # Pull out keyword arguments
    tmpk_out = kwargs.pop('tmpk_out', False)
    max_iters = kwargs.pop('max_iters', 50)
    eps = kwargs.pop('eps', 1e-6)
    axis = kwargs.pop('axis', 0)
    bottom_up_search = kwargs.pop('bottom_up_search', True)

    # Get dimensions in temperature
    ndim = temperature.ndim

    # Convert units
    pres = pressure.to('hPa')
    temperature = temperature.to('kelvin')

    # Broadcast the 1-D pressure array along the vertical axis of temperature
    slices = [np.newaxis] * ndim
    slices[axis] = slice(None)
    slices = tuple(slices)
    pres = np.broadcast_to(pres[slices], temperature.shape) * pres.units

    # Sort input data (descending pressure along the vertical axis)
    sort_pres = np.argsort(pres.m, axis=axis)
    sort_pres = np.swapaxes(np.swapaxes(sort_pres, 0, axis)[::-1], 0, axis)
    sorter = broadcast_indices(pres, sort_pres, ndim, axis)
    levs = pres[sorter]
    tmpk = temperature[sorter]

    # Requested theta surfaces, flattened and sorted ascending
    theta_levels = np.asanyarray(theta_levels.to('kelvin')).reshape(-1)
    isentlevels = theta_levels[np.argsort(theta_levels)]

    # Make the desired isentropic levels the same shape as temperature
    shape = list(temperature.shape)
    shape[axis] = isentlevels.size
    isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)

    # exponent to Poisson's Equation, which is imported above
    ka = mpconsts.kappa.m_as('dimensionless')

    # calculate theta for each point
    pres_theta = potential_temperature(levs, tmpk)

    # Raise error if input theta level is larger than pres_theta max
    if np.max(pres_theta.m) < np.max(theta_levels):
        raise ValueError('Input theta level out of data bounds')

    # Find log of pressure to implement assumption of linear temperature dependence on
    # ln(p)
    log_p = np.log(levs.m)

    # Calculations for interpolation routine
    pok = mpconsts.P0 ** ka

    # index values for each point for the pressure level nearest to the desired theta level
    above, below, good = find_bounding_indices(pres_theta.m, theta_levels, axis,
                                               from_below=bottom_up_search)

    # calculate constants for the interpolation: T = a * ln(p) + b between
    # the bounding levels
    a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
    b = tmpk.m[above] - a * log_p[above]

    # calculate first guess for interpolation (midpoint in log-p)
    isentprs = 0.5 * (log_p[above] + log_p[below])

    # Make sure we ignore any nans in the data for solving; checking a is enough since it
    # combines log_p and tmpk.
    good &= ~np.isnan(a)

    # iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
    log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
                                  args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
                                  xtol=eps, maxiter=max_iters)

    # get back pressure from log p
    isentprs[good] = np.exp(log_p_solved)

    # Mask out points we know are bad as well as points that are beyond the max pressure
    isentprs[~(good & _less_or_close(isentprs, np.max(pres.m)))] = np.nan

    # create list for storing output data
    ret = [isentprs * units.hPa]

    # if tmpk_out = true, calculate temperature and output as last item in list
    if tmpk_out:
        ret.append((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)) * units.kelvin)

    # do an interpolation for each additional argument
    if args:
        others = interpolate_1d(isentlevels, pres_theta.m,
                                *(arr[sorter] for arr in args), axis=axis)
        if len(args) > 1:
            ret.extend(others)
        else:
            ret.append(others)

    return ret
def get_proficiency_lookup_session(self, proxy):
    """Gets the ``OsidSession`` associated with the proficiency lookup service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.learning.ProficiencyLookupSession) - a
            ``ProficiencyLookupSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_proficiency_lookup()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_proficiency_lookup()`` is ``true``.*
    """
    if self.supports_proficiency_lookup():
        # pylint: disable=no-member
        return sessions.ProficiencyLookupSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
def generate_signed_url(self, expiration=None, api_access_endpoint=_API_ACCESS_ENDPOINT,
                        method="GET", headers=None, query_parameters=None, client=None,
                        credentials=None, version=None):
    """Generate a signed URL granting time-limited access to this bucket.

    Useful when you do not want a publicly accessible bucket but also do not
    want to require users to explicitly log in: the returned URL is valid
    only until ``expiration``.

    .. note::
        On Google Compute Engine a signed URL cannot be generated with a GCE
        service account; use a standard service account from a JSON file
        instead (see Issue 50 of google-auth-library-python).

    :type expiration: Union[Integer, datetime.datetime, datetime.timedelta]
    :param expiration: Point in time when the signed URL should expire.

    :type api_access_endpoint: str
    :param api_access_endpoint: Optional URI base.

    :type method: str
    :param method: The HTTP verb that will be used when requesting the URL.

    :type headers: dict
    :param headers: (Optional) Additional HTTP headers the signed URL must be
        used with; see the XML API reference-headers documentation.

    :type query_parameters: dict
    :param query_parameters: (Optional) Additional query parameters included
        as part of the signed URL.

    :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType``
    :param client: (Optional) The client to use.  If not passed, falls back
        to the ``client`` stored on the blob's bucket.

    :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
        :class:`NoneType`
    :param credentials: (Optional) The OAuth2 credentials used to sign the
        URL.  Defaults to the credentials stored on the client used.

    :type version: str
    :param version: (Optional) The signed-credential version, 'v2' or 'v4'.

    :raises: :exc:`ValueError` when version is invalid.
    :raises: :exc:`TypeError` when expiration is not a valid type.
    :raises: :exc:`AttributeError` if credentials is not an instance of
        :class:`google.auth.credentials.Signing`.

    :rtype: str
    :returns: A signed URL usable to access the resource until expiration.
    """
    if version is None:
        version = "v2"
    if version not in ("v2", "v4"):
        raise ValueError("'version' must be either 'v2' or 'v4'")
    if credentials is None:
        client = self._require_client(client)
        credentials = client._credentials
    helper = generate_signed_url_v2 if version == "v2" else generate_signed_url_v4
    return helper(
        credentials,
        resource="/{bucket_name}".format(bucket_name=self.name),
        expiration=expiration,
        api_access_endpoint=api_access_endpoint,
        method=method.upper(),
        headers=headers,
        query_parameters=query_parameters,
    )
def removeSubEditor(self, subEditor):
    """Removes the subEditor from the layout and removes the event filter."""
    # If the editor being removed is currently our focus proxy, clear the
    # proxy first so the widget is not left pointing at a removed child.
    if subEditor is self.focusProxy():
        self.setFocusProxy(None)
    subEditor.removeEventFilter(self)
    self._subEditors.remove(subEditor)
    self.hBoxLayout.removeWidget(subEditor)
def months_int(self):
    """A sorted list of months of the year in this analysis period as integers."""
    if self._is_reversed:
        # Period wraps the year boundary: months from the start month
        # through December, then January through the end month.
        return (list(xrange(self.st_time.month, 13)) +
                list(xrange(1, self.end_time.month + 1)))
    return list(xrange(self.st_time.month, self.end_time.month + 1))
def _after_n_epoch(self, epoch_id: int, **_) -> None:
    """Save the model every ``n_epochs`` epoch.

    :param epoch_id: number of the processed epoch (used as the name suffix)
    """
    suffix = str(epoch_id)
    SaveEvery.save_model(model=self._model,
                         name_suffix=suffix,
                         on_failure=self._on_save_failure)
def write_to_file(self, filename):
    """Write the molecular geometry to a file.

    The file format is inferred from the extension.  Currently supported
    formats are: ``*.xyz``, ``*.cml``

    Argument:
     | ``filename``  --  a filename
    """
    # TODO: give all file format writers the same API
    if filename.endswith('.cml'):
        from molmod.io import dump_cml
        dump_cml(filename, [self])
    elif filename.endswith('.xyz'):
        from molmod.io import XYZWriter
        # Unknown atomic numbers are written as the dummy symbol "X".
        labels = []
        for number in self.numbers:
            atom = periodic[number]
            labels.append("X" if atom is None else atom.symbol)
        writer = XYZWriter(filename, labels)
        writer.dump(self.title, self.coordinates)
        del writer
    else:
        raise ValueError("Could not determine file format for %s." % filename)
def kdeconnector(self):
    """Get the current state and return it."""
    if self._init_dbus():
        text, color = self._get_text()
    else:
        # DBus unavailable: report an unknown device in the error color.
        text, color = UNKNOWN_DEVICE, self.py3.COLOR_BAD
    return {
        "cached_until": self.py3.time_in(self.cache_timeout),
        "full_text": text,
        "color": color,
    }
def add_hashed_value(self, hash_value, store_key):
    """Add hashed value in the context of the current transaction.

    :param hash_value: The hashed value to be added to the index
    :type hash_value: str
    :param store_key: The key for the document in the store
    :type store_key: object
    """
    # Record the forward and reverse mappings, avoiding duplicates.
    pending = self._add_cache[store_key]
    if hash_value not in pending:
        pending.append(hash_value)
    reverse = self._reverse_add_cache[hash_value]
    if store_key not in reverse:
        reverse.append(store_key)
    # An add supersedes any pending removal / undefined marker for the key.
    self._remove_cache.pop(store_key, None)
    self._undefined_cache.pop(store_key, None)
def list_snapshots(self):
    """Returns a list of all snapshots of this volume."""
    all_snapshots = self.manager.list_snapshots()
    return list(filter(lambda snap: snap.volume_id == self.id, all_snapshots))
def create_order(cls, order, **kwargs):
    """Create Order

    Create a new Order.
    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_order(order, async=True)
    >>> result = thread.get()

    :param async bool
    :param Order order: Attributes of order to create (required)
    :return: Order
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches previously made the identical call
    # and returned its result; the helper itself inspects kwargs['async']
    # and yields either the data or the request thread, so the duplicated
    # if/else was redundant and has been collapsed.
    return cls._create_order_with_http_info(order, **kwargs)
def get_pixel(framebuf, x, y):
    """Get the color of a given pixel"""
    # Vertical-byte layout: each buffer byte holds 8 vertically stacked
    # pixels, so y selects the byte row (y // 8) and the bit within it.
    byte_index = (y >> 3) * framebuf.stride + x
    bit = y & 0x07
    return (framebuf.buf[byte_index] >> bit) & 0x01
def _sanitize_title ( self , title ) :
"""Remove redunant meta data from title and return it""" | title = re . sub ( self . inside_brackets , "" , title )
title = re . sub ( self . after_delimiter , "" , title )
return title . strip ( ) |
def get_tid2annotations(self, clean: bool = True):
    '''Group annotations by tid.

    clean: when True, reduce each row to a dict of the annotation literals
    (tid, annotation_type_tid, value, annotation_type_label); when False,
    keep the full row dict.
    '''
    tid2annotations = defaultdict(list)
    # Fetch once instead of twice (columns + itertuples on the same frame).
    annotations = self.fetch_annotations()
    header = ['Index'] + list(annotations.columns)
    for row in annotations.itertuples():
        row = {header[i]: val for i, val in enumerate(row)}
        if clean:
            annotation = {
                'tid': row['tid'],
                'annotation_type_tid': row['annotation_type_tid'],
                'value': row['value'],
                'annotation_type_label': row['annotation_type_label'],
            }
            tid2annotations[row['tid']].append(annotation)
        else:  # `elif not clean` was just `else`
            tid2annotations[row['tid']].append(row)
    return tid2annotations
def assert_element_not_visible(self, selector, by=By.CSS_SELECTOR,
                               timeout=settings.SMALL_TIMEOUT):
    """Similar to wait_for_element_not_visible() - returns nothing.

    As above, will raise an exception if the element stays visible.
    Returns True if successful.  Default timeout = SMALL_TIMEOUT.
    """
    # Scale the default timeout when a multiplier is configured.
    if timeout == settings.SMALL_TIMEOUT and self.timeout_multiplier:
        timeout = self.__get_new_timeout(timeout)
    self.wait_for_element_not_visible(selector, by=by, timeout=timeout)
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.