signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def parse_union_type_extension(lexer: Lexer) -> UnionTypeExtensionNode:
    """Parse a GraphQL ``extend union`` type extension from the lexer."""
    start = lexer.token
    expect_keyword(lexer, "extend")
    expect_keyword(lexer, "union")
    name = parse_name(lexer)
    directives = parse_directives(lexer, True)
    types = parse_union_member_types(lexer)
    # An extension must add at least one directive or member type.
    if not directives and not types:
        raise unexpected(lexer)
    return UnionTypeExtensionNode(
        name=name, directives=directives, types=types, loc=loc(lexer, start)
    )
def put(self, rownr, value, matchingfields=True):
    """Store ``value`` into row ``rownr``.

    ``value`` should be a dict (as returned by method :func:`get`) whose
    keys match the column names of this ``tablerow`` object.  With
    ``matchingfields=True`` the dict may contain extra keys; only those
    matching a column name are used.
    """
    self._put(rownr, value, matchingfields)
def closeEvent(self, event: QCloseEvent):
    """Qt close-event hook: veto the close and route it through the action.

    Called automatically when the window is closed via the close [X]
    button, the system window list, or similar.  Per the Qt5 QCloseEvent
    documentation, a widget that does not want to be hidden should
    reimplement this handler and ``ignore()`` the event; we do exactly
    that and simulate a trigger of ``action_close_window`` instead.
    """
    event.ignore()
    # Emit rather than call a slot directly: the signal might be connected
    # to multiple slots.
    self.action_close_window.triggered.emit(True)
def start_cluster_server(self, num_gpus=1, rdma=False):
    """Convenience wrapper to call ``TFNode.start_cluster_server`` directly
    on this object instance."""
    return TFNode.start_cluster_server(self, num_gpus, rdma)
def _add_products(self, tile, show_all=False):
    """Register every product of ``tile`` in our product map."""
    base_path = tile.output_folder
    unique_id = tile.unique_id
    for prod_path, prod_type in tile.products.items():
        # include_directories and tilebus_definitions are stored reversed in
        # module_settings.json for historical reasons and are currently not
        # resolvable by ProductResolver, so skip them outright.
        if prod_path in ('tilebus_definitions', 'include_directories'):
            continue
        if prod_type in self.IGNORED_PRODUCTS:
            continue
        prod_base = os.path.basename(prod_path)
        prod_map = self._product_map.setdefault(prod_type, {})
        entries = prod_map.setdefault(prod_base, [])
        full_path = os.path.normpath(os.path.join(base_path, prod_path))
        hidden = not show_all and prod_base not in self._product_filter
        entries.append(ProductInfo(prod_base, full_path, unique_id, hidden))
def load_from(self, image):
    """Load the content of another DockerImage into this one.

    :param image: source image; must be the same class as ``self``
    :return: None
    :raises ConuException: if ``image`` is of a different type
    """
    if isinstance(image, self.__class__):
        image.save_to(self)
    else:
        raise ConuException("Invalid source image type", type(image))
def extract_logs(self, fname, prg):
    """Read the logfile ``fname`` and return the lines mentioning ``prg``."""
    with open(fname, 'r') as handle:
        return [entry for entry in handle if prg in entry]
def omero_cli(self, command):
    """Run ``command`` through the initialised OMERO CLI, as if typed on
    the OMERO command line, without popen or subprocess."""
    assert isinstance(command, list)
    if not self.cli:
        raise Exception('omero.cli not initialised')
    log.info("Invoking CLI [current environment]: %s", " ".join(command))
    self.cli.invoke(command, strict=True)
def max_sharpe(self):
    """Return the maximum-Sharpe-ratio portfolio as a ticker -> weight dict."""
    if not self.w:
        self.solve()
    # Between each pair of neighbouring turning points, locate the local
    # max-SR portfolio via golden-section search.
    candidate_weights = []
    candidate_srs = []
    for left, right in zip(self.w[:-1], self.w[1:]):
        w0 = np.copy(left)
        w1 = np.copy(right)
        alpha, sr_val = self.golden_section(
            self.eval_sr, 0, 1, minimum=False, args=(w0, w1))
        candidate_weights.append(alpha * w0 + (1 - alpha) * w1)
        candidate_srs.append(sr_val)
    best = candidate_srs.index(max(candidate_srs))
    self.weights = candidate_weights[best].reshape((self.n_assets,))
    return dict(zip(self.tickers, self.weights))
def compute_near_isotropic_downsampling_scales(
        size,
        voxel_size,
        dimensions_to_downsample,
        max_scales=DEFAULT_MAX_DOWNSAMPLING_SCALES,
        max_downsampling=DEFAULT_MAX_DOWNSAMPLING,
        max_downsampled_size=DEFAULT_MAX_DOWNSAMPLED_SIZE):
    """Compute the list of successive near-isotropic downsampling factors."""
    num_dims = len(voxel_size)
    cur_scale = np.ones((num_dims,), dtype=int)
    scales = [tuple(cur_scale)]
    while (len(scales) < max_scales
           and np.prod(cur_scale) < max_downsampling
           and (size / cur_scale).max() > max_downsampled_size):
        # Downsample the dimension whose effective voxel size is smallest.
        cur_voxel_size = cur_scale * voxel_size
        smallest_dim = dimensions_to_downsample[
            np.argmin(cur_voxel_size[dimensions_to_downsample])]
        cur_scale[smallest_dim] *= 2
        target_voxel_size = cur_voxel_size[smallest_dim] * 2
        # Also double any other dimension that would end up closer to the
        # target voxel size by doubling than by staying unchanged.
        for dim in dimensions_to_downsample:
            if dim == smallest_dim:
                continue
            dim_voxel_size = cur_voxel_size[dim]
            if (abs(dim_voxel_size - target_voxel_size)
                    > abs(dim_voxel_size * 2 - target_voxel_size)):
                cur_scale[dim] *= 2
        scales.append(tuple(cur_scale))
    return scales
def _select_background_cnns ( cnns ) :
"""Select cnns to use for background calculations .
Uses background samples in cohort , and will remove CNNs with high
on target variability . Uses ( number of segments * biweight midvariance ) as metric
for variability with higher numbers being more unreliable .""" | min_for_variability_analysis = 20
pct_keep = 0.10
b_cnns = [ x for x in cnns if x [ "itype" ] == "background" and x . get ( "metrics" ) ]
assert len ( b_cnns ) % 2 == 0 , "Expect even set of target/antitarget cnns for background"
if len ( b_cnns ) >= min_for_variability_analysis :
b_cnns_w_metrics = [ ]
for b_cnn in b_cnns :
unreliability = b_cnn [ "metrics" ] [ "segments" ] * b_cnn [ "metrics" ] [ "bivar" ]
b_cnns_w_metrics . append ( ( unreliability , b_cnn ) )
b_cnns_w_metrics . sort ( )
to_keep = int ( math . ceil ( pct_keep * len ( b_cnns ) / 2.0 ) * 2 )
b_cnns = [ x [ 1 ] for x in b_cnns_w_metrics ] [ : to_keep ]
assert len ( b_cnns ) % 2 == 0 , "Expect even set of target/antitarget cnns for background"
return [ x [ "file" ] for x in b_cnns ] |
def samples_by_indices_nomapping(self, indices):
    """Gather a mini-batch of samples by indices *without* applying any
    index mapping.

    Parameters
    indices : a tuple of the form ``(dataset_index, sample_indices)``
        ``dataset_index`` identifies the dataset to draw from, while
        ``sample_indices`` identifies the samples to draw from it.

    Returns
    nested list of arrays
        A mini-batch.

    Raises
    TypeError
        If random access is unsupported or ``indices`` is not a tuple.
    """
    if not self._random_access:
        raise TypeError('samples_by_indices_nomapping method not '
                        'supported as one or more of the underlying '
                        'data sources does not support random access')
    if not isinstance(indices, tuple):
        raise TypeError('indices should be a tuple, not a {}'.format(type(indices)))
    dataset_index, sample_indices = indices
    return self.datasets[dataset_index].samples_by_indices_nomapping(sample_indices)
def score(self, X, y, sample_weight=None):
    """Return the mean accuracy on the given test data and labels.

    NOTE: with ``sklearn.svm.SVC`` and a precomputed kernel whose kernel
    matrix is computed portion by portion, the first argument ``X`` is
    ignored.

    Parameters
    X : list of tuple (data1, data2)
        data1 and data2 are numpy arrays shaped [num_TRs, num_voxels],
        the test samples prepared for correlation computation (activity
        data filtered by ROIs).  Within the list, all data1 arrays share
        one num_voxels value, likewise all data2 arrays.  len(X) is the
        number of test samples.
    y : 1D numpy array
        labels; len(X) equals len(y), which is num_samples.
    sample_weight : 1D array in shape [num_samples], optional
        Sample weights.

    Returns
    score : float
        Mean accuracy of self.predict(X) wrt. y.
    """
    from sklearn.metrics import accuracy_score
    use_precomputed_path = (isinstance(self.clf, sklearn.svm.SVC)
                            and self.clf.kernel == 'precomputed'
                            and self.training_data_ is None)
    predictions = self.predict() if use_precomputed_path else self.predict(X)
    return accuracy_score(y, predictions, sample_weight=sample_weight)
def load_map(stream, name=None, check_integrity=True, check_duplicates=True):
    """Load a ContainerMap configuration from a YAML document stream.

    :param stream: YAML stream.
    :type stream: file
    :param name: Name of the ContainerMap.  If not provided, it is read
        from a ``name`` attribute on the document root level.
    :type name: unicode | str
    :param check_integrity: Perform a brief integrity check; default ``True``.
    :type check_integrity: bool
    :param check_duplicates: Check for duplicate attached volumes during
        the integrity check.
    :type check_duplicates: bool
    :return: A ContainerMap object.
    :rtype: ContainerMap
    :raises ValueError: if the document is not a mapping or no name is found
    """
    map_dict = yaml.safe_load(stream)
    if not isinstance(map_dict, dict):
        raise ValueError("Valid map could not be decoded.")
    map_name = name or map_dict.pop('name', None)
    if not map_name:
        raise ValueError("No map name provided, and none found in YAML stream.")
    return ContainerMap(map_name, map_dict,
                        check_integrity=check_integrity,
                        check_duplicates=check_duplicates)
def smooth(sig, window_size):
    """Apply a uniform moving-average filter to a signal.

    Parameters
    sig : numpy array
        The signal to smooth.
    window_size : int
        The width of the moving-average filter.
    """
    kernel = np.ones(window_size) / window_size
    return np.convolve(sig, kernel, mode='same')
def push_token(self, tok):
    """Push a token onto the stack popped by the get_token method."""
    if self.debug >= 1:
        print("shlex: pushing token " + repr(tok))
    self.pushback.appendleft(tok)
def make_error(self, message: str, *, error: Exception = None,
               # ``error_class: Type[Exception] = None`` does not work on
               # Python 3.5.2, the exact version Read the Docs runs; see
               # http://stackoverflow.com/q/42942867
               error_class: Any = None) -> Exception:
    """Return an error instantiated from the given message.

    :param message: Message to wrap.
    :param error: Validation error.
    :param error_class: Special class to wrap the error message into.
        When omitted, ``self.error_class`` (falling back to ``Error``)
        is used.
    """
    if error_class is not None:
        return error_class(message)
    chosen = self.error_class if self.error_class else Error
    return chosen(message)
def insert(self, context):
    """Resolve resources for this item.

    :param resort.engine.execution.Context context:
        Current execution context.
    """
    source = context.resolve(self.__source_path)
    target = context.resolve(self.__target_path)
    self.__resolve(context, source, target)
def _init_associations(self, fin_gaf, hdr_only, prt, allow_missing_symbol):
    """Read an annotation file and return a list of namedtuples, storing
    the parsed header on ``self.hdr`` as a side effect."""
    initializer = InitAssc()
    nts = initializer.init_associations(fin_gaf, hdr_only, prt, allow_missing_symbol)
    self.hdr = initializer.hdr
    return nts
def addrs_for_name(self, n):
    """Yield the addresses whose stored expressions involve a variable
    named ``n``, pruning stale entries from the name mapping as we go."""
    if n not in self._name_mapping:
        return
    self._mark_updated_mapping(self._name_mapping, n)
    stale = set()
    for addr in self._name_mapping[n]:
        try:
            variables = self[addr].object.variables
        except KeyError:
            stale.add(addr)
            continue
        if n in variables:
            yield addr
        else:
            stale.add(addr)
    self._name_mapping[n] -= stale
def sign_token_records(profile_components, parent_private_key, signing_algorithm="ES256K"):
    """Sign each profile component as an individual profile token record.

    :param profile_components: list of profile components to sign
    :param parent_private_key: private key used to derive the signing key
    :param signing_algorithm: only "ES256K" is supported
    :return: list of wrapped token records
    :raises ValueError: for any unsupported signing algorithm
    """
    if signing_algorithm != "ES256K":
        raise ValueError("Signing algorithm not supported")
    token_records = []
    for component in profile_components:
        signer = ECPrivateKey(parent_private_key)
        pubkey_hex = signer.public_key().to_hex()
        token = sign_token(component, signer.to_hex(),
                           {"publicKey": pubkey_hex},
                           signing_algorithm=signing_algorithm)
        record = wrap_token(token)
        record["parentPublicKey"] = pubkey_hex
        token_records.append(record)
    return token_records
def pad_width(model, table_padding=0.85, tabs_padding=1.2):
    """Compute the width of a model, applying padding factors to Tabs and
    DataTable types (recursing into layout containers)."""
    if isinstance(model, Row):
        child_widths = [pad_width(c) for c in model.children]
        width = np.max([w for w in child_widths if w is not None])
    elif isinstance(model, Column):
        child_widths = [pad_width(c) for c in model.children]
        width = np.sum([w for w in child_widths if w is not None])
    elif isinstance(model, Tabs):
        tab_widths = [pad_width(t) for t in model.tabs]
        width = np.max([w for w in tab_widths if w is not None])
        # Give every tab the common (unpadded) width before padding.
        for tab in model.tabs:
            tab.width = width
        width = int(tabs_padding * width)
    elif isinstance(model, DataTable):
        width = model.width
        model.width = int(table_padding * width)
    elif isinstance(model, (WidgetBox, Div)):
        width = model.width
    elif model:
        width = model.plot_width
    else:
        width = 0
    return width
def qualified_name(cls):
    """Return the full name of a class, including its module.  Like
    qualified_class_name, but for when you already have the class."""
    module = cls.__module__
    if module is None or module == str.__class__.__module__:
        # Builtins (and module-less classes) are returned unqualified.
        return cls.__name__
    return '{}.{}'.format(module, cls.__name__)
def get_instances_with_configs(configs):
    """Create AndroidDevice instances from a list of dict configs.

    Each config must contain the required key-value pair 'serial'.

    Args:
        configs: A list of dicts, each representing the configuration of
            one android device.

    Returns:
        A list of AndroidDevice objects.
    """
    results = []
    for config in configs:
        try:
            serial = config.pop('serial')
        except KeyError:
            raise Error('Required value "serial" is missing in AndroidDevice config %s.' % config)
        is_required = config.get(KEY_DEVICE_REQUIRED, True)
        try:
            device = AndroidDevice(serial)
            device.load_config(config)
        except Exception:
            if is_required:
                raise
            # NOTE(review): if AndroidDevice(serial) itself raised, `device`
            # is unbound here and this line would raise NameError — confirm
            # whether load_config is the only expected failure point.
            device.log.exception('Skipping this optional device due to error.')
            continue
        results.append(device)
    return results
def get_config(self, config='running-config', params=None, as_string=False):
    """Retrieve the startup-config or running-config from the node.

    Args:
        config (str): Either 'running-config' (the default) or
            'startup-config'.
        params (str): Keywords appended to the command used to retrieve
            the config.
        as_string (bool): If True the configuration is returned as a raw
            string; if False it is returned as a list of lines.  The
            default is False.

    Returns:
        Either a string or a list depending on ``as_string``.

    Raises:
        TypeError: If the specified config is not one of either
            'running-config' or 'startup-config'.
    """
    if config not in ['startup-config', 'running-config']:
        raise TypeError('invalid config name specified')
    command = 'show %s' % config
    if params:
        command += ' %s' % params
    output = self.run_commands(command, 'text')[0]['output']
    if as_string:
        return str(output).strip()
    return str(output).split('\n')
async def _setcolor(self, *, color: discord.Colour):
    """Set the default color used for embeds."""
    meta = self.bot.config.get("meta", {})
    meta['default_color'] = str(color)
    await self.bot.config.put('meta', meta)
    await self.bot.responses.basic(message="The default color has been updated.")
def shewhart(self, data: ['SASdata', str] = None, boxchart: str = None, cchart: str = None,
             irchart: str = None, mchart: str = None, mrchart: str = None,
             npchart: str = None, pchart: str = None, rchart: str = None,
             schart: str = None, uchart: str = None, xrchart: str = None,
             xschart: str = None, procopts: str = None, stmtpassthrough: str = None,
             **kwargs: dict) -> 'SASresults':
    """Python method to call the SHEWHART procedure.

    Documentation link:
    https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_shewhart_toc.htm&locale=en

    :param data: SASdata object or string. This parameter is required.
    :param boxchart: The boxchart variable can only be a string type.
    :param cchart: The cchart variable can only be a string type.
    :param irchart: The irchart variable can only be a string type.
    :param mchart: The mchart variable can only be a string type.
    :param mrchart: The mrchart variable can only be a string type.
    :param npchart: The npchart variable can only be a string type.
    :param pchart: The pchart variable can only be a string type.
    :param rchart: The rchart variable can only be a string type.
    :param schart: The schart variable can only be a string type.
    :param uchart: The uchart variable can only be a string type.
    :param xrchart: The xrchart variable can only be a string type.
    :param xschart: The xschart variable can only be a string type.
    :param procopts: Generic option available for advanced use; string only.
    :param stmtpassthrough: Generic option available for advanced use; string only.
    :return: SAS Result Object
    """
def clear_difficulty(self):
    """Reset the difficulty text to its default value (stub)."""
    if (self.get_difficulty_metadata().is_read_only()
            or self.get_difficulty_metadata().is_required()):
        raise NoAccess()
    default = self._difficulty_metadata['default_string_values'][0]
    self.my_osid_object_form._my_map['texts']['difficulty'] = default
def splitdrive(path):
    """Split ``path`` into a pair ``(drive, tail)`` where drive is either
    a mount point or the empty string.

    On systems which do not use drive specifications, drive is always the
    empty string.  In all cases, ``drive + tail`` equals ``path``.
    Equivalent to ``os.path.splitdrive``.

    Args:
        path (path-like object): Path or URL.

    Returns:
        tuple of str: drive, tail.
    """
    relative = get_instance(path).relpath(path)
    drive = path.rsplit(relative, 1)[0]
    if drive and drive[-2:] != '//':
        # Keep the "/" on the tail side.
        relative = '/' + relative
        drive = drive.rstrip('/')
    return drive, relative
def react(self, emojiname):
    """React to this message using the web API."""
    body = self._body
    self._client.react_to_message(emojiname=emojiname,
                                  channel=body['channel'],
                                  timestamp=body['ts'])
def _write ( self , session , openFile , replaceParamFile ) :
"""Generic Output Location Write to File Method""" | # Retrieve output locations
locations = self . outputLocations
# Write lines
openFile . write ( '%s\n' % self . numLocations )
for location in locations :
openFile . write ( '%s %s\n' % ( location . linkOrCellI , location . nodeOrCellJ ) ) |
def _prune_previous_versions(self, symbol, keep_mins=120, keep_version=None, new_version_shas=None):
    """Prune versions, not pointed at by snapshots which are at least keep_mins old.

    Prune will never remove all versions.

    :param symbol: symbol whose old versions should be pruned
    :param keep_mins: only versions at least this many minutes old are candidates
    :param keep_version: a version id explicitly protected from pruning, if any
    :param new_version_shas: SHAs referenced by a new version that has not been
        written yet; these must be preserved during chunk cleanup
    """
    new_version_shas = new_version_shas if new_version_shas else []
    prunable_ids_to_shas = self._find_prunable_version_ids(symbol, keep_mins)
    prunable_ids = list(prunable_ids_to_shas.keys())
    if keep_version is not None:
        # Drop the explicitly protected version from the candidate list;
        # it may legitimately not be a candidate at all.
        try:
            prunable_ids.remove(keep_version)
        except ValueError:
            pass
    if not prunable_ids:
        return
    # Versions that act as a base for other versions must not be removed.
    base_version_ids = self._find_base_version_ids(symbol, prunable_ids)
    version_ids = list(set(prunable_ids) - set(base_version_ids))
    if not version_ids:
        return
    # Delete the version documents
    mongo_retry(self._versions.delete_many)({'_id': {'$in': version_ids}})
    prunable_ids_to_shas = {k: prunable_ids_to_shas[k] for k in version_ids}
    # The new version has not been written yet, so make sure that any SHAs pointed by it are preserved
    shas_to_delete = [sha for v in prunable_ids_to_shas.values() for sha in v[0] if sha not in new_version_shas]
    # Cleanup any chunks
    mongo_retry(cleanup)(self._arctic_lib, symbol, version_ids, self._versions,
                         shas_to_delete=shas_to_delete,
                         pointers_cfgs=[v[1] for v in prunable_ids_to_shas.values()])
def value_to_db(self, value):
    """Return this field's single value prepared for saving into a
    database: whole seconds since the Unix epoch, UTF-8 encoded."""
    assert isinstance(value, datetime.datetime)
    try:
        delta = value - datetime.datetime(1970, 1, 1)
    except OverflowError:
        raise tldap.exceptions.ValidationError("is too big a date")
    total_seconds = delta.seconds + delta.days * 24 * 3600
    return str(total_seconds).encode("utf_8")
def modify(self, *, sort=None, purge=False, done=None):
    """Delegate to ``Model._modifyInternal`` (documented as running after
    the database has been loaded)."""
    return self._modifyInternal(sort=sort, purge=purge, done=done)
def load(cls, config: Params, serialization_dir: str, weights_file: str = None,
         cuda_device: int = -1) -> 'Model':
    """Instantiate an already-trained model from an experiment
    configuration, with some optional overrides.

    Parameters
    config : Params
        The configuration used to train the model.  It should definitely
        have a `model` section, and probably a `trainer` section as well.
    serialization_dir : str = None
        The directory containing the serialized weights, parameters, and
        vocabulary of the model.
    weights_file : str = None
        By default weights are loaded from `best.th` in the serialization
        directory; this overrides that value.
    cuda_device : int = -1
        By default the model loads on CPU; pass a GPU id to load it there.

    Returns
    model : Model
        The model specified in the configuration, loaded with the
        serialized vocabulary and the trained weights.
    """
    # Peek at the model type, then dispatch to the registered subclass's
    # overridable _load implementation.
    # pylint: disable=protected-access
    model_type = config["model"]["type"]
    return cls.by_name(model_type)._load(config, serialization_dir, weights_file, cuda_device)
def get_list_subtasks(client, list_id, completed=False):
    '''Get the subtasks of the list with the given ID.'''
    query = {'list_id': int(list_id), 'completed': completed}
    response = client.authenticated_request(client.api.Endpoints.SUBTASKS, params=query)
    return response.json()
def _ReadAttributeValueString(self, attribute_values_data, record_offset, attribute_values_data_offset, attribute_value_offset):
    """Reads a string attribute value.

    Args:
      attribute_values_data (bytes): attribute values data.
      record_offset (int): offset of the record relative to the start of
          the file.
      attribute_values_data_offset (int): offset of the attribute values data
          relative to the start of the record.
      attribute_value_offset (int): offset of the attribute relative to
          the start of the record.

    Returns:
      str: string value or None if attribute value offset is not set.

    Raises:
      ParseError: if the attribute value cannot be read.
    """
    # An offset of 0 means the attribute value is not set.
    if attribute_value_offset == 0:
        return None
    data_type_map = self._GetDataTypeMap('keychain_string')
    # Absolute file offset of the value, used for error reporting and by
    # the structure reader.
    file_offset = (record_offset + attribute_values_data_offset + attribute_value_offset)
    # Rebase the offset so it indexes into attribute_values_data; the
    # extra -1 presumably compensates for how the offsets are stored —
    # TODO confirm against the keychain format specification.
    attribute_value_offset -= attribute_values_data_offset + 1
    attribute_value_data = attribute_values_data[attribute_value_offset:]
    try:
        string_attribute_value = self._ReadStructureFromByteStream(attribute_value_data, file_offset, data_type_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to map string attribute value data at offset: 0x{0:08x} '
            'with error: {1!s}').format(file_offset, exception))
    return string_attribute_value.string
def commit(self):
    """Commit the active batch, invoking its handlers in priority order."""
    assert self.batch is not None, "No active batch, call start() first"
    logger.debug("Comitting batch from %d sources...", len(self.batch))
    # Order sources by configured priority (lowest value first).
    prioritized = sorted(
        ((self.priorities.get(name, self.default_priority), name) for name in self.batch),
        key=lambda entry: entry[0])
    for priority, name in prioritized:
        logger.debug("Processing items from '%s' (priority=%d)...", name, priority)
        for handlers in self.batch[name].values():
            for agg, handler in handlers:
                try:
                    if agg is None:
                        handler()
                    else:
                        handler(agg)
                except Exception:
                    # Log errors and proceed to evaluate the next handler.
                    logger.exception("Error while invoking handler.")
    self.batch = None
    logger.debug("Batch committed.")
def is_adjacent_before(self, other):
    """Return ``True`` if this time interval ends exactly when the given
    other time interval begins.

    :param other: the other interval
    :type other: :class:`~aeneas.exacttiming.TimeInterval`
    :raises TypeError: if ``other`` is not an instance of ``TimeInterval``
    :rtype: bool
    """
    if not isinstance(other, TimeInterval):
        raise TypeError(u"other is not an instance of TimeInterval")
    return self.end == other.begin
def set_shortcut(self, name, shortcut):
    """Set the shortcut of the given action.

    :param name: Action to set the shortcut for.
    :type name: unicode
    :param shortcut: Shortcut to set.
    :type shortcut: unicode
    :return: Method success (``None`` when the action is unknown).
    :rtype: bool
    """
    action = self.get_action(self.__normalize_name(name))
    if action:
        action.setShortcut(QKeySequence(shortcut))
        return True
def email_addresses595(self, key, value):
    """Populate the ``email_addresses`` field from MARCXML field 595,
    appending to ``_private_notes`` as a side effect."""
    emails = self.get('email_addresses', [])
    old_email = value.get('o')
    if old_email:
        emails.append({'value': old_email, 'current': False, 'hidden': True})
    current_email = value.get('m')
    if current_email:
        emails.append({'value': current_email, 'current': True, 'hidden': True})
    notes = self.get('_private_notes', [])
    notes.extend(
        {'source': value.get('9'), 'value': note}
        for note in force_list(value.get('a')))
    self['_private_notes'] = notes
    return emails
def namedtuple_with_defaults(typename, field_names, default_values=()):
    """Create a namedtuple class with default field values.

    :param typename: name of the new namedtuple type
    :param field_names: field names, as accepted by ``collections.namedtuple``
    :param default_values: either a sequence of defaults (applied left to
        right) or a mapping of field name to default; fields without an
        explicit default fall back to ``None``

    >>> Node = namedtuple_with_defaults('Node', 'val left right')
    >>> Node()
    Node(val=None, left=None, right=None)
    >>> Node = namedtuple_with_defaults('Node', 'val left right', [1, 2, 3])
    >>> Node()
    Node(val=1, left=2, right=3)
    >>> Node = namedtuple_with_defaults('Node', 'val left right', {'right': 7})
    >>> Node()
    Node(val=None, left=None, right=7)
    >>> Node(4)
    Node(val=4, left=None, right=7)
    """
    # BUG FIX: collections.Mapping was removed in Python 3.10; the ABCs
    # live in collections.abc.  (Also: the mutable default [] is replaced
    # by an immutable tuple.)
    from collections.abc import Mapping
    the_tuple = collections.namedtuple(typename, field_names)
    # First give every field a None default so a partial mapping/sequence
    # of defaults can instantiate a prototype.
    the_tuple.__new__.__defaults__ = (None,) * len(the_tuple._fields)
    if isinstance(default_values, Mapping):
        prototype = the_tuple(**default_values)
    else:
        prototype = the_tuple(*default_values)
    the_tuple.__new__.__defaults__ = tuple(prototype)
    return the_tuple
def dasopr(fname):
    """Open a DAS file for reading.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dasopr_c.html

    :param fname: Name of a DAS file to be opened.
    :type fname: str
    :return: Handle assigned to the opened DAS file.
    :rtype: int
    """
    name_p = stypes.stringToCharP(fname)
    handle = ctypes.c_int()
    libspice.dasopr_c(name_p, ctypes.byref(handle))
    return handle.value
def match_filters(self, path):
    """Return ``True`` when ``path`` passes all filters and should be
    processed.

    :param path: path to check.
    :return: True if it passes the filters, False otherwise.
    """
    # Tracks whether every "Required" filter seen so far matched.
    required_all_matched = True
    for filter_obj, filter_type in self.__filters:
        if (filter_type == self.FilterType.Required
                and required_all_matched and not filter_obj.match(path)):
            required_all_matched = False
        elif filter_type == self.FilterType.Include and filter_obj.match(path):
            # Any matching include filter accepts the path immediately.
            return True
        elif filter_type == self.FilterType.Exclude and filter_obj.match(path):
            return False
    # No include/exclude filter decided: accept iff all required matched.
    return required_all_matched
def as_wires(val, bitwidth=None, truncating=True, block=None):
    """Return wires from ``val``, which may be wires, integers, strings, or bools.

    :param val: a wirevector-like object or something that can be converted
        into a Const
    :param bitwidth: The bitwidth the resulting wire should be
    :param bool truncating: determines whether bits will be dropped to
        achieve the desired bitwidth if it is too long (if true, the
        most-significant bits will be dropped)
    :param Block block: block to use for wire

    This function is mainly used to coerce values into WireVectors (for
    example, operations such as "x + 1" where "1" needs to be converted to
    a Const WireVector).  An example::

        def myhardware(input_a, input_b):
            a = as_wires(input_a)
            b = as_wires(input_b)
        myhardware(3, x)

    The function as_wires will convert the 3 to a Const but keep `x`
    unchanged, assuming it is a WireVector.
    """
    from .memory import _MemIndexed
    block = working_block(block)
    if isinstance(val, (int, six.string_types)):
        # This case captures bool as well (bools are instances of int).
        return Const(val, bitwidth=bitwidth, block=block)
    elif isinstance(val, _MemIndexed):
        # Convert to a memory read when the value is actually used.
        if val.wire is None:
            val.wire = as_wires(val.mem._readaccess(val.index), bitwidth, truncating, block)
        return val.wire
    elif not isinstance(val, WireVector):
        raise PyrtlError('error, expecting a wirevector, int, or verilog-style '
                         'const string got %s instead' % repr(val))
    elif bitwidth == 0:
        # BUG FIX: this previously compared against the *string* '0', which
        # an integer bitwidth can never equal, so a zero bitwidth silently
        # slipped through instead of being rejected as the message intends.
        raise PyrtlError('error, bitwidth must be >= 1')
    elif val.bitwidth is None:
        raise PyrtlError('error, attempting to use wirevector with no defined bitwidth')
    elif bitwidth and bitwidth > val.bitwidth:
        return val.zero_extended(bitwidth)
    elif bitwidth and truncating and bitwidth < val.bitwidth:
        # Truncate the upper (most-significant) bits.
        return val[:bitwidth]
    else:
        return val
def _save_to ( self , im , path , format = None ) :
"""Save the image for testing .""" | format = format or im . format
if not format :
_ , format = splitext ( path )
format = format [ 1 : ]
im . format = format . lower ( )
im . save ( filename = path ) |
def app_remove(name, **kwargs):
    """Remove application from storage.

    No error messages will display if the specified application is not
    uploaded.
    """
    ctx = Context(**kwargs)
    storage = ctx.repo.create_secure_service('storage')
    ctx.execute_action('app:remove', **{'storage': storage, 'name': name})
def get_dict(*keys, **extras):
    """Return a dict of the requested request attributes.

    ``keys`` must be a subset of the known request fields; ``extras`` are
    merged into the result afterwards.
    """
    _keys = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json', 'method')
    assert all(k in _keys for k in keys)
    # Read the raw body first, then the (semi-flattened) form data, to keep
    # the original access order on the request object.
    raw_data = request.data
    flat_form = semiflatten(request.form)
    try:
        parsed_json = json.loads(raw_data.decode('utf-8'))
    except (ValueError, TypeError):
        parsed_json = None
    available = dict(
        url=get_url(request),
        args=semiflatten(request.args),
        form=flat_form,
        data=json_safe(raw_data),
        origin=request.headers.get('X-Forwarded-For', request.remote_addr),
        headers=get_headers(),
        files=get_files(),
        json=parsed_json,
        method=request.method,
    )
    selected = {key: available.get(key) for key in keys}
    selected.update(extras)
    return selected
def list_dataset_uris(cls, base_uri, config_path):
    """Return list containing dataset URIs in the location given by *base_uri*.

    Only subdirectories that carry admin metadata (as reported by a storage
    broker instance) are included.
    """
    parsed = generous_parse_uri(base_uri)
    path = parsed.path
    if IS_WINDOWS:
        # On Windows the URI path needs drive-letter/netloc translation.
        path = unix_to_windows_path(parsed.path, parsed.netloc)
    uris = []
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if not os.path.isdir(entry_path):
            continue
        broker = cls(entry_path, config_path)
        if not broker.has_admin_metadata():
            continue
        uris.append(broker.generate_uri(name=entry, uuid=None, base_uri=base_uri))
    return uris
def requestSubsystem(self, subsystem):
    """Request a subsystem and return a deferred reply."""
    # Length-prefix the subsystem name per the SSH wire format.
    packed = common.NS(subsystem)
    return self.sendRequest('subsystem', packed, wantReply=True)
def weighted_maximum_cut(G, sampler=None, **sampler_args):
    """Return an approximate weighted maximum cut of G.

    Builds an Ising problem whose ground states correspond to weighted
    maximum cuts of ``G`` and draws samples from it with ``sampler``. A
    weighted maximum cut is a subset S of the vertices of G that maximizes
    the sum of the edge weights between S and its complementary subset.

    Parameters
    ----------
    G : NetworkX graph
        Graph whose edges each carry a numeric ``weight`` attribute.
    sampler
        A binary quadratic model sampler exposing ``sample_ising`` (and
        ``sample_qubo``) methods and returning an iterable of samples in
        order of increasing energy. If no sampler is provided, one must be
        provided using the ``set_default_sampler`` function.
    sampler_args
        Additional keyword parameters passed through to the sampler.

    Returns
    -------
    S : set
        Nodes on one side of an approximate weighted maximum cut of G.

    Notes
    -----
    Samplers by their nature may not return the optimal solution; the
    quality of the returned sample is not checked.
    """
    # In order to form the Ising problem we increase the energy by 1 for
    # each edge between two nodes of the same color; linear biases are 0.
    linear = {node: 0. for node in G}
    try:
        quadratic = {(u, v): G[u][v]['weight'] for u, v in G.edges}
    except KeyError:
        raise DWaveNetworkXException("edges must have 'weight' attribute")
    # The first sample is the lowest-energy one returned by the sampler.
    response = sampler.sample_ising(linear, quadratic, **sampler_args)
    best = next(iter(response))
    return {node for node in G if best[node] >= 0}
def _module_to_generators ( pb_module ) :
'''Convert a protobuf module to a dict of generators .
This is typically used with modules that contain multiple type definitions .''' | if not pb_module :
return None
message_types = pb_module . DESCRIPTOR . message_types_by_name
return { k : ProtobufGenerator ( v ) for k , v in message_types . items ( ) } |
def plot_feature_histograms(xyzall, feature_labels=None, ax=None, ylog=False, outfile=None, n_bins=50, ignore_dim_warning=False, **kwargs):
    r"""Feature histogram plot.

    Parameters
    ----------
    xyzall : np.ndarray(T, d)
        (Concatenated list of) input features; time series data to be
        plotted, T data points in d dimensions (features).
    feature_labels : iterable of str or pyemma.Featurizer, optional, default=None
        Labels of histogramed features, defaults to feature index.
    ax : matplotlib.Axes object, optional, default=None
        The ax to plot to; if ax=None, a new ax (and fig) is created.
    ylog : boolean, default=False
        If True, plot logarithm of histogram values.
    n_bins : int, default=50
        Number of bins the histogram uses.
    outfile : str, default=None
        If not None, saves plot to this file.
    ignore_dim_warning : boolean, default=False
        Enable plotting for more than 50 dimensions (on your own risk).
    **kwargs :
        kwargs passed to pyplot.fill_between. See the doc of pyplot for options.

    Returns
    -------
    fig : matplotlib.Figure object
        The figure in which the used ax resides.
    ax : matplotlib.Axes object
        The ax in which the histograms were plotted.
    """
    # --- input validation ---------------------------------------------------
    if not isinstance(xyzall, _np.ndarray):
        raise ValueError('Input data hast to be a numpy array. Did you concatenate your data?')
    if xyzall.shape[1] > 50 and not ignore_dim_warning:
        raise RuntimeError('This function is only useful for less than 50 dimensions. Turn-off this warning '
                           'at your own risk with ignore_dim_warning=True.')
    if feature_labels is not None:
        if not isinstance(feature_labels, list):
            # A featurizer instance may be passed instead of explicit labels;
            # import lazily to avoid a hard dependency at module load time.
            from pyemma.coordinates.data.featurization.featurizer import MDFeaturizer as _MDFeaturizer
            if isinstance(feature_labels, _MDFeaturizer):
                feature_labels = feature_labels.describe()
            else:
                raise ValueError('feature_labels must be a list of feature labels, '
                                 'a pyemma featurizer object or None.')
        if not xyzall.shape[1] == len(feature_labels):
            raise ValueError('feature_labels must have the same dimension as the input data xyzall.')
    # make nice plots if user does not decide on color and transparency
    if 'color' not in kwargs.keys():
        kwargs['color'] = 'b'
    if 'alpha' not in kwargs.keys():
        kwargs['alpha'] = .25
    import matplotlib.pyplot as _plt
    # check input
    if ax is None:
        fig, ax = _plt.subplots()
    else:
        fig = ax.get_figure()
    # Each feature histogram gets its own baseline, stacked one unit apart.
    hist_offset = -.2
    for h, coordinate in enumerate(reversed(xyzall.T)):
        hist, edges = _np.histogram(coordinate, bins=n_bins)
        if not ylog:
            # Normalize to the tallest bin so each row spans at most 1 unit.
            y = hist / hist.max()
        else:
            # Log scale: leave empty bins as NaN so fill_between skips them.
            y = _np.zeros_like(hist) + _np.NaN
            pos_idx = hist > 0
            y[pos_idx] = _np.log(hist[pos_idx]) / _np.log(hist[pos_idx]).max()
        ax.fill_between(edges[:-1], y + h + hist_offset, y2=h + hist_offset, **kwargs)
        ax.axhline(y=h + hist_offset, xmin=0, xmax=1, color='k', linewidth=.2)
    ax.set_ylim(hist_offset, h + hist_offset + 1)
    # formatting
    if feature_labels is None:
        feature_labels = [str(n) for n in range(xyzall.shape[1])]
        ax.set_ylabel('Feature histograms')
    ax.set_yticks(_np.array(range(len(feature_labels))) + .3)
    # Rows were plotted bottom-up from the reversed transpose, so reverse
    # the labels to line them up.
    ax.set_yticklabels(feature_labels[::-1])
    ax.set_xlabel('Feature values')
    # save
    if outfile is not None:
        fig.savefig(outfile)
    return fig, ax
def angular_rate(self):
    """Return the angular rate for every axis in degree/second.

    :returns: Angular rate for every axis as a tuple
    :raises Exception: if the sensor is still in sleep mode

    :Example:

    >>> sensor = MPU6050I2C(gw)
    >>> sensor.wakeup()
    >>> sensor.angular_rate()
    (1.380859375, 1.6318359375, 1.8828125)
    """
    if not self.awake:
        raise Exception("MPU6050 is in sleep mode, use wakeup()")
    # Read the six gyro output registers starting at GYRO_XOUT_H (0x43).
    raw = self.i2c_read_register(0x43, 6)
    # Bug fix: the gyro registers hold *signed* 16-bit two's-complement
    # values; unpacking with '>HHH' (unsigned) mangled negative rates.
    x, y, z = struct.unpack('>hhh', raw)
    # NOTE(review): these divisors look like accelerometer sensitivities;
    # the MPU-6050 datasheet lists gyro sensitivities of 131/65.5/32.8/16.4
    # LSB per deg/s -- confirm the intended scaling before changing them.
    scales = {self.RANGE_GYRO_250DEG: 16384, self.RANGE_GYRO_500DEG: 8192, self.RANGE_GYRO_1000DEG: 4096, self.RANGE_GYRO_2000DEG: 2048}
    scale = scales[self.gyro_range]
    return x / scale, y / scale, z / scale
def validate(template_dict, schema=None):
    """Is this a valid SAM template dictionary?

    :param dict template_dict: Data to be validated
    :param dict schema: Optional dictionary containing JSON Schema
        representing a SAM template
    :return: Empty string if there are no validation errors in the template,
        otherwise the stringified validation error.
    """
    if not schema:
        schema = SamTemplateValidator._read_schema()
    try:
        jsonschema.validate(template_dict, schema)
    except ValidationError as ex:
        # The caller expects error text, not an exception, so the
        # validation failure is swallowed and stringified here.
        return str(ex)
    return ""
def add_sibling(self, pos=None, arc_element_type=None, description=None, story_element_node=None, **kwargs):
    '''Overrides the default `treebeard` function, adding additional integrity checks.

    :param arc_element_type: type string of the element; milestone types
        get uniqueness/placement validation.
    :raises ArcGenerationError: if a milestone would become the arc root.
    :raises ArcIntegrityError: if the same milestone already exists in the arc.
    '''
    # Bug fix: arc_element_type defaults to None, and ``'mile' in None``
    # raises TypeError -- only run milestone checks for a non-empty type.
    if arc_element_type and 'mile' in arc_element_type:
        if self.get_depth() == 1:
            raise ArcGenerationError('Milestones are invalid to be the root')
        nodes_to_check = self.get_root().get_descendants().filter(arc_element_type__icontains='mile')
        for node in nodes_to_check:
            if node.arc_element_type == arc_element_type:
                raise ArcIntegrityError('You cannot have two of the same milestone in the same arc.')
    return super().add_sibling(pos=pos, arc=self.arc, arc_element_type=arc_element_type, description=description, story_element_node=story_element_node)
def downloader(self):
    '''The download thread: drain ``self._download_pending`` one tile at a
    time, writing successful downloads to the on-disk tile cache and marking
    failures as unavailable. Clears ``self._download_thread`` on exit.
    '''
    while self.tiles_pending() > 0:
        # Throttle requests to be polite to the tile server.
        time.sleep(self.tile_delay)
        keys = sorted(self._download_pending.keys())
        # work out which one to download next, choosing by request_time
        tile_info = self._download_pending[keys[0]]
        for key in keys:
            if self._download_pending[key].request_time > tile_info.request_time:
                tile_info = self._download_pending[key]
        url = tile_info.url(self.service)
        path = self.tile_to_path(tile_info)
        key = tile_info.key()
        try:
            if self.debug:
                print("Downloading %s [%u left]" % (url, len(keys)))
            req = url_request(url)
            if url.find('google') != -1:
                # Google tile servers expect a maps referer header.
                req.add_header('Referer', 'https://maps.google.com/')
            resp = url_open(req)
            headers = resp.info()
        except url_error as e:
            # print('Error loading %s' % url)
            # Network failure: remember the tile as unavailable and move on.
            if not key in self._tile_cache:
                self._tile_cache[key] = self._unavailable
            self._download_pending.pop(key)
            if self.debug:
                print("Failed %s: %s" % (url, str(e)))
            continue
        if 'content-type' not in headers or headers['content-type'].find('image') == -1:
            # Server answered, but not with an image (e.g. an error page).
            if not key in self._tile_cache:
                self._tile_cache[key] = self._unavailable
            self._download_pending.pop(key)
            if self.debug:
                print("non-image response %s" % url)
            continue
        else:
            img = resp.read()
        # see if its a blank/unavailable tile
        md5 = hashlib.md5(img).hexdigest()
        if md5 in BLANK_TILES:
            if self.debug:
                print("blank tile %s" % url)
            if not key in self._tile_cache:
                self._tile_cache[key] = self._unavailable
            self._download_pending.pop(key)
            continue
        mp_util.mkdir_p(os.path.dirname(path))
        # Write to a temp file then rename so readers never see a partial tile.
        h = open(path + '.tmp', 'wb')
        h.write(img)
        h.close()
        try:
            os.unlink(path)
        except Exception:
            pass
        os.rename(path + '.tmp', path)
        self._download_pending.pop(key)
    # Signal that no download thread is running any more.
    self._download_thread = None
def demeshgrid(arr):
    """Turn an ndarray created by a meshgrid back into a 1D array.

    Parameters
    ----------
    arr : array of dimension > 1
        This array should have been created by a meshgrid.
    """
    ndim = len(arr.shape)
    for axis in range(ndim):
        # Compare two slices along every *other* axis; if they agree, the
        # array only varies along ``axis`` and that line is the original 1D
        # coordinate vector.
        first = [0] * ndim
        second = [1] * ndim
        first[axis] = slice(None)
        second[axis] = slice(None)
        if (arr[tuple(first)] == arr[tuple(second)]).all():
            return arr[tuple(first)]
def volume(self):
    """Volume of the simplex (|det| of the augmented matrix over dim!)."""
    determinant = np.linalg.det(self._aug)
    return abs(determinant) / math.factorial(self.simplex_dim)
def _validate_class(self, cl):
    """Return a ValidationWarning if class `cl` is not found in the ontology,
    otherwise None."""
    if cl in self.schema_def.attributes_by_class:
        return None
    search_string = self._build_search_string(cl)
    err = self.err("{0} - invalid class", self._field_name_from_uri(cl),
                   search_string=search_string)
    return ValidationWarning(ValidationResult.ERROR, err['err'], err['line'], err['num'])
def _get_relative_base_path ( filename , path_to_check ) :
"""Extracts the relative mod path of the file to import from
Check if a file is within the passed in path and if so , returns the
relative mod path from the one passed in .
If the filename is no in path _ to _ check , returns None
Note this function will look for both abs and realpath of the file ,
this allows to find the relative base path even if the file is a
symlink of a file in the passed in path
Examples :
_ get _ relative _ base _ path ( " / a / b / c / d . py " , " / a / b " ) - > [ " c " , " d " ]
_ get _ relative _ base _ path ( " / a / b / c / d . py " , " / dev " ) - > None""" | importable_path = None
path_to_check = os . path . normcase ( path_to_check )
abs_filename = os . path . abspath ( filename )
if os . path . normcase ( abs_filename ) . startswith ( path_to_check ) :
importable_path = abs_filename
real_filename = os . path . realpath ( filename )
if os . path . normcase ( real_filename ) . startswith ( path_to_check ) :
importable_path = real_filename
if importable_path :
base_path = os . path . splitext ( importable_path ) [ 0 ]
relative_base_path = base_path [ len ( path_to_check ) : ]
return [ pkg for pkg in relative_base_path . split ( os . sep ) if pkg ]
return None |
def reprompt(text=None, ssml=None, attributes=None):
    """Reprompt the user, keeping the session open.

    Convenience wrapper around :py:func:`alexandra.util.respond` for the
    common reprompt case. Provide either `text` or `ssml` if any speech
    output is desired.

    :param text: Plain text speech output
    :param ssml: Speech output in SSML format
    :param attributes: Dictionary of attributes to store in the current session
    """
    return respond(
        reprompt_text=text,
        reprompt_ssml=ssml,
        attributes=attributes,
        end_session=False,
    )
def writeInfoLine(self, stream, fromUUID, size):
    """Write one tab-separated line of diff information to *stream*.

    Silently does nothing when *size* or *fromUUID* is missing; warns and
    skips when *size* is not an integer.
    """
    if fromUUID is None or size is None:
        return
    if not isinstance(size, int):
        logger.warning("Bad size: %s", size)
        return
    stream.write("%s\t%s\t%d\n" % (self.uuid, fromUUID, size))
def makedirs(d):
    """Create directories recursively if they don't exist.

    ``os.makedirs(exist_ok=True)`` is not available in Python 2, so fall
    back to ``distutils.dir_util.mkpath`` there.
    """
    if sys.version_info[0] >= 3:
        os.makedirs(d, exist_ok=True)
    else:
        from distutils.dir_util import mkpath
        mkpath(d)
def get_s3_file_tree(s3, bucket, prefix):
    """Overcome the s3 response limit and return a NestedDict tree of paths.

    The NestedDict object also allows the user to search by the ends of a
    path. The tree mimics a file directory structure, with the leaf nodes
    being the full unbroken key. For example, 'path/to/file.txt' would be
    retrieved by

        ret['path']['to']['file.txt']['key']

    The NestedDict object returned also has the capability to get paths
    that lead to a certain value. So if you wanted all paths that lead to
    something called 'file.txt', you could use

        ret.get_paths('file.txt')

    For more details, see the NestedDict docs.
    """
    def get_some_keys(keys, marker=None):
        # One page of list_objects; `marker` resumes after the given key.
        if marker:
            relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix, Marker=marker)
        else:
            relevant_files = s3.list_objects(Bucket=bucket, Prefix=prefix)
        # Skip the marker key itself, which the API repeats on resume.
        keys.extend([entry['Key'] for entry in relevant_files['Contents'] if entry['Key'] != marker])
        return relevant_files['IsTruncated']
    file_keys = []
    marker = None
    # Keep paging while the response reports it was truncated.
    while get_some_keys(file_keys, marker):
        marker = file_keys[-1]
    file_tree = NestedDict()
    pref_path = prefix.split('/')[:-1]
    # avoid the trailing empty str.
    for key in file_keys:
        full_path = key.split('/')
        # Drop the shared prefix components; build the tree from the rest.
        relevant_path = full_path[len(pref_path):]
        curr = file_tree
        for step in relevant_path:
            curr = curr[step]
        # Leaf stores the full, unbroken S3 key.
        curr['key'] = key
    return file_tree
def encrypted_json(self):
    """Return an AES-encrypted JSON serialization of ``self.instance``."""
    serialized = serialize(objects=[self.instance])
    return Cryptor().aes_encrypt(serialized, LOCAL_MODE)
def get_run_details_json(self):
    """Retrieve the JSON object for the stats in the file named
    run_details.json in the project specified by ``self.dx_project_id``.

    Returns:
        JSON object of the run details.
    """
    filename = "run_details.json"
    file_id = dxpy.find_one_data_object(
        more_ok=False,
        zero_ok=True,
        project=self.dx_project_id,
        name=filename,
    )["id"]
    # dxpy.download_dxfile(show_progress=True, dxid=file_id,
    #                      project=self.dx_project_id, filename=output_name)
    return json.loads(dxpy.open_dxfile(dxid=file_id).read())
def add_molecule(self, molecule, atom_types=None, charges=None, split=True):
    """Add the graph of the molecule to the data structure.

    The molecular graph is estimated from the molecular geometry based on
    interatomic distances.

    Argument:
        | ``molecule`` -- a Molecule instance

    Optional arguments:
        | ``atom_types`` -- a list with atom type strings
        | ``charges`` -- the net atom charges
        | ``split`` -- when True, the molecule is split into disconnected
          molecules [default=True]
    """
    graph = MolecularGraph.from_geometry(molecule)
    self.add_molecular_graph(graph, atom_types, charges, split, molecule)
def create_postgresql_psycopg2cffi(self, **kwargs):
    """Create an engine for PostgreSQL via the psycopg2cffi driver.

    :rtype: Engine
    """
    conn_string = self._ccs(self.DialectAndDriver.psql_psycopg2cffi)
    return self._ce(conn_string, **kwargs)
def get_retention_policy(database, name, **client_args):
    '''Get an existing retention policy.

    database
        Name of the database for which the retention policy was defined.

    name
        Name of the retention policy.

    CLI Example:

    .. code-block:: bash

        salt '*' influxdb.get_retention_policy metrics default
    '''
    client = _client(**client_args)
    # Return the first policy whose name matches; empty dict when missing.
    for policy in client.get_list_retention_policies(database):
        if policy.get('name') == name:
            return policy
    return {}
def get_character(self, id):
    """Fetch a single character by id.

    get /v1/public/characters

    :param id: ID of Character
    :type id: int
    :returns: CharacterDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> cdw = m.get_character(1009718)
    >>> print cdw.data.count
    1
    >>> print cdw.data.results[0].name
    Wolverine
    """
    url = "%s/%s" % (Character.resource_url(), id)
    payload = json.loads(self._call(url).text)
    return CharacterDataWrapper(self, payload)
def _methodInTraceback ( self , name , traceback ) :
'''Returns boolean whether traceback contains method from this instance''' | foundMethod = False
for frame in self . _frames ( traceback ) :
this = frame . f_locals . get ( 'self' )
if this is self and frame . f_code . co_name == name :
foundMethod = True
break
return foundMethod |
def run_index_cmd(name, cmd):
    """Run *cmd* in a shell; show its output on stderr if it fails.

    :param name: human-readable name of the index, used in messages
    :param cmd: shell command string that builds the index
    """
    sys.stderr.write("Creating {} index...\n".format(name))
    # Create index
    p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        sys.stderr.write("Index for {} failed\n".format(name))
        # Bug fix: communicate() returns bytes; writing bytes to the text
        # stream sys.stderr raises TypeError on Python 3 -- decode first.
        sys.stderr.write(stdout.decode(errors='replace'))
        sys.stderr.write(stderr.decode(errors='replace'))
def setSortData(self, column, data):
    """Set the sorting information for the given column to *data*.

    :param column: <int>
    :param data: <variant>
    """
    wrapped = wrapVariant(data)
    self.setData(column, self.SortRole, wrapped)
def _parse_udf_file_entry(self, abs_file_entry_extent, icb, parent):
    # type: (int, udfmod.UDFLongAD, Optional[udfmod.UDFFileEntry]) -> Optional[udfmod.UDFFileEntry]
    '''An internal method to parse a single UDF File Entry and return the
    corresponding object.

    Parameters:
     abs_file_entry_extent - The extent number the File Entry starts at.
     icb - The ICB object for the data.
     parent - The parent of the UDF File Entry.
    Returns:
     A UDF File Entry object corresponding to the on-disk File Entry, or
     None when the on-disk entry is all zeros.
    '''
    self._seek_to_extent(abs_file_entry_extent)
    icbdata = self._cdfp.read(icb.extent_length)
    if all(v == 0 for v in bytearray(icbdata)):
        # We have seen ISOs in the wild (Windows 2008 Datacenter Enterprise
        # Standard SP2 x86 DVD) where the UDF File Identifier points to a
        # UDF File Entry of all zeros. In those cases, we just keep the
        # File Identifier, and keep the UDF File Entry blank.
        return None
    desc_tag = udfmod.UDFTag()
    desc_tag.parse(icbdata, icb.log_block_num)
    # Tag identifier 261 designates a File Entry in the UDF spec.
    if desc_tag.tag_ident != 261:
        raise pycdlibexception.PyCdlibInvalidISO('UDF File Entry Tag identifier not 261')
    file_entry = udfmod.UDFFileEntry()
    file_entry.parse(icbdata, abs_file_entry_extent, parent, desc_tag)
    return file_entry
async def observer_evaluate(self, message):
    """Execute observer evaluation on the worker or throttle.

    When throttling is enabled, a cache counter per observer decides
    whether to evaluate immediately, schedule a delayed poll, or skip
    (because a poll is already scheduled).
    """
    observer_id = message['observer']
    throttle_rate = get_queryobserver_settings()['throttle_rate']
    if throttle_rate <= 0:
        # Throttling disabled: evaluate right away.
        await self._evaluate(observer_id)
        return
    cache_key = throttle_cache_key(observer_id)
    try:
        # incr raises ValueError when the key does not exist yet.
        count = cache.incr(cache_key)
        # Ignore if delayed observer already scheduled.
        if count == 2:
            await self.channel_layer.send(CHANNEL_MAIN, {'type': TYPE_POLL, 'observer': observer_id, 'interval': throttle_rate, }, )
    except ValueError:
        # First occurrence inside this throttle window: seed the counter.
        count = cache.get_or_set(cache_key, default=1, timeout=throttle_rate)
        # Ignore if cache was set and increased in another thread.
        if count == 1:
            await self._evaluate(observer_id)
def present(name, profile="github", **kwargs):
    '''Ensure a user is present.

    .. code-block:: yaml

        ensure user test is present in github:
          github.present:
            - name: 'gitexample'

    The following parameters are required:

    name
        This is the github handle of the user in the organization
    '''
    ret = {'name': name, 'changes': {}, 'result': None, 'comment': ''}
    target = __salt__['github.get_user'](name, profile=profile, **kwargs)
    # No valid github handle at all.
    if not target:
        ret['result'] = False
        ret['comment'] = 'Couldnt find user {0}'.format(name)
        return ret
    # get_user returns True when the user is already a member.
    if isinstance(target, bool) and target:
        ret['comment'] = 'User {0} is already in the org '.format(name)
        ret['result'] = True
        return ret
    if target.get('in_org', False) or target.get('membership_state') == 'pending':
        ret['comment'] = 'User {0} has already been invited.'.format(name)
        ret['result'] = True
        return ret
    # Valid handle, not yet in the org and not pending: add the user.
    if __opts__['test']:
        ret['comment'] = 'User {0} will be added to the org'.format(name)
        return ret
    result = __salt__['github.add_user'](name, profile=profile, **kwargs)
    if result:
        ret['changes'].setdefault('old', None)
        ret['changes'].setdefault('new', 'User {0} exists in the org now'.format(name))
        ret['result'] = True
    else:
        ret['result'] = False
        ret['comment'] = 'Failed to add user {0} to the org'.format(name)
    return ret
def set_goterm(self, go2obj):
    """Set goterm and copy the GOTerm's name, depth and namespace.

    Does nothing when this record's GO id is absent from *go2obj*.
    """
    if self.GO not in go2obj:
        return
    goterm = go2obj[self.GO]
    self.goterm = goterm
    self.name = goterm.name
    self.depth = goterm.depth
    self.NS = self.namespace2NS[goterm.namespace]
def _set_fan(self, v, load=False):
    """Setter method for fan, mapped from YANG variable /system_monitor/fan (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_fan is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_fan()
    directly.

    NOTE: this is pyangbind-generated code; the long constructor call and
    error payload below are kept verbatim by convention.
    """
    if hasattr(v, "_utype"):
        # Unwrap a typed value into its native representation first.
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=fan.fan, is_container='container', presence=False, yang_name="fan", rest_name="fan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold and alert setting for \n component:FAN', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({'error-string': """fan must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=fan.fan, is_container='container', presence=False, yang_name="fan", rest_name="fan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure threshold and alert setting for \n component:FAN', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-system-monitor', defining_module='brocade-system-monitor', yang_type='container', is_config=True)""", })
    self.__fan = t
    if hasattr(self, '_set'):
        self._set()
def setup(self, app):
    """Initialize the plugin: warn on insecure secrets, install the default
    user loader, and hook session saving/loading into the app."""
    super().setup(app)
    if self.cfg.secret == 'InsecureSecret':
        app.logger.warning('Use insecure secret key. Change SESSION_SECRET option in configuration.')
    # Default user loader is the identity function.
    # NOTE(review): asyncio.coroutine is deprecated (removed in 3.11) --
    # confirm target Python versions before relying on this.
    self._user_loader = asyncio.coroutine(lambda id_: id_)
    # noqa
    # Persist the session when each response is prepared.
    app.on_response_prepare.append(self.save)
    if self.cfg.auto_load:
        app.middlewares.append(self._middleware)
def argv(self):
    """Command to start kernels.

    Returns the argv list used to launch a spyder kernel, picking the
    configured Python interpreter (with validation and fallback) and
    preferring pythonw.exe on Windows.
    """
    # Python interpreter used to start kernels
    if CONF.get('main_interpreter', 'default'):
        pyexec = get_python_executable()
    else:
        # Avoid IPython adding the virtualenv on which Spyder is running
        # to the kernel sys.path
        os.environ.pop('VIRTUAL_ENV', None)
        pyexec = CONF.get('main_interpreter', 'executable')
        if not is_python_interpreter(pyexec):
            # Configured interpreter is invalid: fall back to the default
            # and reset the configuration accordingly.
            pyexec = get_python_executable()
            CONF.set('main_interpreter', 'executable', '')
            CONF.set('main_interpreter', 'default', True)
            CONF.set('main_interpreter', 'custom', False)
    # Fixes Issue #3427
    if os.name == 'nt':
        dir_pyexec = osp.dirname(pyexec)
        pyexec_w = osp.join(dir_pyexec, 'pythonw.exe')
        if osp.isfile(pyexec_w):
            pyexec = pyexec_w
    # Command used to start kernels
    kernel_cmd = [pyexec, '-m', 'spyder_kernels.console', '-f', '{connection_file}']
    return kernel_cmd
def to_modify(self, uid):
    '''Render the edit page for the wiki page identified by *uid*.'''
    page_info = MWiki.get_by_uid(uid)
    self.render(
        'wiki_page/page_edit.html',
        postinfo=page_info,
        kwd={'pager': ''},
        cfg=CMS_CFG,
        userinfo=self.userinfo,
    )
def set(self, name, value, ex=None, px=None, nx=False, xx=False):
    """Set a key's value, creating it if absent, overwriting otherwise.

    :param name: key
    :param value: value
    :param ex: expire time in seconds
    :param px: expire time in milliseconds
    :param nx: if True, only perform the set when ``name`` does not exist
    :param xx: if True, only perform the set when ``name`` already exists
    """
    # Bug fix: ``ex`` was documented and accepted but never forwarded to
    # the client, so second-based expiry silently had no effect.
    self.client.set(name=name, value=value, ex=ex, px=px, nx=nx, xx=xx)
def check_directory_paths(self, *args):
    """Ensure all arguments correspond to directories.

    :param args: candidate directory paths; ``None`` entries are skipped
    :returns: the arguments, unchanged
    :raises OSError: re-raised when ``check_directory_path`` rejects a path
    """
    # Original iterated enumerate() only to immediately discard the index.
    for path in args:
        if path is not None:
            try:
                self.check_directory_path(path)
            except OSError as ex:
                # logger.warn is a deprecated alias of logger.warning
                logger.warning(ex)
                raise
    return args
def _get_tau_vector(self, tau_mean, tau_std, imt_list):
    """Get the vectors of mean and variance of tau values corresponding to
    the specific model, returned as two dicts keyed by IMT."""
    model_limits = MAG_LIMS_KEYS[self.tau_model]
    self.magnitude_limits = model_limits["mag"]
    self.tau_keys = model_limits["keys"]
    tau_func = TAU_EXECUTION[self.tau_model]
    t_bar = {}
    t_std = {}
    for imt in imt_list:
        # One entry per magnitude limit, in order.
        t_bar[imt] = [tau_func(imt, mag, tau_mean)
                      for mag, key in zip(self.magnitude_limits, self.tau_keys)]
        t_std[imt] = [tau_func(imt, mag, tau_std)
                      for mag, key in zip(self.magnitude_limits, self.tau_keys)]
    return t_bar, t_std
def __gen_rel_anno_file(self, top_level_layer):
    """A rel annotation file contains edge (rel) attributes. It is e.g. used
    to annotate the type of a dependency relation (subj, obj etc.).

    See also: __gen_hierarchy_file()
    """
    paula_id = '{0}.{1}.{2}_{3}_rel'.format(top_level_layer, self.corpus_name, self.name, top_level_layer)
    E, tree = gen_paula_etree(paula_id)
    dominance_edges = select_edges_by(self.dg, layer=top_level_layer, edge_type=EdgeTypes.dominance_relation, data=True)
    # Map: source node -> target node -> edge attribute dict.
    dominance_dict = defaultdict(lambda: defaultdict(str))
    for source_id, target_id, edge_attrs in dominance_edges:
        # Skip edges originating from the artificial root node.
        if source_id != top_level_layer + ':root_node':
            dominance_dict[source_id][target_id] = edge_attrs
    # The rel file annotates edges declared in the hierarchy file.
    base_paula_id = self.paulamap['hierarchy'][top_level_layer]
    mflist = E('multiFeatList', {XMLBASE: base_paula_id + '.xml'})
    for source_id in dominance_dict:
        for target_id in dominance_dict[source_id]:
            rel_href = '#rel_{0}_{1}'.format(source_id, target_id)
            mfeat = E('multiFeat', {XLINKHREF: rel_href})
            edge_attrs = dominance_dict[source_id][target_id]
            for edge_attr in edge_attrs:
                if edge_attr not in IGNORED_EDGE_ATTRIBS:
                    mfeat.append(E('feat', {'name': edge_attr, 'value': edge_attrs[edge_attr]}))
            if self.human_readable:
                # adds edge label as a <!-- comment -->
                source_label = self.dg.node[source_id].get('label')
                target_label = self.dg.node[target_id].get('label')
                mfeat.append(Comment(u'{0} - {1}'.format(source_label, target_label)))
            mflist.append(mfeat)
    tree.append(mflist)
    self.files[paula_id] = tree
    self.file2dtd[paula_id] = PaulaDTDs.multifeat
    return paula_id
def get_model(self, model=None, **kwargs):
    """Filter in the 'model' context.

    :parameter str model: name of the model (optional)
    :parameter **kwargs: any other tags to do the filter
        (except model or context)
    :return: :class:`phoebe.parameters.parameters.ParameterSet`
    """
    # Build the filter tags without mutating the caller-visible kwargs.
    filter_tags = dict(kwargs)
    if model is not None:
        filter_tags['model'] = model
    filter_tags['context'] = 'model'
    return self.filter(**filter_tags)
def node_copy(node, nodefactory=Node):
    """Make a deep copy of the node."""
    # Recursively copy each child, then rebuild this node with a fresh
    # copy of its attribute dict so the copy shares no mutable state.
    children = []
    for child in node:
        children.append(node_copy(child, nodefactory))
    return nodefactory(node.tag, node.attrib.copy(), node.text, children)
def rectify(self, slitlet2d, resampling, inverse=False):
    """Rectify slitlet using computed transformation.

    Parameters
    ----------
    slitlet2d : numpy array
        Image containing the 2d slitlet image.
    resampling : int
        1: nearest neighbour, 2: flux preserving interpolation.
    inverse : bool
        If true, the inverse rectification transformation is
        employed.

    Returns
    -------
    slitlet2d_rect : numpy array
        Rectified slitlet image.
    """
    if resampling not in (1, 2):
        raise ValueError("Unexpected resampling value=" + str(resampling))

    # check that the image matches the expected bounding-box dimensions
    naxis2, naxis1 = slitlet2d.shape
    expected_naxis1 = self.bb_nc2_orig - self.bb_nc1_orig + 1
    expected_naxis2 = self.bb_ns2_orig - self.bb_ns1_orig + 1
    if naxis1 != expected_naxis1:
        raise ValueError("Unexpected slitlet2d_rect naxis1")
    if naxis2 != expected_naxis2:
        raise ValueError("Unexpected slitlet2d_rect naxis2")

    # select direct or inverse transformation coefficients
    if inverse:
        aij, bij = self.tti_aij, self.tti_bij
    else:
        aij, bij = self.ttd_aij, self.ttd_bij

    # rectify image
    slitlet2d_rect = rectify2d(image2d=slitlet2d, aij=aij, bij=bij,
                               resampling=resampling)

    # optional debugging display
    if abs(self.debugplot % 10) != 0:
        if inverse:
            self.ximshow_unrectified(slitlet2d_rect)
        else:
            self.ximshow_rectified(slitlet2d_rect)

    return slitlet2d_rect
def kill_all_processes(self, check_alive=True, allow_graceful=False):
    """Kill all of the processes.

    Note that this is slower than necessary because it calls kill, wait,
    kill, wait, ... instead of kill, kill, ..., wait, wait, ...

    Args:
        check_alive (bool): Raise an exception if any of the processes were
            already dead.
    """
    # Kill the raylet first.  This is important for suppressing errors at
    # shutdown because we give the raylet a chance to exit gracefully and
    # clean up its child worker processes.  If we were to kill the plasma
    # store (or Redis) first, that could cause the raylet to exit
    # ungracefully, leading to more verbose output from the workers.
    raylet = ray_constants.PROCESS_TYPE_RAYLET
    if raylet in self.all_processes:
        self._kill_process_type(raylet, check_alive=check_alive,
                                allow_graceful=allow_graceful)

    # Iterate over a snapshot of the keys because _kill_process_type
    # mutates the dictionary while we loop over it.
    for process_type in list(self.all_processes):
        self._kill_process_type(process_type, check_alive=check_alive,
                                allow_graceful=allow_graceful)
def unpack_values(format_string, stream, verifier=None):
    """Helper function to unpack struct data from a stream and update the signature verifier.

    :param str format_string: Struct format string
    :param stream: Source data stream
    :type stream: io.BytesIO
    :param verifier: Signature verifier object
    :type verifier: aws_encryption_sdk.internal.crypto.Verifier
    :returns: Unpacked values
    :rtype: tuple
    :raises SerializationError: if the stream contents cannot be unpacked
    """
    try:
        message_bytes = stream.read(struct.calcsize(format_string))
        if verifier:
            verifier.update(message_bytes)
        values = struct.unpack(format_string, message_bytes)
    except struct.error as error:
        # Chain the original struct.error ("from error") so the underlying
        # cause is preserved in the traceback instead of being discarded.
        raise SerializationError(
            "Unexpected deserialization error", type(error), error.args
        ) from error
    return values
def cmu_mocap_49_balance(data_set='cmu_mocap'):
    """Load CMU subject 49's one legged balancing motion that was used by Alvarez, Luengo and Lawrence at AISTATS 2009."""
    # motions 18/19 are used for training, motion 20 for testing
    data = cmu_mocap('49', train_motions=['18', '19'], test_motions=['20'],
                     sample_every=4, data_set=data_set)
    data['info'] = "One legged balancing motions from CMU data base subject 49. As used in Alvarez, Luengo and Lawrence at AISTATS 2009. It consists of " + data['info']
    return data
def run(self, args):
    """Gives user permission based on auth_role arg and sends email to that user.

    :param args: Namespace arguments parsed from the command line
    """
    email = args.email            # email of the person to share with (may be None)
    username = args.username      # username of the person to share with (None if email given)
    force_send = args.resend      # resend flag: force sending the email again
    auth_role = args.auth_role    # authorization role (project permissions) to grant
    # contents of the message file are included in the share email
    message = read_argument_file_contents(args.msg_file)

    print("Sharing project.")
    to_user = self.remote_store.lookup_or_register_user_by_email_or_username(email, username)
    try:
        project = self.fetch_project(args, must_exist=True, include_children=False)
        dest_email = self.service.share(project, to_user, force_send, auth_role, message)
        print("Share email message sent to " + dest_email)
    except D4S2Error as ex:
        # warnings are reported to the user; real errors propagate
        if ex.warning:
            print(ex.message)
        else:
            raise
def construct_polar_mesh_for_colormesh(r_values, theta_values):
    """Return a polar mesh for matplotlib.pyplot.pcolormesh() in Cartesian coordinates.

    Converts polar coordinates of the data points into a mesh for
    colormesh.  The polar coordinates are assumed to be equidistant in
    the sense that ``r_values`` and ``theta_values`` are equally spaced.
    """
    radii, angles = augment_polar_mesh_for_colormesh(r_values, theta_values)
    # polar -> Cartesian conversion
    return radii * np.cos(angles), radii * np.sin(angles)
def filter_by(cls, **kwargs):
    """Same as SQLAlchemy's filter_by. Additionally this accepts
    two special keyword arguments `limit` and `reverse` for limiting
    the results and reversing the order respectively.

    Args:
        **kwargs: filter parameters

    Examples:
        >>> user = User.filter_by(email="new@x.com")
        >>> shipments = Shipment.filter_by(country="India", limit=3, reverse=True)
    """
    # Strip the two special keywords before they reach SQLAlchemy.
    max_results = kwargs.pop('limit', None)
    newest_first = kwargs.pop('reverse', False)

    query = cls.query.filter_by(**kwargs)
    if newest_first:
        query = query.order_by(cls.id.desc())
    if max_results:
        query = query.limit(max_results)
    return query
def flush(self):
    """Refresh the session information, mainly ``ck`` and ``user_alias``."""
    # without the "dbcl2" login cookie there is no session to refresh
    if 'dbcl2' not in self.cookies:
        return
    response = self.req(API_ACCOUNT_HOME)
    if RE_SESSION_EXPIRE.search(response.url):
        # the server says the session is no longer valid
        return self.expire()
    self.cookies.update(dict(response.cookies))
    self.user_alias = slash_right(response.url)
    self.logger.debug('flush with user_alias <%s>' % self.user_alias)
    return
def get_relationship_form(self, *args, **kwargs):
    """Pass through to provider RelationshipAdminSession.get_relationship_form_for_update.

    Dispatches to ``get_relationship_form_for_create`` when the call looks
    like a create request (the last positional argument is a list of record
    types, or ``relationship_record_types`` was passed as a keyword), and
    to ``get_relationship_form_for_update`` otherwise.
    """
    # Implemented from kitosid template for -
    # osid.resource.ResourceAdminSession.get_resource_form_for_update
    # This method might be a bit sketchy. Time will tell.
    # Guard against empty ``args`` so a purely keyword-based call no longer
    # raises IndexError on ``args[-1]``.
    if (args and isinstance(args[-1], list)) or 'relationship_record_types' in kwargs:
        return self.get_relationship_form_for_create(*args, **kwargs)
    else:
        return self.get_relationship_form_for_update(*args, **kwargs)
def newReference(self, name):
    """Creation of a new reference node."""
    handle = libxml2mod.xmlNewReference(self._o, name)
    # the C binding returns None on failure
    if handle is None:
        raise treeError('xmlNewReference() failed')
    return xmlNode(_obj=handle)
def hash_files(method, *files):
    """Calculate the hexadecimal digest of one or more local files.

    :param method: The hash method (a string, given to :func:`hashlib.new()`).
    :param files: The pathname(s) of file(s) to hash (zero or more strings).
    :returns: The calculated hex digest (a string).
    """
    digest = hashlib.new(method)
    for pathname in files:
        with open(pathname, 'rb') as handle:
            # feed the hash in fixed-size chunks so large files never have
            # to be held in memory all at once
            for chunk in iter(lambda: handle.read(4096), b''):
                digest.update(chunk)
    return digest.hexdigest()
def derivative(self, x):
    """Return the derivative at ``x``."""
    if self.is_linear:
        # a linear operator is its own derivative
        return self
    # product rule: (f * g)'(x) = g(x) * f'(x) + f(x) * g'(x)
    term_a = self.right(x) * self.left.derivative(x)
    term_b = self.left(x) * self.right.derivative(x)
    return term_a + term_b
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.