signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def predict(self, data, num_iteration=-1, raw_score=False, pred_leaf=False, pred_contrib=False, data_has_header=False, is_reshape=True):
    """Predict logic.

    Parameters
    ----------
    data : string, numpy array, pandas DataFrame, H2O DataTable's Frame or scipy.sparse
        Data source for prediction.
        When data type is string, it represents the path of txt file.
    num_iteration : int, optional (default=-1)
        Iteration used for prediction.
    raw_score : bool, optional (default=False)
        Whether to predict raw scores.
    pred_leaf : bool, optional (default=False)
        Whether to predict leaf index.
    pred_contrib : bool, optional (default=False)
        Whether to predict feature contributions.
    data_has_header : bool, optional (default=False)
        Whether data has header. Used only for txt data.
    is_reshape : bool, optional (default=True)
        Whether to reshape to (nrow, ncol).

    Returns
    -------
    result : numpy array
        Prediction result.
    """
    if isinstance(data, Dataset):
        raise TypeError("Cannot use Dataset instance for prediction, please use raw data instead")
    # NOTE(review): presumably a pass-through for non-pandas inputs --
    # confirm against _data_from_pandas.
    data = _data_from_pandas(data, None, None, self.pandas_categorical)[0]
    # Select the C API prediction mode; later flags take precedence over
    # earlier ones (pred_contrib wins over pred_leaf wins over raw_score).
    predict_type = C_API_PREDICT_NORMAL
    if raw_score:
        predict_type = C_API_PREDICT_RAW_SCORE
    if pred_leaf:
        predict_type = C_API_PREDICT_LEAF_INDEX
    if pred_contrib:
        predict_type = C_API_PREDICT_CONTRIB
    int_data_has_header = 1 if data_has_header else 0
    # Cap the requested iteration count at the number of trained iterations.
    if num_iteration > self.num_total_iteration:
        num_iteration = self.num_total_iteration
    if isinstance(data, string_type):
        # File input: the native library writes predictions into a temp
        # file, which is then parsed back (one line per row, tab-separated).
        with _TempFile() as f:
            _safe_call(_LIB.LGBM_BoosterPredictForFile(self.handle, c_str(data), ctypes.c_int(int_data_has_header), ctypes.c_int(predict_type), ctypes.c_int(num_iteration), c_str(self.pred_parameter), c_str(f.name)))
            lines = f.readlines()
            nrow = len(lines)
            preds = [float(token) for line in lines for token in line.split('\t')]
            preds = np.array(preds, dtype=np.float64, copy=False)
    elif isinstance(data, scipy.sparse.csr_matrix):
        preds, nrow = self.__pred_for_csr(data, num_iteration, predict_type)
    elif isinstance(data, scipy.sparse.csc_matrix):
        preds, nrow = self.__pred_for_csc(data, num_iteration, predict_type)
    elif isinstance(data, np.ndarray):
        preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
    elif isinstance(data, list):
        try:
            data = np.array(data)
        except BaseException:
            raise ValueError('Cannot convert data list to numpy array.')
        preds, nrow = self.__pred_for_np2d(data, num_iteration, predict_type)
    elif isinstance(data, DataTable):
        preds, nrow = self.__pred_for_np2d(data.to_numpy(), num_iteration, predict_type)
    else:
        # Last resort: try to coerce unknown input types to a CSR matrix.
        try:
            warnings.warn('Converting data to scipy sparse matrix.')
            csr = scipy.sparse.csr_matrix(data)
        except BaseException:
            raise TypeError('Cannot predict data for type {}'.format(type(data).__name__))
        preds, nrow = self.__pred_for_csr(csr, num_iteration, predict_type)
    if pred_leaf:
        # Leaf indices are integral.
        preds = preds.astype(np.int32)
    if is_reshape and preds.size != nrow:
        # Multi-output predictions come back flat; fold to (nrow, ncol).
        if preds.size % nrow == 0:
            preds = preds.reshape(nrow, -1)
        else:
            raise ValueError('Length of predict result (%d) cannot be divide nrow (%d)' % (preds.size, nrow))
    return preds
def add(self, name, value, bitmask=DEFMASK):
    """Register a new member on this enum.

    Args:
        name: Name of the member.
        value: Value of the member.
        bitmask: Bitmask; only meaningful when the enum is a bitfield.
    """
    _add_enum_member(self._eid, name, value, bitmask)
def get_request_token(self, method='GET', decoder=parse_utf8_qsl, key_token='oauth_token', key_token_secret='oauth_token_secret', **kwargs):
    """Return a request token pair.

    :param method: HTTP method to use, defaults to ``GET``.
    :type method: str
    :param decoder: Function used to parse the response content; should
        return a dictionary.
    :type decoder: func
    :param key_token: Key the request token is decoded by, defaults to
        ``'oauth_token'``.
    :type key_token: str
    :param key_token_secret: Key the request token secret is decoded by,
        defaults to ``'oauth_token_secret'``.
    :type key_token_secret: str
    :param kwargs: Optional arguments, same as Requests.
    :type kwargs: dict
    """
    response = self.get_raw_request_token(method=method, **kwargs)
    token, secret = process_token_request(response, decoder, key_token, key_token_secret)
    return token, secret
def get(self, request, *args, **kwargs):
    """Django view GET handler.

    Adds items of ``extra_context``, crumbs and grid to the context.

    Args:
        request: Django's request object.
        *args: request args.
        **kwargs: request kwargs.

    Returns:
        response: render to response with context.
    """
    ctx = self.get_context_data(**kwargs)
    ctx.update(self.extra_context)
    ctx['crumbs'] = self.get_crumbs()
    ctx['title'] = self.title
    # Flag whether django-suit is installed so templates can adapt.
    ctx['suit'] = 'suit' in settings.INSTALLED_APPS
    if self.grid and ctx.get('dashboard_grid') is None:
        ctx['dashboard_grid'] = self.grid
    return self.render_to_response(ctx)
def heightmap_new(w: int, h: int, order: str = "C") -> np.ndarray:
    """Return a new numpy.ndarray formatted for use with heightmap functions.

    `w` and `h` are the width and height of the array.
    `order` is given to the new NumPy array, it can be 'C' or 'F'.

    You can pass a NumPy array to any heightmap function as long as all the
    following are true:
    * The array is 2 dimensional.
    * The array has the C_CONTIGUOUS or F_CONTIGUOUS flag.
    * The array's dtype is :any:`dtype.float32`.

    The returned NumPy array will fit all these conditions.

    .. versionchanged:: 8.1
        Added the `order` parameter.
    """
    if order == "C":
        shape = (h, w)
    elif order == "F":
        # Fortran order swaps the axes so that indexing stays [x, y].
        shape = (w, h)
    else:
        raise ValueError("Invalid order parameter, should be 'C' or 'F'.")
    return np.zeros(shape, np.float32, order=order)
def list_media_services_rg(access_token, subscription_id, rgname):
    '''List the media services in a resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response. JSON body.
    '''
    endpoint = (get_rm_endpoint() +
                '/subscriptions/' + subscription_id +
                '/resourceGroups/' + rgname +
                '/providers/microsoft.media/mediaservices?api-version=' + MEDIA_API)
    return do_get(endpoint, access_token)
def _create_equivalence_transform(equiv):
    """Compute an equivalence transformation that transforms this compound
    to another compound's coordinate system.

    Parameters
    ----------
    equiv : np.ndarray, shape=(n, 3), dtype=float
        Array of equivalent points.

    Returns
    -------
    T : CoordinateTransform
        Transform that maps this point cloud to the other point cloud's
        coordinate system.
    """
    # Local import -- presumably to avoid a circular import with
    # mbuild.compound; confirm before moving to module level.
    from mbuild.compound import Compound
    # Accumulators for corresponding point sets; start as empty (0, 3) arrays.
    self_points = np.array([])
    self_points.shape = (0, 3)
    other_points = np.array([])
    other_points.shape = (0, 3)
    for pair in equiv:
        if not isinstance(pair, tuple) or len(pair) != 2:
            raise ValueError('Equivalence pair not a 2-tuple')
        if not (isinstance(pair[0], Compound) and isinstance(pair[1], Compound)):
            raise ValueError('Equivalence pair type mismatch: pair[0] is a {0} ' 'and pair[1] is a {1}'.format(type(pair[0]), type(pair[1])))
        # TODO: vstack is slow, replace with list concatenation
        if not pair[0].children:
            # Leaf compounds contribute their own position directly.
            self_points = np.vstack([self_points, pair[0].pos])
            other_points = np.vstack([other_points, pair[1].pos])
        else:
            # Containers contribute the positions of all their particles
            # (including ports).
            for atom0 in pair[0]._particles(include_ports=True):
                self_points = np.vstack([self_points, atom0.pos])
            for atom1 in pair[1]._particles(include_ports=True):
                other_points = np.vstack([other_points, atom1.pos])
    T = RigidTransform(self_points, other_points)
    return T
def set_hs_color(self, hue: float, saturation: float):
    """Set a fixed color and turn off effects so the color is visible.

    :param hue: Hue component (range 0-1).
    :param saturation: Saturation component (range 0-1). Values near 0
        yield white; other values are interpreted as 100% saturation.

    The inputs are the components of an HSV color without the
    value/brightness component. Example colors:
    * Green: set_hs_color(120/360, 1)
    * Blue: set_hs_color(240/360, 1)
    * Yellow: set_hs_color(60/360, 1)
    * White: set_hs_color(0, 0)
    """
    self.turn_off_effect()
    if saturation < 0.1:
        # Near-zero saturation maps to the device's dedicated white value.
        hm_color = 200
    else:
        clamped_hue = min(max(hue, 0), 1)
        hm_color = int(round(clamped_hue * 199))
    self.setValue(key="COLOR", channel=self._color_channel, value=hm_color)
def unload_plugin(name, category=None):
    """Remove a single plugin from the registry.

    Parameters
    ----------
    name : str
        Plugin name.
    category : str or None
        Plugin category. When None, the plugin is removed from every
        category it appears in.
    """
    if category is not None:
        _all_plugins[category].pop(name)
        return
    for plugin_category in _all_plugins:
        if name in _all_plugins[plugin_category]:
            _all_plugins[plugin_category].pop(name)
def add_date_facet(self, *args, **kwargs):
    """Append a date-histogram facet built from the given arguments."""
    facet = DateHistogramFacet(*args, **kwargs)
    self.facets.append(facet)
def _outer_init_full_values ( self ) :
"""If full _ values has indices in values _ indices , we might want to initialize
the full _ values differently , so that subsetting is possible .
Here you can initialize the full _ values for the values needed .
Keep in mind , that if a key does not exist in full _ values when updating
values , it will be set ( so e . g . for Z there is no need to initialize Zgrad ,
as there is no subsetting needed . For X in BGPLVM on the other hand we probably need
to initialize the gradients for the mean and the variance in order to
have the full gradient for indexing )""" | retd = dict ( dL_dKmm = np . zeros ( ( self . Z . shape [ 0 ] , self . Z . shape [ 0 ] ) ) )
if self . has_uncertain_inputs ( ) :
retd . update ( dict ( dL_dpsi0 = np . zeros ( self . X . shape [ 0 ] ) , dL_dpsi1 = np . zeros ( ( self . X . shape [ 0 ] , self . Z . shape [ 0 ] ) ) , dL_dpsi2 = np . zeros ( ( self . X . shape [ 0 ] , self . Z . shape [ 0 ] , self . Z . shape [ 0 ] ) ) ) )
else :
retd . update ( { 'dL_dKdiag' : np . zeros ( self . X . shape [ 0 ] ) , 'dL_dKnm' : np . zeros ( ( self . X . shape [ 0 ] , self . Z . shape [ 0 ] ) ) } )
return retd |
def remove_wirevector(self, wirevector):
    """Remove a wirevector object from the block."""
    self.wirevector_set.remove(wirevector)
    self.wirevector_by_name.pop(wirevector.name)
def list_storage_services(conn=None, call=None):
    '''List VMs on this Azure account, with full information.'''
    if call != 'function':
        raise SaltCloudSystemExit('The list_storage_services function must be called with -f or --function.')
    if not conn:
        conn = get_conn()
    services = {}
    accounts = conn.list_storage_accounts()
    for account in accounts.storage_services:
        services[account.service_name] = {
            'capabilities': account.capabilities,
            'service_name': account.service_name,
            'storage_service_properties': account.storage_service_properties,
            'extended_properties': account.extended_properties,
            'storage_service_keys': account.storage_service_keys,
            'url': account.url,
        }
    return services
def fake_run(self):
    '''Pretend to run cd-hit by putting each input sequence into its own
    cluster -- as if cdhit was run but clustered nothing.'''
    clusters = {}
    seen_names = set()
    for seq in pyfastaq.sequences.file_reader(self.infile):
        if seq.id in seen_names:
            raise Error('Sequence name "' + seq.id + '" not unique. Cannot continue')
        cluster_key = str(len(clusters) + self.min_cluster_number)
        clusters[cluster_key] = {seq.id}
        seen_names.add(seq.id)
    return clusters
def get(self, request, bot_id, format=None):
    """Get list of environment variables.

    Delegates entirely to the parent class's ``get``; the override exists
    to carry endpoint-specific metadata.

    serializer: EnvironmentVarSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
    """
    return super(EnvironmentVarList, self).get(request, bot_id, format)
def _analyst_data(self, ws):
    """Return a dict describing the analyst assigned to the worksheet.

    Keys: username, fullname, email.
    """
    analyst = ws.getAnalyst()
    return {
        'username': analyst,
        'fullname': to_utf8(self.user_fullname(analyst)),
        'email': to_utf8(self.user_email(analyst)),
    }
def d2logpdf_dlink2_dvar(self, link_f, y, Y_metadata=None):
    """Derivative of the log-likelihood Hessian w.r.t. the variance parameter.

    :param link_f: latent variables link(f)
    :type link_f: Nx1 array
    :param y: data
    :type y: Nx1 array
    :param Y_metadata: may contain a 'censored' indicator array (1 for
        censored observations, 0 otherwise); otherwise not used
    :returns: derivative of log likelihood evaluated at points link(f)
        w.r.t. variance parameter
    :rtype: Nx1 array
    """
    # Default: treat all observations as uncensored.
    c = np.zeros_like(y)
    if Y_metadata is not None and 'censored' in Y_metadata.keys():
        c = Y_metadata['censored']
    # NOTE(review): log(y) - link_f suggests a log-normal-style residual --
    # confirm against the enclosing likelihood class.
    val = np.log(y) - link_f
    val_scaled = val / np.sqrt(self.variance)
    val_scaled2 = val / self.variance  # NOTE(review): unused here
    # Upper-tail probability (survival function) of the scaled residual.
    a = (1 - stats.norm.cdf(val_scaled))
    uncensored = (1 - c) * (1. / (self.variance ** 2))
    censored = c * (val * np.exp(-3 * (val ** 2) / (2 * self.variance)) / ((a ** 3) * np.sqrt(8 * np.pi ** 3) * self.variance ** (5 / 2.)) + np.exp(-val ** 2 / self.variance) / ((a ** 2) * 4 * np.pi * self.variance ** 2) - np.exp(-val ** 2 / self.variance) * val ** 2 / ((a ** 2) * 2 * np.pi * self.variance ** 3) + np.exp(-val ** 2 / self.variance) / ((a ** 2) * 4 * np.pi * self.variance ** 2) - np.exp(-val ** 2 / (2 * self.variance)) * val / (a * np.sqrt(2 * np.pi) * 2 * self.variance ** (5 / 2.)) - np.exp(-val ** 2 / self.variance) * (val ** 2) / ((a ** 2) * 4 * np.pi * self.variance ** 3) - np.exp(-val ** 2 / (2 * self.variance)) * val / (a * np.sqrt(2 * np.pi) * self.variance ** (5 / 2.)) + np.exp(-val ** 2 / (2 * self.variance)) * (val ** 3) / (a * np.sqrt(2 * np.pi) * 2 * self.variance ** (7 / 2.)))
    dlik_hess_dsigma = uncensored + censored
    return dlik_hess_dsigma
def to_gremlin(self):
    """Return a unicode object with the Gremlin representation of this expression."""
    self.validate()
    # Three shapes of output: infix operators, dotted method calls, and
    # the special intersection form.
    infix = u'({left} {operator} {right})'
    dotted = u'{left}.{operator}({right})'
    intersection = u'(!{left}.{operator}({right}).empty)'
    translation_table = {
        u'=': (u'==', infix),
        u'!=': (u'!=', infix),
        u'>=': (u'>=', infix),
        u'<=': (u'<=', infix),
        u'>': (u'>', infix),
        u'<': (u'<', infix),
        u'+': (u'+', infix),
        u'||': (u'||', infix),
        u'&&': (u'&&', infix),
        u'contains': (u'contains', dotted),
        u'intersects': (u'intersect', intersection),
        u'has_substring': (u'contains', dotted),
    }
    gremlin_operator, format_spec = translation_table.get(self.operator, (None, None))
    if not gremlin_operator:
        raise AssertionError(u'Unrecognized operator used: ' u'{} {}'.format(self.operator, self))
    return format_spec.format(operator=gremlin_operator, left=self.left.to_gremlin(), right=self.right.to_gremlin())
def apply_handler_to_all_logs(handler: logging.Handler, remove_existing: bool = False) -> None:
    """Apply a handler to all registered loggers, optionally removing existing handlers.

    Should ONLY be called from the ``if __name__ == 'main'`` script;
    see https://docs.python.org/3.4/howto/logging.html#library-config.
    Generally MORE SENSIBLE just to apply a handler to the root logger.

    Args:
        handler: the handler to apply
        remove_existing: remove existing handlers from each logger first?
    """
    # noinspection PyUnresolvedReferences
    for name, obj in logging.Logger.manager.loggerDict.items():
        # loggerDict also contains logging.PlaceHolder entries for ancestor
        # names that were never explicitly created; they have no handlers
        # attribute or addHandler() method, so skip them to avoid crashing.
        if not isinstance(obj, logging.Logger):
            continue
        if remove_existing:
            obj.handlers = []  # http://stackoverflow.com/questions/7484454
        obj.addHandler(handler)
def list_availability_zones(self, retrieve_all=True, **_params):
    """Fetch a list of all availability zones."""
    collection = 'availability_zones'
    return self.list(collection, self.availability_zones_path, retrieve_all, **_params)
def read_file_to_buffer(filename):
    """Read a file into an in-memory binary buffer.

    :param filename: path of the file to read
    :return: an ``io.BytesIO`` positioned at the start of the file contents
    """
    # Open in binary mode: BytesIO requires bytes, and the original
    # text-mode read would raise TypeError on Python 3. The context
    # manager also guarantees the file handle is closed on error.
    with open(filename, "rb") as f:
        return BytesIO(f.read())
def add_report(self, report, ignore_errors=False):
    """Add all anchors from a report."""
    if not isinstance(report, SignedListReport):
        if not ignore_errors:
            raise ArgumentError("You can only add SignedListReports to a UTCAssigner", report=report)
        return
    for reading in report.visible_readings:
        self.add_reading(reading)
    self.add_point(report.report_id, report.sent_timestamp, report.received_time)
def frombinary(path, shape=None, dtype=None, ext='bin', start=None, stop=None, recursive=False, nplanes=None, npartitions=None, labels=None, conf='conf.json', order='C', engine=None, credentials=None):
    """Load images from flat binary files.

    Assumes one image per file, each with the shape and ordering as given
    by the input arguments.

    Parameters
    ----------
    path : str
        Path to data files or directory, specified as either a local
        filesystem path or in a URI-like format, including scheme.
        May include a single '*' wildcard character.
    shape : tuple of positive int
        Dimensions of input image data.
    ext : string, optional, default = 'bin'
        Extension required on data files to be loaded.
    start, stop : nonnegative int, optional, default = None
        Indices of the first and last-plus-one file to load, relative to
        the sorted filenames matching `path` and `ext`. Interpreted using
        python slice indexing conventions.
    recursive : boolean, optional, default = False
        If true, will recursively descend directories from path, loading
        all files with an extension matching 'ext'.
    nplanes : positive integer, optional, default = None
        If passed, will cause single files to be subdivided into nplanes
        separate images. Otherwise, each file is taken to be one image.
    npartitions : int, optional, default = None
        Number of partitions for computational engine,
        if None will use default for engine.
    labels : array, optional, default = None
        Labels for records. If provided, should be one-dimensional.
    """
    import json
    from thunder.readers import get_file_reader, FileNotFoundError
    # An optional conf.json next to the data may carry dtype/shape.
    try:
        reader = get_file_reader(path)(credentials=credentials)
        buf = reader.read(path, filename=conf).decode('utf-8')
        params = json.loads(buf)
    except FileNotFoundError:
        params = {}
    if 'dtype' in params.keys():
        dtype = params['dtype']
    if 'dims' in params.keys():
        shape = params['dims']
    if 'shape' in params.keys():
        shape = params['shape']
    if not shape:
        raise ValueError('Image shape must be specified as argument or in a conf.json file')
    if not dtype:
        dtype = 'int16'
    if nplanes is not None:
        if nplanes <= 0:
            raise ValueError('nplanes must be positive if passed, got %d' % nplanes)
        if shape[-1] % nplanes:
            raise ValueError("Last dimension '%d' must be divisible by nplanes '%d'" % (shape[-1], nplanes))

    def getarray(idx_buffer_filename):
        # Deserialize one file's bytes into an ndarray, or several arrays
        # when nplanes subdivides each file along the last axis.
        idx, buf, _ = idx_buffer_filename
        ary = frombuffer(buf, dtype=dtype, count=int(prod(shape))).reshape(shape, order=order)
        if nplanes is None:
            yield (idx,), ary
        else:
            # Divide array into chunks of nplanes planes.
            # BUGFIX: use floor division -- '/' is float division on
            # Python 3 and produced float record keys.
            npoints = shape[-1] // nplanes
            if shape[-1] % nplanes:
                npoints += 1
            timepoint = 0
            last_plane = 0
            current_plane = 1
            while current_plane < ary.shape[-1]:
                if current_plane % nplanes == 0:
                    slices = [slice(None)] * (ary.ndim - 1) + [slice(last_plane, current_plane)]
                    # BUGFIX: key wrapped in a tuple for consistency with
                    # the other yields of this accessor.
                    yield (idx * npoints + timepoint,), ary[slices].squeeze()
                    timepoint += 1
                    last_plane = current_plane
                current_plane += 1
            # yield remaining planes
            slices = [slice(None)] * (ary.ndim - 1) + [slice(last_plane, ary.shape[-1])]
            yield (idx * npoints + timepoint,), ary[slices].squeeze()

    recount = False if nplanes is None else True
    append = [nplanes] if (nplanes is not None and nplanes > 1) else []
    newdims = tuple(list(shape[:-1]) + append) if nplanes else shape
    return frompath(path, accessor=getarray, ext=ext, start=start, stop=stop, recursive=recursive, npartitions=npartitions, dims=newdims, dtype=dtype, labels=labels, recount=recount, engine=engine, credentials=credentials)
def _buttonbox(msg, title, choices, root=None, timeout=None):
    """Display a msg, a title, and a set of buttons.

    The buttons are defined by the members of the choices list.
    Return the text of the button that the user selected.

    @arg msg: the msg to be displayed.
    @arg title: the window title
    @arg choices: a list or tuple of the choices to be displayed
    """
    global boxRoot, __replyButtonText, __widgetTexts, buttonsFrame
    # Initialize __replyButtonText to the first choice.
    # This is what will be used if the window is closed by the close button.
    __replyButtonText = choices[0]
    if root:
        # Reuse the caller's Tk instance: hide it and open a child window.
        root.withdraw()
        boxRoot = tk.Toplevel(master=root)
        boxRoot.withdraw()
    else:
        boxRoot = tk.Tk()
        boxRoot.withdraw()
    boxRoot.title(title)
    boxRoot.iconname('Dialog')
    boxRoot.geometry(rootWindowPosition)
    boxRoot.minsize(400, 100)
    # ------------- define the messageFrame ---------------------------------
    messageFrame = tk.Frame(master=boxRoot)
    messageFrame.pack(side=tk.TOP, fill=tk.BOTH)
    # ------------- define the buttonsFrame ---------------------------------
    buttonsFrame = tk.Frame(master=boxRoot)
    buttonsFrame.pack(side=tk.TOP, fill=tk.BOTH)
    # ------------- place the widgets in the frames -------------------------
    messageWidget = tk.Message(messageFrame, text=msg, width=400)
    messageWidget.configure(font=(PROPORTIONAL_FONT_FAMILY, PROPORTIONAL_FONT_SIZE))
    messageWidget.pack(side=tk.TOP, expand=tk.YES, fill=tk.X, padx='3m', pady='3m')
    __put_buttons_in_buttonframe(choices)
    # ------------- the action begins ---------------------------------------
    # put the focus on the first button
    __firstWidget.focus_force()
    boxRoot.deiconify()
    if timeout is not None:
        # NOTE(review): tkinter's after() takes milliseconds -- presumably
        # timeout is already in ms; confirm against callers.
        boxRoot.after(timeout, timeoutBoxRoot)
    boxRoot.mainloop()
    try:
        boxRoot.destroy()
    except tk.TclError:
        # Window already destroyed (closed externally); unless the timeout
        # fired, report "no selection".
        if __replyButtonText != TIMEOUT_TEXT:
            __replyButtonText = None
    if root:
        root.deiconify()
    return __replyButtonText
def app_to_context(self, context):
    """Return a context-encoded version of this application tag."""
    if self.tagClass != Tag.applicationTagClass:
        raise ValueError("application tag required")
    if self.tagNumber == Tag.booleanAppTag:
        # Application-tagged booleans carry their value in LVT; the context
        # form needs it as one byte of data.
        return ContextTag(context, chr(self.tagLVT))
    return ContextTag(context, self.tagData)
def write_fmt(fp, fmt, *args):
    """Write ``args`` to ``fp`` packed big-endian according to ``fmt``."""
    fmt = str(">" + fmt)
    packed = struct.pack(fmt, *args)
    expected = struct.calcsize(fmt)
    written = write_bytes(fp, packed)
    assert written == expected, 'written=%d, expected=%d' % (written, expected)
    return written
def MarkDone(self, responses):
    """Mark a client as done."""
    client_id = responses.request.client_id
    self.AddResultsToCollection(responses, client_id)
    self.MarkClientDone(client_id)
def top(self):
    """Top coordinate."""
    data = self._data
    return data.real_top if self._has_real() else data.top
def detectMidpCapable(self):
    """Return detection of a MIDP mobile Java-capable device.

    Detects if the current device supports MIDP, a mobile Java technology,
    by checking both the user-agent and the HTTP Accept header.
    """
    midp_token = UAgentInfo.deviceMidp
    return midp_token in self.__userAgent or midp_token in self.__httpAccept
def _build_command ( self , cmds , sync = False ) :
"""Build full EOS ' s openstack CLI command .
Helper method to add commands to enter and exit from openstack
CLI modes .
: param cmds : The openstack CLI commands that need to be executed
in the openstack config mode .
: param sync : This flags indicates that the region is being synced .""" | region_cmd = 'region %s' % self . region
if sync :
region_cmd = self . cli_commands [ const . CMD_REGION_SYNC ]
full_command = [ 'enable' , 'configure' , 'cvx' , 'service openstack' , region_cmd , ]
full_command . extend ( cmds )
return full_command |
def encode(input, encoding=UTF8, errors='strict'):
    """Encode a single string.

    :param input: A Unicode string.
    :param encoding: An :class:`Encoding` object or a label string.
    :param errors: Type of error handling. See :func:`codecs.register`.
    :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
    :return: A byte string.
    """
    codec = _get_encoding(encoding).codec_info
    encoded, _length = codec.encode(input, errors)
    return encoded
def set_expected_update_frequency(self, update_frequency):
    # type: (str) -> None
    """Set the expected update frequency.

    Args:
        update_frequency (str): Update frequency, either already numeric or
            a textual frequency to be translated.

    Returns:
        None
    """
    try:
        int(update_frequency)
    except ValueError:
        # Not already a numeric code: translate the textual frequency.
        update_frequency = Dataset.transform_update_frequency(update_frequency)
        if not update_frequency:
            raise HDXError('Invalid update frequency supplied!')
    self.data['data_update_frequency'] = update_frequency
def notify_widget(self, widget, message=None, clear_in=CLEAR_NOTIF_BAR_MESSAGE_IN):
    """Open a notification popup.

    :param widget: instance of Widget, widget to display
    :param message: str, message to remove from list of notifications
    :param clear_in: int, time in seconds after which the notification
        should be removed
    """
    @log_traceback
    def clear_notification(*args, **kwargs):
        # the point here is the log_traceback
        self.remove_widget(widget, message=message)
    if not widget:
        return
    logger.debug("display notification widget %s", widget)
    # Register the widget/message association under the lock so concurrent
    # notifications do not race on the shared dicts.
    with self.notifications_lock:
        self.widget_message_dict[widget] = message
        if message:
            self.message_widget_dict[message] = widget
    self.reload_footer(rebuild_statusbar=False)
    # Schedule automatic removal of this notification.
    self.loop.set_alarm_in(clear_in, clear_notification)
    return widget
def available(self):
    """Return the set of available schema versions.

    :returns: A set of integers giving the available versions.
    """
    schema = self._schema
    # Short-circuit: no schema means no versions.
    if not schema:
        return set()
    versions = set(schema.__vers_downgraders__.keys())
    versions.add(schema.__version__)
    return versions
def spawn_containers(addrs, env_cls=Environment, env_params=None, mgr_cls=EnvManager, *args, **kwargs):
    """Spawn environments in a multiprocessing :class:`multiprocessing.Pool`.

    Arguments and keyword arguments are passed down to the created
    environments at initialization time if *env_params* is None. If
    *env_params* is not None, then it is assumed to contain individual
    initialization parameters for each environment in *addrs*.

    :param addrs:
        List of (HOST, PORT) addresses for the environments.
    :param env_cls:
        Callable for the environments. Must be a subclass of
        :py:class:`~creamas.core.environment.Environment`.
    :param env_params: Initialization parameters for the environments.
    :type env_params: Iterable of same length as *addrs* or None.
    :param mgr_cls:
        Callable for the managers. Must be a subclass of
        :py:class:`~creamas.mp.EnvManager`.
    :returns:
        The created process pool and the *ApplyAsync* results for the
        spawned environments.
    """
    # One worker process per address.
    pool = multiprocessing.Pool(len(addrs))
    kwargs['env_cls'] = env_cls
    kwargs['mgr_cls'] = mgr_cls
    r = []
    for i, addr in enumerate(addrs):
        if env_params is not None:
            # Per-environment parameters were supplied explicitly.
            k = env_params[i]
            k['env_cls'] = env_cls
            k['mgr_cls'] = mgr_cls
        # Copy kwargs so that we can apply different address to different
        # containers.
        else:
            k = kwargs.copy()
        k['addr'] = addr
        ret = pool.apply_async(spawn_container, args=args, kwds=k, error_callback=logger.warning)
        r.append(ret)
    return pool, r
def make_header(decoded_seq, maxlinelen=None, header_name=None, continuation_ws=' '):
    """Create a Header from a sequence of pairs as returned by decode_header().

    decode_header() takes a header value string and returns a sequence of
    pairs of the format (decoded_string, charset) where charset is the
    string name of the character set.

    This function takes one of those sequences of pairs and returns a
    Header instance. Optional maxlinelen, header_name, and continuation_ws
    are as in the Header constructor.
    """
    header = Header(maxlinelen=maxlinelen, header_name=header_name, continuation_ws=continuation_ws)
    for string, charset in decoded_seq:
        # None means us-ascii; Header.append handles that directly.
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        header.append(string, charset)
    return header
def get_song(self, id_):
    """Return data for a specific song."""
    return self._make_request("songs/{id}".format(id=id_))
def train_token(self, word, count):
    """Train a particular token (increase its weight/count).

    :param word: the token we're going to train
    :type word: str
    :param count: the number of occurrences in the sample
    :type count: int
    """
    self.tokens[word] = self.tokens.get(word, 0) + count
    self.tally += count
def get_argument_parser():
    """Function to obtain the argument parser.

    Returns
    -------
    `argparse.ArgumentParser`
        A fully configured `argparse.ArgumentParser` object.

    Notes
    -----
    This function can also be used by the `sphinx-argparse` extension for
    sphinx to generate documentation for this script.
    """
    desc = 'Convert Entrez IDs to gene symbols.'
    parser = cli.get_argument_parser(desc=desc)
    file_mv = cli.file_mv
    g = parser.add_argument_group('Input and output files')
    g.add_argument('-e', '--expression-file', required=True, type=cli.str_type, metavar=file_mv, help='The expression file.')
    g.add_argument('-g', '--gene-file', required=True, type=cli.str_type, metavar=file_mv, help=textwrap.dedent('''\
The gene file (e.g., generated by the
ensembl_extract_protein_coding_genes.py script).'''))
    g.add_argument('-c', '--entrez2gene-file', required=True, type=cli.str_type, metavar=file_mv, help=textwrap.dedent('''\
The entrez2gene file (.e.g., generated by the
ncbi_extract_entrez2gene.py script).'''))
    g.add_argument('-o', '--output-file', required=True, type=cli.str_type, metavar=file_mv, help='The output file.')
    g = parser.add_argument_group('Conversion options')
    g.add_argument('-s', '--strip-affy-suffix', action='store_true', help=textwrap.dedent('''\
Strip the suffix "_at" from all Entrez IDs.
(For use in affymetrix microarray pipeline.)'''))
    # Shared reporting/verbosity flags.
    cli.add_reporting_args(parser)
    return parser
def run_experiment(self):
    """Sign up, run the ``participate`` method, then sign off and close
    the driver."""
    try:
        self.sign_up()
        self.participate()
        # Outcome depends solely on whether sign-off succeeded.
        outcome = "worker_complete" if self.sign_off() else "worker_failed"
        self.complete_experiment(outcome)
    finally:
        # Always release the browser, even if any step above raised.
        self.driver.quit()
def parse(cls, parser, text, pos):
    """Parse using our own entry point so the parenthesized-simple-values
    flag is set for the duration of the parse."""
    try:
        parser._parsing_parenthesized_simple_values_expression = True
        rest, tokens = parser.parse(text, cls.grammar)
        return rest, tokens
    except SyntaxError as e:
        # On failure, hand back the untouched text plus the error itself.
        return text, e
    finally:
        parser._parsing_parenthesized_simple_values_expression = False
def get_interpreter_path(version=None):
    """Return the executable of a specified or the current Python version."""
    current_major = str(sys.version_info[0])
    # No version requested, or the current major version requested:
    # the running interpreter is the answer.
    if not version or version == current_major:
        return sys.executable
    return settings.PYTHON_INTERPRETER + version
def _get_features(self, eopatch=None):
    """A generator of parsed features.

    :param eopatch: A given EOPatch
    :type eopatch: EOPatch or None
    :return: One by one feature
    :rtype: tuple(FeatureType, str) or tuple(FeatureType, str, str)
    :raises ValueError: if a named feature cannot be located in the given
        EOPatch.
    """
    for feature_type, feature_dict in self.feature_collection.items():
        # A missing (None) feature type falls back to the configured default.
        if feature_type is None and self.default_feature_type is not None:
            feature_type = self.default_feature_type
        if feature_type is None:
            # Feature type unknown: resolve it per feature name.
            for feature_name, new_feature_name in feature_dict.items():
                if eopatch is None:
                    # No EOPatch to search: Ellipsis marks the type as unresolved.
                    yield self._return_feature(..., feature_name, new_feature_name)
                else:
                    found_feature_type = self._find_feature_type(feature_name, eopatch)
                    if found_feature_type:
                        yield self._return_feature(found_feature_type, feature_name, new_feature_name)
                    else:
                        raise ValueError("Feature with name '{}' does not exist among features of allowed feature" " types in given EOPatch. Allowed feature types are " "{}".format(feature_name, self.allowed_feature_types))
        elif feature_dict is ...:
            # Ellipsis as the dict requests *all* features of this type.
            if not feature_type.has_dict() or eopatch is None:
                yield self._return_feature(feature_type, ...)
            else:
                for feature_name in eopatch[feature_type]:
                    yield self._return_feature(feature_type, feature_name)
        else:
            # Explicit mapping of feature names (optionally renamed).
            for feature_name, new_feature_name in feature_dict.items():
                if eopatch is not None and feature_name not in eopatch[feature_type]:
                    raise ValueError('Feature {} of type {} was not found in EOPatch'.format(feature_name, feature_type))
                yield self._return_feature(feature_type, feature_name, new_feature_name)
def is_package_installed(distribution, pkg):
    """Check whether a particular package is installed.

    :param distribution: distribution identifier string, matched by substring
        (e.g. contains 'centos', 'el', 'redhat', 'ubuntu' or 'debian').
    :param pkg: name of the package to look up.
    :return: True if installed, False otherwise. Unrecognized distributions
        now return an explicit False instead of an implicit None.
    """
    if 'centos' in distribution or 'el' in distribution or 'redhat' in distribution:
        return is_rpm_package_installed(pkg)
    if 'ubuntu' in distribution or 'debian' in distribution:
        return is_deb_package_installed(pkg)
    # Unknown distribution: previously fell off the end and returned None.
    return False
def export_maxloss_ruptures(ekey, dstore):
    """Export, per loss type, the rupture causing the maximum loss as NRML XML.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: list of paths of the files written (one per loss type)
    """
    oq = dstore['oqparam']
    mesh = get_mesh(dstore['sitecol'])
    rlzs_by_gsim = dstore['csm_info'].get_rlzs_by_gsim_grp()
    num_ses = oq.ses_per_logic_tree_path
    fnames = []
    for loss_type in oq.loss_dt().names:
        # Rupture with the maximum loss for this loss type.
        ebr = getters.get_maxloss_rupture(dstore, loss_type)
        root = hazard_writers.rupture_to_element(ebr.export(mesh, rlzs_by_gsim[ebr.grp_id], num_ses))
        dest = dstore.export_path('rupture-%s.xml' % loss_type)
        with open(dest, 'wb') as fh:
            nrml.write(list(root), fh)
        fnames.append(dest)
    return fnames
def is_nsphere(points):
    """Check if a list of points is an nsphere.

    Parameters
    ----------
    points : (n, dimension) float
        Points in space

    Returns
    -------
    check : bool
        True if input points are on an nsphere
    """
    # Fit an n-sphere and accept the points if the residual is below
    # the merge tolerance.
    _, _, fit_error = fit_nsphere(points)
    return fit_error < tol.merge
def list_ngrams(token_list, n=1, join=' '):
    """Return a list of n-tuples, one for each possible sequence of n items
    in the token_list.

    Arguments:
        join (bool or str): if str, then join ngram tuples on it before
            returning; True is equivalent to join=' '; default=' '.

    See: http://stackoverflow.com/a/30609050/623735

    >>> list_ngrams('goodbye cruel world'.split(), join=False)
    [('goodbye',), ('cruel',), ('world',)]
    >>> list_ngrams('goodbye cruel world'.split(), 2, join=False)
    [('goodbye', 'cruel'), ('cruel', 'world')]
    """
    if join is True:
        join = ' '
    # zip n progressively shifted views of the list to form the windows.
    shifted = [token_list[offset:] for offset in range(n)]
    grams = list(zip(*shifted))
    if isinstance(join, str):
        return [join.join(gram) for gram in grams]
    return grams
def add_hlinkClick(self, rId):
    """Add an ``<a:hlinkClick>`` child element with its ``r:id`` attribute
    set to *rId*, and return it."""
    element = self.get_or_add_hlinkClick()
    element.rId = rId
    return element
def getSpaceUse(self):
    """Get disk space usage via ``df -Pk``.

    @return: Dictionary mapping mount point to filesystem utilization stats
             (device, type, total, inuse, avail in bytes; inuse_pcent).
    @raise Exception: if execution of the df command fails; the original
             error is chained instead of discarded.
    """
    stats = {}
    try:
        out = subprocess.Popen([dfCmd, "-Pk"],
                               stdout=subprocess.PIPE).communicate()[0]
    except OSError as err:
        # Narrowed from a bare except; chain the cause for debuggability.
        raise Exception('Execution of command %s failed.' % dfCmd) from err
    lines = out.splitlines()
    # First line is the df header; each following line is one filesystem.
    for line in lines[1:]:
        cols = line.split()
        fsstats = {
            'device': cols[0],
            'type': self._fstypeDict[cols[5]],
            # df -Pk reports 1K blocks; convert to bytes.
            'total': 1024 * int(cols[1]),
            'inuse': 1024 * int(cols[2]),
            'avail': 1024 * int(cols[3]),
            # Strip the trailing '%' sign.
            'inuse_pcent': int(cols[4][:-1]),
        }
        stats[cols[5]] = fsstats
    return stats
async def add_relation(self, local_relation, remote_relation):
    """Add a relation to another application.

    :param str local_relation: Name of relation on this application
    :param str remote_relation: Name of relation on the other
        application in the form '<application>[:<relation_name>]'
    """
    qualified = local_relation
    # Qualify a bare relation name with this application's name.
    if ':' not in qualified:
        qualified = '{}:{}'.format(self.name, qualified)
    return await self.model.add_relation(qualified, remote_relation)
def query_mongo_sort_decend(database_name, collection_name, query=None, skip=0,
                            limit=getattr(settings, 'MONGO_LIMIT', 200),
                            return_keys=(), sortkey=None):
    """Return a response_dict with a list of search results in descending
    order based on a sort key.

    :param database_name: name of the Mongo database to query.
    :param collection_name: name of the collection within the database.
    :param query: MongoDB query document; defaults to match-all ({}).
    :param skip: number of documents to skip.
    :param limit: maximum number of documents to return.
    :param return_keys: optional iterable of field names to project.
    :param sortkey: field to sort on, descending.
    :return: dict with 'code', 'type', 'results' and, on error, 'message'.
    """
    # Fresh dict per call: a mutable default argument ({}) would be shared
    # across calls and could be mutated by the driver.
    if query is None:
        query = {}
    results = []
    response_dict = {}
    try:
        mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
                                     'mongodb://localhost:27017/')
        mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
        db = mc[str(database_name)]
        collection = db[str(collection_name)]
        if return_keys:
            # Build a Mongo projection including only the requested keys.
            projection = {k: 1 for k in return_keys}
            mysearchresult = collection.find(query, projection).skip(
                skip).limit(limit).sort(sortkey, DESCENDING)
        else:
            mysearchresult = collection.find(query).skip(
                skip).limit(limit).sort(sortkey, DESCENDING)
        response_dict['code'] = 200
        response_dict['type'] = "search-results"
        for d in mysearchresult:
            # Expose the ObjectId as a plain string under 'id'.
            d['id'] = str(d['_id'])
            del d['_id']
            results.append(d)
        response_dict['results'] = results
    except Exception:
        # Narrowed from a bare except (which also swallowed SystemExit and
        # KeyboardInterrupt); report failure in the response body.
        print("Error reading from Mongo")
        print(str(sys.exc_info()))
        response_dict['num_results'] = 0
        response_dict['code'] = 500
        response_dict['type'] = "Error"
        response_dict['results'] = []
        response_dict['message'] = str(sys.exc_info())
    return response_dict
def visibleCount(self):
    """Returns the number of visible items in this list.

    :return <int>
    """
    visible = 0
    for index in range(self.count()):
        if not self.item(index).isHidden():
            visible += 1
    return visible
def remove_library_from_file_system(self, library_path, library_name):
    """Remove a library from the hard disk, then refresh the library list."""
    fs_path = self.get_os_path_to_library(library_path, library_name)[0]
    shutil.rmtree(fs_path)
    self.refresh_libraries()
def _uncheck_descendant ( self , item ) :
"""Uncheck the boxes of item ' s descendant .""" | children = self . get_children ( item )
for iid in children :
self . change_state ( iid , "unchecked" )
self . _uncheck_descendant ( iid ) |
def create_database(self, database):
    """Create a database on the InfluxDB server.

    :param database: the name of the database to create
    :type database: string
    :rtype: boolean
    """
    payload = {'name': database}
    # A 201 response is required; self.request raises otherwise.
    self.request(url="db", method='POST', data=payload,
                 expected_response_code=201)
    return True
def _catch_exceptions(self, exctype, value, tb):
    """Catches all uncaught exceptions and logs them."""
    # First log the exception with its full traceback.
    self.error('Uncaught exception', exc_info=(exctype, value, tb))
    # Then print a colourised traceback to stdout.
    print_exception_formatted(exctype, value, tb)
def open(bucket_id, key_id, mode, buffer_size=DEFAULT_BUFFER_SIZE, min_part_size=DEFAULT_MIN_PART_SIZE, session=None, resource_kwargs=None, multipart_upload_kwargs=None, ):
    """Open an S3 object for reading or writing.

    Parameters
    ----------
    bucket_id : str
        The name of the bucket this object resides in.
    key_id : str
        The name of the key within the bucket.
    mode : str
        The mode for opening the object. Must be either "rb" or "wb".
    buffer_size : int, optional
        The buffer size to use when performing I/O.
    min_part_size : int, optional
        The minimum part size for multipart uploads. For writing only.
    session : object, optional
        The S3 session to use when working with boto3.
    resource_kwargs : dict, optional
        Keyword arguments to use when accessing the S3 resource.
    multipart_upload_kwargs : dict, optional
        Additional parameters for boto3's initiate_multipart_upload.
        For writing only.
    """
    logger.debug('%r', locals())
    if mode not in MODES:
        raise NotImplementedError('bad mode: %r expected one of %r' % (mode, MODES))
    # Normalize the optional kwarg dicts.
    resource_kwargs = {} if resource_kwargs is None else resource_kwargs
    multipart_upload_kwargs = {} if multipart_upload_kwargs is None else multipart_upload_kwargs
    if mode == READ_BINARY:
        return SeekableBufferedInputBase(
            bucket_id, key_id, buffer_size=buffer_size,
            session=session, resource_kwargs=resource_kwargs, )
    if mode == WRITE_BINARY:
        return BufferedOutputBase(
            bucket_id, key_id, min_part_size=min_part_size,
            session=session, multipart_upload_kwargs=multipart_upload_kwargs,
            resource_kwargs=resource_kwargs, )
    # Unreachable: mode was already validated against MODES above.
    assert False, 'unexpected mode: %r' % mode
def is_valid(self, instance):
    """Return True if no errors are raised when validating instance.

    instance can be a dict (i.e. form.cleaned_data), a form, or a model
    instance. If instance is a form, full_clean() will be called.
    """
    found = self.errors(instance)
    # A list counts as valid when every entry is falsy; anything else is
    # valid when it is falsy itself.
    if isinstance(found, list):
        return not any(found)
    return not bool(found)
def distcheck(appname='', version='', subdir=''):
    '''checks if the sources compile (tarball from 'dist')'''
    import tempfile, tarfile
    # Fall back to the project metadata declared in the top-level wscript.
    if not appname:
        appname = Utils.g_module.APPNAME
    if not version:
        version = Utils.g_module.VERSION
    waf = os.path.abspath(sys.argv[0])
    # Build the source tarball first, then extract it into a clean directory.
    tarball = dist(appname, version)
    path = appname + '-' + version
    if os.path.exists(path):
        shutil.rmtree(path)
    t = tarfile.open(tarball)
    for x in t:
        t.extract(x)
    t.close()
    if subdir:
        build_path = os.path.join(path, subdir)
    else:
        build_path = path
    # Run a full configure/build/install/uninstall cycle against a scratch
    # destdir using the same waf executable that invoked us.
    instdir = tempfile.mkdtemp('.inst', '%s-%s' % (appname, version))
    ret = Utils.pproc.Popen([waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + instdir], cwd=build_path).wait()
    if ret:
        raise Utils.WafError('distcheck failed with code %i' % ret)
    # NOTE(review): this assumes a clean uninstall removes the destdir
    # entirely; leftover files (or the directory itself) mean failure.
    if os.path.exists(instdir):
        raise Utils.WafError('distcheck succeeded, but files were left in %s' % instdir)
    shutil.rmtree(path)
def model_results(self) -> str:
    """Read and return the contents of the ``model.results`` file located
    in this object's directory."""
    results_path = os.path.join(self.directory, "model.results")
    with open(results_path) as results_file:
        return results_file.read()
def get_environment_vars(filename):
    """Return a dict of environment variables required to run a service
    under faketime."""
    if sys.platform in ("linux", "linux2"):
        return {
            'LD_PRELOAD': path.join(LIBFAKETIME_DIR, "libfaketime.so.1"),
            # node doesn't seem to work in the current version.
            'FAKETIME_SKIP_CMDS': 'nodejs',
            'FAKETIME_TIMESTAMP_FILE': filename,
        }
    if sys.platform == "darwin":
        return {
            'DYLD_INSERT_LIBRARIES': path.join(LIBFAKETIME_DIR, "libfaketime.1.dylib"),
            'DYLD_FORCE_FLAT_NAMESPACE': '1',
            'FAKETIME_TIMESTAMP_FILE': filename,
        }
    raise RuntimeError("libfaketime does not support '{}' platform".format(sys.platform))
def list_priority_class(self, **kwargs):
    """list or watch objects of kind PriorityClass

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.list_priority_class(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: Continuation token from a previous list call.
    :param str field_selector: Restrict returned objects by their fields.
    :param str label_selector: Restrict returned objects by their labels.
    :param int limit: Maximum number of responses to return for a list call.
    :param str resource_version: Show changes after this resource version.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Watch for changes and return them as a stream.
    :return: V1PriorityClassList
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths delegate to the same call and return
    # its result directly (a thread when async_req is set, data otherwise).
    return self.list_priority_class_with_http_info(**kwargs)
def copy_config_input_with_inactive(self, **kwargs):
    """Build the NETCONF ``copy_config`` request element carrying the
    with-inactive capability, and pass it to the callback.

    :param callback: optional callable receiving the element tree;
        defaults to ``self._callback``.
    :return: whatever the callback returns.
    """
    # The original created a throwaway ET.Element("config") that was
    # immediately overwritten; the copy_config element IS the config root.
    copy_config = ET.Element("copy_config")
    config = copy_config
    # Renamed from 'input', which shadowed the builtin.
    input_elem = ET.SubElement(copy_config, "input")
    ET.SubElement(input_elem, "with-inactive",
                  xmlns="http://tail-f.com/ns/netconf/inactive/1.0")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def merge(self, schema):
    """Merge the contents from the schema. Only objects not already contained
    in this schema's collections are merged. This is to provide for
    bidirectional import which produce cyclic includes.

    @returns: self
    @rtype: L{Schema}
    """
    def absorb(source, target):
        # Copy entries missing from *target*, tracking each new object
        # in self.all (same behavior as the five copy-pasted loops this
        # helper replaces).
        for key, value in source.items():
            if key in target:
                continue
            self.all.append(value)
            target[key] = value

    absorb(schema.attributes, self.attributes)
    absorb(schema.elements, self.elements)
    absorb(schema.types, self.types)
    absorb(schema.groups, self.groups)
    absorb(schema.agrps, self.agrps)
    schema.merged = True
    return self
def quast_general_stats_table(self):
    """Add selected parsed QUAST stats to the General Statistics table
    at the top of the report."""
    headers = OrderedDict()
    headers['N50'] = {
        'title': 'N50 ({})'.format(self.contig_length_suffix),
        'description': 'N50 is the contig length such that using longer or equal length contigs produces half (50%) of the bases of the assembly (kilo base pairs)',
        'min': 0,
        'suffix': self.contig_length_suffix,
        'scale': 'RdYlGn',
        'modify': lambda x: x * self.contig_length_multiplier,
    }
    headers['Total length'] = {
        'title': 'Length ({})'.format(self.total_length_suffix),
        'description': 'The total number of bases in the assembly (mega base pairs).',
        'min': 0,
        'suffix': self.total_length_suffix,
        'scale': 'YlGn',
        'modify': lambda x: x * self.total_length_multiplier,
    }
    self.general_stats_addcols(self.quast_data, headers)
def add(self, data_source, module, package=None):
    """Add data_source to model. Tries to import module, then looks for data
    source class definition.

    :param data_source: Name of data source to add.
    :type data_source: str
    :param module: Module in which data source resides. Can be absolute or
        relative. See :func:`importlib.import_module`
    :type module: str
    :param package: Optional, but must be used if module is relative.
    :type package: str

    .. seealso::
        :func:`importlib.import_module`
    """
    super(Data, self).add(data_source, module, package)
    # Record layer info only the first time this source is added.
    self.layer.setdefault(data_source, {'module': module, 'package': package})
    # Placeholder for the data source object until it is constructed.
    self.objects[data_source] = None
def main(self):
    """Scheduler steps:
    - run ready until exhaustion
    - if there's something scheduled
      - run overdue scheduled immediately
      - or if there's nothing registered, sleep until next scheduled
        and then go back to ready
    - if there's nothing registered and nothing scheduled, we've
      deadlocked, so stopped
    - poll on registered, with timeout of next scheduled, if something
      is scheduled
    """
    while True:
        # Drain the ready queue completely before looking at timers.
        while self.ready:
            task, a = self.ready.popleft()
            self.run_task(task, *a)
        if self.scheduled:
            timeout = self.scheduled.timeout()
            # run overdue scheduled immediately
            if timeout < 0:
                task, a = self.scheduled.pop()
                self.run_task(task, *a)
                continue
            # if nothing registered, just sleep until next scheduled
            if not self.registered:
                time.sleep(timeout)
                task, a = self.scheduled.pop()
                self.run_task(task, *a)
                continue
        else:
            # Nothing scheduled: poll with no timeout (block until events).
            timeout = -1
        # TODO: add better handling for deadlock
        if not self.registered:
            self.stopped.send(True)
            return
        # run poll
        events = None
        try:
            events = self.poll.poll(timeout=timeout)
        # IOError from a signal interrupt
        except IOError:
            pass
        if events:
            self.spawn(self.dispatch_events, events)
def headerData(self, section, orientation, role):
    """Get the text to put in the header of the levels of the indexes.

    By default it returns 'Index i', where i is the section in the index.
    """
    if role == Qt.TextAlignmentRole:
        if orientation == Qt.Horizontal:
            return Qt.AlignCenter | Qt.AlignBottom
        return Qt.AlignRight | Qt.AlignVCenter
    if role != Qt.DisplayRole and role != Qt.ToolTipRole:
        return None
    if self.model.header_shape[0] <= 1 and orientation == Qt.Horizontal:
        # Prefer the model-provided name, falling back to a generic label.
        header_name = self.model.name(1, section)
        if header_name:
            return header_name
        return _('Index')
    if self.model.header_shape[0] <= 1:
        return None
    if self.model.header_shape[1] <= 1 and orientation == Qt.Vertical:
        return None
    return _('Index') + ' ' + to_text_string(section)
def needs_invalidation(self, requirement, cache_file):
    """Check whether a cached binary distribution needs to be invalidated.

    :param requirement: A :class:`.Requirement` object.
    :param cache_file: The pathname of a cached binary distribution (a string).
    :returns: :data:`True` if the cached binary distribution needs to be
        invalidated, :data:`False` otherwise.
    """
    if self.config.trust_mod_times:
        # Stale when the requirement is newer than the cached artifact.
        return requirement.last_modified > os.path.getmtime(cache_file)
    known_checksum = self.recall_checksum(cache_file)
    return known_checksum and known_checksum != requirement.checksum
def frac_vol_floc_initial(ConcAluminum, ConcClay, coag, material):
    """Return the volume fraction of flocs initially present, accounting for
    both suspended particles and coagulant precipitates.

    :param ConcAluminum: Concentration of aluminum in solution
    :type ConcAluminum: float
    :param ConcClay: Concentration of particle in suspension
    :type ConcClay: float
    :param coag: Type of coagulant in solution
    :type coag: float
    :param material: Type of particles in suspension, e.g. floc_model.Clay
    :type material: floc_model.Material
    :return: Volume fraction of particles initially present
    :rtype: float
    """
    # Volume fraction contributed by the coagulant precipitate.
    precip_fraction = conc_precipitate(ConcAluminum, coag).magnitude / coag.PrecipDensity
    # Volume fraction contributed by the suspended particles.
    clay_fraction = ConcClay / material.Density
    return precip_fraction + clay_fraction
def copy(self):
    """Make a copy of this runnable.

    Produces a deep-ish copy: child runnables, array elements and group
    members are recursively copied (tracked in ``copies`` by uid so shared
    references stay shared), while plain fields and bound method hooks are
    carried over by reference.

    @return: Copy of this runnable.
    @rtype: lems.sim.runnable.Runnable
    """
    if self.debug:
        print("Coping....." + self.id)
    r = Runnable(self.id, self.component, self.parent)
    # Map of original child uid -> its copy, to keep references consistent.
    copies = dict()
    # Copy simulation time parameters
    r.time_step = self.time_step
    r.time_completed = self.time_completed
    r.time_total = self.time_total
    # Plasticity and state stack (?)
    r.plastic = self.plastic
    r.state_stack = Stack()
    # Copy variables (GG - Faster using the add_* methods?)
    for v in self.instance_variables:
        r.instance_variables.append(v)
        r.__dict__[v] = self.__dict__[v]
        r.__dict__[v + '_shadow'] = self.__dict__[v + '_shadow']
    for v in self.derived_variables:
        r.derived_variables.append(v)
        r.__dict__[v] = self.__dict__[v]
        r.__dict__[v + '_shadow'] = self.__dict__[v + '_shadow']
    # Copy array elements
    for child in self.array:
        child_copy = child.copy()
        child_copy.parent = r
        r.array.append(child_copy)
        copies[child.uid] = child_copy
    # Copy attachment def
    for att in self.attachments:
        atn = self.attachments[att]
        r.attachments[att] = atn
        r.__dict__[atn] = []
    # Copy children
    for uid in self.uchildren:
        child = self.uchildren[uid]
        child_copy = child.copy()
        child_copy.parent = r
        copies[child.uid] = child_copy
        r.add_child(child_copy.id, child_copy)
        # For typerefs: re-point any attribute that referenced this child.
        try:
            idx = [k for k in self.__dict__ if self.__dict__[k] == child][0]
            r.__dict__[idx] = child_copy
        except:
            pass
        # For groups and attachments: mirror list membership in the copy.
        try:
            idx = [k for k in self.__dict__ if child in self.__dict__[k]][0]
            if idx not in r.__dict__:
                r.__dict__[idx] = []
            r.__dict__[idx].append(child_copy)
        except:
            pass
    # Copy event ports
    for port in self.event_in_ports:
        r.event_in_ports.append(port)
        r.event_in_counters[port] = 0
    for port in self.event_out_ports:
        r.event_out_ports.append(port)
        r.event_out_callbacks[port] = self.event_out_callbacks[port]
    # Rewire event connections so they target the copied runnables.
    for ec in r.component.structure.event_connections:
        if self.debug:
            print("--- Fixing event_connection: %s in %s" % (ec.toxml(), id(r)))
        source = r.parent.resolve_path(ec.from_)
        target = r.parent.resolve_path(ec.to)
        if ec.receiver:
            # Will throw error...
            # NOTE(review): 'component' below is not defined in this scope --
            # presumably self.component (or r.component); confirm. Only
            # reached when ec.receiver is set, matching the note above.
            receiver_template = self.build_runnable(ec.receiver, target)
            # receiver = copy.deepcopy(receiver_template)
            receiver = receiver_template.copy()
            receiver.id = "{0}__{1}__".format(component.id, receiver_template.id)
            if ec.receiver_container:
                target.add_attachment(receiver, ec.receiver_container)
            target.add_child(receiver_template.id, receiver)
            target = receiver
        else:
            source = r.resolve_path(ec.from_)
            target = r.resolve_path(ec.to)
        source_port = ec.source_port
        target_port = ec.target_port
        # When a port is unspecified it must be uniquely inferable.
        if not source_port:
            if len(source.event_out_ports) == 1:
                source_port = source.event_out_ports[0]
            else:
                raise SimBuildError(("No source event port " "uniquely identifiable" " in '{0}'").format(source.id))
        if not target_port:
            if len(target.event_in_ports) == 1:
                target_port = target.event_in_ports[0]
            else:
                raise SimBuildError(("No destination event port " "uniquely identifiable " "in '{0}'").format(target))
        if self.debug:
            print("register_event_out_callback\n Source: %s, %s (port: %s) \n -> %s, %s (port: %s)" % (source, id(source), source_port, target, id(target), target_port))
        # NOTE(review): this lambda closes over the loop variables
        # 'target'/'target_port'; with multiple event connections every
        # callback would use the last iteration's values. Likely should bind
        # them via default arguments -- confirm intended behavior.
        source.register_event_out_callback(source_port, lambda: target.inc_event_in(target_port))
    # Copy methods (only those hooks that were actually installed).
    if getattr(self, "update_kinetic_scheme", None):
        r.update_kinetic_scheme = self.update_kinetic_scheme
    if getattr(self, "run_startup_event_handlers", None):
        r.run_startup_event_handlers = self.run_startup_event_handlers
    if getattr(self, "run_preprocessing_event_handlers", None):
        r.run_preprocessing_event_handlers = self.run_preprocessing_event_handlers
    if getattr(self, "run_postprocessing_event_handlers", None):
        r.run_postprocessing_event_handlers = self.run_postprocessing_event_handlers
    if getattr(self, "update_state_variables", None):
        r.update_state_variables = self.update_state_variables
    if getattr(self, "update_derived_variables", None):
        r.update_derived_variables = self.update_derived_variables
    # r.update_shadow_variables = self.update_shadow_variables
    if getattr(self, "update_derived_parameters", None):
        r.update_derived_parameters = self.update_derived_parameters
    for rn in self.regimes:
        r.add_regime(self.regimes[rn])
    r.current_regime = self.current_regime
    # Copy groups
    for gn in self.groups:
        g = self.__dict__[gn]
        for c in g:
            if c.uid in copies:
                r.add_child_to_group(gn, copies[c.uid])
            else:
                c2 = c.copy()
                c2.parent = r
                copies[c.uid] = c2
                r.add_child_to_group(gn, c2)
    # Copy remaining runnable references.
    for k in self.__dict__:
        if k == 'parent':
            continue
        c = self.__dict__[k]
        if isinstance(c, Runnable):
            if c.uid in copies:
                r.__dict__[k] = copies[c.uid]
            else:
                c2 = c.copy()
                c2.parent = r
                copies[c.uid] = c2
                r.__dict__[k] = c2
    # Copy text fields (anything not already present on the copy).
    for k in self.__dict__:
        if not k in r.__dict__:
            c = self.__dict__[k]
            if self.debug:
                print("Adding remaining field: %s = %s" % (k, c))
            r.__dict__[k] = c
    if self.debug:
        print('########################################')
        keys = list(self.__dict__.keys())
        keys.sort()
        print(len(keys))
        for k in keys:
            print(k, self.__dict__[k])
        print('----------------------------------------')
        keys = list(r.__dict__.keys())
        keys.sort()
        print(len(keys))
        for k in keys:
            print(k, r.__dict__[k])
        print('########################################')
        print('')
        print('')
        print('')
        print('')
    if self.debug:
        print("Finished coping..." + self.id)
    return r
def retry(f, exc_classes=DEFAULT_EXC_CLASSES, logger=None,
          retry_log_level=logging.INFO,
          retry_log_message="Connection broken in '{f}' (error: '{e}'); "
                            "retrying with new connection.",
          max_failures=None, interval=0,
          max_failure_log_level=logging.ERROR,
          max_failure_log_message="Max retries reached for '{f}'. Aborting."):
    """Decorator to automatically reexecute a function if the connection is
    broken for any reason.

    :param f: the function to wrap.
    :param exc_classes: iterable of exception classes that trigger a retry.
    :param logger: optional logger used for retry/abort messages.
    :param max_failures: when set, give up (re-raise) after this many retries.
    :param interval: seconds to sleep (via gevent) between attempts.
    """
    exc_classes = tuple(exc_classes)

    @wraps(f)
    def deco(*args, **kwargs):
        failures = 0
        while True:
            try:
                return f(*args, **kwargs)
            except exc_classes as e:
                if logger is not None:
                    # f.__name__, not the Python-2-only f.func_name, which
                    # raised AttributeError on Python 3 whenever a retry
                    # was logged.
                    logger.log(retry_log_level,
                               retry_log_message.format(f=f.__name__, e=e))
                gevent.sleep(interval)
                failures += 1
                if max_failures is not None and failures > max_failures:
                    if logger is not None:
                        logger.log(max_failure_log_level,
                                   max_failure_log_message.format(f=f.__name__, e=e))
                    raise
    return deco
def rapl_read():
    """Read Intel RAPL power stats from sysfs and return them as a list.

    Scans ``/sys/class/powercap/intel-rapl:*/`` domains, reading each
    domain's ``name`` and current ``energy_uj`` counter.  Unreadable
    entries are logged and skipped.

    :return: list of ``RaplStats(name, current, max_reading)`` entries
        (``max_reading`` is always 0.0 here).
    """
    basenames = sorted(set(glob.glob('/sys/class/powercap/intel-rapl:*/')))
    pjoin = os.path.join
    ret = list()
    for path in basenames:
        try:
            name = cat(pjoin(path, 'name'), fallback=None, binary=False)
        except (IOError, OSError, ValueError) as err:
            # Fixed: pass err and path as separate lazy %-args; the original
            # passed a (err, path) tuple plus a stray RuntimeWarning, which
            # breaks the "%r ... %r" formatting at log time.
            logging.warning("ignoring %r for file %r", err, path)
            continue
        if name:
            try:
                current = cat(pjoin(path, 'energy_uj'))
                max_reading = 0.0
                ret.append(RaplStats(name, float(current), max_reading))
            except (IOError, OSError, ValueError) as err:
                logging.warning("ignoring %r for file %r", err, path)
    return ret
def fromElement(cls, elem):
    """Build an instance from a MetaDataVersion XML element.

    :param lxml.etree._Element elem: source etree element; its ``OID`` and
        ``Name`` attributes populate the new object.
    """
    instance = cls()
    instance.oid = elem.get("OID")
    instance.name = elem.get("Name")
    return instance
def add_url(self):
    """Queue the pending URL (if non-empty) and reset the accumulator.

    The current parser position is recorded as the URL's source
    line/column.
    """
    pending = self.url
    if pending:
        self.url_data.add_url(pending,
                              line=self.parser.CurrentLineNumber,
                              column=self.parser.CurrentColumnNumber)
    self.url = u""
def create_parser():
    """Construct and return the argparse parser for the program options."""
    the_parser = argparse.ArgumentParser(
        prog=constants.PROGRAM_NAME,
        description=constants.PROGRAM_DESCRIPTION,
    )
    the_parser.add_argument("-cd", "--%s" % constants.LABEL_CONFIG_DIR,
                            help="the directory for configuration file lookup")
    the_parser.add_argument("-c", "--%s" % constants.LABEL_CONFIG,
                            help="the dictionary file")
    the_parser.add_argument("-td", "--%s" % constants.LABEL_TMPL_DIRS, nargs="*",
                            help="the directories for template file lookup")
    the_parser.add_argument("-t", "--%s" % constants.LABEL_TEMPLATE,
                            help="the template file")
    the_parser.add_argument("-o", "--%s" % constants.LABEL_OUTPUT,
                            help="the output file")
    the_parser.add_argument("--%s" % constants.LABEL_TEMPLATE_TYPE,
                            help="the template type, default is jinja2")
    the_parser.add_argument("-f", action="store_true",
                            dest=constants.LABEL_FORCE, default=False,
                            help="force moban to template all files despite of .moban.hashes")
    the_parser.add_argument("--%s" % constants.LABEL_EXIT_CODE,
                            action="store_true", dest=constants.LABEL_EXIT_CODE,
                            default=False,
                            help="tell moban to change exit code")
    the_parser.add_argument("-m", "--%s" % constants.LABEL_MOBANFILE,
                            help="custom moban file")
    the_parser.add_argument("-g", "--%s" % constants.LABEL_GROUP,
                            help="a subset of targets")
    the_parser.add_argument(constants.POSITIONAL_LABEL_TEMPLATE,
                            metavar="template", type=str, nargs="?",
                            help="string templates")
    the_parser.add_argument("-v", "--%s" % constants.LABEL_VERSION,
                            action="version",
                            version="%(prog)s {v}".format(v=__version__))
    return the_parser
def get_unique_object_contents(self, location: str) -> Tuple[bool, str, Union[str, Dict[str, str]]]:
    """Utility method to find a unique singlefile or multifile object.

    This method throws
    * ObjectNotFoundOnFileSystemError if no file is found
    * ObjectPresentMultipleTimesOnFileSystemError if the object is found
      multiple times (for example with several file extensions, or as a
      file AND a folder)
    * IllegalContentNameError if a multifile child name is None or empty
      string.

    It relies on the abstract methods of this class
    (find_simpleobject_file_occurrences and find_multifile_object_children)
    to find the various files present.

    :param location: a location identifier compliant with the provided
        file mapping configuration
    :return: [True, singlefile_ext, singlefile_path] if a unique
        singlefile object is present; [False, MULTIFILE_EXT,
        complexobject_attributes_found] if a unique multifile object is
        present, with complexobject_attributes_found being a dictionary
        {name: location}
    """
    # First check what is present on the filesystem per the file mapping.
    simple_found = self.find_simpleobject_file_occurrences(location)
    multi_children = self.find_multifile_object_children(location, no_errors=True)

    if len(simple_found) > 1 or (len(simple_found) == 1 and len(multi_children) > 0):
        # The object is present several times -> error.
        everything = simple_found
        everything.update(multi_children)
        raise ObjectPresentMultipleTimesOnFileSystemError.create(location, list(everything.keys()))

    if len(simple_found) == 1:
        # A unique singlefile object -> return its extension and path.
        ext, file_path = next(iter(simple_found.items()))
        return True, ext, file_path

    if len(multi_children) > 0:
        # A unique multifile object -> return its children mapping.
        if '' in multi_children.keys() or None in multi_children.keys():
            raise IllegalContentNameError.create(location, multi_children[MULTIFILE_EXT])
        return False, MULTIFILE_EXT, multi_children

    # Special case of a multifile object with no children (if applicable).
    if self.is_multifile_object_without_children(location):
        return False, MULTIFILE_EXT, dict()

    # Try if by any chance the issue is that location has an extension.
    loc_without_ext = splitext(location)[0]
    simple_found = self.find_simpleobject_file_occurrences(loc_without_ext)
    multi_children = self.find_multifile_object_children(loc_without_ext, no_errors=True)
    # The object was not found in a form that can be parsed.
    raise ObjectNotFoundOnFileSystemError.create(location, simple_found, multi_children)
def viable_source_types_for_generator(generator):
    """Memoizing wrapper around 'viable_source_types_for_generator_real'."""
    assert isinstance(generator, Generator)
    try:
        return __viable_source_types_cache[generator]
    except KeyError:
        __vstg_cached_generators.append(generator)
        result = viable_source_types_for_generator_real(generator)
        __viable_source_types_cache[generator] = result
        return result
def assertDateTimesPast(self, sequence, strict=True, msg=None):
    '''Fail if any elements in ``sequence`` are not in the past.

    If the max element is a datetime, "past" is defined as anything
    prior to ``datetime.now()``; if the max element is a date, "past"
    is defined as anything prior to ``date.today()``.

    If ``strict=True``, fail unless all elements in ``sequence`` are
    strictly less than ``date.today()`` (or ``datetime.now()``).
    If ``strict=False``, fail unless all elements in ``sequence`` are
    less than or equal to ``date.today()`` (or ``datetime.now()``).

    Parameters
    ----------
    sequence : iterable
    strict : bool
    msg : str
        If not provided, the :mod:`marbles.mixins` or :mod:`unittest`
        standard message will be used.

    Raises
    ------
    TypeError
        If ``sequence`` is not iterable.
    TypeError
        If max element in ``sequence`` is not a datetime or date object.
    '''
    # collections.Iterable was removed in Python 3.10; the abc module is
    # the correct home for the ABC.
    if not isinstance(sequence, collections.abc.Iterable):
        raise TypeError('First argument is not iterable')
    # Materialize once so one-shot iterators survive both the max() probe
    # and the delegated comparison below (the original consumed them).
    sequence = list(sequence)
    # Cannot compare datetime to date, so if dates are provided use
    # date.today(), if datetimes are provided use datetime.today().
    # datetime is checked first because it subclasses date.
    newest = max(sequence)
    if isinstance(newest, datetime):
        target = datetime.today()
    elif isinstance(newest, date):
        target = date.today()
    else:
        raise TypeError('Expected iterable of datetime or date objects')
    self.assertDateTimesBefore(sequence, target, strict=strict, msg=msg)
def inspect(object):
    """A better dir(): print each attribute name and its value to stderr."""
    for attr_name in dir(object):
        try:
            value = getattr(object, attr_name)
        except Exception as exc:
            value = exc
        try:
            value = str(value)
        except Exception as exc:
            value = exc
        print("{}: {}".format(attr_name, value), file=sys.stderr)
def warn(self, key):
    """Return truthy when warnings for ``key`` are enabled on this object."""
    # Global silencers take precedence.
    if self.quiet or self.warn_none:
        return False
    # Either everything is warned about, or the per-key flag decides.
    return self.warn_all or getattr(self, "warn_%s" % key)
def visit_arguments(self, node: AST, dfltChaining: bool = True) -> str:
    """Return `node`s representation as argument list."""
    positional = node.args
    defaults = node.defaults
    kwonly = node.kwonlyargs
    kwonly_defaults = node.kw_defaults
    # Defaults are visited in compact mode; restore afterwards.
    self.compact = True
    n_plain = len(positional) - len(defaults)
    parts = [arg.arg for arg in positional[:n_plain]]
    parts.extend(f"{arg.arg}={self.visit(dflt)}"
                 for arg, dflt in zip(positional[n_plain:], defaults))
    if node.vararg:
        parts.append(f"*{node.vararg.arg}")
    for kw, dflt in zip(kwonly, kwonly_defaults):
        # A kw-only arg without a default has None in kw_defaults.
        parts.append(f"{kw.arg}" if dflt is None
                     else f"{kw.arg}={self.visit(dflt)}")
    if node.kwarg:
        parts.append(f"**{node.kwarg.arg}")
    src = ', '.join(parts)
    self.compact = False
    return src
def list_only():
    """List-Mode: retrieve provider data, display it, then exit."""
    (credentials, providers) = config_read()
    connections = cld.get_conns(credentials, providers)
    found_nodes = cld.get_data(connections, providers)
    nodes_by_name = make_node_dict(found_nodes, "name")
    table.indx_table(nodes_by_name)
def get_all_email_receivers_of_recurring(self, recurring_id):
    """Get all email receivers of a recurring.

    Iterates over all pages until every element is fetched, so if the
    rate limit is exceeded an exception is thrown and nothing is
    returned.

    :param recurring_id: the recurring id
    :return: list
    """
    return self._iterate_through_pages(
        get_function=self.get_email_receivers_of_recurring_per_page,
        resource=RECURRING_EMAIL_RECEIVERS,
        recurring_id=recurring_id,
    )
def extract_bzip2(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a BZIP2 archive with the bz2 Python module.

    :param archive: path of the .bz2 file to decompress.
    :param compression: unused; kept for the common extractor signature.
    :param cmd: unused; kept for the common extractor signature.
    :param verbosity: unused; kept for the common extractor signature.
    :param interactive: unused; kept for the common extractor signature.
    :param outdir: directory receiving the single decompressed file.
    :return: None on success.
    :raises util.PatoolError: when reading or writing fails.
    """
    targetname = util.get_single_outfile(outdir, archive)
    try:
        with bz2.BZ2File(archive) as bz2file:
            with open(targetname, 'wb') as targetfile:
                data = bz2file.read(READ_SIZE_BYTES)
                while data:
                    targetfile.write(data)
                    data = bz2file.read(READ_SIZE_BYTES)
    except Exception as err:
        msg = "error extracting %s to %s: %s" % (archive, targetname, err)
        # Chain the original exception so tracebacks keep the root cause.
        raise util.PatoolError(msg) from err
    return None
def pop(self):
    """Pop a task off the front of the queue & run it.

    Typically you'll favor a ``Worker`` to constantly consume the queue;
    this method is useful when you need to custom-process the queue
    in-order.

    :returns: the completed ``Task`` instance, or None when the queue
        yields no data.
    """
    raw = self.backend.pop(self.queue_name)
    if not raw:
        return None
    task = self.task_class.deserialize(raw)
    return self.execute(task)
def cancelMarketData(self, contracts=None):
    """Cancel streaming market data for contract(s).

    https://www.interactivebrokers.com/en/software/api/apiguide/java/cancelmktdata.htm

    :param contracts: a single contract, a list of contracts, or None to
        cancel streaming for every contract currently tracked.
    """
    # Fixed: identity comparison with None (`== None` is unidiomatic and
    # can misbehave with overloaded __eq__).
    if contracts is None:
        contracts = list(self.contracts.values())
    elif not isinstance(contracts, list):
        contracts = [contracts]
    for contract in contracts:
        tickerId = self.tickerId(self.contractString(contract))
        self.ibConn.cancelMktData(tickerId=tickerId)
def use_mock(self, mock, *args, **kwarg):
    """Context manager or decorator in order to use a coroutine as mock of
    service endpoint in a test.

    :param mock: Coroutine to use as mock. It should behave like
        :meth:`~ClientSession.request`.
    :type mock: coroutine
    :param service_name: Name of service where you want to use mock. If
        None it will be used as soon as possible.
    :type service_name: str
    :param endpoint: Endpoint where you want to use mock. If None it will
        be used as soon as possible.
    :type endpoint: str
    :param offset: Times it must be ignored before use. Default 0. Only
        positive integers.
    :type offset: int
    :param limit: Times it could be used. Default 1. 0 means no limit.
        Only positive integers.
    :type limit: int
    :return: UseMockDefinition
    """
    definition = UseMockDefinition(mock, self, *args, **kwarg)
    return definition
def main():
    """Core function for the script: parse arguments and dispatch the
    requested calcpkg action to every configured repository."""
    commands = ['update', 'list', 'get', 'info', 'count', 'search', 'download']
    parser = argparse.ArgumentParser(description="Command line access to software repositories for TI calculators, primarily ticalc.org and Cemetech")
    parser.add_argument("action", metavar="ACTION", type=str, help="The calcpkg command to execute (count, get, info, list, update)")
    parser.add_argument("string", metavar="STRING", type=str, help="The string to search for when using count, get, info, or list commands", nargs="?", default="")
    parser.add_argument("-c", "--category", dest="category", help="Limit searching to a specified category", default="")
    parser.add_argument("-e", "--extension", dest="extension", help="Limit searching to a specified file extension", default="")
    parser.add_argument("-f", "--filename", dest="searchFiles", action="store_true", help="Search by archive filenames rather than descriptive package name")
    parser.add_argument("-g", "--game", dest="game", action="store_true", help="Limit searching to games only")
    parser.add_argument("-m", "--math", dest="math", action="store_true", help="Limit searching to math and science programs only")
    parser.add_argument("-r", "--repository", dest="repo", help="Limit searching by one repository- default is to use all", default="")
    parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Always provide verbose output")
    parser.add_argument("-x", "--extract", dest="extract", action="store_true", help="After downloading, autoextract archive files when possible")
    parser.add_argument("-y", "--assume-yes", dest="prompt", action="store_false", help="Never prompt for verification of command")
    args = parser.parse_args()
    # Verify that a valid command was specified
    if args.action not in commands:
        # Fixed: print() function instead of the Python 2 print statement.
        print("Error: Invalid action specified, action must be one of " + str(commands))
        return
    # args.category is special
    if args.category != "":
        category = "/" + args.category + "/"
    else:
        category = ""
    # Initialize repositories; all behind-the-scene processing is done by
    # plugins in calcrepo.repos
    repositories = createRepoObjects()
    if args.repo != "":
        # Fixed: dict.items() instead of the Python-2-only iteritems().
        for repoName in repositories:
            if repoName != args.repo:
                repositories[repoName] = None
    # Now, run commands for each repo
    for name, repository in repositories.items():
        if repository is not None:
            repository.setRepoData(args.string, category, args.extension, args.math, args.game, args.searchFiles)
            if args.action == "update":
                repository.updateRepoIndexes(args.verbose)
            elif args.action in ("list", "search"):
                repository.searchIndex()
            elif args.action in ("get", "download"):
                repository.searchIndex()
                repository.downloadFiles(args.prompt, args.extract)
            elif args.action == "info":
                repository.getFileInfos()
            elif args.action == "count":
                repository.countIndex()
def construct_1d_arraylike_from_scalar(value, length, dtype):
    """Create a np.ndarray / pandas type of specified shape and dtype,
    filled with `value`.

    Parameters
    ----------
    value : scalar value
    length : int
    dtype : pandas_dtype / np.dtype

    Returns
    -------
    np.ndarray / pandas type of length, filled with value
    """
    if is_datetime64tz_dtype(dtype):
        from pandas import DatetimeIndex
        return DatetimeIndex([value] * length, dtype=dtype)
    if is_categorical_dtype(dtype):
        from pandas import Categorical
        return Categorical([value] * length, dtype=dtype)

    if not isinstance(dtype, (np.dtype, type(np.dtype))):
        dtype = dtype.dtype
    if length and is_integer_dtype(dtype) and isna(value):
        # coerce if we have nan for an integer dtype
        dtype = np.dtype('float64')
    elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"):
        # coerce to object dtype so numpy takes our string as a scalar value
        dtype = object
        if not isna(value):
            value = to_str(value)
    filled = np.empty(length, dtype=dtype)
    filled.fill(value)
    return filled
def make_wheel_filename_generic(wheel):
    """Rewrite a wheel filename to carry generic python/abi tags.

    Wheel filenames contain the python version and the python ABI version
    for the wheel (https://www.python.org/dev/peps/pep-0427/#file-name-convention).
    Since we're distributing a rust binary this doesn't matter for us, so
    the tags become "py2.py3"/"none", and plain linux platform tags are
    relabelled manylinux1 so a binary distribution is accepted.

    :param wheel: wheel filename, e.g. "pkg-1.0-cp37-cp37m-linux_x86_64.whl"
    :return: the rewritten filename
    """
    # Unpack from the right so the optional build tag (a sixth segment
    # between version and python tag, per PEP 427) is preserved instead of
    # raising ValueError.
    *head, python, abi, platform = wheel.split("-")
    # our binary handles multiple abi / versions of python
    python, abi = "py2.py3", "none"
    # hack, lets pretend to be manylinux1 so we can do a binary distribution
    if platform == "linux_x86_64.whl":
        platform = "manylinux1_x86_64.whl"
    elif platform == "linux_i686.whl":
        platform = "manylinux1_i686.whl"
    return "-".join((*head, python, abi, platform))
def get_working_days_delta(self, start, end):
    """Return the number of working days between two given dates.

    The order of the dates provided doesn't matter, and the method also
    works when ``start``/``end`` are datetimes (they are normalized via
    ``cleaned_date`` first).  The day after ``start`` through ``end``
    inclusive are examined, so week-ends and calendar holidays reduce
    the count: e.g. a Thursday-to-next-Thursday span over a week-end
    yields 5 working days for a plain WesternCalendar.
    """
    start = cleaned_date(start)
    end = cleaned_date(end)
    if start == end:
        return 0
    if start > end:
        # Normalize so we always walk forward in time.
        start, end = end, start
    working = 0
    cursor = start
    while cursor < end:
        cursor += timedelta(days=1)
        if self.is_working_day(cursor):
            working += 1
    return working
def version(self):
    """Return buttersink, btrfs and kernel version info as a dict."""
    return {
        'buttersink': theVersion,
        'btrfs': self.butterStore.butter.btrfsVersion,
        'linux': platform.platform(),
    }
def parse_objective_coefficient(entry):
    """Return objective value for reaction entry.

    Detect objectives that are specified using the non-standardized
    kinetic law parameter ``OBJECTIVE_COEFFICIENT`` used by many pre-FBC
    SBML models.

    Args:
        entry: :class:`SBMLReactionEntry`.

    Returns:
        The objective coefficient for the given reaction, or None if
        undefined.
    """
    return next(
        (value
         for pid, name, value, _units in entry.kinetic_law_reaction_parameters
         if 'OBJECTIVE_COEFFICIENT' in (pid, name)),
        None)
def killCellRegion(self, centerColumn, radius):
    """Kill cells in the columns within `radius` of `centerColumn`."""
    dead = topology.wrappingNeighborhood(centerColumn, radius, self._columnDimensions)
    self.deadCols = dead
    self.deadColumnInputSpan = self.getConnectedSpan(dead)
    self.removeDeadColumns()
def filter(self, u):
    """Filter the valid identities for this matcher.

    :param u: unique identity which stores the identities to filter
    :returns: a list of identities valid to work with this matcher
    :raises ValueError: when the unique identity is not an instance of
        UniqueIdentity class
    """
    if not isinstance(u, UniqueIdentity):
        raise ValueError("<u> is not an instance of UniqueIdentity")

    filtered = []
    for identity in u.identities:
        # Skip identities from sources this matcher is not configured for.
        if self.sources and identity.source.lower() not in self.sources:
            continue
        if self.strict:
            email = identity.email.lower() if self._check_email(identity.email) else None
        else:
            email = identity.email.lower() if identity.email else None
        if email:
            filtered.append(EmailIdentity(identity.id, identity.uuid, email))
    return filtered
def align_and_build_tree(seqs, moltype, best_tree=False, params=None):
    """Align `seqs` and build a tree from the resulting alignment.

    seqs: a cogent.core.alignment.SequenceCollection object, or data that
        can be used to build one.
    moltype: cogent.core.moltype.MolType object
    best_tree: if True (default: False), uses a slower but more accurate
        algorithm to build the tree.
    params: dict of parameters to pass in to the Muscle app controller.

    Returns a dict with keys 'Align' (a cogent.core.alignment.Alignment)
    and 'Tree' (a cogent.core.tree.PhyloNode), either possibly None when
    that step fails.
    """
    alignment = align_unaligned_seqs(seqs, moltype=moltype, params=params)
    phylo_tree = build_tree_from_alignment(alignment, moltype, best_tree, params)
    return {'Align': alignment, 'Tree': phylo_tree}
def _return(self, ary):
    """Wrap `ary` in an Array (without copying) unless it already is one."""
    return ary if isinstance(ary, Array) else Array(ary, copy=False)
def run():
    """Main script entry to handle the arguments given to the script."""
    _parser_options()
    set_verbose(args["verbose"])
    # Bail out early when the global settings are unusable.
    if not _check_global_settings():
        exit(-1)
    _load_db()
    # Check the server configuration against the script arguments passed in.
    _setup_server()
    if args["rollback"]:
        _server_rollback()
        okay("The server rollback appears to have been successful.")
        exit(0)
    _server_enable()
    _list_repos()
    _handle_install()
    # This is the workhorse once a successful installation has happened.
    _do_cron()
def _fetch_output_files ( self , retrieved ) :
"""Checks the output folder for standard output and standard error
files , returns their absolute paths on success .
: param retrieved : A dictionary of retrieved nodes , as obtained from the
parser .""" | from aiida . common . datastructures import calc_states
from aiida . common . exceptions import InvalidOperation
import os
# check in order not to overwrite anything
# state = self . _ calc . get _ state ( )
# if state ! = calc _ states . PARSING :
# raise InvalidOperation ( " Calculation not in { } state "
# . format ( calc _ states . PARSING ) )
# Check that the retrieved folder is there
try :
out_folder = retrieved [ self . _calc . _get_linkname_retrieved ( ) ]
except KeyError :
raise IOError ( "No retrieved folder found" )
list_of_files = out_folder . get_folder_list ( )
output_path = None
error_path = None
if self . _calc . _DEFAULT_OUTPUT_FILE in list_of_files :
output_path = os . path . join ( out_folder . get_abs_path ( '.' ) , self . _calc . _DEFAULT_OUTPUT_FILE )
if self . _calc . _DEFAULT_ERROR_FILE in list_of_files :
error_path = os . path . join ( out_folder . get_abs_path ( '.' ) , self . _calc . _DEFAULT_ERROR_FILE )
return output_path , error_path |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.