signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def sync(infile, outfile):
    """Sync (download) the latest policies from the Anchore.io service."""
    exit_code = 0
    try:
        ok, result = anchore_policy.sync_policymeta(bundlefile=infile, outfile=outfile)
        if not ok:
            anchore_print_err(result['text'])
            exit_code = 1
        elif outfile and outfile == '-':
            # '-' means "write to stdout" instead of a file.
            anchore_print(result['text'])
    except Exception:
        anchore_print_err('operation failed')
        exit_code = 1
    sys.exit(exit_code)
def register_blueprint(self, blueprint):
    """Register the given blueprint on the current app.

    Provided for use inside a plugin's module-level ``register_plugin``
    functions.

    :param blueprint: blueprint object with plugin endpoints
    :type blueprint: flask.Blueprint
    """
    if blueprint in self._blueprint_known:
        return  # already registered; registering twice would error
    self.app.register_blueprint(blueprint)
    self._blueprint_known.add(blueprint)
def store_fw_db_router(self, tenant_id, net_id, subnet_id, router_id, os_status):
    """Store the result of a FW router operation in the DB.

    Updates the local cache on the tenant's service object, then asks it
    to commit the router operation result to the database.
    """
    service = self.get_service_obj(tenant_id)
    service.update_fw_local_router(net_id, subnet_id, router_id, os_status)
    service.commit_fw_db()
    service.commit_fw_db_result()
def end(self):
    """Finalise the lz4 frame.

    The remaining compressed tail is written to the registered write
    callback if one exists, otherwise it is returned from this function.
    """
    with self.__lock:
        tail = compress_end(self.__ctx)
        if self.__write:
            self.__write(tail)
            return None
        return tail
def can_fetch(self, useragent, url):
    """Use the parsed robots.txt to decide if useragent can fetch url.

    @return: True if agent can fetch url, else False
    @rtype: bool
    """
    log.debug(LOG_CHECK, "%r check allowance for:\n user agent: %r\n url: %r ...", self.url, useragent, url)
    # Normalise both values to byte strings (Python 2 idiom).
    if not isinstance(useragent, str):
        useragent = useragent.encode("ascii", "ignore")
    if not isinstance(url, str):
        url = url.encode("ascii", "ignore")
    # Blanket rules short-circuit any per-agent matching.
    if self.disallow_all:
        log.debug(LOG_CHECK, " ... disallow all.")
        return False
    if self.allow_all:
        log.debug(LOG_CHECK, " ... allow all.")
        return True
    # Reduce the url to its quoted path component; the first user-agent
    # entry that matches wins.
    url = urllib.quote(urlparse.urlparse(urllib.unquote(url))[2]) or "/"
    for entry in self.entries:
        if entry.applies_to(useragent):
            return entry.allowance(url)
    # Fall back to the default ("*") entry, if any.
    if self.default_entry is not None:
        return self.default_entry.allowance(url)
    # Agent not found ==> access granted.
    log.debug(LOG_CHECK, " ... agent not found, allow.")
    return True
def rebin_scale(a, scale=1):
    """Scale an array to a new shape, multiplying every side by ``scale``."""
    return rebin(a, tuple(side * scale for side in a.shape))
def _aha_request ( self , cmd , ain = None , param = None , rf = str ) :
"""Send an AHA request .""" | url = 'http://' + self . _host + '/webservices/homeautoswitch.lua'
params = { 'switchcmd' : cmd , 'sid' : self . _sid }
if param :
params [ 'param' ] = param
if ain :
params [ 'ain' ] = ain
plain = self . _request ( url , params )
if plain == 'inval' :
raise InvalidError
if rf == bool :
return bool ( int ( plain ) )
return rf ( plain ) |
def create(cls, data, path=None, defaults=None, overwrite=False, random_id=False, **kwargs):
    """Create a new database object and store it in the database.

    NOTE: ``path`` and ``defaults`` exist to allow direct use of the
    DatabaseObject class; subclasses should instead override the PATH and
    DEFAULTS attributes rather than passing them here.

    @param data: dictionary of data the object is created with; must follow
        all mongo rules and contain an ID_KEY entry unless random_id == True
    @param path: database path, in the form "database.collection"
    @param defaults: the defaults dictionary to use for this object
    @param overwrite: if True, overwrite any object with the same ID_KEY;
        if False, raise when another object has the same ID_KEY
    @param random_id: store the new object under a random ID_KEY, overwriting
        data[ID_KEY]
    @param **kwargs: ignored
    @raise Exception: if both path and cls.PATH are None
    @raise DatabaseConflictError: if an object with that ID_KEY already
        exists and overwrite == False
    @raise MalformedObjectError: if a REQUIRED defaults key is missing, or
        the object's ID_KEY is None while random_id is False
    """
    self = cls(path=path, defaults=defaults, _new_object=data)
    # Validate every non-ID key against the declared defaults/types.
    for key, value in self.items():
        if key == ID_KEY:
            continue
        if self.DEFAULTS and key not in self.DEFAULTS:
            self._handle_non_default_key(key, value)
        self._check_type(key, value)
    if random_id and ID_KEY in self:
        # Drop the caller-supplied id so the DB assigns a random one.
        dict.__delitem__(self, ID_KEY)
    if not random_id and ID_KEY not in self:
        raise MalformedObjectError("No " + ID_KEY + " key in item")
    if not random_id and not overwrite and self._collection.find_one({ID_KEY: data[ID_KEY]}):
        raise DatabaseConflictError('ID_KEY "%s" already exists in collection %s' % (data[ID_KEY], self.PATH))
    self._pre_save()
    if ID_KEY in self and overwrite:
        self._collection.replace_one({ID_KEY: self[ID_KEY]}, dict(self), upsert=True)
    else:
        insert_result = self._collection.insert_one(dict(self))
        dict.__setitem__(self, ID_KEY, insert_result.inserted_id)
    return self
def process_insert_get_id(self, query, sql, values, sequence=None):
    """Process an "insert get ID" query.

    :param query: A QueryBuilder instance
    :type query: QueryBuilder
    :param sql: The sql query to execute
    :type sql: str
    :param values: The value bindings
    :type values: list
    :param sequence: The ids sequence
    :type sequence: str
    :return: The inserted row id
    :rtype: int
    """
    result = query.get_connection().select_from_write_connection(sql, values)
    row_id = result[0][0]  # first column of the first returned row
    if isinstance(row_id, int):
        return row_id
    # Some drivers return the id as a numeric string.
    if str(row_id).isdigit():
        return int(row_id)
    return row_id
def identity(self):
    """A unique identifier for the current visitor."""
    # The identity provider is either an object with get_identity() or a
    # bare callable taking the environ.
    getter = getattr(self._identity, 'get_identity', None)
    if getter is not None:
        return getter(self._environ)
    return self._identity(self._environ)
def find_geo_coords(s):
    """Return a list of GeoJSON points for lat/lons found in the given text."""
    coords = []
    LOG.debug("Matching in text size %s", len(s))
    # Pass 1: coordinates from info boxes.
    for c in INFO_BOX_LAT_LON.findall(s):
        try:
            coord = (float(c[1]), float(c[2]))
            coords.append(coord)
            LOG.debug("Found info box lat/lon: %s", coord)
        except Exception as ex:
            LOG.warn("Bad parse of info box %s: %s", c, ex)
    # Pass 2: generic coordinate template matches (special cases filtered).
    for c in COORDS_GEN.findall(s):
        if skip_coords(c):
            LOG.debug("Ignorning coords %s", c)
            continue
        m = COORDS_GROUPS.search(c)
        if not m:
            LOG.warn("Unrecognized coord format: %s", c)
            continue
        try:
            # Drop empty optional groups and strip trailing pipes.
            g = [(tok[0:-1] if tok[-1] == '|' else tok)
                 for tok in m.groups() if tok is not None and len(tok)]
            if len(g) == 1:  # single "lat|lon" group
                lat, lon = g[0].split('|')
                coord = (float(lat), float(lon))
                coords.append(coord)
                LOG.debug("Found lat|lon: %s", coord)
            elif g[3] == 'E' or g[3] == 'W':
                lat = depipe(g[0]) * (1 if g[1].upper() == 'N' else -1)
                lon = depipe(g[2]) * (1 if g[3].upper() == 'E' else -1)
                coord = (lat, lon)
                coords.append(coord)
                LOG.debug("Found lat|NS|lon|EW: %s", coord)
            else:
                LOG.warn("Unrecognized coord format: %s (parsed %s)", c, g)
        except Exception as ex:
            LOG.warn("Bad parse of %s: %s", c, ex)
    # Dedupe and validate; note GeoJSON puts lon before lat.
    result = []
    for c in set(coords):
        if c[0] > 90 or c[0] < -90 or c[1] > 180 or c[1] < -180 or (c[0] == 0 and c[1] == 0):
            LOG.warn("Invalid lat or lon: %s", c)
        else:
            result.append({"type": "Point", "coordinates": (c[1], c[0])})
    return result
def _check_pip_installed ( ) :
"""Invoke ` pip - - version ` and make sure it doesn ' t error .
Use check _ output to capture stdout and stderr
Invokes pip by the same manner that we plan to in _ call _ pip ( )
Don ' t bother trying to reuse _ call _ pip to do this . . . Finnicky and not worth
the effort .""" | try :
subprocess . check_output ( [ sys . executable , "-m" , "pip" , "--version" ] , stderr = subprocess . STDOUT )
return True
except subprocess . CalledProcessError :
return False |
def evaluate_cut(uncut_subsystem, cut, unpartitioned_ces):
    """Compute the system irreducibility for a given cut.

    Args:
        uncut_subsystem (Subsystem): The subsystem without the cut applied.
        cut (Cut): The cut to evaluate.
        unpartitioned_ces (CauseEffectStructure): The cause-effect structure
            of the uncut subsystem.

    Returns:
        SystemIrreducibilityAnalysis: The analysis for that cut.
    """
    log.debug('Evaluating %s...', cut)
    cut_subsystem = uncut_subsystem.apply_cut(cut)
    if config.ASSUME_CUTS_CANNOT_CREATE_NEW_CONCEPTS:
        mechanisms = unpartitioned_ces.mechanisms
    else:
        # Mechanisms can only produce concepts if they were concepts in the
        # original system, or the cut divides the mechanism.
        mechanisms = set(unpartitioned_ces.mechanisms +
                         list(cut_subsystem.cut_mechanisms))
    partitioned_ces = ces(cut_subsystem, mechanisms)
    log.debug('Finished evaluating %s.', cut)
    phi_value = ces_distance(unpartitioned_ces, partitioned_ces)
    return SystemIrreducibilityAnalysis(
        phi=phi_value,
        ces=unpartitioned_ces,
        partitioned_ces=partitioned_ces,
        subsystem=uncut_subsystem,
        cut_subsystem=cut_subsystem)
def render(self, form=None, **kwargs):
    """Return the ``HttpResponse`` built from the context data."""
    return self.render_to_response(self.get_context(**kwargs))
def new(self, bootstrap_with=None, use_timer=False, with_proof=False):
    """Actual constructor of the solver.

    Creates the native MapleChrono solver (once), optionally bootstraps it
    with an iterable of clauses and enables DRUP proof tracing.
    """
    if not self.maplesat:
        self.maplesat = pysolvers.maplechrono_new()
        if bootstrap_with:
            for clause in bootstrap_with:
                self.add_clause(clause)
        self.use_timer = use_timer
        self.call_time = 0.0  # time spent for the last call to oracle
        self.accu_time = 0.0  # time accumulated for all calls to oracle
        if with_proof:
            self.prfile = tempfile.TemporaryFile()
            pysolvers.maplechrono_tracepr(self.maplesat, self.prfile)
def _serve_file(self, path):
    """Serve the file at *path* via Paste's FileApp (a WSGI application)."""
    request = self._py_object.request
    request.environ['PATH_INFO'] = '/%s' % path
    return PkgResourcesParser('pylons', 'pylons')(request.environ, self.start_response)
def execute(self, conn, file_id_list, transaction=False):
    """Run the parent-block lookup for the given file ids.

    file_id_list: ids bound into the IN (...) clause; the base SQL in
    self.sql is expected to end with an open parenthesis.
    """
    sql = self.sql
    binds = {}
    if file_id_list:
        placeholders = []
        for count, an_id in enumerate(file_id_list):
            placeholders.append(":file_id_%s" % count)
            binds["file_id_%s" % count] = an_id
        sql += ", ".join(placeholders) + ")"
    else:
        dbsExceptionHandler('dbsException-invalid-input', "Oracle/FileParentBlock/List. this_file_id not provided", self.logger.exception)
    result = self.dbi.processData(sql, binds, conn, transaction)
    plist = self.formatDict(result)
    return plist
def parse(input, server=default_erg_server, params=None, headers=None):
    """Request a parse of *input* on *server* and return the response.

    Args:
        input (str): sentence to be parsed
        server (str): the url for the server (the default LOGON server
            is used by default)
        params (dict): a dictionary of request parameters
        headers (dict): a dictionary of additional request headers

    Returns:
        A ParseResponse containing the results, if the request was
        successful, otherwise None.

    Raises:
        requests.HTTPError: if the status code was not 200
    """
    # Delegate to the iterable variant and take the single result.
    return next(parse_from_iterable([input], server, params, headers), None)
def save(self, directory=None):
    """Dump the entire contents of the database into the tabledata directory as ascii files."""
    from subprocess import call
    # If the user did not supply a new directory, use the loaded one
    # (default: tabledata).
    if directory is None:
        directory = self.directory
    # Create the .sql file if it doesn't exist, i.e. if the Database class
    # was initially given a .db file.
    if not os.path.isfile(self.sqlpath):
        self.sqlpath = self.dbpath.replace('.db', '.sql')
        os.system('touch {}'.format(self.sqlpath))
    # Write the schema.
    os.system("echo '.output {}\n.schema' | sqlite3 {}".format(self.sqlpath, self.dbpath))
    # Write the table files to the tabledata directory.
    os.system("mkdir -p {}".format(directory))
    tables = self.query("select tbl_name from sqlite_master where type='table'")['tbl_name']
    tablepaths = [self.sqlpath]
    for table in tables:
        print('Generating {}...'.format(table))
        tablepath = '{0}/{1}.sql'.format(directory, table)
        tablepaths.append(tablepath)
        with open(tablepath, 'w') as f:
            for line in self.conn.iterdump():
                line = line.strip()
                if line.startswith('INSERT INTO "{}"'.format(table)):
                    if sys.version_info.major == 2:
                        f.write(u'{}\n'.format(line).encode('utf-8'))
                    else:
                        f.write(u'{}\n'.format(line))
    print("Tables saved to directory {}/".format(directory))
    print("""=======================================================================================
    You can now run git to commit and push these changes, if needed.
    For example, if on the master branch you can do the following:
    git add {0} {1}/*.sql
    git commit -m "COMMIT MESSAGE HERE"
    git push origin master
    You can then issue a pull request on GitHub to have these changes reviewed and accepted
    =======================================================================================""".format(self.sqlpath, directory))
def _pos ( self , k ) :
"""Description :
Position k breaking
Parameters :
k : position k is used for the breaking""" | if k < 2 :
raise ValueError ( "k smaller than 2" )
G = np . zeros ( ( self . m , self . m ) )
for i in range ( self . m ) :
for j in range ( self . m ) :
if i == j :
continue
if i < k or j < k :
continue
if i == k or j == k :
G [ i ] [ j ] = 1
return G |
def psf_convolution(self, grid, grid_scale, psf_subgrid=False, subgrid_res=1):
    """Convolve a pixel grid with the configured PSF.

    :param grid: 2d pixel grid
    :param grid_scale: pixel scale used to express the Gaussian sigma in pixels
    :param psf_subgrid: if True, use a subgrid-resolution pixel kernel
    :param subgrid_res: subgrid resolution factor
    :raises ValueError: for an unknown PSF type
    """
    psf_type = self.psf_type
    if psf_type == 'NONE':
        # No PSF configured: hand back the grid untouched.
        return grid
    if psf_type == 'GAUSSIAN':
        sigma = self._sigma_gaussian / grid_scale  # sigma in pixel units
        return ndimage.filters.gaussian_filter(grid, sigma, mode='nearest', truncate=self._truncation)
    if psf_type == 'PIXEL':
        kernel = self.subgrid_pixel_kernel(subgrid_res) if psf_subgrid else self._kernel_pixel
        return signal.fftconvolve(grid, kernel, mode='same')
    raise ValueError('PSF type %s not valid!' % psf_type)
def ip_addrs6(interface=None, include_loopback=False, cidr=None):
    '''Returns a list of IPv6 addresses assigned to the host.

    interface
        Only IP addresses from that interface will be returned.

    include_loopback : False
        Include loopback ::1 IPv6 address.

    cidr
        Describes subnet using CIDR notation and only IPv6 addresses that
        belong to this subnet will be returned.

        .. versionchanged:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' network.ip_addrs6
        salt '*' network.ip_addrs6 cidr=2000::/3
    '''
    addrs = salt.utils.network.ip_addrs6(interface=interface, include_loopback=include_loopback)
    if not cidr:
        return addrs
    return [addr for addr in addrs if salt.utils.network.in_subnet(cidr, [addr])]
def _create_fulltext_query(self):
    """Support the json-server fulltext search with a broad LIKE filter."""
    filter_by = []
    if 'q' in request.args:
        columns = flat_model(model_tree(self.__class__.__name__, self.model_cls))
        # Every search term is matched against every column with LIKE.
        for term in request.args.getlist('q'):
            filter_by.extend('{col}::like::%{q}%'.format(col=col, q=term) for col in columns)
    return filter_by
def counter(self, counter_name, default=0):
    """Get the current counter value.

    Args:
        counter_name: name of the counter, as a string.
        default: value returned when the counter does not exist.

    Returns:
        Current value of the counter.
    """
    return self._state.counters_map.get(counter_name, default)
def _init_id2gos ( assoc_fn ) : # # , no _ top = False ) :
"""Reads a gene id go term association file . The format of the file
is as follows :
AAR1GO : 0005575 ; GO : 0003674 ; GO : 0006970 ; GO : 0006970 ; GO : 0040029
AAR2GO : 0005575 ; GO : 0003674 ; GO : 0040029 ; GO : 0009845
ACD5GO : 0005575 ; GO : 0003674 ; GO : 0008219
ACL1GO : 0005575 ; GO : 0003674 ; GO : 0009965 ; GO : 0010073
ACL2GO : 0005575 ; GO : 0003674 ; GO : 0009826
ACL3GO : 0005575 ; GO : 0003674 ; GO : 0009826 ; GO : 0009965
Also , the following format is accepted ( gene ids are repeated ) :
AAR1GO : 0005575
AAR1 GO : 0003674
AAR1 GO : 0006970
AAR2GO : 0005575
AAR2 GO : 0003674
AAR2 GO : 0040029
: param assoc _ fn : file name of the association
: return : dictionary having keys : gene id , values set of GO terms""" | assoc = cx . defaultdict ( set )
# # top _ terms = set ( [ ' GO : 0008150 ' , ' GO : 0003674 ' , ' GO : 0005575 ' ] ) # BP , MF , CC
for row in open ( assoc_fn , 'r' ) :
atoms = row . split ( )
if len ( atoms ) == 2 :
gene_id , go_terms = atoms
elif len ( atoms ) > 2 and row . count ( '\t' ) == 1 :
gene_id , go_terms = row . split ( "\t" )
else :
continue
gos = set ( go_terms . split ( ";" ) )
# # if no _ top :
# # gos = gos . difference ( top _ terms )
assoc [ gene_id ] |= gos
return assoc |
def path_expand(text):
    """Return *text* with user (~) and environment ($VAR) parts expanded.

    A leading "." is additionally replaced with the current working
    directory.

    :param text: the path to be expanded
    :type text: string
    """
    result = os.path.expandvars(os.path.expanduser(text))
    if result.startswith("."):
        result = result.replace(".", os.getcwd(), 1)
    return result
def dict_merge(dct, merge_dct):
    """Recursively merge ``merge_dct`` into ``dct`` (in place).

    Unlike :meth:`dict.update`, nested dicts are merged key-by-key down to
    an arbitrary depth instead of being replaced wholesale.

    :param dct: dict onto which the merge is executed
    :param merge_dct: dct merged into dct
    :return: None
    """
    for key, value in merge_dct.items():
        both_dicts = key in dct and isinstance(dct[key], dict) and isinstance(value, dict)
        if both_dicts:
            dict_merge(dct[key], value)
        else:
            dct[key] = value
def is_tango_object(arg):
    """Return tango data if the argument is a tango object, False otherwise."""
    if isinstance(arg, (attribute, device_property)):
        return arg
    # Commands carry their tango data on __tango_command__.
    return getattr(arg, '__tango_command__', False)
def import_process_template(self, upload_stream, ignore_warnings=None, **kwargs):
    """ImportProcessTemplate.

    [Preview API] Imports a process from zip file.

    :param object upload_stream: Stream to upload
    :param bool ignore_warnings: Default value is false
    :rtype: :class:`<ProcessImportResult> <azure.devops.v5_0.work_item_tracking_process_template.models.ProcessImportResult>`
    """
    route_values = {'action': 'Import'}
    query_parameters = {}
    if ignore_warnings is not None:
        query_parameters['ignoreWarnings'] = self._serialize.query('ignore_warnings', ignore_warnings, 'bool')
    callback = kwargs.get("callback")
    content = self._client.stream_upload(upload_stream, callback=callback)
    response = self._send(http_method='POST',
                          location_id='29e1f38d-9e9c-4358-86a5-cdf9896a5759',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content,
                          media_type='application/octet-stream')
    return self._deserialize('ProcessImportResult', response)
def _parse_args(args: List[str]) -> _SetArgumentsRunConfig:
    """Parse the given CLI arguments into a run configuration.

    :param args: CLI arguments
    :return: run configuration derived from the given CLI arguments
    """
    parser = argparse.ArgumentParser(
        prog="gitlab-set-variables",
        description="Tool for setting a GitLab project's build variables")
    add_common_arguments(parser, project=True)
    parser.add_argument(
        "source", nargs="+", type=str,
        help="File to source build variables from. Can be a ini file, JSON file or a shell script "
             "containing 'export' statements")
    parsed = parser.parse_args(args)
    return _SetArgumentsRunConfig(parsed.source, parsed.project, parsed.url, parsed.token, parsed.debug)
def clone(self, **keywd):
    """Construct a new argument_t instance, overriding fields from keywd.

    Any of name, decl_type, default_value and attributes may be supplied;
    unspecified fields are copied from this instance.
    """
    return argument_t(
        name=keywd.get('name', self.name),
        decl_type=keywd.get('decl_type', self.decl_type),
        default_value=keywd.get('default_value', self.default_value),
        attributes=keywd.get('attributes', self.attributes))
def grab(self, *keys: typing.List[str], default_value=None) -> typing.Tuple:
    """Return a tuple of cached values for the given keys.

    :param keys:
        One or more variable names stored in the cache; the argument order
        is preserved in the returned tuple.
    :param default_value:
        Returned in place of any key not found in the cache.
    :return:
        A tuple containing a value for each of the specified keys.
    """
    return tuple(self.fetch(key, default_value) for key in keys)
def standardize_back(xs, offset, scale):
    """De-standardize an input series.

    **Args:**

    * `xs`: standardized input (1 dimensional array)
    * `offset`: offset to add (float).
    * `scale`: scale (float).

    **Returns:**

    * `x`: original (destandardised) series

    :raises ValueError: if any argument cannot be converted to float/array
    """
    # Narrow the previously-bare excepts: only conversion failures should be
    # translated into ValueError; anything else (e.g. KeyboardInterrupt)
    # must propagate.
    try:
        offset = float(offset)
    except (TypeError, ValueError) as err:
        raise ValueError('The argument offset is not None or float.') from err
    try:
        scale = float(scale)
    except (TypeError, ValueError) as err:
        raise ValueError('The argument scale is not None or float.') from err
    try:
        xs = np.array(xs, dtype="float64")
    except (TypeError, ValueError) as err:
        raise ValueError('The argument xs is not numpy array or similar.') from err
    return xs * scale + offset
def getControls(self):
    """Calculate consumption for each consumer of this type using the
    consumption functions.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    cNrmNow = np.full(self.AgentCount, np.nan)
    for t in range(self.T_cycle):
        # Boolean mask of agents currently at period t of their cycle.
        these = t == self.t_cycle
        cNrmNow[these] = self.solution[t].cFunc(self.mNrmNow[these], self.PrefShkNow[these])
    self.cNrmNow = cNrmNow
    return None
def qstat(self, queue_name, return_dict=False):
    """Return the status of the queue (currently unimplemented server-side).

    Future support/testing of QSTAT support in Disque:
    QSTAT <qname>
    Return produced ... consumed ... idle ... sources [...] ctime ...
    """
    reply = self.execute_command('QSTAT', queue_name)
    if not return_dict:
        return reply
    # The raw reply alternates key, value, key, value, ...
    return dict(self._grouper(reply, 2))
def add_external_parameter(self, parameter):
    """Add a parameter that comes from something other than a function to the model.

    :param parameter: a Parameter instance
    :return: None
    """
    # Fixed message: the check is for Parameter, but the old message
    # claimed "IndependentVariable".
    assert isinstance(parameter, Parameter), "Variable must be an instance of Parameter"
    if self._has_child(parameter.name):
        # Remove the existing child only if it is a Parameter instance;
        # otherwise let _add_child fail (which is the expected behaviour —
        # two children must not share a name).
        if isinstance(self._get_child(parameter.name), Parameter):
            warnings.warn("External parameter %s already exist in the model. Overwriting it..." % parameter.name, RuntimeWarning)
            self._remove_child(parameter.name)
    # This will fail if another node with the same name is already in the model.
    self._add_child(parameter)
def get_variants(self, chromosome=None, start=None, end=None):
    """Return all variants in the database.

    If no region is specified all variants will be returned.

    Args:
        chromosome (str)
        start (int)
        end (int)

    Returns:
        variants (Iterable(Variant))
    """
    query = {}
    if chromosome:
        query['chrom'] = chromosome
    if start:
        # Overlap test: variant starts before the region end and ends
        # after the region start.
        query['start'] = {'$lte': end}
        query['end'] = {'$gte': start}
    LOG.info("Find all variants {}".format(query))
    return self.db.variant.find(query).sort([('start', ASCENDING)])
def volume_attach(provider, names, **kwargs):
    '''Attach volume to a server

    CLI Example:

    .. code-block:: bash

        salt minionname cloud.volume_attach my-nova myblock server_name=myserver device='/dev/xvdf'
    '''
    client = _get_client()
    return client.extra_action(provider=provider, names=names, action='volume_attach', **kwargs)
def updateMesh(self, polydata):
    """Overwrite the actor's polygonal mesh with *polydata* and return self."""
    self.poly = polydata
    self.mapper.SetInputData(polydata)
    self.mapper.Modified()
    return self
async def on_isupport_invex(self, value):
    """Server allows invite exceptions."""
    # An empty value means the server uses the default invite-except mode.
    mode = value or INVITE_EXCEPT_MODE
    self._channel_modes.add(mode)
    self._channel_modes_behaviour[rfc1459.protocol.BEHAVIOUR_LIST].add(mode)
def is_writable_attr(ext):
    """Check if an extension attribute is writable.

    ext (tuple): The (default, method, getter, setter) tuple available via
        {Doc, Span, Token}.get_extension. (The docstring previously listed
        the members in the wrong order relative to the unpacking below.)
    RETURNS (bool): Whether the attribute is writable.
    """
    default, method, getter, setter = ext
    # Extension is writable if it has a setter (getter + setter), if it has a
    # default value (or, if its default value is None, none of the other
    # values should be set).
    if setter is not None or default is not None or all(e is None for e in ext):
        return True
    return False
def project_activity(index, start, end):
    """Compute the metrics for the project activity section of the enriched
    github issues index.

    Returns a dictionary containing a "metrics" key holding the metrics
    for this section.

    :param index: index object
    :param start: start date to get the data from
    :param end: end date to get the data upto
    :return: dictionary with the value of the metrics
    """
    metrics = [
        OpenedIssues(index, start, end),
        ClosedIssues(index, start, end),
    ]
    return {"metrics": metrics}
def lines_touch_2D(ab, cd):
    '''lines_touch_2D((a,b), (c,d)) is equivalent to lines_colinear((a,b), (c,d)) |
    numpy.isfinite(line_intersection_2D((a,b), (c,d))[0])'''
    colinear = lines_colinear(ab, cd)
    finite_hit = np.isfinite(line_intersection_2D(ab, cd)[0])
    return colinear | finite_hit
def load(self, rel_path=None):
    """Add sim_src to layer."""
    for key, spec in self.layer.iteritems():  # Python 2 dict iteration
        self.add(key, spec['module'], spec.get('package'))
        filename = spec.get('filename')
        path = spec.get('path')
        if not filename:
            continue
        warnings.warn(DeprecationWarning(SIMFILE_LOAD_WARNING))
        # default path for data is in ../simulations
        if not path:
            path = rel_path
        else:
            path = os.path.join(rel_path, path)
        self.open(key, os.path.join(path, filename))
def make_list(self, end_token="]"):
    """Collect values until the end token is reached.

    Also usable for tuples by passing a different ``end_token``.
    """
    out = []
    while True:
        try:
            out.append(self.value_assign(end_token=end_token))
            self.separator(end_token=end_token)
        except self.ParseEnd:
            # End token consumed: the list is complete.
            return out
def main():
    """Main entry point: parse options, take the singleton lock, run the sync."""
    parser = OptionParser()
    parser.add_option('-a', '--hostname', help='ClamAV source server hostname', dest='hostname', type='str', default='db.de.clamav.net')
    parser.add_option('-r', '--text-record', help='ClamAV Updates TXT record', dest='txtrecord', type='str', default='current.cvd.clamav.net')
    parser.add_option('-w', '--work-directory', help='Working directory', dest='workdir', type='str', default='/var/spool/clamav-mirror')
    parser.add_option('-d', '--mirror-directory', help='The mirror directory', dest='mirrordir', type='str', default='/srv/www/clamav')
    parser.add_option('-u', '--user', help='Change file owner to this user', dest='user', type='str', default='nginx')
    parser.add_option('-g', '--group', help='Change file group to this group', dest='group', type='str', default='nginx')
    parser.add_option('-l', '--locks-directory', help='Lock files directory', dest='lockdir', type='str', default='/var/lock/subsys')
    parser.add_option('-v', '--verbose', help='Display verbose output', dest='verbose', action='store_true', default=False)
    options, _ = parser.parse_args()
    try:
        # Exclusive non-blocking lock: only one mirror sync may run at a time.
        lockfile = os.path.join(options.lockdir, 'clamavmirror')
        with open(lockfile, 'w+') as lock:
            fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
            work(options)
    except IOError:
        info("=> Another instance is already running")
        sys.exit(254)
def makeInstance(self, instanceDescriptor, doRules=False, glyphNames=None, pairs=None, bend=False):
    """Generate a font object for this instance.

    :param instanceDescriptor: descriptor carrying the designspace location
        and naming data of the instance to generate.
    :param doRules: when True, apply the designspace rules by swapping glyph
        names in the generated font.
    :param glyphNames: optional iterable restricting which glyphs are
        generated; defaults to all known glyph names.
    :param pairs: optional subset of kerning pairs to interpolate.
    :param bend: forwarded to the mutators' ``makeInstance`` calls.
    :return: the generated font object.
    """
    font = self._instantiateFont(None)
    loc = Location(instanceDescriptor.location)
    anisotropic = False
    locHorizontal = locVertical = loc
    if self.isAnisotropic(loc):
        # Anisotropic locations interpolate horizontal and vertical data at
        # different positions; the results are merged further down.
        anisotropic = True
        locHorizontal, locVertical = self.splitAnisotropic(loc)
    # groups
    renameMap = getattr(self.fonts[self.default.name], "kerningGroupConversionRenameMaps", None)
    font.kerningGroupConversionRenameMaps = renameMap if renameMap is not None else {'side1': {}, 'side2': {}}
    # Kerning is always horizontal, so only the horizontal location is used.
    if instanceDescriptor.kerning:
        if pairs:
            # Filter to the requested pairs only.
            try:
                kerningMutator = self.getKerningMutator(pairs=pairs)
                kerningObject = kerningMutator.makeInstance(locHorizontal, bend=bend)
                kerningObject.extractKerning(font)
            except Exception:
                self.problems.append("Could not make kerning for %s. %s" % (loc, traceback.format_exc()))
        else:
            kerningMutator = self.getKerningMutator()
            kerningObject = kerningMutator.makeInstance(locHorizontal, bend=bend)
            kerningObject.extractKerning(font)
    # font info
    try:
        infoMutator = self.getInfoMutator()
        if not anisotropic:
            infoInstanceObject = infoMutator.makeInstance(loc, bend=bend)
        else:
            horizontalInfoInstanceObject = infoMutator.makeInstance(locHorizontal, bend=bend)
            verticalInfoInstanceObject = infoMutator.makeInstance(locVertical, bend=bend)
            # merge them again
            infoInstanceObject = (1, 0) * horizontalInfoInstanceObject + (0, 1) * verticalInfoInstanceObject
        if self.roundGeometry:
            try:
                infoInstanceObject = infoInstanceObject.round()
            except AttributeError:
                pass
        infoInstanceObject.extractInfo(font.info)
        font.info.familyName = instanceDescriptor.familyName
        font.info.styleName = instanceDescriptor.styleName
        # yikes, note the differences in capitalisation..
        font.info.postscriptFontName = instanceDescriptor.postScriptFontName
        font.info.styleMapFamilyName = instanceDescriptor.styleMapFamilyName
        font.info.styleMapStyleName = instanceDescriptor.styleMapStyleName
        # TODO: localised names need to go to the right openTypeNameRecords.
    except Exception:
        self.problems.append("Could not make fontinfo for %s. %s" % (loc, traceback.format_exc()))
    # Copy the flagged data (info/lib/groups/features) from the sources.
    for sourceDescriptor in self.sources:
        if sourceDescriptor.copyInfo:
            # this is the source
            if self.fonts[sourceDescriptor.name] is not None:
                self._copyFontInfo(self.fonts[sourceDescriptor.name].info, font.info)
        if sourceDescriptor.copyLib:
            # explicitly copy the font.lib items
            if self.fonts[sourceDescriptor.name] is not None:
                for key, value in self.fonts[sourceDescriptor.name].lib.items():
                    font.lib[key] = value
        if sourceDescriptor.copyGroups:
            if self.fonts[sourceDescriptor.name] is not None:
                # Groups named in the kerning rename maps are skipped.
                sides = font.kerningGroupConversionRenameMaps.get('side1', {})
                sides.update(font.kerningGroupConversionRenameMaps.get('side2', {}))
                for key, value in self.fonts[sourceDescriptor.name].groups.items():
                    if key not in sides:
                        font.groups[key] = value
        if sourceDescriptor.copyFeatures:
            if self.fonts[sourceDescriptor.name] is not None:
                featuresText = self.fonts[sourceDescriptor.name].features.text
                font.features.text = featuresText
    # glyphs
    selectedGlyphNames = glyphNames if glyphNames else self.glyphNames
    # add the glyphnames to the font.lib['public.glyphOrder']
    if 'public.glyphOrder' not in font.lib.keys():
        font.lib['public.glyphOrder'] = selectedGlyphNames
    for glyphName in selectedGlyphNames:
        try:
            glyphMutator = self.getGlyphMutator(glyphName)
            if glyphMutator is None:
                continue
        except Exception:
            self.problems.append("Could not make mutator for glyph %s %s" % (glyphName, traceback.format_exc()))
            continue
        # XXX instance-specific glyph data should be able to go now that we
        # have full rule support.
        if glyphName in instanceDescriptor.glyphs.keys():
            glyphData = instanceDescriptor.glyphs[glyphName]
        else:
            glyphData = {}
        font.newGlyph(glyphName)
        font[glyphName].clear()
        if glyphData.get('mute', False):
            # mute this glyph, skip
            continue
        glyphInstanceLocation = glyphData.get("instanceLocation", instanceDescriptor.location)
        glyphInstanceLocation = Location(glyphInstanceLocation)
        uniValues = []
        neutral = glyphMutator.get(())
        if neutral is not None:
            uniValues = neutral[0].unicodes
        else:
            neutralFont = self.getNeutralFont()
            if glyphName in neutralFont:
                uniValues = neutralFont[glyphName].unicodes
        glyphInstanceUnicodes = glyphData.get("unicodes", uniValues)
        note = glyphData.get("note")
        if note:
            # BUG FIX: previously `font[glyphName] = note` replaced the whole
            # glyph object with the note string; set the note attribute instead.
            font[glyphName].note = note
        # XXXX phase out support for instance-specific masters;
        # this should be handled by the rules system.
        masters = glyphData.get("masters", None)
        if masters is not None:
            items = []
            for glyphMaster in masters:
                sourceGlyphFont = glyphMaster.get("font")
                sourceGlyphName = glyphMaster.get("glyphName", glyphName)
                m = self.fonts.get(sourceGlyphFont)
                if sourceGlyphName not in m:
                    continue
                if hasattr(m[sourceGlyphName], "toMathGlyph"):
                    sourceGlyph = m[sourceGlyphName].toMathGlyph()
                else:
                    sourceGlyph = MathGlyph(m[sourceGlyphName])
                sourceGlyphLocation = glyphMaster.get("location")
                items.append((Location(sourceGlyphLocation), sourceGlyph))
            # Replace the glyph mutator with one built from the explicit masters.
            bias, glyphMutator = self.getVariationModel(items, axes=self.serializedAxes, bias=self.newDefaultLocation())
        try:
            if not self.isAnisotropic(glyphInstanceLocation):
                glyphInstanceObject = glyphMutator.makeInstance(glyphInstanceLocation, bend=bend)
            else:
                # split anisotropic location into horizontal and vertical components
                horizontal, vertical = self.splitAnisotropic(glyphInstanceLocation)
                horizontalGlyphInstanceObject = glyphMutator.makeInstance(horizontal, bend=bend)
                verticalGlyphInstanceObject = glyphMutator.makeInstance(vertical, bend=bend)
                # merge them again
                glyphInstanceObject = (1, 0) * horizontalGlyphInstanceObject + (0, 1) * verticalGlyphInstanceObject
        except IndexError:
            # alignment problem with the data?
            continue
        font.newGlyph(glyphName)
        font[glyphName].clear()
        if self.roundGeometry:
            try:
                glyphInstanceObject = glyphInstanceObject.round()
            except AttributeError:
                pass
        try:
            # fontParts-style glyphs provide fromMathGlyph; otherwise fall back
            # to fontMath's extractGlyph (which needs an 'anchors' setter).
            if hasattr(font[glyphName], "fromMathGlyph"):
                font[glyphName].fromMathGlyph(glyphInstanceObject)
            else:
                glyphInstanceObject.extractGlyph(font[glyphName], onlyGeometry=True)
        except TypeError:
            # defcon2 objects don't support extractGlyph; draw the outline
            # manually. NOTE: this path can cause ruled glyphs to end up in the
            # wrong glyphname.
            pPen = font[glyphName].getPointPen()
            font[glyphName].clear()
            glyphInstanceObject.drawPoints(pPen)
        font[glyphName].width = glyphInstanceObject.width
        font[glyphName].unicodes = glyphInstanceUnicodes
    if doRules:
        resultNames = processRules(self.rules, loc, self.glyphNames)
        for oldName, newName in zip(self.glyphNames, resultNames):
            if oldName != newName:
                swapGlyphNames(font, oldName, newName)
    # store designspace location in the font.lib
    font.lib['designspace.location'] = list(instanceDescriptor.location.items())
    return font
def evaluate_errors(json_response):
    """Evaluate rest errors and raise the matching exception.

    :param json_response: decoded JSON response dict containing 'errors'
    :raises InvalidToken: for token-related error codes (402, 403, 405, 406)
    :raises PyVLXException: for malformed responses or unknown codes
    """
    errors = json_response.get('errors')
    well_formed = isinstance(errors, list) and errors and isinstance(errors[0], int)
    if not well_formed:
        raise PyVLXException('Could not evaluate errors {0}'.format(json.dumps(json_response)))
    # unclear if response may contain more errors than one. Taking the first.
    first_error = errors[0]
    if first_error in (402, 403, 405, 406):
        raise InvalidToken(first_error)
    raise PyVLXException('Unknown error code {0}'.format(first_error))
def run(self, name, replace=None, actions=None):
    """Do an action.

    If `replace` is provided as a dictionary, do a search/replace using
    %{} templates on content of action (unique to action type).

    :param name: key of the action to run inside *actions*
    :param replace: optional substitution mapping
    :param actions: mapping of action name -> action dict
    """
    self.actions = actions
    # in case we use group
    action = actions.get(name)
    if not action:
        self.die("Action not found: {}", name)
    action['name'] = name
    action_type = action.get('type', "none")
    try:
        handler = getattr(self, '_run__' + action_type)
    except AttributeError:
        self.die("Unsupported action type " + action_type)
    try:
        return handler(action, replace)
    except Exception as err:  # pylint: disable=broad-except
        if self._debug:
            self.debug(traceback.format_exc())
        self.die("Error running action name={} type={} error={}", name, action_type, err)
def binned_entropy(x, max_bins):
    """First bins the values of x into max_bins equidistant bins.

    Then calculates the value of

    .. math::
        - \\sum_{k=0}^{min(max\\_bins, len(x))} p_k log(p_k) \\cdot \\mathbf{1}_{(p_k > 0)}

    where :math:`p_k` is the percentage of samples in bin :math:`k`.

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param max_bins: the maximal number of bins
    :type max_bins: int
    :return: the value of this feature
    :return type: float
    """
    if not isinstance(x, (np.ndarray, pd.Series)):
        x = np.asarray(x)
    hist, bin_edges = np.histogram(x, bins=max_bins)
    probs = hist / x.size
    # Drop empty bins: p*log(p) -> 0 as p -> 0, and log(0) is undefined.
    # NOTE: the previous version passed a generator to np.sum (deprecated)
    # and used np.math.log, which was removed in NumPy 2.0.
    probs = probs[probs > 0]
    return -np.sum(probs * np.log(probs))
def add_options(self):
    """Add program options."""
    super(ThemeSwitcher, self).add_options()
    # Boolean flags, registered in the same order they are documented.
    for short_opt, long_opt, help_text in [
        ("-l", "--list", "list available themes"),
        ("-c", "--current", "print path to currently selected theme"),
        ("-n", "--next", "rotate through selected themes, and print new path"),
        ("-a", "--all", "remove any selections, and use all themes"),
    ]:
        self.add_bool_option(short_opt, long_opt, help=help_text)
    self.add_value_option("-t", "--toggle", "NAME", help="toggle selection of a theme")
def set_block(arr, arr_block):
    """Set the diagonal blocks of an array to a given block array.

    Parameters
    ----------
    arr : numpy ndarray
        the original array
    arr_block : numpy ndarray
        the block array for the new diagonal

    Returns
    -------
    numpy ndarray
        a modified copy of *arr*; the input is left untouched
    """
    n_rows, n_cols = arr.shape[0], arr.shape[1]
    blk_rows, blk_cols = arr_block.shape[0], arr_block.shape[1]
    if np.mod(n_rows, blk_rows) or np.mod(n_cols, blk_cols):
        raise ValueError('Number of rows/columns of the input array '
                         'must be a multiple of block shape')
    if n_rows / blk_rows != n_cols / blk_cols:
        raise ValueError('Block array can not be filled as '
                         'diagonal blocks in the given array')
    result = arr.copy()
    for block_index in range(int(n_rows / blk_rows)):
        rows = slice(block_index * blk_rows, (block_index + 1) * blk_rows)
        cols = slice(block_index * blk_cols, (block_index + 1) * blk_cols)
        result[rows, cols] = arr_block
    return result
def dP_demister_dry_Setekleiv_Svendsen(S, voidage, vs, rho, mu, L=1):
    r'''Calculate dry pressure drop across a demister per [1]_.

    This model is for dry demisters with no holdup only.

    .. math::
        \frac{\Delta P \epsilon^2}{\rho_f v^2} = 10.29 - \frac{565}
        {69.6SL - (SL)^2 - 779} - \frac{74.9}{160.9 - 4.85SL} + 45.33\left(
        \frac{\mu_f \epsilon S^2 L}{\rho_f v}\right)^{0.75}

    Parameters
    ----------
    S : float
        Specific area of the demister, normally ~250-1000 [m^2/m^3]
    voidage : float
        Voidage of bed of the demister material, normally ~0.98 []
    vs : float
        Superficial velocity of fluid, Q/A [m/s]
    rho : float
        Density of fluid [kg/m^3]
    mu : float
        Viscosity of fluid [Pa*s]
    L : float, optional
        Length of the demister [m]

    Returns
    -------
    dP : float
        Pressure drop across a dry demister [Pa]

    Notes
    -----
    Useful at startup and in modeling. Dry pressure drop is normally
    negligible compared to wet pressure drop. Coefficients were obtained by
    evolutionary programming and may not fit data outside of the limits of
    the variables.

    Examples
    --------
    >>> dP_demister_dry_Setekleiv_Svendsen(S=250, voidage=.983, vs=1.2, rho=10, mu=3E-5, L=1)
    320.3280788941329

    References
    ----------
    .. [1] Setekleiv, A. Eddie, and Hallvard F. Svendsen. "Dry Pressure Drop
       in Spiral Wound Wire Mesh Pads at Low and Elevated Pressures."
       Chemical Engineering Research and Design 109 (May 2016): 141-149.
       doi:10.1016/j.cherd.2016.01.019.
    '''
    # Dimensionless (Euler-number-like) group from the published correlation.
    correction = 10.29 - 565./(69.6*S*L - (S*L)**2 - 779) - 74.9/(160.9 - 4.85*S*L)
    euler = correction + 45.33*(mu*voidage*S**2*L/rho/vs)**0.75
    # Convert the dimensionless group back to a pressure drop in Pa.
    return euler*rho*vs**2/voidage**2
def scan(client, query=None, scroll="5m", raise_on_error=True, preserve_order=False, size=1000, request_timeout=None, clear_scroll=True, scroll_kwargs=None, **kwargs):
    """Iterate over all hits of a query using the scroll API.

    Simple abstraction on top of :meth:`~elasticsearch.Elasticsearch.scroll` -
    a generator that yields every hit returned by the underlying scroll
    requests. By default no particular order is guaranteed; pass
    ``preserve_order=True`` to keep the search order (expensive - it disables
    the ``_doc`` sort optimization).

    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg query: body for the :meth:`~elasticsearch.Elasticsearch.search` api
    :arg scroll: how long a consistent view of the index should be maintained
        for scrolled search
    :arg raise_on_error: raise ``ScanError`` if some shards fail to execute
        (default True)
    :arg preserve_order: paginate while preserving the search order
    :arg size: size (per shard) of the batch sent at each iteration
    :arg request_timeout: explicit timeout for each call
    :arg clear_scroll: explicitly delete the scroll id on completion or error
        (default True)
    :arg scroll_kwargs: additional kwargs passed to
        :meth:`~elasticsearch.Elasticsearch.scroll`

    Any additional keyword arguments are passed to the initial
    :meth:`~elasticsearch.Elasticsearch.search` call.
    """
    if scroll_kwargs is None:
        scroll_kwargs = {}
    if not preserve_order:
        # Sorting by _doc is the cheapest order for scrolling; copy the query
        # so the caller's dict is not mutated.
        if query:
            query = query.copy()
        else:
            query = {}
        query["sort"] = "_doc"
    # initial search
    resp = client.search(body=query, scroll=scroll, size=size, request_timeout=request_timeout, **kwargs)
    scroll_id = resp.get("_scroll_id")
    try:
        while scroll_id and resp['hits']['hits']:
            for hit in resp["hits"]["hits"]:
                yield hit
            # Surface partial results: warn, and optionally raise, when some
            # shards did not respond.
            shards = resp["_shards"]
            if shards["successful"] < shards["total"]:
                logger.warning(
                    "Scroll request has only succeeded on %d shards out of %d.",
                    shards["successful"], shards["total"],
                )
                if raise_on_error:
                    raise ScanError(
                        scroll_id,
                        "Scroll request has only succeeded on %d shards out of %d." % (shards["successful"], shards["total"]),
                    )
            resp = client.scroll(scroll_id, scroll=scroll, request_timeout=request_timeout, **scroll_kwargs)
            scroll_id = resp.get("_scroll_id")
    finally:
        if scroll_id and clear_scroll:
            client.clear_scroll(body={"scroll_id": [scroll_id]}, ignore=(404,))
def _user_settings(self):
    """Resolve settings dict from the django settings module.

    Validates that all the required keys are present and that none of the
    removed keys are. The result is cached by the caller.
    """
    user_settings = getattr(settings, self._name, {})
    if not user_settings and self._required:
        raise ImproperlyConfigured(
            "Settings file is missing dict options with name {}".format(self._name))
    present = frozenset(user_settings.keys())
    missing_required = self._required - present
    if missing_required:
        raise ImproperlyConfigured(
            "Following options for {} are missing from settings file: {}".format(
                self._name, ', '.join(sorted(missing_required))))
    removed_in_use = present & self._removed
    if removed_in_use:
        raise ImproperlyConfigured(
            "Following options for {} have been removed: {}".format(
                self._name, ', '.join(sorted(removed_in_use))))
    return user_settings
def bytes2guid(s):
    """Convert a 16-byte serialized GUID to its canonical text form.

    The first three fields are stored little-endian, the remaining two
    big-endian, matching the usual on-disk GUID layout.
    """
    assert isinstance(s, bytes)
    data1, data2, data3 = struct.unpack("<IHH", s[:8])
    # Pad the last 6 bytes to 8 so they can be read as a big-endian Q.
    data4, data5 = struct.unpack(">HQ", s[8:10] + b"\x00\x00" + s[10:])
    return "%08X-%04X-%04X-%04X-%012X" % (data1, data2, data3, data4, data5)
def trim_trailing_silence(obj):
    """Return a copy of the object whose piano-roll(s) have trailing
    silence trimmed off."""
    _check_supported(obj)
    trimmed = deepcopy(obj)
    active = trimmed.get_active_length()
    # Keep only the time steps up to the last active one.
    trimmed.pianoroll = trimmed.pianoroll[:active]
    return trimmed
def insert_graph(self, graph: BELGraph, store_parts: bool = True, use_tqdm: bool = False) -> Network:
    """Insert a graph in the database and return the corresponding Network model.

    :raises: pybel.resources.exc.ResourceError
    """
    if not graph.name:
        raise ValueError('Can not upload a graph without a name')
    if not graph.version:
        raise ValueError('Can not upload a graph without a version')
    log.debug('inserting %s v%s', graph.name, graph.version)
    started = time.time()
    self.ensure_default_namespace()
    # Register cached namespaces referenced by the graph.
    namespace_urls = graph.namespace_url.values()
    if use_tqdm:
        namespace_urls = tqdm(namespace_urls, desc='namespaces')
    for url in namespace_urls:
        if url not in graph.uncached_namespaces:
            self.get_or_create_namespace(url)
    for keyword, pattern in graph.namespace_pattern.items():
        self.ensure_regex_namespace(keyword, pattern)
    # Register annotations referenced by the graph.
    annotation_urls = graph.annotation_url.values()
    if use_tqdm:
        annotation_urls = tqdm(annotation_urls, desc='annotations')
    for url in annotation_urls:
        self.get_or_create_annotation(url)
    metadata = {key: value for key, value in graph.document.items() if key in METADATA_INSERT_KEYS}
    network = Network(**metadata)
    network.store_bel(graph)
    if store_parts:
        network.nodes, network.edges = self._store_graph_parts(graph, use_tqdm=use_tqdm)
    self.session.add(network)
    self.session.commit()
    log.info('inserted %s v%s in %.2f seconds', graph.name, graph.version, time.time() - started)
    return network
def make(cls, **kwargs):
    """Create a container.

    Reports extra keys as well as missing ones.
    Thanks to habnabit for the idea!
    """
    cls_attrs = {field.name: field for field in attr.fields(cls)}
    unknown = {key: value for key, value in kwargs.items() if key not in cls_attrs}
    if unknown:
        _LOGGER.warning("Got unknowns for %s: %s - please create an issue!", cls.__name__, unknown)
    data = {key: value for key, value in kwargs.items() if key in cls_attrs}
    # Initialize missing values ourselves to avoid passing default=None
    # through the attrs attribute definitions.
    for attr_name, attr_field in cls_attrs.items():
        if attr_name in kwargs:
            continue
        default = attr_field.default
        if isinstance(default, attr.Factory):
            if default.takes_self:
                raise NotImplementedError
            data[attr_name] = default.factory()
        else:
            _LOGGER.debug("Missing key %s with no default for %s", attr_name, cls.__name__)
            data[attr_name] = None
    # Initialize, and keep the raw input around for debug purposes.
    inst = cls(**data)
    setattr(inst, "raw", kwargs)
    return inst
def is_c_extension(module: ModuleType) -> bool:
    """Return ``True`` only if *module* is a C extension.

    A C extension here means a module implemented as a dynamically linked
    shared library specific to the current platform. Modified from
    https://stackoverflow.com/questions/20339053/in-python-how-can-one-tell-if-a-module-comes-from-a-c-extension.

    Args:
        module: Previously imported module object to be tested.

    Returns:
        bool: ``True`` only if this module is a C extension.
    """
    assert inspect.ismodule(module), '"{}" not a module.'.format(module)
    # A PEP 302-compliant CPython loader that only loads C extensions is a
    # definitive signal.
    if isinstance(getattr(module, '__loader__', None), ExtensionFileLoader):
        return True
    # Built-in modules are compiled into the interpreter, not C extensions.
    if is_builtin_module(module):
        return False
    # Fall back to matching the module file's suffix against the platform's
    # C-extension suffixes.
    suffix = os.path.splitext(inspect.getfile(module))[1]
    return suffix in EXTENSION_SUFFIXES
def complexity_fd_higushi(signal, k_max):
    """Compute the Higuchi Fractal Dimension of a signal.

    Based on the `pyrem <https://github.com/gilestrolab/pyrem>`_ repo by
    Quentin Geissmann.

    Parameters
    ----------
    signal : list or array
        List or array of values.
    k_max : int
        The maximal value of k. The point at which the FD plateaus is
        considered a saturation point and that kmax value should be selected
        (Gomez, 2009). Some studies use 8 or 16 for ECG and 48 for MEG.

    Returns
    -------
    fd_higushi : float
        The Higushi Fractal Dimension as float value.

    References
    ----------
    - Accardo, A., Affinito, M., Carrozzi, M., & Bouquet, F. (1997). Use of
      the fractal dimension for the analysis of electroencephalographic time
      series. Biological cybernetics, 77(5), 339-350.
    - Gomez, C., Mediavilla, A., Hornero, R., Abasolo, D., & Fernandez, A.
      (2009). Use of the Higuchi's fractal dimension for the analysis of MEG
      recordings from Alzheimer's disease patients. Medical engineering &
      physics, 31(3), 306-313.
    """
    signal = np.array(signal)
    N = signal.size
    L = []
    x = []
    for k in range(1, k_max):
        Lk = 0
        for m in range(0, k):
            # Pre-generate all indices of the sub-sampled curve at offset m.
            idxs = np.arange(1, int(np.floor((N - m) / k)))
            Lmk = np.sum(np.abs(signal[m + idxs * k] - signal[m + k * (idxs - 1)]))
            # Normalization factor from Higuchi's definition.
            Lmk = (Lmk * (N - 1) / (((N - m) / k) * k)) / k
            Lk += Lmk
        if Lk != 0:
            # Mean curve length over the k offsets (the final m is k - 1,
            # so dividing by k averages the Lmk terms).
            L.append(np.log(Lk / k))
            x.append([np.log(1.0 / k), 1])
    # Fit log(L) = FD * log(1/k) + b; the slope is the fractal dimension.
    # rcond=None opts in to the modern (non-deprecated) lstsq behavior.
    (p, r1, r2, s) = np.linalg.lstsq(x, L, rcond=None)
    fd_higushi = p[0]
    return (fd_higushi)
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
    """Convert a bytes string with python source code to unicode.

    Unicode strings are passed through unchanged. Byte strings are checked
    for the python source file encoding cookie to determine encoding.
    txt can be either a bytes buffer or a string containing the source code.
    """
    if isinstance(txt, six.text_type):
        return txt
    buffer = io.BytesIO(txt) if isinstance(txt, six.binary_type) else txt
    try:
        encoding, _ = detect_encoding(buffer.readline)
    except SyntaxError:
        # No valid cookie and not decodable as UTF-8: fall back to ascii.
        encoding = "ascii"
    buffer.seek(0)
    text = io.TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if skip_encoding_cookie:
        return u"".join(strip_encoding_cookie(text))
    return text.read()
def _assign_value_by_type(self, pbuf_obj, value, _bool=True, _float=True, _integer=True, _string=True, error_prefix=''):
    """Assign the supplied value to the appropriate protobuf value type.

    The ``_bool``/``_float``/``_integer``/``_string`` switches control which
    destination types are permitted for this assignment.
    """
    # bool inherits int, so the bool instance check must be executed prior
    # to checking for integer types.
    if _bool is True and isinstance(value, bool):
        pbuf_obj.value.boolValue = value
        return
    if _integer is True and isinstance(value, six.integer_types) and not isinstance(value, bool):
        if value < INTEGER_MIN or value > INTEGER_MAX:
            raise ValueError(
                ('{}: {} exceeds signed 64 bit integer range '
                 'as defined by ProtocolBuffers ({} to {})').format(
                    error_prefix, str(value), str(INTEGER_MIN), str(INTEGER_MAX)))
        pbuf_obj.value.intValue = value
        return
    if _float is True and isinstance(value, float):
        pbuf_obj.value.doubleValue = value
        return
    if _string is True and isinstance(value, six.string_types):
        pbuf_obj.value.strValue = value
        return
    raise ValueError('{}: {} is of invalid type {}'.format(error_prefix, str(value), str(type(value))))
def terminate(self, wait=False):
    """Terminate the process, optionally waiting for it to exit."""
    if self.proc is None:
        # Nothing running; nothing to do.
        return
    self.proc.stdout.close()
    try:
        self.proc.terminate()
    except ProcessLookupError:
        # The process already exited on its own.
        pass
    if wait:
        self.proc.wait()
def list_subnets(self, retrieve_all=True, **_params):
    """Fetch a list of all subnets for a project."""
    path = self.subnets_path
    return self.list('subnets', path, retrieve_all, **_params)
def send(self, auto_complete=True, callback=None):
    """Begin uploading file(s) and sending email(s).

    If `auto_complete` is set to ``False`` you will have to call the
    :func:`Transfer.complete` function at a later stage.

    :param auto_complete: Whether or not to mark transfer as complete
        and send emails to recipient(s)
    :param callback: Callback function which will receive total file size
        and bytes read as arguments
    :type auto_complete: ``bool``
    :type callback: ``func``
    """
    tot = len(self.files)
    url = self.transfer_info['transferurl']
    # Initialized so an empty file list can't raise NameError at the
    # final `return res`.
    res = None
    for index, fmfile in enumerate(self.files):
        # BUG FIX: the template previously hard-coded "(unknown)" while
        # still passing filename= to .format(); restore the placeholder.
        msg = 'Uploading: "{filename}" ({cur}/{tot})'
        logger.debug(msg.format(filename=fmfile['thefilename'], cur=index + 1, tot=tot))
        with open(fmfile['filepath'], 'rb') as file_obj:
            fields = {fmfile['thefilename']: ('filename', file_obj, fmfile['content-type'])}

            def pg_callback(monitor):
                # Route progress either to the CLI bar or the user callback.
                if pm.COMMANDLINE:
                    bar.show(monitor.bytes_read)
                elif callback is not None:
                    callback(fmfile['totalsize'], monitor.bytes_read)

            m_encoder = encoder.MultipartEncoder(fields=fields)
            monitor = encoder.MultipartEncoderMonitor(m_encoder, pg_callback)
            if pm.COMMANDLINE:
                label = fmfile['thefilename'] + ': '
                bar = ProgressBar(label=label, expected_size=fmfile['totalsize'])
            headers = {'Content-Type': m_encoder.content_type}
            res = self.session.post(url, params=fmfile, data=monitor, headers=headers)
            if res.status_code != 200:
                hellraiser(res)
    if auto_complete:
        return self.complete()
    return res
def _check_refer_redirect(self, environ):
    """Return a WbResponse for a HTTP 307 redirection if the HTTP referer
    header is the same as the HTTP host header.

    :param dict environ: The WSGI environment dictionary for the request
    :return: WbResponse HTTP 307 redirection, or None if no redirect applies
    :rtype: WbResponse
    """
    referer = environ.get('HTTP_REFERER')
    if not referer:
        return
    host = environ.get('HTTP_HOST')
    if host not in referer:
        return
    # Find the embedded target URL inside the referer; searches start at
    # index 1, so any hit must be offset by one below.
    inx = referer[1:].find('http')
    if not inx:
        inx = referer[1:].find('///')
        if inx > 0:
            # BUG FIX: was the no-op expression `inx + 1`; the offset must
            # actually be applied to compensate for the `referer[1:]` search.
            inx += 1
    if inx < 0:
        return
    url = referer[inx + 1:]
    host = referer[:inx + 1]
    orig_url = environ['PATH_INFO']
    if environ.get('QUERY_STRING'):
        orig_url += '?' + environ['QUERY_STRING']
    full_url = host + urljoin(url, orig_url)
    return WbResponse.redir_response(full_url, '307 Redirect')
def removeID(self, attr):
    """Delete *attr* from the ID table maintained internally by the document.

    :param attr: attribute wrapper to remove, or None
    :return: the status code returned by libxml2
    """
    # Unwrap the underlying C-level object (None stays None).
    raw_attr = attr._o if attr is not None else None
    return libxml2mod.xmlRemoveID(self._o, raw_attr)
def __StripName(self, name):
    """Return *name* with the first matching strip_prefix removed."""
    if not name:
        return name
    # Find the first configured prefix that matches; keep name unchanged
    # when none does.
    matched = next(
        (prefix for prefix in self.__strip_prefixes if name.startswith(prefix)),
        None)
    return name[len(matched):] if matched else name
def _find_base_type(data_type):
    """Return the Nani base type that *data_type* derives from, or None.

    This is useful when Nani's data types were subclassed and the original
    type is required.
    """
    # Walk the MRO of the value's type and return the first entry that is
    # one of Nani's registered base types.
    return next(
        (klass for klass in type(data_type).__mro__ if klass in _ALL),
        None)
def filter_factory(global_conf, **local_conf):
    """Return a WSGI filter app for use with paste.deploy.

    Local configuration overrides global configuration; the merged dict is
    captured by the returned filter constructor.
    """
    # Merge without mutating the caller's global configuration.
    conf = dict(global_conf, **local_conf)

    def blacklist(app):
        return BlacklistFilter(app, conf)

    return blacklist
def _read_join_syn(self, bits, size, kind):
    """Read the MP_JOIN-SYN variant of the Multipath TCP Join option.

    Positional arguments:
        * bits - str, 4-bit data (bit 3 is the Backup Path flag)
        * size - int, length of option
        * kind - int, 30 (Multipath TCP)

    Returns:
        * dict - extracted Join Connection (MP_JOIN-SYN) option for the
          initial SYN, per RFC 6824: address ID (1 octet), receiver's
          token (4 octets), sender's random number (4 octets).
    """
    address_id = self._read_unpack(1)
    receiver_token = self._read_fileng(4)
    random_number = self._read_unpack(4)
    syn = dict(
        backup=bool(int(bits[3])),
        addrid=address_id,
        token=receiver_token,
        randnum=random_number,
    )
    return dict(
        kind=kind,
        length=size + 1,
        subtype='MP_JOIN-SYN',
        join=dict(syn=syn),
    )
def setup_prj_page(self):
    """Set up the table views of the project page.

    Switches every project table view's horizontal header to
    resize-to-contents mode.

    :returns: None
    :rtype: None
    :raises: None
    """
    project_views = (
        self.prj_seq_tablev,
        self.prj_atype_tablev,
        self.prj_dep_tablev,
        self.prj_user_tablev,
    )
    for view in project_views:
        view.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
def get_user_info(tokens, uk):
    """Fetch partial public profile information for a user.

    Retrieves e.g. avatar, username, self-introduction and fan count.
    This endpoint can query any user's info as long as their *uk* is known.

    :param tokens: dict containing at least 'bdstoken'
    :param uk: the user key identifying the target user
    :return: the 'user_info' dict on success, otherwise None
    """
    url = ''.join([
        const.PAN_URL,
        'pcloud/user/getinfo?channel=chunlei&clienttype=0&web=1',
        '&bdstoken=', tokens['bdstoken'],
        '&query_uk=', uk,
        '&t=', util.timestamp(),
    ])
    req = net.urlopen(url)
    if not req:
        return None
    info = json.loads(req.data.decode())
    if info and info['errno'] == 0:
        return info['user_info']
    return None
def fixcode(**kwargs):
    """auto pep8 format all python file in ``source code`` and ``tests`` dir."""
    # repository directory
    repo_dir = Path(__file__).parent.absolute()

    def _autopep8_dir(directory, label):
        """Run autopep8 over *directory* if it exists, logging progress."""
        if directory.exists():
            print("%s code locate at: '%s'." % (label, directory))
            print("Auto pep8 all python file ...")
            directory.autopep8(**kwargs)
        else:
            print("%s code directory not found!" % label)

    # source code directory, then unittest code directory
    _autopep8_dir(Path(repo_dir, package.__name__), "Source")
    _autopep8_dir(Path(repo_dir, "tests"), "Unittest")
    print("Complete!")
def listChoices(self, category, libtype=None, **kwargs):
    """Return a list of :class:`~plexapi.library.FilterChoice` objects for the
    specified category and libtype. kwargs can be any of the same kwargs in
    :func:`plexapi.library.LibrarySection.search` to help narrow down the
    choices to only those that matter in your current context.

    Parameters:
        category (str): Category to list choices for (genre, contentRating, etc).
        libtype (int): Library type of item filter.
        **kwargs (dict): Additional kwargs to narrow down the choices.

    Raises:
        :class:`plexapi.exceptions.BadRequest`: When a kwarg equal to the
            specified category is included.
    """
    # TODO: Should this be moved to base?
    if category in kwargs:
        raise BadRequest('Cannot include kwarg equal to specified category: %s' % category)
    args = {}
    for subcategory, value in kwargs.items():
        # FIX: key each cleaned filter under its own subcategory. The old
        # code wrote every value to args[category], so only the last kwarg
        # survived and was sent under the wrong query key -- which is also
        # inconsistent with the category-collision guard above.
        args[subcategory] = self._cleanSearchFilter(subcategory, value)
    if libtype is not None:
        args['type'] = utils.searchType(libtype)
    key = '/library/sections/%s/%s%s' % (self.key, category, utils.joinArgs(args))
    return self.fetchItems(key, cls=FilterChoice)
def find_lexer_for_filename(filename):
    """Get a Pygments Lexer given a filename.

    Custom extension overrides take precedence; anything Pygments cannot
    resolve falls back to the plain text lexer.
    """
    filename = filename or ''
    _, ext = os.path.splitext(filename)
    if ext in custom_extension_lexer_mapping:
        return get_lexer_by_name(custom_extension_lexer_mapping[ext])
    try:
        return get_lexer_for_filename(filename)
    except Exception:
        # Unknown filename pattern -> plain text.
        return TextLexer()
def load_level(self):
    """|coro|

    Load the player's XP and level from the Ubi progression endpoint.
    Raises InvalidRequest when the response lacks player profiles.
    """
    url = ("https://public-ubiservices.ubi.com/v1/spaces/%s/sandboxes/%s"
           "/r6playerprofile/playerprofile/progressions?profile_ids=%s"
           % (self.spaceid, self.platform_url, self.id))
    data = yield from self.auth.get(url)
    if "player_profiles" in data and len(data["player_profiles"]) > 0:
        profile = data["player_profiles"][0]
        self.xp = profile.get("xp", 0)
        self.level = profile.get("level", 0)
    else:
        raise InvalidRequest("Missing key player_profiles in returned JSON object %s" % str(data))
def merge_split_adjustments_with_overwrites(self, pre, post, overwrites, requested_split_adjusted_columns):
    """Merge split adjustments into the dict containing overwrites.

    Parameters
    ----------
    pre : dict [str -> dict [int -> list]]
        The adjustments that occur before the split-adjusted-asof-date.
    post : dict [str -> dict [int -> list]]
        The adjustments that occur after the split-adjusted-asof-date.
    overwrites : dict [str -> dict [int -> list]]
        The overwrites across all time. Adjustments are merged into this
        dictionary in place.
    requested_split_adjusted_columns : list of str
        Names of the split-adjusted columns being requested.
    """
    for column_name in requested_split_adjusted_columns:
        # Timestamps in 'pre' and 'post' are guaranteed not to overlap,
        # so the two dicts can be merged independently, in order.
        for adjustments in (pre, post):
            if not adjustments:  # either empty or contains all columns
                continue
            for ts, adjustment_list in adjustments[column_name].items():
                add_new_adjustments(overwrites, adjustment_list, column_name, ts)
def linear(m=1, b=0):
    '''Return a driver function that can advance a sequence of linear values.

    .. code-block:: none

        value = m * i + b

    Args:
        m (float) : a slope for the linear driver
        b (float) : an offset for the linear driver
    '''
    # The driver evaluates the line at each integer step i.
    def f(i):
        return m * i + b
    return partial(force, sequence=_advance(f))
def _objectify(items, container_name):
    """Wrap each raw listing entry in its appropriate wrapper class.

    Entries carrying a 'subdir' key are object pseudo-folders; everything
    else is a regular storage object.
    """
    def _wrapper_for(item):
        # pseudo-folders are flagged by the presence of a 'subdir' key
        return PseudoFolder if item.get("subdir", None) is not None else StorageObject

    return [_wrapper_for(item)(item, container_name) for item in items]
def format_configurablefield_nodes(field_name, field, field_id, state, lineno):
    """Create the documentation nodes for a ConfigurableField config field.

    Parameters
    ----------
    field_name : `str`
        Name of the configuration field (the attribute name on the config
        class).
    field : ``lsst.pex.config.ConfigurableField``
        A configuration field.
    field_id : `str`
        Unique identifier for this field. This is used as the id and name of
        the section node, with a -section suffix.
    state : ``docutils.statemachine.State``
        Usually the directive's ``state`` attribute.
    lineno (`int`)
        Usually the directive's ``lineno`` attribute.

    Returns
    -------
    `list` of docutils nodes
        Title node, key-value definition list, and description node for the
        ConfigurableField.  (NOTE(review): despite the original docstring
        claiming a single section node, the code returns this 3-node list.)
    """
    # Custom default target definition list that links to Task topics
    default_item = nodes.definition_list_item()
    default_item.append(nodes.term(text="Default"))
    default_item_content = nodes.definition()
    para = nodes.paragraph()
    # Fully qualified name of the field's target, used for the task xref.
    name = '.'.join((field.target.__module__, field.target.__name__))
    para += pending_task_xref(rawsource=name)
    default_item_content += para
    default_item += default_item_content
    # Definition list for key-value metadata
    dl = nodes.definition_list()
    dl += default_item
    dl += create_field_type_item_node(field, state)
    # Doc for this ConfigurableField, parsed as rst
    desc_node = create_description_node(field, state)
    # Title for configuration field
    title = create_title_node(field_name, field, field_id, state, lineno)
    return [title, dl, desc_node]
def info(device):
    '''Get filesystem geometry information.

    CLI Example:

    .. code-block:: bash

        salt '*' xfs.info /dev/sda1
    '''
    out = __salt__['cmd.run_all']("xfs_info {0}".format(device))
    stderr = out.get('stderr')
    if stderr:
        # Strip the tool's own prefix before surfacing the error.
        raise CommandExecutionError(stderr.replace("xfs_info:", "").strip())
    return _parse_xfs_info(out['stdout'])
def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False):
    """Run a command in a subprocess, emulating the argument handling
    behaviour of SSH: the whole command line is handed as a single string
    to the user's shell via ``-c``.

    :param bytes cmd:
        String command line, passed to user's shell.
    :param bytes in_data:
        Optional standard input for the command.
    :return:
        (return code, stdout bytes, stderr bytes)
    """
    assert isinstance(cmd, mitogen.core.UnicodeType)
    shell_argv = [get_user_shell(), '-c', cmd]
    return exec_args(
        args=shell_argv,
        in_data=in_data,
        chdir=chdir,
        shell=shell,
        emulate_tty=emulate_tty,
    )
def next_bday(self):
    """Return a one-business-day offset pointing in this offset's direction.

    Used for moving to the next business day: step is +1 when ``self.n``
    is non-negative, otherwise -1.  Custom offsets (prefix starting with
    'C', e.g. CustomBusinessHour) carry over their weekmask, holidays and
    calendar.
    """
    step = 1 if self.n >= 0 else -1
    if self._prefix.startswith('C'):
        # CustomBusinessHour -> preserve the custom calendar configuration
        return CustomBusinessDay(n=step, weekmask=self.weekmask,
                                 holidays=self.holidays, calendar=self.calendar)
    return BusinessDay(n=step)
def play_note(note):
    """play_note determines the coordinates of a note on the keyboard image
    and sends a request to play the note to the fluidsynth server.

    Side effects: appends to the module-level playing_w/playing_b lists,
    redraws the chord-name text surface, and triggers fluidsynth playback.
    """
    global text
    # Horizontal pixel offset of the note's octave on the keyboard image.
    octave_offset = (note.octave - LOWEST) * width
    if note.name in WHITE_KEYS:
        # Getting the x coordinate of a white key can be done automatically
        w = WHITE_KEYS.index(note.name) * white_key_width
        w = w + octave_offset
        # Add a list containing the x coordinate, the tick at the current time
        # and of course the note itself to playing_w
        playing_w.append([w, tick, note])
    else:
        # For black keys the x coordinates are hard coded. It's ugly.
        i = BLACK_KEYS.index(note.name)
        if i == 0:
            w = 18
        elif i == 1:
            w = 58
        elif i == 2:
            w = 115
        elif i == 3:
            w = 151
        else:
            w = 187
        w = w + octave_offset
        playing_b.append([w, tick, note])
    # To find out what sort of chord is being played we have to look at both
    # the white and black keys, obviously:
    notes = playing_w + playing_b
    notes.sort()
    notenames = []
    for n in notes:
        # n is [x, tick, note]; collect just the note names for chord lookup
        notenames.append(n[2].name)
    # Determine the chord (empty string when nothing recognizable is held)
    det = chords.determine(notenames)
    if det != []:
        det = det[0]
    else:
        det = ''
    # And render it onto the text surface
    t = font.render(det, 2, (0, 0, 0))
    text.fill((255, 255, 255))
    text.blit(t, (0, 0))
    # Play the note (channel and velocity 100 come from module state)
    fluidsynth.play_Note(note, channel, 100)
def down_the_wabbit_hole(session, class_name):
    """Authenticate on class.coursera.org.

    Follows the auth redirector for *class_name*; raises
    AuthenticationFailed if the redirector responds with an HTTP error.
    """
    redirector = AUTH_REDIRECT_URL.format(class_name=class_name)
    response = session.get(redirector)
    logging.debug('Following %s to authenticate on class.coursera.org.', redirector)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        raise AuthenticationFailed('Cannot login on class.coursera.org: %s' % e)
    logging.debug('Exiting "deep" authentication.')
def unobserve_property(self, name, handler):
    """Unregister a property observer.

    This requires both the observed property's name and the handler
    function that was originally registered, as one handler could be
    registered for several properties.  To unregister a handler from
    *all* observed properties see ``unobserve_all_properties``.
    """
    handlers = self._property_handlers[name]
    handlers.remove(handler)
    if not handlers:
        # No observers left for this property -> tell mpv to stop reporting it.
        _mpv_unobserve_property(self._event_handle, hash(name) & 0xffffffffffffffff)
def check_for_flexible_downtime(self, timeperiods, hosts, services):
    """Enter a downtime if necessary and raise the start notification.

    When a non-OK state occurs we try to activate a flexible downtime.

    :param timeperiods: Timeperiods objects, used for downtime period
    :type timeperiods: alignak.objects.timeperiod.Timeperiods
    :param hosts: hosts objects, used to enter downtime
    :type hosts: alignak.objects.host.Hosts
    :param services: services objects, used to enter downtime
    :type services: alignak.objects.service.Services
    :return: None
    """
    status_changed = False
    for downtime in self.downtimes.values():
        # Only flexible, not-yet-active downtimes are candidates
        # (triggered downtimes are never activated here).
        if downtime.fixed or downtime.is_in_effect:
            continue
        # Only activate between the downtime's start and end time, and
        # only while the item is in a non-OK state.
        in_window = downtime.start_time <= self.last_chk <= downtime.end_time
        if in_window and self.state_id != 0 and downtime.trigger_id in ['', '0']:
            # returns downtimestart notifications
            self.broks.extend(downtime.enter(timeperiods, hosts, services))
            status_changed = True
    if status_changed is True:
        self.broks.append(self.get_update_status_brok())
def extra(method, profile, **libcloud_kwargs):
    '''Call an extended method on the driver

    :param method: Driver's method name
    :type method: ``str``

    :param profile: The profile key
    :type profile: ``str``

    :param libcloud_kwargs: Extra arguments for the driver's method
    :type libcloud_kwargs: ``dict``

    CLI Example:

    .. code-block:: bash

        salt myminion libcloud_loadbalancer.extra ex_get_permissions google container_name=my_container object_name=me.jpg --out=yaml
    '''
    # Drop salt-internal __pub_* kwargs before forwarding to the driver.
    cleaned_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    driver = _get_driver(profile=profile)
    return getattr(driver, method)(**cleaned_kwargs)
def assert_inequivalent(o1, o2):
    '''Asserts that o1 and o2 are distinct and inequivalent objects'''
    if not (isinstance(o1, type) and isinstance(o2, type)):
        # Identity is only required for non-class objects.
        assert o1 is not o2
    # Inequivalence must hold symmetrically, via both == and !=.
    assert not o1 == o2
    assert o1 != o2
    assert not o2 == o1
    assert o2 != o1
def get_jids():
    '''Return a dict mapping all job ids to job information'''
    ret = {}
    store_endtime = __opts__.get('job_cache_store_endtime')
    for jid, job, _, _ in _walk_through(_job_dir()):
        entry = salt.utils.jid.format_jid_instance(jid, job)
        ret[jid] = entry
        if store_endtime:
            # Only attach an end time when one has actually been recorded.
            endtime = get_endtime(jid)
            if endtime:
                entry['EndTime'] = endtime
    return ret
def update_nanopubstore_start_dt(url: str, start_dt: str):
    """Add nanopubstore start_dt to the belapi.state_mgmt collection.

    Args:
        url: url of nanopubstore
        start_dt: datetime of last query against nanopubstore for new IDs
    """
    hostname = urllib.parse.urlsplit(url)[1]
    doc = state_mgmt.get(start_dates_doc_key)
    if not doc:
        # First record ever: create the document and stop.
        state_mgmt.insert({
            "_key": start_dates_doc_key,
            "start_dates": [{"nanopubstore": hostname, "start_dt": start_dt}],
        })
        return
    for entry in doc["start_dates"]:
        if entry["nanopubstore"] == hostname:
            entry["start_dt"] = start_dt
            break
    else:
        # This store has no entry yet -> add one.
        doc["start_dates"].append({"nanopubstore": hostname, "start_dt": start_dt})
    state_mgmt.replace(doc)
def _mapFuture(callable_, *iterables):
    """Parallel analogue of the built-in map: spawn one independent Future
    per zipped argument tuple, each running ``callable_(*args)`` locally or
    remotely.

    :param callable_: Any callable object (function or class object with a
        *__call__* method); called to execute each Future.
    :param iterables: A tuple of iterable objects; zipped together to form
        the argument tuples, one per Future.
    :returns: A list of Future objects, one per iteration of map.

    On return the Futures are pending execution locally, but may also be
    transferred remotely depending on global load.  Retrieve results via
    waitAny/waitAll/joinAll, or use mapWait/mapJoin to block before
    returning.
    """
    return [submit(callable_, *args) for args in zip(*iterables)]
def _py_code_clean(lines, tab, executable):
    """Append the code lines that populate the result instance's keys with
    all output variables; return the number of lines appended."""
    params = list(executable.ordered_parameters)
    if type(executable).__name__ == "Function":
        # Functions also expose their own return value as a parameter.
        params.append(executable)
    appended = 0
    for param in params:
        cleaned = _py_clean(param, tab)
        if cleaned is not None:
            appended += 1
            lines.append(cleaned)
    return appended
def copy_logstore(from_client, from_project, from_logstore, to_logstore, to_project=None, to_client=None):
    """Copy a logstore, its index and its logtail configs to a target
    logstore; machine groups are not included yet.

    The target logstore (and project, when ``to_project`` is given) will be
    created if it does not exist.

    :type from_client: LogClient
    :param from_client: logclient instance

    :type from_project: string
    :param from_project: project name

    :type from_logstore: string
    :param from_logstore: logstore name

    :type to_logstore: string
    :param to_logstore: target logstore name

    :type to_project: string
    :param to_project: project name, copy to same project if not being
        specified, will try to create it if not existing

    :type to_client: LogClient
    :param to_client: logclient instance, used to operate on the
        "to_project" if being specified

    :return: None
    """
    # check client
    if to_project is not None:
        # copy to a different project, possibly via a different client
        to_client = to_client or from_client
        # Fetch the source project so its description can be reused when
        # creating the target project.
        ret = from_client.get_project(from_project)
        try:
            ret = to_client.create_project(to_project, ret.get_description())
        except LogException as ex:
            if ex.get_error_code() == 'ProjectAlreadyExist':
                # don't create the project as it already exists
                pass
            else:
                raise
    to_project = to_project or from_project
    to_client = to_client or from_client
    # return if source and target are the same logstore
    if from_client is to_client and from_project == to_project and from_logstore == to_logstore:
        return
    # copy logstore: mirror TTL, tracking and shard layout of the source
    ret = from_client.get_logstore(from_project, from_logstore)
    res_shard = from_client.list_shards(from_project, from_logstore)
    expected_rwshard_count = len([shard for shard in res_shard.shards if shard['status'].lower() == 'readwrite'])
    try:
        ret = to_client.create_logstore(
            to_project, to_logstore,
            ttl=ret.get_ttl(),
            # shard count is capped at MAX_INIT_SHARD_COUNT at creation time
            shard_count=min(expected_rwshard_count, MAX_INIT_SHARD_COUNT),
            enable_tracking=ret.get_enable_tracking(),
            append_meta=ret.append_meta,
            auto_split=ret.auto_split,
            max_split_shard=ret.max_split_shard,
            preserve_storage=ret.preserve_storage)
    except LogException as ex:
        if ex.get_error_code() == 'LogStoreAlreadyExist':
            # update the existing logstore's settings instead
            ret = to_client.update_logstore(
                to_project, to_logstore,
                ttl=ret.get_ttl(),
                enable_tracking=ret.get_enable_tracking(),
                append_meta=ret.append_meta,
                auto_split=ret.auto_split,
                max_split_shard=ret.max_split_shard,
                preserve_storage=ret.preserve_storage)
            # arrange shard to expected count
            res = arrange_shard(to_client, to_project, to_logstore, min(expected_rwshard_count, MAX_INIT_SHARD_COUNT))
        else:
            raise
    # copy index
    try:
        ret = from_client.get_index_config(from_project, from_logstore)
        ret = to_client.create_index(to_project, to_logstore, ret.get_index_config())
    except LogException as ex:
        if ex.get_error_code() == 'IndexConfigNotExist':
            # source has no index
            pass
        elif ex.get_error_code() == 'IndexAlreadyExist':
            # target already has an index, overwrite it.
            # NOTE: 'ret' still holds the source get_index_config response
            # here, since create_index raised before rebinding it.
            ret = to_client.update_index(to_project, to_logstore, ret.get_index_config())
            pass
        else:
            raise
    # list logtail configs linked to the source logstore and copy them,
    # paging through the listing default_fetch_size at a time
    default_fetch_size = 100
    offset, size = 0, default_fetch_size
    while True:
        ret = from_client.list_logtail_config(from_project, offset=offset, size=size)
        count = ret.get_configs_count()
        total = ret.get_configs_total()
        for config_name in ret.get_configs():
            ret = from_client.get_logtail_config(from_project, config_name)
            config = ret.logtail_config
            # skip configs that belong to other logstores
            if config.logstore_name != from_logstore:
                continue
            # rename the copy so it cannot collide with the original
            config.config_name = to_logstore + '_' + config_name
            config.logstore_name = to_logstore
            ret = to_client.create_logtail_config(to_project, config)
        offset += count
        if count < size or offset >= total:
            break
def ads_use_dev_spaces(cluster_name, resource_group_name, update=False, space_name=None, do_not_prompt=False):
    """Use Azure Dev Spaces with a managed Kubernetes cluster.

    :param cluster_name: Name of the managed cluster.
    :type cluster_name: String
    :param resource_group_name: Name of resource group. You can configure
        the default group using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select.
        Defaults to an interactive selection experience.
    :type space_name: String
    :param do_not_prompt: Do not prompt for confirmation. Requires --space.
    :type do_not_prompt: bool
    """
    azds_cli = _install_dev_spaces_cli(update)
    command = [azds_cli, 'use',
               '--name', cluster_name,
               '--resource-group', resource_group_name]
    if space_name is not None:
        command += ['--space', space_name]
    if do_not_prompt:
        command.append('-y')
    subprocess.call(command, universal_newlines=True)
def get_league(self, slug):
    """Return a Pokemon League object containing the details about the
    league identified by *slug*."""
    return self.make_request(self.BASE_URL + '/league/' + slug)
def catches(catch=None, handler=None, exit=True, handle_all=False):
    """Decorator that catches the given exception(s), logging the exception
    message and optionally delegating to *handler*.

    Instead of writing a try/except around every call site::

        @catches(TypeError)
        def bar():
            some_call()

    Multiple exceptions are passed as a tuple::

        @catches((TypeError, AttributeError))
        def bar():
            some_call()

    A *handler*, when given, receives the raised exception as its single
    argument and its return value becomes the wrapped function's return
    value; it must raise SystemExit itself if it wants to halt execution.

    :param catch: An Exception class or tuple of them to catch
        (defaults to ``Exception``)
    :param handler: Optional handler for custom handling of exceptions
    :param exit: Raise a ``SystemExit`` after handling exceptions
    :param handle_all: Handle all other exceptions via logging.
    """
    catch = catch or Exception
    logger = logging.getLogger('ceph_deploy')

    def decorate(f):

        @wraps(f)
        def newfunc(*a, **kw):
            exiting_from_catch = False
            try:
                return f(*a, **kw)
            except catch as e:
                if handler:
                    return handler(e)
                logger.error(make_exception_message(e))
                if exit:
                    exiting_from_catch = True
                    sys.exit(1)
            except Exception:
                # anything else; no need to bind the exception to a name
                if handle_all is False:
                    # re-raise if we are not supposed to handle everything
                    raise
                # Avoid double tracebacks when SystemExit originated from
                # the `except catch` block above.
                if exiting_from_catch:
                    sys.exit(1)
                for line in traceback.format_exc().split('\n'):
                    logger.error("%s" % line)
                sys.exit(1)
        return newfunc
    return decorate
def is_coincident(self, other, keys=None):
    """Return True if any segment in any list in self intersects any
    segment in any list in other.

    If *keys* is not None it must be an iterable of keys, and only segment
    lists for those keys are considered (keys missing from either
    dictionary are silently ignored rather than raising KeyError).  With
    the default ``keys=None`` all segment lists are considered.

    This is equivalent to intersects(), but without requiring the keys of
    the intersecting segment lists to match.
    """
    if keys is None:
        lists_a = tuple(self.values())
        lists_b = tuple(other.values())
    else:
        wanted = set(keys)
        lists_a = tuple(self[key] for key in set(self) & wanted)
        lists_b = tuple(other[key] for key in set(other) & wanted)
    # Keep the inner loop over the smaller collection.
    if len(lists_a) < len(lists_b):
        lists_a, lists_b = lists_b, lists_a
    return any(a.intersects(b) for a in lists_a for b in lists_b)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.