signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def next_lookup(self, symbol):
    """Return the TerminalSymbols that may follow *symbol* in this grammar.

    Implements the classic FOLLOW-set computation: for every production whose
    right side contains *symbol*, collect the FIRST set of the symbols after
    it; if all of them can derive the empty string, also include the FOLLOW
    set of the production's left side (computed recursively).

    :param symbol: the grammar symbol to look up
    :return: list of Choice/EndSymbol objects describing possible followers
    """
    result = []
    if symbol == self.initialsymbol:
        # The start symbol can be followed by end-of-input.
        result.append(EndSymbol())
    for production in self.productions:
        if symbol in production.rightside:
            # NOTE(review): .index() only finds the FIRST occurrence of
            # symbol in the right side — confirm duplicates cannot occur.
            nextindex = production.rightside.index(symbol) + 1
            while nextindex < len(production.rightside):
                nextsymbol = production.rightside[nextindex]
                firstlist = self.first_lookup(nextsymbol)
                # Strip the null symbol: FOLLOW sets contain terminals only.
                cleanfirstlist = Choice([x for x in firstlist if x != NullSymbol()])
                result.append(cleanfirstlist)
                if NullSymbol() not in firstlist:
                    break
                # NOTE(review): nextindex is never incremented here, so if
                # NullSymbol is in firstlist this loop repeats with the same
                # index — looks like a potential infinite loop; verify.
            else:
                # reached the end of the rightside without a break:
                # everything after symbol is nullable (or nothing follows),
                # so whatever follows the left side also follows symbol.
                result += self.next_lookup(production.leftside[0])
    return result
|
def __parse_namespace(self):
    """Parse the namespace from various sources.

    Tries, in order: an explicit ``namespace`` option in the ``config``
    section of the manifest, then extraction from the ``source`` option via
    ``NAMESPACE_REGEX``.

    :return: the namespace string, or None when it cannot be determined
    """
    if self.manifest.has_option('config', 'namespace'):
        return self.manifest.get('config', 'namespace')
    elif self.manifest.has_option('config', 'source'):
        # Derive the namespace from the source URL/path.
        return NAMESPACE_REGEX.search(self.manifest.get('config', 'source')).groups()[0]
    else:
        # logger.warn is a deprecated alias; use warning(). (Also fixes the
        # "implicitely" typo in the message.)
        logger.warning('Could not parse namespace implicitly')
        return None
|
def set_width(self, width):
    """Set the screen width.

    The new width is applied and pushed to the LCDd server only when it is
    positive and does not exceed the server-reported maximum screen width;
    otherwise the call is silently ignored.

    :param width: requested width in characters
    """
    max_width = self.server.server_info.get("screen_width")
    if 0 < width <= max_width:
        self.width = width
        self.server.request("screen_set %s wid %i" % (self.ref, self.width))
|
def create_one(self, commit=True):
    '''Create and return one model instance. If *commit* is ``False`` the
    instance will not be saved and many-to-many relations will not be
    processed.

    Subclasses that override ``create_one`` can specify arbitrary keyword
    arguments. They will be passed through by the
    :meth:`autofixture.base.AutoFixture.create` method and the helper
    functions :func:`autofixture.create` and :func:`autofixture.create_one`.

    May raise :exc:`CreateInstanceError` if constraints are not satisfied.
    '''
    tries = self.tries
    instance = self.model()
    # Start with all fields; each pass re-processes only the fields that
    # still violate constraints, until none remain or we run out of tries.
    process = instance._meta.fields
    while process and tries > 0:
        for field in process:
            self.process_field(instance, field)
        process = self.check_constraints(instance)
        tries -= 1
    if tries == 0:
        raise CreateInstanceError(
            u'Cannot solve constraints for "%s", tried %d times. '
            u'Please check value generators or model constraints. '
            u'At least the following fields are involved: %s' % (
                '%s.%s' % (self.model._meta.app_label, self.model._meta.object_name),
                self.tries,
                ', '.join([field.name for field in process]),
            ))
    instance = self.pre_process_instance(instance)
    if commit:
        instance.save()
        # to handle particular case of GenericRelation
        # in Django pre 1.6 it appears in .many_to_many
        many_to_many = [f for f in instance._meta.many_to_many
                        if not isinstance(f, get_GenericRelation())]
        for field in many_to_many:
            self.process_m2m(instance, field)
    # Signal is sent whether or not the instance was committed.
    signals.instance_created.send(sender=self, model=self.model,
                                  instance=instance, committed=commit)
    post_process_kwargs = {}
    # Only pass `commit` through if the (possibly overridden)
    # post_process_instance accepts it; warn legacy subclasses.
    if 'commit' in getargnames(self.post_process_instance):
        post_process_kwargs['commit'] = commit
    else:
        warnings.warn("Subclasses of AutoFixture need to provide a `commit` "
                      "argument for post_process_instance methods",
                      DeprecationWarning)
    return self.post_process_instance(instance, **post_process_kwargs)
|
def global_horizontal_radiation(self, value=9999.0):
    """Set IDD Field `global_horizontal_radiation`.

    Args:
        value (float): value for IDD Field `global_horizontal_radiation`
            Unit: Wh/m2; must be >= 0.0. Missing value: 9999.0.
            If `value` is None it is stored as-is without validation and
            assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # Missing value: store unchecked, per the field specification.
        self._global_horizontal_radiation = None
        return
    try:
        checked = float(value)
    except ValueError:
        raise ValueError('value {} need to be of type float '
                         'for field `global_horizontal_radiation`'.format(value))
    if checked < 0.0:
        raise ValueError('value need to be greater or equal 0.0 '
                         'for field `global_horizontal_radiation`')
    self._global_horizontal_radiation = checked
|
def load(self, label):
    """Load the object with the given label from the hidden state directory.

    Tries to unpickle the file first; if it is not a pickle, falls back to
    reading it as text and coercing to float when possible.

    :param label: file name within ``self.statedir``
    :return: the unpickled object, a float, the raw text, or None when the
        file cannot be opened
    """
    objloc = '{0}/{1}'.format(self.statedir, label)
    try:
        # Pickle requires a binary file object; use a context manager so the
        # handle is always closed (the original leaked both file handles).
        with open(objloc, 'rb') as f:
            obj = pickle.load(f)
    except (pickle.UnpicklingError, KeyError, IndexError, EOFError):
        # Not a pickle: treat the file as plain text.
        with open(objloc, 'r') as f:
            obj = f.read()
        try:
            obj = float(obj)
        except ValueError:
            pass
    except IOError:
        # File missing/unreadable: signal with None rather than raising.
        obj = None
    return obj
|
def pip_execute(*args, **kwargs):
    """Overridden pip_execute() to stop sys.path being changed.

    The act of importing main from the pip module seems to add wheels
    from /usr/share/python-wheels which are installed by various tools.
    This function ensures that sys.path remains the same after the call is
    executed.
    """
    # BUG FIX: take a *copy* of sys.path. The original stored a reference
    # (`_path = sys.path`), so restoring it was a no-op whenever pip mutated
    # the list in place, defeating the whole purpose of this wrapper.
    _path = list(sys.path)
    try:
        try:
            from pip import main as _pip_execute
        except ImportError:
            # pip is not installed yet: install it with apt, then retry.
            apt_update()
            if six.PY2:
                apt_install('python-pip')
            else:
                apt_install('python3-pip')
            from pip import main as _pip_execute
        _pip_execute(*args, **kwargs)
    finally:
        sys.path = _path
|
def int2base36(n):
    """Convert a base-10 integer to its base-36 string representation.

    Back-convert with ``int('<base36>', 36)``.

    :param n: the integer to convert (may be negative)
    :return: base-36 string using digits 0-9a-z
    :raises TypeError: if *n* is not an integer
    """
    # Explicit raise instead of `assert` (stripped under -O); the Py2-only
    # `long` check would be a NameError on Python 3.
    if not isinstance(n, int):
        raise TypeError('n must be an integer, got %r' % type(n))
    digits = '0123456789abcdefghijklmnopqrstuvwxyz'
    if n < 0:
        return '-' + int2base36(-n)
    if n < 36:
        return digits[n]
    b36 = ''
    while n != 0:
        n, i = divmod(n, 36)
        b36 = digits[i] + b36
    return b36
|
def _build_package_finder(self, options, index_urls):
    """Create a package finder appropriate to this install command.

    This method is meant to be overridden by subclasses, not called
    directly.

    :param options: parsed command-line options carrying the mirror/link
        settings
    :param index_urls: list of package index URLs to search
    :return: a configured ``PackageFinder``
    """
    finder_kwargs = {
        'find_links': options.find_links,
        'index_urls': index_urls,
        'use_mirrors': options.use_mirrors,
        'mirrors': options.mirrors,
    }
    return PackageFinder(**finder_kwargs)
|
def add_all_to_env(env):
    """Add builders and construction variables for all supported Fortran
    dialects to *env*."""
    # Register every dialect in turn; order matches the original sequence.
    for register_dialect in (add_fortran_to_env, add_f77_to_env,
                             add_f90_to_env, add_f95_to_env,
                             add_f03_to_env, add_f08_to_env):
        register_dialect(env)
|
def mean_squared_error(true, pred):
    """L2 distance between tensors true and pred.

    Args:
        true: the ground truth image.
        pred: the predicted image.
    Returns:
        mean squared error between ground truth and predicted image.
    """
    total_sq_diff = tf.reduce_sum(tf.squared_difference(true, pred))
    num_elements = tf.to_float(tf.size(pred))
    return total_sq_diff / num_elements
|
async def subscribe(self, topic):
    """Subscribe the socket to the specified topic.

    :param topic: The topic to subscribe to.
    :raises AssertionError: if this socket type cannot subscribe
        (only SUB and XSUB can).
    """
    if self.socket_type not in {SUB, XSUB}:
        raise AssertionError(
            "A %s socket cannot subscribe." % self.socket_type.decode(),
        )
    # Do this **BEFORE** awaiting so that new connections created during
    # the execution below honor the setting.
    self._subscriptions.append(topic)
    # Fan the subscription out to every peer that currently has a live
    # connection.
    tasks = [
        asyncio.ensure_future(
            peer.connection.local_subscribe(topic),
            loop=self.loop,
        )
        for peer in self._peers
        if peer.connection
    ]
    if tasks:
        try:
            await asyncio.wait(tasks, loop=self.loop)
        finally:
            # Ensure no task is left pending if wait() is interrupted;
            # cancelling an already-finished task is a harmless no-op.
            for task in tasks:
                task.cancel()
|
def email_quote_txt(text, indent_txt='>>', linebreak_input="\n", linebreak_output="\n"):
    """Take a text and return it in a typical mail-quoted format, e.g.::

        C'est un lapin, lapin de bois.
        >>Quoi?
        Un cadeau.

    will return::

        >>C'est un lapin, lapin de bois.
        >>>>Quoi?
        >>Un cadeau.

    @param text: the string to quote
    @param indent_txt: the string used for quoting (default: '>>')
    @param linebreak_input: in the text param, string used for linebreaks
    @param linebreak_output: linebreak used for output
    @return: the text as a quoted string
    """
    if text == "":
        return ""
    lines = text.split(linebreak_input)
    # Build with a single join instead of repeated `+=` string
    # concatenation, which is quadratic in the number of lines.
    return "".join(indent_txt + line + linebreak_output for line in lines)
|
def get_context(request, context=None):
    """Return common context data for network topology views.

    :param request: the current HttpRequest (used for policy and quota
        checks)
    :param context: optional dict to populate; a new dict is created when
        None
    :return: the context dict with permission, quota and settings flags
    """
    if context is None:
        context = {}
    network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
    # Policy checks gate the action buttons shown in the topology view.
    context['launch_instance_allowed'] = policy.check(
        (("compute", "os_compute_api:servers:create"),), request)
    context['instance_quota_exceeded'] = _quota_exceeded(request, 'instances')
    context['create_network_allowed'] = policy.check(
        (("network", "create_network"),), request)
    context['network_quota_exceeded'] = _quota_exceeded(request, 'network')
    # Router creation additionally requires routers to be enabled in the
    # Neutron settings (defaults to enabled).
    context['create_router_allowed'] = (
        network_config.get('enable_router', True)
        and policy.check((("network", "create_router"),), request))
    context['router_quota_exceeded'] = _quota_exceeded(request, 'router')
    context['console_type'] = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
    # Angular ("ng") vs legacy launch-instance panels are both toggled via
    # settings, and only offered when the compute service is available.
    context['show_ng_launch'] = (
        base.is_service_enabled(request, 'compute')
        and getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', True))
    context['show_legacy_launch'] = (
        base.is_service_enabled(request, 'compute')
        and getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', False))
    return context
|
def check_child_attr_data_types(self, ds):
    """For any variables which contain any of the following attributes:
        - valid_min / valid_max
        - valid_range
        - scale_factor
        - add_offset
        - _FillValue
    the data type of the attribute must match the type of its parent
    variable as specified in the NetCDF User Guide (NUG)
    https://www.unidata.ucar.edu/software/netcdf/docs/attribute_conventions.html,
    referenced in the CF Conventions in Section 2.5.2.

    :param netCDF4.Dataset ds: open netCDF dataset object
    :rtype: compliance_checker.base.Result
    """
    ctx = TestCtx(BaseCheck.MEDIUM, self.section_titles['2.5'])
    special_attrs = {"actual_range", "actual_min", "actual_max", "valid_min",
                     "valid_max", "valid_range", "scale_factor",
                     "add_offset", "_FillValue"}
    for var_name, var in ds.variables.items():
        # Only examine the special attributes actually present on this var.
        for att in special_attrs.intersection(var.ncattrs()):
            val = var.getncattr(att)
            if isinstance(val, basestring):
                # NOTE(review): `basestring` is Python 2 (or a compat alias
                # imported elsewhere in this module) — confirm for Py3.
                # String attribute: parent must be a character-typed var.
                type_match = var.dtype.kind == 'S'
                val_type = type(val)
            else:
                # Numeric attribute: compare the numpy scalar types.
                val_type = val.dtype.type
                type_match = val_type == var.dtype.type
            ctx.assert_true(
                type_match,
                "Attribute '{}' (type: {}) and parent variable '{}' (type: {}) "
                "must have equivalent datatypes".format(
                    att, val_type, var_name, var.dtype.type))
    return ctx.to_result()
|
def setter_override(attribute=None,  # type: str
                    f=DECORATED):
    """A decorator to indicate an overridden setter for a given attribute.

    If the attribute name is None, the decorated function's name will be
    used as the attribute name. The @contract will still be dynamically
    added.

    :param attribute: the attribute name for which the decorated function
        is an overridden setter
    :return: the decorated setter function
    """
    # Delegate to the shared decorator factory; is_getter=False marks this
    # as a setter override.
    return autoprops_override_decorate(f, attribute=attribute, is_getter=False)
|
def getAllSensors(self):
    """Retrieve all of the user's own sensors by paging through SensorsGet.

    @return (list) - list of sensors, or None if any request fails
    """
    page = 0
    all_sensors = []
    parameters = {'page': 0, 'per_page': 1000, 'owned': 1}
    while True:
        parameters['page'] = page
        if not self.SensorsGet(parameters):
            # If any call fails, we cannot be sure about the sensors in
            # CommonSense, so report failure rather than a partial list.
            return None
        batch = json.loads(self.getResponse())['sensors']
        all_sensors.extend(batch)
        if len(batch) < 1000:
            # Short page: this was the last one.
            return all_sensors
        page += 1
|
def sha1sum(filename):
    """Return the hex SHA-1 digest of the file at *filename*."""
    digest = hashlib.sha1()
    # Read in multiples of the hash block size for efficient updates.
    chunk_size = 128 * digest.block_size
    with open(filename, 'rb') as stream:
        while True:
            chunk = stream.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
|
def read(self):
    """Read the CSV file and return the list of valid, formatted CNPJs.

    Non-empty rows have their first column formatted and validated; only
    values passing ``self.valid`` are kept.
    """
    with open(self.file) as handle:
        rows = unicodecsv.reader(handle)
        formatted = (self.format(row[0]) for row in rows if len(row) >= 1)
        return [cnpj for cnpj in formatted if self.valid(cnpj)]
|
def BSearch(a, x, lo=0, hi=None):
    """Return the index of x in a, or -1 if x is not in a.

    Arguments:
    a -- ordered numeric sequence
    x -- element to search within a
    lo -- lowest index to consider in search
    hi -- highest index to consider in search

    (These bounds mirror bisect.bisect_left's capability.)
    """
    if not a:
        return -1
    if hi is None:
        hi = len(a)
    pos = bisect_left(a, x, lo, hi)
    # bisect_left gives the insertion point; it is a hit only when it lands
    # inside the range on an element equal to x.
    if pos == hi or a[pos] != x:
        return -1
    return pos
|
def cash(self):
    """[float] Total available cash summed across all accounts."""
    total = 0
    for account in six.itervalues(self._accounts):
        total += account.cash
    return total
|
def updateD_H(self, x):
    """Compute the Hessian for the update of D.

    See [2] for the derivation of the Hessian.

    :param x: current value of the D parameters (1-D, length = number of
        D entries)
    :return: the (shifted) Hessian matrix, symmetric, len(x) x len(x)
    """
    # Refresh cached products (A*D etc.) for the current x.
    self.precompute(x)
    H = zeros((len(x), len(x)))
    Ai = zeros(self.A.shape[0])
    Aj = zeros(Ai.shape)
    for i in range(len(x)):
        Ai = self.A[:, i]
        # ti collects the terms depending only on column i; hoisted out of
        # the inner loop since it is invariant in j.
        ti = dot(self.AD, outer(self.R[:, i], Ai)) + dot(outer(Ai, self.R[i, :]), self.ADt)
        for j in range(i, len(x)):
            Aj = self.A[:, j]
            tj = outer(Ai, Aj)
            H[i, j] = (self.E * (self.R[i, j] * tj + self.R[j, i] * tj.T)
                       - ti * (dot(self.AD, outer(self.R[:, j], Aj))
                               + dot(outer(Aj, self.R[j, :]), self.ADt))).sum()
            # The Hessian is symmetric: fill the mirror entry.
            H[j, i] = H[i, j]
    H *= -2
    # Shift by the smallest eigenvalue — presumably to adjust conditioning
    # of the Hessian before it is used in the update; see [2] (TODO confirm
    # the intended sign/regularization semantics).
    e = eigvals(H).min()
    H = H + (eye(H.shape[0]) * e)
    return H
|
def _json_default_encoder(func):
    """Monkey-patch the core json encoder library's ``default`` method.

    If an object falls through normal encoding, check whether it exposes
    the special ``_redpipe_struct_as_dict`` attribute (set on Struct
    objects) and encode that instead; otherwise fall back to the wrapped
    ``default``. Patching this way does not interfere with existing code
    and can itself be wrapped by other methods, so it stays extensible.

    :param func: the JSONEncoder.default method.
    :return: an object that can be json serialized.
    """
    @wraps(func)
    def patched_default(self, obj):
        try:
            # Structs advertise themselves via this special attribute.
            return obj._redpipe_struct_as_dict  # noqa
        except AttributeError:
            pass
        # Not a Struct: defer to the original default() behavior.
        return func(self, obj)
    return patched_default
|
def findwithin(data):
    """Return an integer representing a binary vector, where 1 = within-
    subject factor, 0 = between. Input equals the entire data 2D list
    (i.e., column 0 = random factor, column -1 = measured values; those two
    are skipped).

    Note: input data is in |Stat format ... a list of lists ("2D list")
    with one row per measured value, first column = subject identifier,
    last column = score, one in-between column per factor (these columns
    contain level designations on each factor). See also stats.anova.__doc__.

    Usage:   lfindwithin(data)     data in |Stat format
    """
    # Number of factor columns (everything between subject id and score).
    numfact = len(data[0]) - 1
    withinvec = 0
    for col in range(1, numfact):
        examplelevel = pstat.unique(pstat.colex(data, col))[0]
        rows = pstat.linexand(data, col, examplelevel)
        # get 1 level of this factor
        factsubjs = pstat.unique(pstat.colex(rows, 0))
        allsubjs = pstat.unique(pstat.colex(data, 0))
        # If every subject appears at a single level of this factor, the
        # factor is within-subject; set the corresponding bit.
        if len(factsubjs) == len(allsubjs):  # fewer Ss than scores on this factor?
            withinvec = withinvec + (1 << col)
    return withinvec
|
async def get_encryption_aes_key(self) -> Tuple[bytes, Dict[str, str], str]:
    """Get an encryption key to encrypt an S3 object.

    :return: Raw AES key bytes, stringified JSON x-amz-matdesc (empty dict
        here), Base64 encoded x-amz-key
    """
    # Fresh 256-bit data key for this object.
    random_bytes = os.urandom(32)
    padder = PKCS7(AES.block_size).padder()
    # Run the CPU-bound padding/encryption off the event loop thread.
    padded_result = await self._loop.run_in_executor(
        None, lambda: (padder.update(random_bytes) + padder.finalize()))
    # self._cipher wraps the key-encryption key; ECB mode is implied by the
    # S3 client-side "x-amz-key" (KEK-wrapped data key) scheme.
    aesecb = self._cipher.encryptor()
    encrypted_result = await self._loop.run_in_executor(
        None, lambda: (aesecb.update(padded_result) + aesecb.finalize()))
    return random_bytes, {}, base64.b64encode(encrypted_result).decode()
|
def create(width, height, padding=0, padding_mode='constant', mode='x', tags=None):
    """Vel factory function building a RandomCrop augmentation."""
    crop_size = (width, height)
    return RandomCrop(size=crop_size, padding=padding,
                      padding_mode=padding_mode, mode=mode, tags=tags)
|
def _serialize(self,
               array_parent,  # type: ET.Element
               value,  # type: List
               state  # type: _ProcessorState
               ):
    # type: (...) -> None
    """Serialize each item of the array value and append the resulting
    elements to the array parent element."""
    if not value:
        # Nothing to do; also avoids iterating over a possibly-None value.
        return
    item_processor = self._item_processor
    for index, item in enumerate(value):
        state.push_location(item_processor.element_path, index)
        array_parent.append(item_processor.serialize(item, state))
        state.pop_location()
|
def create_configuration(self, node, ports):
    """Create RAID configuration on the bare metal.

    This method creates the desired RAID configuration as read from
    node['target_raid_config'].

    :param node: A dictionary of the node object
    :param ports: A list of dictionaries containing information of ports
        for the node
    :returns: The current RAID configuration, e.g.::

        raid_config = {
            'logical_disks': [{
                'size_gb': 100,
                'raid_level': 1,
                'physical_disks': ['5I:0:1', '5I:0:2'],
                'controller': 'Smart array controller'
    """
    # Copy so hpssa cannot mutate the node's stored config.
    target_raid_config = dict(node.get('target_raid_config', {}))
    return hpssa_manager.create_configuration(raid_config=target_raid_config)
|
def target_exists(self, target_id=0):
    """Return True or False indicating whether or not the specified
    target is present and valid.

    `target_id` is a target ID (or None for the first target)
    """
    try:
        found = self._target(target_id=target_id)
    except Exception as exc:
        log.error("Exception checking if target exists: {} {}".format(type(exc), exc))
        return False
    return found is not None
|
def resolve_to_callable(callable_name):
    """Resolve string :callable_name: to a callable.

    :param callable_name: String representing callable name as registered
        in ramses registry or dotted import path of callable. Can be
        wrapped in double curly brackets, e.g. '{{my_callable}}'.
    """
    from . import registry
    # Strip the optional {{...}} wrapping and surrounding whitespace.
    name = callable_name.replace('{{', '').replace('}}', '').strip()
    try:
        return registry.get(name)
    except KeyError:
        # Not registered: fall back to resolving a dotted import path.
        try:
            from zope.dottedname.resolve import resolve
            return resolve(name)
        except ImportError:
            raise ImportError('Failed to load callable `{}`'.format(name))
|
def _after_request(self, response):
    """Signal handler for request_finished: log the request summary line
    unless the request raised an exception, then pass the response on."""
    if getattr(g, '_has_exception', False):
        return response
    self.summary_logger.info('', extra=self.summary_extra())
    return response
|
def plot_pauli_transfer_matrix(ptransfermatrix, ax, labels, title):
    """Visualize the Pauli Transfer Matrix of a process.

    :param numpy.ndarray ptransfermatrix: The Pauli Transfer Matrix
    :param ax: The matplotlib axes.
    :param labels: The labels for the operator basis states.
    :param title: The title for the plot
    :return: The modified axis object.
    :rtype: AxesSubplot
    """
    image = ax.imshow(ptransfermatrix, interpolation="nearest",
                      cmap=rigetti_3_color_cm, vmin=-1, vmax=1)
    plt.colorbar(image, ax=ax)
    basis_size = len(labels)
    ax.set_xticks(range(basis_size))
    ax.set_yticks(range(basis_size))
    ax.set_xticklabels(labels, rotation=45)
    ax.set_yticklabels(labels)
    ax.set_xlabel("Input Pauli Operator", fontsize=20)
    ax.set_ylabel("Output Pauli Operator", fontsize=20)
    ax.set_title(title, fontsize=25)
    ax.grid(False)
    return ax
|
def _wait_for_macaroon(wait_url):
    '''Return a macaroon fetched from a legacy wait endpoint.'''
    response = requests.get(
        url=wait_url,
        headers={BAKERY_PROTOCOL_HEADER: str(bakery.LATEST_VERSION)},
    )
    if response.status_code != 200:
        raise InteractionError('cannot get {}'.format(wait_url))
    return bakery.Macaroon.from_dict(response.json().get('Macaroon'))
|
def _get_service_config(self):
    """Build the configuration for the service from a service key.

    The returned dict contains the service name under ``service_key`` plus
    all credential entries from the key's entity.
    """
    key = self._get_or_create_service_key()
    config = {'service_key': [{'name': self.name}]}
    config.update(key['entity']['credentials'])
    return config
|
def offset(polygons, distance, join='miter', tolerance=2, precision=0.001,
           join_first=False, max_points=199, layer=0, datatype=0):
    """Shrink or expand a polygon or polygon set.

    Parameters
    ----------
    polygons : polygon or array-like
        Polygons to be offset. Must be a ``PolygonSet``, ``CellReference``,
        ``CellArray``, or an array. The array may contain any of the
        previous objects or an array-like [N][2] of polygon vertices.
    distance : number
        Offset distance. Positive to expand, negative to shrink.
    join : {'miter', 'bevel', 'round'}
        Type of join used to create the offset polygon.
    tolerance : number
        For miter joints, at least 2: maximum distance (in multiples of
        offset) between new vertices and their original position before
        beveling to avoid spikes at acute joints. For round joints, the
        curvature resolution in points per full circle.
    precision : float
        Desired precision for rounding vertex coordinates.
    join_first : bool
        Join all paths before offsetting to avoid unnecessary joins in
        adjacent polygon sides.
    max_points : integer
        If greater than 4, fracture the resulting polygons to at most
        ``max_points`` vertices each (use 199 for GDSII output).
    layer : integer
        The GDSII layer number for the resulting element.
    datatype : integer
        The GDSII datatype for the resulting element (between 0 and 255).

    Returns
    -------
    out : ``PolygonSet`` or ``None``
        The offset shape as a set of polygons.
    """
    def _gather(obj, into):
        # Flatten any supported object into the working vertex-array list.
        if isinstance(obj, PolygonSet):
            into.extend(obj.polygons)
        elif isinstance(obj, (CellReference, CellArray)):
            into.extend(obj.get_polygons())
        else:
            into.append(obj)

    poly = []
    if isinstance(polygons, (PolygonSet, CellReference, CellArray)):
        _gather(polygons, poly)
    else:
        for obj in polygons:
            _gather(obj, poly)
    result = clipper.offset(poly, distance, join, tolerance, 1 / precision,
                            1 if join_first else 0)
    if len(result) == 0:
        return None
    return PolygonSet(result, layer, datatype,
                      verbose=False).fracture(max_points, precision)
|
def find_installed_packages(self):
    """Find the installed system packages.

    :returns: A sorted list of strings with system package names.
    :raises: :exc:`.SystemDependencyError` when the command to list the
        installed system packages fails.
    """
    # `lister` (renamed from `list_command`) avoids shadowing the
    # configured self.list_command string. shell=True is intentional: the
    # command line comes from internal configuration, not untrusted input.
    lister = subprocess.Popen(self.list_command, shell=True,
                              stdout=subprocess.PIPE)
    stdout, _ = lister.communicate()
    if lister.returncode != 0:
        raise SystemDependencyError(
            "The command to list the installed system packages failed! ({command})",
            command=self.list_command)
    installed_packages = sorted(stdout.decode().split())
    logger.debug("Found %i installed system package(s): %s",
                 len(installed_packages), installed_packages)
    return installed_packages
|
def collect_outs(self):
    """Collect and store the outputs from this rule.

    Verifies every declared output file exists under the buildroot, then
    stores each one in the cache manager keyed by this rule's metahash.

    :raises error.TargetBuildFailed: if any declared output is missing
    """
    # TODO: this should probably live in CacheManager.
    for outfile in self.rule.output_files or []:
        outfile_built = os.path.join(self.buildroot, outfile)
        if not os.path.exists(outfile_built):
            raise error.TargetBuildFailed(
                self.address, 'Output file is missing: %s' % outfile)
        # git_sha = gitrepo.RepoState().GetRepo(self.address.repo).repo.commit()
        # git_sha is insufficient, and is actually not all that useful.
        # More factors to include in hash:
        #   - commit/state of source repo of all dependencies
        #     (or all input files?)
        #   - Actually I like that idea: hash all the input files!
        #   - versions of build tools used (?)
        metahash = self._metahash()
        log.debug('[%s]: Metahash: %s', self.address, metahash.hexdigest())
        # TODO: record git repo state and buildoptions in cachemgr
        # TODO: move cachemgr to outer controller (?)
        self.cachemgr.putfile(outfile_built, self.buildroot, metahash)
|
def convert_join(value):
    """Convert a CloudFormation Fn::Join into the equivalent Fn::Sub (or a
    plain string) where possible; return the input unchanged (or rewrapped
    as Fn::Join) when conversion is not safe.

    :param value: the two-element Join payload ``[separator, parts]``
    """
    if not isinstance(value, list) or len(value) != 2:
        # Cowardly refuse: not a well-formed Join payload.
        return value
    sep, parts = value[0], value[1]
    if isinstance(parts, six.string_types):
        # Joining a single string is just that string.
        return parts
    if not isinstance(parts, list):
        # This looks tricky, just return the join as it was
        return {"Fn::Join": value, }
    plain_string = True
    args = ODict()
    new_parts = []
    for part in parts:
        part = clean(part)
        if isinstance(part, dict):
            plain_string = False
            if "Ref" in part:
                # Refs map directly to ${LogicalId} in Sub syntax.
                new_parts.append("${{{}}}".format(part["Ref"]))
            elif "Fn::GetAtt" in part:
                # GetAtt maps to ${LogicalId.Attribute}.
                params = part["Fn::GetAtt"]
                new_parts.append("${{{}}}".format(".".join(params)))
            else:
                # Arbitrary intrinsic: reuse an existing substitution
                # parameter if this exact value was seen before.
                for key, val in args.items():
                    # we want to bail if a conditional can evaluate to AWS::NoValue
                    if isinstance(val, dict):
                        if "Fn::If" in val and "AWS::NoValue" in str(val["Fn::If"]):
                            return {"Fn::Join": value, }
                    if val == part:
                        param_name = key
                        break
                else:
                    # No match found: allocate a new ParamN placeholder.
                    param_name = "Param{}".format(len(args) + 1)
                    args[param_name] = part
                new_parts.append("${{{}}}".format(param_name))
        elif isinstance(part, six.string_types):
            # Escape literal ${ so Sub does not treat it as a variable.
            new_parts.append(part.replace("${", "${!"))
        else:
            # Doing something weird; refuse
            return {"Fn::Join": value}
    source = sep.join(new_parts)
    if plain_string:
        # No intrinsics at all: the result is a plain literal string.
        return source
    if args:
        return ODict((("Fn::Sub", [source, args]),))
    return ODict((("Fn::Sub", source),))
|
def delete(cont, path=None, profile=None):
    '''Delete a container, or delete an object from a container.

    CLI Example to delete a container::

        salt myminion swift.delete mycontainer

    CLI Example to delete an object from a container::

        salt myminion swift.delete mycontainer remoteobject
    '''
    swift_conn = _auth(profile)
    if path is None:
        # No object path: remove the whole container.
        return swift_conn.delete_container(cont)
    return swift_conn.delete_object(cont, path)
|
def _parse_mibs(iLOIP, snmp_credentials):
    """Parse the MIBs.

    :param iLOIP: IP address of the server on which SNMP discovery
        has to be executed.
    :param snmp_credentials: a dictionary of SNMP credentials.
        auth_user: SNMP user
        auth_protocol: Auth Protocol
        auth_prot_pp: Pass phrase value for AuthProtocol.
        priv_protocol: Privacy Protocol.
        auth_priv_pp: Pass phrase value for Privacy Protocol.
    :returns: the dictionary of parsed MIBs.
    :raises exception.IloSNMPInvalidInputFailure: if pysnmp is unable to
        get SNMP data due to wrong inputs provided.
    :raises exception.IloSNMPExceptionFailure: if pysnmp raises any other
        exception.
    """
    result = {}
    usm_user_obj = _create_usm_user_obj(snmp_credentials)
    try:
        for (errorIndication, errorStatus, errorIndex, varBinds) in hlapi.nextCmd(
                hlapi.SnmpEngine(),
                usm_user_obj,
                hlapi.UdpTransportTarget((iLOIP, 161), timeout=3, retries=3),
                hlapi.ContextData(),
                # cpqida cpqDaPhyDrvTable Drive Array Physical Drive Table
                hlapi.ObjectType(hlapi.ObjectIdentity('1.3.6.1.4.1.232.3.2.5.1')),
                # cpqscsi SCSI Physical Drive Table
                hlapi.ObjectType(hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.2.4.1')),
                # cpqscsi SAS Physical Drive Table
                hlapi.ObjectType(hlapi.ObjectIdentity('1.3.6.1.4.1.232.5.5.2.1')),
                lexicographicMode=False,
                ignoreNonIncreasingOid=True):
            if errorIndication:
                LOG.error(errorIndication)
                # BUG FIX: was `msg = "..." , errorIndication`, which built
                # a tuple instead of a formatted string.
                msg = "SNMP failed to traverse MIBs %s" % errorIndication
                raise exception.IloSNMPInvalidInputFailure(msg)
            else:
                if errorStatus:
                    msg = ('Parsing MIBs failed. %s at %s' % (
                        errorStatus.prettyPrint(),
                        errorIndex and varBinds[-1][int(errorIndex) - 1] or '?'))
                    LOG.error(msg)
                    raise exception.IloSNMPInvalidInputFailure(msg)
                else:
                    for varBindTableRow in varBinds:
                        name, val = tuple(varBindTableRow)
                        oid, label, suffix = (mibViewController.getNodeName(name))
                        key = name.prettyPrint()
                        # Don't traverse outside the tables we requested
                        if not (key.find("SNMPv2-SMI::enterprises.232.3") >= 0
                                or (key.find("SNMPv2-SMI::enterprises.232.5") >= 0)):
                            break
                        if key not in result:
                            result[key] = {}
                            result[key][label[-1]] = {}
                        result[key][label[-1]][suffix] = val
    except exception.IloSNMPInvalidInputFailure:
        # BUG FIX: don't let the broad handler below rewrap the invalid-
        # input failures raised above; the docstring promises they
        # propagate distinctly.
        raise
    except Exception as e:
        # BUG FIX: was `msg = "..." , e` (a tuple); format properly.
        msg = "SNMP library failed with error %s" % e
        LOG.error(msg)
        raise exception.IloSNMPExceptionFailure(msg)
    return result
|
def build(c, clean=False, browse=False, nitpick=False, opts=None, source=None, target=None):
    """Build the project's Sphinx docs."""
    if clean:
        _clean(c)
    if opts is None:
        opts = ""
    if nitpick:
        # Nitpick mode: warn on missing refs, treat warnings as errors,
        # and show full tracebacks.
        opts += " -n -W -T"
    flag_part = (" " + opts) if opts else ""
    cmd = "sphinx-build{0} {1} {2}".format(
        flag_part,
        source or c.sphinx.source,
        target or c.sphinx.target,
    )
    c.run(cmd, pty=True)
    if browse:
        _browse(c)
|
def cql(self, cql, start=0, limit=None, expand=None, include_archived_spaces=None, excerpt=None):
    """Get results from a CQL search with all related fields.

    Search for entities in Confluence using the Confluence Query Language
    (CQL).

    :param cql: the CQL query string
    :param start: OPTIONAL: start point of the collection to return.
        Default: 0.
    :param limit: OPTIONAL: limit of the number of issues to return; may be
        restricted by fixed system limits. Server default: 25.
    :param expand: OPTIONAL: properties to expand on the search result;
        may cause database requests for some properties
    :param include_archived_spaces: OPTIONAL: whether to include content in
        archived spaces; defaults to false server-side
    :param excerpt: the excerpt strategy to apply to the result, one of:
        indexed, highlight, none. Server default: highlight.
    :return: the parsed search response
    """
    params = {}
    # start/limit are coerced to int; the rest are passed through verbatim.
    if start is not None:
        params['start'] = int(start)
    if limit is not None:
        params['limit'] = int(limit)
    for name, value in (('cql', cql),
                        ('expand', expand),
                        ('includeArchivedSpaces', include_archived_spaces),
                        ('excerpt', excerpt)):
        if value is not None:
            params[name] = value
    return self.get('rest/api/search', params=params)
|
def clear(self  # type: ORMTask
          ):
    """Delete all objects created by this task.

    Marks the task incomplete, then iterates over ``self.object_classes``
    and deletes all objects of the listed classes before closing the
    session.
    """
    self.mark_incomplete()
    for object_class in self.object_classes:
        self.session.query(object_class).delete()
    self.close_session()
|
def add_project(self, path):
    """Adds a project.

    :param path: Project path.
    :type path: unicode
    :return: Method success.
    :rtype: bool
    """
    # Reject paths that do not exist on disk.
    if not foundations.common.path_exists(path):
        return False

    path = os.path.normpath(path)
    # A project may only be opened once.
    if self.__model.get_project_nodes(path):
        self.__engine.notifications_manager.warnify(
            "{0} | '{1}' project is already opened!".format(self.__class__.__name__, path))
        return False

    LOGGER.info("{0} | Adding '{1}' project!".format(self.__class__.__name__, path))
    project_node = self.__model.register_project(path)
    if project_node:
        self.__model.set_project_nodes(project_node)
        return True
    return False
|
def from_str(cls, value):
    """Create a MOC from a str.

    This grammar is expressed is the `MOC IVOA <http://ivoa.net/documents/MOC/20190215/WD-MOC-1.1-20190215.pdf>`__
    specification at section 2.3.2.

    Parameters
    ----------
    value : str
        The MOC as a string following the grammar rules.

    Returns
    -------
    moc : `~mocpy.moc.MOC` or `~mocpy.tmoc.TimeMOC`
        The resulting MOC

    Examples
    --------
    >>> from mocpy import MOC
    >>> moc = MOC.from_str("2/2-25,28,29 4/0 6/")
    """
    # Import lark parser when from_str is called
    # at least one time
    from lark import Lark, Transformer

    class ParsingException(Exception):
        # Raised when `value` does not follow the MOC string grammar.
        pass

    class TreeToJson(Transformer):
        # Transforms the lark parse tree into the {depth: [ipix, ...]}
        # mapping consumed by cls.from_json() below.

        def value(self, items):
            res = {}
            for item in items:
                if item is not None:  # Do not take into account the "sep" branches
                    res.update(item)
            return res

        def sep(self, items):
            # Separators carry no payload; they transform to None.
            pass

        def depthpix(self, items):
            # One "depth/pixels" group: first item is the depth, the rest
            # is the already-transformed pixel list.
            depth = str(items[0])
            pixs_l = items[1:][0]
            return {depth: pixs_l}

        def uniq_pix(self, pix):
            # A single pixel index; an empty match yields None.
            if pix:
                return [int(pix[0])]

        def range_pix(self, range_pix):
            # An inclusive "lower-upper" pixel range.
            lower_bound = int(range_pix[0])
            upper_bound = int(range_pix[1])
            return np.arange(lower_bound, upper_bound + 1, dtype=int)

        def pixs(self, items):
            ipixs = []
            for pix in items:
                if pix is not None:  # Do not take into account the "sep" branches
                    ipixs.extend(pix)
            return ipixs

    # Initialize the parser when from_str is called
    # for the first time
    if AbstractMOC.LARK_PARSER_STR is None:
        AbstractMOC.LARK_PARSER_STR = Lark(r"""
            value: depthpix (sep+ depthpix)*
            depthpix : INT "/" sep* pixs
            pixs : pix (sep+ pix)*
            pix : INT? -> uniq_pix
                | (INT "-" INT) -> range_pix
            sep : " " | "," | "\n" | "\r"
            %import common.INT
            """, start='value')

    try:
        tree = AbstractMOC.LARK_PARSER_STR.parse(value)
    except Exception as err:
        raise ParsingException("Could not parse {0}. \n Check the grammar section 2.3.2 of http://ivoa.net/documents/MOC/20190215/WD-MOC-1.1-20190215.pdf to see the correct syntax for writing a MOC from a str".format(value))

    moc_json = TreeToJson().transform(tree)
    return cls.from_json(moc_json)
|
def build_url_field(self, field_name, model_class):
    """Create a field representing the object's own URL.

    :param field_name: name of the field being built (unused here)
    :param model_class: model the URL keyword arguments are derived from
    :return: ``(field_class, field_kwargs)`` tuple for the serializer
    """
    return self.serializer_url_field, get_url_kwargs(model_class)
|
def _patch_file(path, content):
    """Will backup the file then patch it.

    :param path: path of the file to patch
    :param content: full replacement content
    :return: ``True`` if the file was patched, ``False`` if it already
        contained ``content``
    """
    # Use context managers so the handles are closed even when I/O raises
    # (the previous code leaked the read handle on a failed read()).
    with open(path) as f:
        existing_content = f.read()
    if existing_content == content:
        # already patched
        log.warn('Already patched.')
        return False
    log.warn('Patching...')
    # Keep a backup of the original file before overwriting it.
    _rename_path(path)
    with open(path, 'w') as f:
        f.write(content)
    return True
|
def metadata_converter_help_content():
    """Helper method that returns just the content in extent mode.

    This method was added so that the text could be reused in the wizard.

    :returns: A message object without brand element.
    :rtype: safe.messaging.message.Message
    """
    message = m.Message()
    # Each translated text becomes its own paragraph, in order.
    for text in (
            tr('This tool will convert InaSAFE 4.x keyword metadata into the '
               'metadata format used by InaSAFE 3.5. The primary reason for doing '
               'this is to prepare data for use in GeoSAFE - the online version of '
               'InaSAFE.'),
            tr('You should note that this tool will not touch the original data or '
               'metadata associated with a layer. Instead it will make a copy of the '
               'original layer to the place that you nominate, and create a new '
               'keywords XML file to accompany that data. This new keywords file '
               'will contain InaSAFE keywords in the 3.5 format.')):
        message.add(m.Paragraph(text))
    return message
|
def determine_scale(scale, img, mark):
    """Scales an image using a specified ratio, 'F' or 'R'.

    If ``scale`` is 'F', the image is scaled to be as big as possible to fit
    in ``img`` without falling off the edges.  If ``scale`` is 'R', the
    watermark resizes to a percentage of the minimum size of the source
    image.  Returns the scaled size of ``mark`` as a ``(width, height)``
    tuple, or ``mark.size`` unchanged when no scale is given.
    """
    if not scale:
        # No scaling requested: keep the watermark's own size.
        return mark.size

    try:
        scale = float(scale)
    except (ValueError, TypeError):
        pass

    if isinstance(scale, six.string_types) and scale.upper() == 'F':
        # Scale watermark to full size, preserving the aspect ratio.
        scale = min(float(img.size[0]) / mark.size[0],
                    float(img.size[1]) / mark.size[1])
    elif isinstance(scale, six.string_types) and scale.upper() == 'R':
        # Scale watermark to a percentage of the source image, preserving
        # the aspect ratio.
        scale = min(float(img.size[0]) / mark.size[0],
                    float(img.size[1]) / mark.size[1]) / 100 * settings.WATERMARK_PERCENTAGE
    elif type(scale) not in (float, int):
        raise ValueError('Invalid scale value "%s"! Valid values are "F" '
                         'for ratio-preserving scaling, "R%%" for percantage aspect '
                         'ratio of source image and floating-point numbers and '
                         'integers greater than 0.' % scale)

    # Apply the computed ratio to the watermark's dimensions.
    return int(mark.size[0] * float(scale)), int(mark.size[1] * float(scale))
|
def runExperimentPool(numObjects,
                      numLocations,
                      numFeatures,
                      numColumns,
                      longDistanceConnectionsRange=[0.0],
                      numWorkers=7,
                      nTrials=1,
                      numPoints=10,
                      locationNoiseRange=[0.0],
                      featureNoiseRange=[0.0],
                      enableFeedback=[True],
                      ambiguousLocationsRange=[0],
                      numInferenceRpts=1,
                      settlingTime=3,
                      l2Params=None,
                      l4Params=None,
                      resultsName="convergence_results.pkl"):
  """
  Allows you to run a number of experiments using multiple processes.
  For each parameter except numWorkers, pass in a list containing valid values
  for that parameter. The cross product of everything is run, and each
  combination is run nTrials times.

  Returns a list of dict containing detailed results from each experiment.
  Also pickles and saves the results in resultsName for later analysis.

  NOTE(review): the list defaults above are mutable default arguments and are
  shared across calls; safe as long as callers never mutate them in place.

  Example:
    results = runExperimentPool(
                      numObjects=[10],
                      numLocations=[5],
                      numFeatures=[5],
                      numColumns=[2,3,4,5,6],
                      numWorkers=8,
                      nTrials=5)
  """
  # Create function arguments for every possibility
  # (full cross product of all parameter lists, repeated nTrials times).
  args = []
  for c in reversed(numColumns):
    for o in reversed(numObjects):
      for l in numLocations:
        for f in numFeatures:
          for p in longDistanceConnectionsRange:
            for t in range(nTrials):
              for locationNoise in locationNoiseRange:
                for featureNoise in featureNoiseRange:
                  for ambiguousLocations in ambiguousLocationsRange:
                    for feedback in enableFeedback:
                      args.append(
                        {"numObjects": o,
                         "numLocations": l,
                         "numFeatures": f,
                         "numColumns": c,
                         "trialNum": t,
                         "numPoints": numPoints,
                         "longDistanceConnections": p,
                         "plotInferenceStats": False,
                         "locationNoise": locationNoise,
                         "featureNoise": featureNoise,
                         "enableFeedback": feedback,
                         "numAmbiguousLocations": ambiguousLocations,
                         "numInferenceRpts": numInferenceRpts,
                         "l2Params": l2Params,
                         "l4Params": l4Params,
                         "settlingTime": settlingTime,
                         })
  numExperiments = len(args)
  print "{} experiments to run, {} workers".format(numExperiments, numWorkers)
  # Run the pool
  if numWorkers > 1:
    pool = Pool(processes=numWorkers)
    # chunksize=1 so each worker grabs one experiment at a time.
    rs = pool.map_async(runExperiment, args, chunksize=1)
    # Poll the async result and report progress every 5 seconds.
    while not rs.ready():
      remaining = rs._number_left
      pctDone = 100.0 - (100.0 * remaining) / numExperiments
      print "  =>", remaining, "experiments remaining, percent complete=", pctDone
      time.sleep(5)
    pool.close()  # No more work
    pool.join()
    result = rs.get()
  else:
    # Single-worker mode: run everything serially in this process.
    result = []
    for arg in args:
      result.append(runExperiment(arg))
  # print "Full results:"
  # pprint.pprint(result, width=150)
  # Pickle results for later use.
  with open(resultsName, "wb") as f:
    cPickle.dump(result, f)
  return result
|
def _build_fields(self):
    """Builds a list of valid fields.

    Populates ``self.field_list`` with the schema's static field names and
    ``self._dynamic_field_regexes`` with compiled patterns matching its
    dynamic fields.
    """
    declared_fields = self.solr._send_request('get', ADMIN_URL)
    result = decoder.decode(declared_fields)
    self.field_list = self._parse_fields(result, 'fields')

    # Build regular expressions to match dynamic fields.
    # dynamic field names may have exactly one wildcard, either at
    # the beginning or the end of the name.
    # FIX: use raw strings (the old "\Z"/"\A" were invalid escape sequences)
    # and re.escape() so the literal part of the field name cannot be
    # interpreted as regex metacharacters.
    self._dynamic_field_regexes = []
    for wc_pattern in self._parse_fields(result, 'dynamicFields'):
        if wc_pattern[0] == "*":
            # Leading wildcard: any prefix followed by the literal suffix.
            self._dynamic_field_regexes.append(
                re.compile(r".*%s\Z" % re.escape(wc_pattern[1:])))
        elif wc_pattern[-1] == "*":
            # Trailing wildcard: the literal prefix followed by any suffix.
            self._dynamic_field_regexes.append(
                re.compile(r"\A%s.*" % re.escape(wc_pattern[:-1])))
|
def _generate_sql(self, keys, changed_keys):
    """Generate forward operations for changing/creating SQL items.

    :param keys: ``(app_label, sql_name)`` keys to process; iterated in
        reverse order.
    :param changed_keys: subset of ``keys`` whose SQL changed; these get an
        ``AlterSQL`` operation, all others get ``CreateSQL``.
    """
    for key in reversed(keys):
        app_label, sql_name = key
        new_item = self.to_sql_graph.nodes[key]
        # Keys of the item's parents (dependencies) in the target state.
        sql_deps = [n.key for n in self.to_sql_graph.node_map[key].parents]
        reverse_sql = new_item.reverse_sql
        if key in changed_keys:
            operation_cls = AlterSQL
            kwargs = {}
            # in case of replace mode, AlterSQL will hold sql, reverse_sql and
            # state_reverse_sql, the latter one will be used for building state forward
            # instead of reverse_sql.
            if new_item.replace:
                kwargs['state_reverse_sql'] = reverse_sql
                # Reverse by restoring the previous (source-state) SQL.
                reverse_sql = self.from_sql_graph.nodes[key].sql
        else:
            operation_cls = CreateSQL
            kwargs = {'dependencies': list(sql_deps)}
        operation = operation_cls(sql_name, new_item.sql, reverse_sql=reverse_sql, **kwargs)
        # Register the operation with its dependency keys, including the
        # item's own key.
        sql_deps.append(key)
        self.add_sql_operation(app_label, sql_name, operation, sql_deps)
|
def feature_parser(uni_feature, word_surface):
    # type: (text_type, text_type) -> Tuple[Tuple[text_type, text_type, text_type], text_type]
    """Parse the POS feature output by Mecab.

    :param uni_feature: comma-separated feature string for one token
    :param word_surface: surface form of the token; used as the stem when the
        dictionary does not provide one
    :return ((pos1, pos2, pos3), word_stem):
    """
    list_feature_items = uni_feature.split(',')
    # if word has no feature at all.
    # FIX: previously this returned ('*'), ('*') -- a flat 2-tuple of
    # strings -- which did not match the documented
    # ((pos1, pos2, pos3), word_stem) return shape.
    if len(list_feature_items) == 1:
        return ('*', '*', '*'), '*'
    pos1 = list_feature_items[0]
    pos2 = list_feature_items[1]
    pos3 = list_feature_items[2]
    tuple_pos = (pos1, pos2, pos3)
    # if without constraint (output is normal mecab dictionary like):
    # a 9-column entry whose 7th column is the stem.
    if len(list_feature_items) == 9:
        word_stem = list_feature_items[6]
    # if with constraint (output format depends on Usedict.txt):
    # fall back to the surface form.
    else:
        word_stem = word_surface
    return tuple_pos, word_stem
|
def porttree_matches(name):
    '''Returns a list containing the matches for a given package name from the
    portage tree. Note that the specific version of the package will not be
    provided for packages that have several versions in the portage tree, but
    rather the name of the package (i.e. "dev-python/paramiko").
    '''
    matches = []
    # Look the portage tree up once instead of twice per category.
    dbapi = _porttree().dbapi
    for category in dbapi.categories:
        if dbapi.cp_list(category + "/" + name):
            matches.append(category + "/" + name)
    return matches
|
def write_config(self):
    """Writes the provisioner's config file to disk and returns None.

    :return: None
    """
    # Render the template with this provisioner's options, then persist it.
    rendered = util.render_template(
        self._get_config_template(), config_options=self.config_options)
    util.write_file(self.config_file, rendered)
|
def get_language_parameter(request, query_language_key='language', object=None, default=None):
    """Get the language parameter from the current request.

    This is the same logic as the django-admin uses; only the origin of the
    request parameter differs.
    """
    if not is_multilingual_project():
        # By default, the objects are stored in a single static language.
        # This makes the transition to multilingual easier as well.
        # The default language can operate as fallback language too.
        return default or appsettings.PARLER_LANGUAGES.get_default_language()

    # In multilingual mode, take the provided language of the request,
    # falling back to the default (forms: show first tab by default).
    code = (request.GET.get(query_language_key)
            or default
            or appsettings.PARLER_LANGUAGES.get_first_language())
    return normalize_language_code(code)
|
def _login(session):
    """Login.

    Performs the SSO SAML login flow on the given requests session and
    saves the resulting cookies to ``session.auth.cookie_path``.
    """
    _LOGGER.info("logging in (no valid cookie found)")
    # Start from a clean cookie jar before re-authenticating.
    session.cookies.clear()
    # The SSO response embeds the SAML relay state and assertion as hidden
    # form inputs.
    resp = session.post(SSO_URL, {'USER': session.auth.username, 'PASSWORD': session.auth.password, 'TARGET': TARGET_URL})
    parsed = BeautifulSoup(resp.text, HTML_PARSER)
    relay_state = parsed.find('input', {'name': 'RelayState'}).get('value')
    saml_response = parsed.find('input', {'name': 'SAMLResponse'}).get('value')
    # Post the SAML assertion back to the sign-in endpoint, then GET it to
    # complete the session establishment.
    session.post(SIGNIN_URL, {'RelayState': relay_state, 'SAMLResponse': saml_response})
    session.get(SIGNIN_URL)
    _save_cookies(session.cookies, session.auth.cookie_path)
|
def _delay_for_ratelimits ( cls , start ) :
"""If request was shorter than max request time , delay"""
|
stop = datetime . now ( )
duration_microseconds = ( stop - start ) . microseconds
if duration_microseconds < cls . REQUEST_TIME_MICROSECONDS :
time . sleep ( ( cls . REQUEST_TIME_MICROSECONDS - duration_microseconds ) / MICROSECONDS_PER_SECOND )
|
def del_membership(self, user, role):
    """Dismember user from a group.

    :param user: the user to remove
    :param role: the role/group name to remove the user from
    :return: ``True`` (also when the user was not a member to begin with)
    """
    if not self.has_membership(user, role):
        return True
    targetRecord = AuthMembership.objects(creator=self.client, user=user).first()
    if not targetRecord:
        return True
    # FIX: iterate over a copy -- removing from the list while iterating it
    # skips the element that follows each removal.
    removed = False
    for group in list(targetRecord.groups):
        if group.role == role:
            targetRecord.groups.remove(group)
            removed = True
    if removed:
        targetRecord.save()
    return True
|
def browse_website(self, browser=None):
    """Launch web browser at project's homepage.

    @param browser: name of web browser to use
    @type browser: string

    @returns: 0 if homepage found, 1 if no homepage found
    """
    if len(self.all_versions):
        metadata = self.pypi.release_data(self.project_name, self.all_versions[0])
        self.logger.debug("DEBUG: browser: %s" % browser)
        # FIX: dict.has_key() was removed in Python 3; the `in` operator
        # works on both Python 2 and 3.
        if "home_page" in metadata:
            self.logger.info("Launching browser: %s" % metadata["home_page"])
            if browser == 'konqueror':
                browser = webbrowser.Konqueror()
            else:
                browser = webbrowser.get()
            # Open in a new tab if possible.
            browser.open(metadata["home_page"], 2)
            return 0
    self.logger.error("No homepage URL found.")
    return 1
|
def pip_version(self):
    """Get the pip version in the environment. Useful for knowing which args we can use
    when installing.

    :return: the parsed version of the installed ``pip`` package, or ``None``
        when pip is not installed.
    """
    from .vendor.packaging.version import parse as parse_version
    pip = next(iter(pkg for pkg in self.get_installed_packages() if pkg.key == "pip"), None)
    if pip is not None:
        # FIX: the parsed version was previously computed and then discarded
        # in favour of a hard-coded parse_version("18.0").
        return parse_version(pip.version)
|
def _coefficient_handler_factory(trans_table, parse_func, assertion=lambda c, ctx: True, ion_type=None, append_first_if_not=None):
    """Generates a handler co-routine which tokenizes a numeric coefficient.

    Args:
        trans_table (dict): lookup table for the handler for the next component of this numeric token, given the
            ordinal of the first character in that component.
        parse_func (callable): Called upon ending the numeric value. Accepts the current token value and returns a
            thunk that lazily parses the token.
        assertion (callable): Accepts the first character's ordinal and the current context. Returns True if this is
            a legal start to the component.
        ion_type (Optional[IonType]): The type of the value if it were to end on this coefficient.
        append_first_if_not (Optional[int]): The ordinal of a character that should not be appended to the token if
            it occurs first in this component (e.g. an underscore in many cases).
    """
    def transition(prev, c, ctx, trans):
        # An underscore must be followed by a digit; if the next component
        # starts right after one, the input is illegal.
        if prev == _UNDERSCORE:
            _illegal_character(c, ctx, 'Underscore before %s.' % (_chr(c),))
        # Otherwise hand off to the next component's handler.
        return ctx.immediate_transition(trans_table[c](c, ctx))

    # Coefficients consist of digits and may be terminated by a '.'.
    return _numeric_handler_factory(_DIGITS, transition, assertion, (_DOT,), parse_func, ion_type=ion_type, append_first_if_not=append_first_if_not)
|
def post_config_hook(self):
    """Initialization: set the module's default state."""
    self._no_force_on_change = True
    # Nothing is active or displayed until the first refresh.
    self.active_comb = self.active_layout = self.displayed = None
    self.active_mode = "extend"
    self.max_width = 0
|
def import_uploads(self, uploads=None, upload_ids=None, synchronous=True, **kwargs):
    """Import uploads into a repository.

    It expects either a list of uploads or upload_ids (but not both).

    :param uploads: Array of uploads to be imported
    :param upload_ids: Array of upload ids to be imported
    :param synchronous: What should happen if the server returns an HTTP
        202 (accepted) status code? Wait for the task to complete if
        ``True``. Immediately return the server's response otherwise.
    :param kwargs: Arguments to pass to requests.
    :returns: The server's response, with all JSON decoded.
    :raises ValueError: If neither ``uploads`` nor ``upload_ids`` is given.
    :raises: ``requests.exceptions.HTTPError`` If the server responds with
        an HTTP 4XX or 5XX message.
    """
    kwargs = kwargs.copy()  # shadow the passed-in kwargs
    kwargs.update(self._server_config.get_client_kwargs())
    if uploads:
        data = {'uploads': uploads}
    elif upload_ids:
        data = {'upload_ids': upload_ids}
    else:
        # FIX: previously this fell through with `data` unbound, producing a
        # confusing NameError instead of a clear message.
        raise ValueError('Either `uploads` or `upload_ids` must be given.')
    response = client.put(self.path('import_uploads'), data, **kwargs)
    json = _handle_response(response, self._server_config, synchronous)
    return json
|
def daily_freezethaw_cycles(tasmax, tasmin, freq='YS'):
    r"""Number of days with a diurnal freeze-thaw cycle

    The number of days where Tmax > 0℃ and Tmin < 0℃.

    Parameters
    ----------
    tasmax : xarray.DataArray
      Maximum daily temperature [℃] or [K]
    tasmin : xarray.DataArray
      Minimum daily temperature values [℃] or [K]
    freq : str
      Resampling frequency

    Returns
    -------
    xarray.DataArray
      Number of days with a diurnal freeze-thaw cycle

    Notes
    -----
    Let :math:`TX_{i}` be the maximum temperature at day :math:`i` and :math:`TN_{i}` be
    the daily minimum temperature at day :math:`i`. Then the number of freeze thaw cycles
    during period :math:`\phi` is given by:

    .. math::

        \sum_{i \in \phi} [ TX_{i} > 0℃ ] [ TN_{i} <  0℃ ]

    where :math:`[P]` is 1 if :math:`P` is true, and 0 if false.
    """
    # Express the freezing point in the same units as the input data.
    freezing_point = utils.convert_units_to('0 degC', tasmax)
    # 1 on days where the minimum is below and the maximum above freezing.
    cycle_flags = (tasmin < freezing_point) * (tasmax > freezing_point) * 1
    return cycle_flags.resample(time=freq).sum(dim='time')
|
def _machine_bytes():
    """Get the machine portion of an ObjectId.

    Returns the first three bytes of the MD5 digest of the hostname.
    """
    machine_hash = hashlib.md5()
    if PY3:
        # gethostname() returns a unicode string in python 3.x
        # while update() requires a byte string.
        machine_hash.update(socket.gethostname().encode())
    else:
        # Calling encode() here will fail with non-ascii hostnames
        machine_hash.update(socket.gethostname())
    return machine_hash.digest()[0:3]
|
def validate_bool(b):
    """Convert *b* to a boolean or raise ``ValueError``."""
    # String inputs are compared case-insensitively.
    if isinstance(b, six.string_types):
        b = b.lower()
    if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
        return True
    if b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
        return False
    raise ValueError('Could not convert "%s" to boolean' % b)
|
def Y_dist(self, new_y_distance):
    """Use preset values for the distance between lines.

    Stores the new value on the parent and redraws the traces.
    """
    self.parent.value('y_distance', new_y_distance)
    self.parent.traces.display()
|
def merge(id, card, cardscript=None):
    """Find the xmlcard and the card definition of ``id``,
    then return a merged class of the two.

    The returned card gets a ``scripts`` class attribute whose script hooks
    are normalized to iterables so callers can iterate them unconditionally.
    """
    if card is None:
        card = cardxml.CardXML(id)
    if cardscript is None:
        cardscript = get_script_definition(id)
    # Build the scripts holder, inheriting from the script definition when
    # one exists.
    if cardscript:
        card.scripts = type(id, (cardscript, ), {})
    else:
        card.scripts = type(id, (), {})

    scriptnames = ("activate", "combo", "deathrattle", "draw", "inspire", "play", "enrage", "update", "powered_up")

    for script in scriptnames:
        actions = getattr(card.scripts, script, None)
        if actions is None:
            # Set the action by default to avoid runtime hasattr() calls
            setattr(card.scripts, script, [])
        elif not callable(actions):
            if not hasattr(actions, "__iter__"):
                # Ensure the actions are always iterable
                setattr(card.scripts, script, (actions, ))

    # Normalize event hooks to lists as well.
    for script in ("events", "secret"):
        events = getattr(card.scripts, script, None)
        if events is None:
            setattr(card.scripts, script, [])
        elif not hasattr(events, "__iter__"):
            setattr(card.scripts, script, [events])

    if not hasattr(card.scripts, "cost_mod"):
        card.scripts.cost_mod = None

    # Guarantee a Hand inner class with iterable `events` and `update`.
    if not hasattr(card.scripts, "Hand"):
        card.scripts.Hand = type("Hand", (), {})

    if not hasattr(card.scripts.Hand, "events"):
        card.scripts.Hand.events = []
    if not hasattr(card.scripts.Hand.events, "__iter__"):
        card.scripts.Hand.events = [card.scripts.Hand.events]

    if not hasattr(card.scripts.Hand, "update"):
        card.scripts.Hand.update = ()
    if not hasattr(card.scripts.Hand.update, "__iter__"):
        card.scripts.Hand.update = (card.scripts.Hand.update, )

    # Set choose one cards
    if hasattr(cardscript, "choose"):
        card.choose_cards = cardscript.choose[:]
    else:
        card.choose_cards = []

    # Copy script-declared tags onto the card.
    if hasattr(cardscript, "tags"):
        for tag, value in cardscript.tags.items():
            card.tags[tag] = value

    # Set some additional events based on the base tags...
    if card.poisonous:
        card.scripts.events.append(POISONOUS)

    return card
|
def query_signing(self, contract_id=None, plan_id=None, contract_code=None, openid=None, version="1.0"):
    """Query a signing (entrusted withhold) contract.

    One of the following identifier combinations must be supplied:
    ``contract_id`` alone, ``plan_id`` + ``contract_code``, or
    ``plan_id`` + ``openid``.

    :param contract_id: optional; withhold contract id returned by WeChat
        after a successful signing
    :param plan_id: optional; withhold template id configured on the
        merchant platform
    :param contract_code: optional; merchant-side unique signing code
    :param openid: optional; user openid, must match the configured appid
    :param version: protocol version, fixed value "1.0"
    :return: the decoded response
    """
    has_valid_selector = bool(
        contract_id or (plan_id and contract_code) or (plan_id and openid))
    if not has_valid_selector:
        raise ValueError("contract_id and (plan_id, contract_code) and (plan_id, openid) must be a choice.")
    payload = {
        "appid": self.appid,
        "mch_id": self.mch_id,
        "contract_id": contract_id,
        "plan_id": plan_id,
        "contract_code": contract_code,
        "openid": openid,
        "version": version,
        "nonce_str": None,
    }
    return self._post('papay/querycontract', data=payload)
|
def acquire_win(lock_file):  # pragma: no cover
    """Acquire a lock file on windows.

    :param lock_file: path of the lock file to acquire
    :return: the open file descriptor holding the lock, or ``None`` when the
        file cannot be opened or is already locked by another process.
    """
    try:
        fd = os.open(lock_file, OPEN_MODE)
    except OSError:
        # Could not create/open the lock file: treat as "not acquired".
        pass
    else:
        try:
            # Take a non-blocking lock on the first byte of the file.
            msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
        except (IOError, OSError):
            # Someone else holds the lock; release our descriptor.
            os.close(fd)
        else:
            return fd
|
def _prepare_variables(self):
    """Prepare Variables for YellowFin.

    Returns:
      Grad**2, Norm, Norm**2, Mean(Norm**2) ops
    """
    self._moving_averager = tf.train.ExponentialMovingAverage(decay=self._beta, zero_debias=self._zero_debias)
    # assert self._grad is not None and len(self._grad) > 0
    # List for the returned Operations
    prepare_variables_op = []
    # Get per var g**2 and norm**2
    self._grad_squared = []
    self._grad_norm_squared = []
    # Gradient squared; variables with no gradient are skipped.
    for v, g in zip(self._vars, self._grad):
        if g is None:
            continue
        with tf.colocate_with(v):
            self._grad_squared.append(tf.square(g))
    # Norm squared.
    self._grad_norm_squared = [tf.reduce_sum(g_sq) for g_sq in self._grad_squared]
    if self._sparsity_debias:
        avg_op_sparsity = self._grad_sparsity()
        prepare_variables_op.append(avg_op_sparsity)
    # The following running average on squared norm of gradient
    # is shared by grad_var and dist_to_opt
    avg_op = self._moving_averager.apply(self._grad_norm_squared)
    with tf.control_dependencies([avg_op]):
        # Read the averages only after the averaging op has run.
        self._grad_norm_squared_avg = [self._moving_averager.average(val) for val in self._grad_norm_squared]
        self._grad_norm_squared = tf.add_n(self._grad_norm_squared)
        self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg)
    prepare_variables_op.append(avg_op)
    return tf.group(*prepare_variables_op)
|
def embed_file(self,
               input_file: IO,
               output_file_path: str,
               output_format: str = "all",
               batch_size: int = DEFAULT_BATCH_SIZE,
               forget_sentences: bool = False,
               use_sentence_keys: bool = False) -> None:
    """Computes ELMo embeddings from an input_file where each line contains a sentence tokenized by whitespace.
    The ELMo embeddings are written out in HDF5 format, where each sentence embedding
    is saved in a dataset with the line number in the original file as the key.

    Parameters
    ----------
    input_file : ``IO``, required
        A file with one tokenized sentence per line.
    output_file_path : ``str``, required
        A path to the output hdf5 file.
    output_format : ``str``, optional, (default = "all")
        The embeddings to output.  Must be one of "all", "top", or "average".
    batch_size : ``int``, optional, (default = 64)
        The number of sentences to process in ELMo at one time.
    forget_sentences : ``bool``, optional, (default = False).
        If use_sentence_keys is False, whether or not to include a string
        serialized JSON dictionary that associates sentences with their
        line number (its HDF5 key). The mapping is placed in the
        "sentence_to_index" HDF5 key. This is useful if
        you want to use the embeddings without keeping the original file
        of sentences around.
    use_sentence_keys : ``bool``, optional, (default = False).
        Whether or not to use full sentences as keys. By default,
        the line numbers of the input file are used as ids, which is more robust.
    """
    assert output_format in ["all", "top", "average"]

    # Tokenizes the sentences.
    sentences = [line.strip() for line in input_file]

    # Empty lines would produce empty token lists, so reject them up front.
    blank_lines = [i for (i, line) in enumerate(sentences) if line == ""]
    if blank_lines:
        raise ConfigurationError(f"Your input file contains empty lines at indexes "
                                 f"{blank_lines}. Please remove them.")
    split_sentences = [sentence.split() for sentence in sentences]
    # Uses the sentence index as the key.
    if use_sentence_keys:
        logger.warning("Using sentences as keys can fail if sentences "
                       "contain forward slashes or colons. Use with caution.")
        embedded_sentences = zip(sentences, self.embed_sentences(split_sentences, batch_size))
    else:
        embedded_sentences = ((str(i), x) for i, x in
                              enumerate(self.embed_sentences(split_sentences, batch_size)))

    sentence_to_index = {}
    logger.info("Processing sentences.")
    with h5py.File(output_file_path, 'w') as fout:
        for key, embeddings in Tqdm.tqdm(embedded_sentences):
            # Duplicate sentences would silently collide as HDF5 keys.
            if use_sentence_keys and key in fout.keys():
                raise ConfigurationError(f"Key already exists in {output_file_path}. "
                                         f"To encode duplicate sentences, do not pass "
                                         f"the --use-sentence-keys flag.")

            if not forget_sentences and not use_sentence_keys:
                sentence = sentences[int(key)]
                sentence_to_index[sentence] = key

            # Select which layer(s) of the embedding to persist.
            if output_format == "all":
                output = embeddings
            elif output_format == "top":
                output = embeddings[-1]
            elif output_format == "average":
                output = numpy.average(embeddings, axis=0)

            fout.create_dataset(str(key), output.shape, dtype='float32', data=output)
        if not forget_sentences and not use_sentence_keys:
            # Persist the sentence -> key mapping as a JSON string dataset.
            sentence_index_dataset = fout.create_dataset("sentence_to_index", (1,), dtype=h5py.special_dtype(vlen=str))
            sentence_index_dataset[0] = json.dumps(sentence_to_index)

    input_file.close()
|
def get_sequence_time(cycles, unit_converter=None, eres=None):
    """Calculates the time the move sequence will take to complete.

    Calculates the amount of time it will take to complete the given
    move sequence. Types of motion supported are moves from one position
    to another (the motion will always come to a stop before doing the
    next motion), waiting a given interval of time till starting the
    next move, and looping over a sequence of moves.

    Parameters
    ----------
    cycles : list of dicts
        The ``list`` of cycles of motion to do one after another. See
        ``compile_sequence`` for format.
    unit_converter : UnitConverter, optional
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
        the units in `cycles` to motor units. ``None`` indicates that
        they are already in motor units.
    eres : int
        Encoder resolution. Only relevant if `unit_converter` is
        ``None``.

    Returns
    -------
    time : float
        Time the move sequence will take in seconds.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter
    move_time
    """
    # With a unit converter the times are equivalent to motor units with an
    # encoder resolution of one.
    if unit_converter is not None:
        eres = 1

    # Accumulate wait times and per-move times, each multiplied by the
    # cycle's iteration count.
    total = 0.0
    for cycle in cycles:
        iterations = cycle['iterations']
        total += iterations * sum(cycle['wait_times'])
        for move in cycle['moves']:
            total += iterations * move_time(move, eres=eres)
    return total
|
def make_tophat_ei(lower, upper):
    """Return a ufunc-like tophat function on the defined range, left-exclusive
    and right-inclusive. Returns 1 if lower < x <= upper, 0 otherwise.

    :param lower: finite lower bound (exclusive)
    :param upper: finite upper bound (inclusive)
    :raises ValueError: if either bound is not a finite number
    """
    if not np.isfinite(lower):
        raise ValueError('"lower" argument must be finite number; got %r' % lower)
    if not np.isfinite(upper):
        raise ValueError('"upper" argument must be finite number; got %r' % upper)

    def range_tophat_ei(x):
        x = np.asarray(x)
        x1 = np.atleast_1d(x)
        r = ((lower < x1) & (x1 <= upper)).astype(x.dtype)
        if x.ndim == 0:
            # FIX: np.asscalar() was removed in NumPy 1.23; ndarray.item()
            # is the supported replacement for extracting a Python scalar.
            return r.item()
        return r

    range_tophat_ei.__doc__ = ('Ranged tophat function, left-exclusive and '
                               'right-inclusive. Returns 1 if %g < x <= %g, '
                               '0 otherwise.') % (lower, upper)
    return range_tophat_ei
|
def compute_weight(self, r, ytr=None, mask=None):
    """Returns the weight (w) using OLS of r * w = gp._ytr.

    ``ytr`` and ``mask`` default to the instance's stored values when not
    supplied.
    """
    if ytr is None:
        ytr = self._ytr
    if mask is None:
        mask = self._mask
    # Delegate to the module-level compute_weight helper.
    return compute_weight(r, ytr, mask)
|
def combine(ctx, src, dst):
    """Combine several smother reports into one and write it to *dst*."""
    cov = coverage.Coverage(config_file=ctx.obj['rcfile'])
    merged = Smother(cov)
    # Fold every input report into the accumulator.
    for report_file in src:
        merged |= Smother.load(report_file)
    merged.write(dst)
|
def get_tile_metadata_name(self):
    """:return: name of tile metadata file
    :rtype: str
    """
    # Old-style SAFE products derive the name from the tile id; newer
    # products use a fixed name.
    base = (_edit_name(self.tile_id, 'MTD', delete_end=True)
            if self.safe_type == EsaSafeType.OLD_TYPE
            else 'MTD_TL')
    return '{}.xml'.format(base)
|
def from_value(self, value):
    """Infer a TDS type from a Python value.

    :param value: value from which to infer the TDS type
    :return: an instance of a subclass of :class:`BaseType`
    """
    # None carries no type information; fall back to a 1-char NVARCHAR.
    if value is not None:
        return self._from_class_value(value, type(value))
    return NVarCharType(size=1)
|
def print_all_metadata(fname):
    """high level that prints all as long list

    Prints file-system metadata (name, path, size), basic PIL image
    properties, pixel statistics and the GPS coordinates extracted from
    the image's EXIF data. Output goes to stdout only; nothing is
    returned.

    :param fname: path to an image file readable by PIL
    """
    print("Filename :", fname)
    print("Basename :", os.path.basename(fname))
    print("Path :", os.path.dirname(fname))
    print("Size :", os.path.getsize(fname))
    img = Image.open(fname)
    # get the image's width and height in pixels
    width, height = img.size
    # get the largest dimension
    # max_dim = max(img.size)
    print("Width :", width)
    print("Height :", height)
    print("Format :", img.format)
    print("palette :", img.palette)
    print_stats(img)
    # print_exif_data(img)
    exif_data = get_exif_data(img)
    # (lat, lon) are None when the EXIF block has no GPS tags — presumably
    # get_lat_lon handles that; TODO confirm against its implementation.
    (lat, lon) = get_lat_lon(exif_data)
    print("GPS Lat :", lat)
    print("GPS Long :", lon)
|
def get_asset_content_form_for_create(self, asset_id=None, asset_content_record_types=None):
    """Gets an asset content form for creating new assets.

    arg:    asset_id (osid.id.Id): the ``Id`` of an ``Asset``
    arg:    asset_content_record_types (osid.type.Type[]): array of
            asset content record types
    return: (osid.repository.AssetContentForm) - the asset content form
    raise:  NotFound - ``asset_id`` is not found
    raise:  NullArgument - ``asset_id`` or
            ``asset_content_record_types`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - unable to get form for requested record types
    *compliance: mandatory -- This method must be implemented.*
    """
    # Without the AWS record type the provider form is returned untouched.
    if AWS_ASSET_CONTENT_RECORD_TYPE not in asset_content_record_types:
        return self._provider_session.get_asset_content_form_for_create(
            asset_id, asset_content_record_types)
    # Strip the AWS type before delegating, then wrap the provider form.
    asset_content_record_types.remove(AWS_ASSET_CONTENT_RECORD_TYPE)
    provider_form = self._provider_session.get_asset_content_form_for_create(
        asset_id, asset_content_record_types)
    return AssetContentForm(provider_form, self._config_map, self.get_repository_id())
|
def arquire_attributes(self, attributes, active=True):
    """Claim a list of attributes for the current client.

    Can also disable attributes. Returns the update response object.
    NOTE(review): *active* is accepted but never used here — confirm
    whether the acquire endpoint is meant to receive it.
    """
    response = self._post_object(self.update_api.attributes.acquire, attributes)
    return ExistAttributeResponse(response)
|
def relations(cls):
    """Return a `list` of relationship names for the given model."""
    props = cls.__mapper__.iterate_properties
    return [prop.key for prop in props if isinstance(prop, RelationshipProperty)]
|
def get_thumbnail_source(self, obj):
    """Obtains the source image field for the thumbnail.

    :param obj: An object with a thumbnail_field defined.
    :return: Image field for thumbnail or None if not found.
    """
    field_name = getattr(self, 'thumbnail_field', None)
    if field_name:
        return resolve(obj, field_name)
    # Fall back to get_list_image, from ListableMixin.
    if hasattr(obj, 'get_list_image'):
        return resolve(obj, "get_list_image")
    logger.warning('ThumbnailAdminMixin.thumbnail_field unspecified')
    return None
|
def _close(self, args):
    """Handle a connection Close request from the peer.

    Reads the reply code/text and the failing class/method ids from the
    frame arguments (in wire order), acknowledges with Close-OK, then
    raises :class:`AMQPConnectionException` so callers see the failure.

    PARAMETERS:
        reply_code: short -- the AMQ reply code (AMQ RFC 011)
        reply_text: shortstr -- localised reply text for logging
        class_id: short -- class of the method that provoked the close
        method_id: short -- ID of the method that provoked the close
    """
    # NOTE: the four reads below must stay in wire order.
    code = args.read_short()
    text = args.read_shortstr()
    failing_class = args.read_short()
    failing_method = args.read_short()
    self._x_close_ok()
    raise AMQPConnectionException(code, text, (failing_class, failing_method))
|
def _massage_metakeys ( dct , prfx ) :
"""Returns a copy of the supplied dictionary , prefixing any keys that do
not begin with the specified prefix accordingly ."""
|
lowprefix = prfx . lower ( )
ret = { }
for k , v in list ( dct . items ( ) ) :
if not k . lower ( ) . startswith ( lowprefix ) :
k = "%s%s" % ( prfx , k )
ret [ k ] = v
return ret
|
def create_napp(cls, meta_package=False):
    """Bootstrap a basic NApp structure for you to develop your NApp.

    This will create, on the current folder, a clean structure of a NApp,
    filling some contents on this structure.

    :param meta_package: when True, skip ``main.py``/``settings.py`` and
        the UI structure, producing a meta package only.
    """
    templates_path = SKEL_PATH / 'napp-structure/username/napp'
    ui_templates_path = os.path.join(templates_path, 'ui')
    username = None
    napp_name = None
    print('--------------------------------------------------------------')
    print('Welcome to the bootstrap process of your NApp.')
    print('--------------------------------------------------------------')
    print('In order to answer both the username and the napp name,')
    print('You must follow this naming rules:')
    print(' - name starts with a letter')
    print(' - name contains only letters, numbers or underscores')
    print(' - at least three characters')
    print('--------------------------------------------------------------')
    print('')
    # Keep prompting until each name passes validation.
    while not cls.valid_name(username):
        username = input('Please, insert your NApps Server username: ')
    while not cls.valid_name(napp_name):
        napp_name = input('Please, insert your NApp name: ')
    # BUG FIX: the two implicitly-concatenated literals lacked a separating
    # space, producing "for yourNApp [optional]".
    description = input('Please, insert a brief description for your '
                        'NApp [optional]: ')
    if not description:
        # pylint: disable=fixme
        description = '# TODO: <<<< Insert your NApp description here >>>>'
        # pylint: enable=fixme
    context = {'username': username, 'napp': napp_name,
               'description': description}
    #: Creating the directory structure (username/napp_name)
    os.makedirs(username, exist_ok=True)
    #: Creating ``__init__.py`` files
    with open(os.path.join(username, '__init__.py'), 'w') as init_file:
        # BUG FIX: the literal previously ended with four double quotes
        # (""""), which made every generated __init__.py a SyntaxError.
        init_file.write(f'"""Napps for the user {username}."""')
    os.makedirs(os.path.join(username, napp_name))
    #: Creating the other files based on the templates
    templates = os.listdir(templates_path)
    templates.remove('ui')
    templates.remove('openapi.yml.template')
    if meta_package:
        templates.remove('main.py.template')
        templates.remove('settings.py.template')
    for tmp in templates:
        fname = os.path.join(username, napp_name,
                             tmp.rsplit('.template')[0])
        with open(fname, 'w') as file:
            content = cls.render_template(templates_path, tmp, context)
            file.write(content)
    if not meta_package:
        NAppsManager.create_ui_structure(username, napp_name,
                                         ui_templates_path, context)
    print()
    # BUG FIX: the continuation literals were plain strings, so the
    # placeholders {username}/{napp_name} were printed literally.
    print(f'Congratulations! Your NApp has been bootstrapped!\nNow you '
          f'can go to the directory {username}/{napp_name} and begin to '
          f'code your NApp.')
    print('Have fun!')
|
def send(self, uid, event, payload=None):
    """Send an event to a connected controller. Use pymlgame event type and correct payload.

    To send a message to the controller use pymlgame.E_MESSAGE event and a string as payload.

    :param uid: Unique id of the controller
    :param event: Event type
    :param payload: Payload of the event
    :type uid: str
    :type event: Event
    :type payload: str
    :return: Number of bytes sent or False
    :rtype: int
    """
    # Unknown controller or unsupported event type: nothing to send.
    if uid not in self.controllers:
        return False
    if event == E_MESSAGE:
        message = '/message/{}'.format(payload)
    elif event == E_RUMBLE:
        message = '/rumble/{}'.format(payload)
    else:
        return False
    addr = self.controllers[uid][0]
    port = self.controllers[uid][1]
    # BUG FIX: the socket was previously created unconditionally and never
    # closed, leaking a descriptor per call; create it only when needed and
    # always close it.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        return sock.sendto(message.encode('utf-8'), (addr, port))
    finally:
        sock.close()
|
def subtract_lists(list1, list2):
    """Compute the element-wise difference of two lists.

    Pairs up the lists with ``zip`` (stopping at the shorter one, exactly
    like the two-iterable ``map`` it replaces) and subtracts each pair.

    Args:
        list1: The first list of numbers.
        list2: The second list of numbers.

    Returns:
        A list of ``list1[i] - list2[i]`` for each paired index.

    Examples:
        >>> subtract_lists([1, 2, 3], [4, 5, 6])
        [-3, -3, -3]
        >>> subtract_lists([1, 2], [3, 4])
        [-2, -2]
        >>> subtract_lists([90, 120], [50, 70])
        [40, 50]
    """
    return [left - right for left, right in zip(list1, list2)]
|
def list_portgroups(kwargs=None, call=None):
    '''List all the distributed virtual portgroups for this VMware environment

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_portgroups my-vmware-config
    '''
    # Guard: this is a salt-cloud "function", not an action.
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_portgroups function must be called with '
            '-f or --function.'
        )
    portgroups = salt.utils.vmware.list_portgroups(_get_si())
    return {'Portgroups': portgroups}
|
def momentum(data, period):
    """Momentum.

    Formula:
    DATA[i] - DATA[i-period]
    """
    catch_errors.check_for_period_error(data, period)
    # First computable index is period-1; earlier slots get filler values.
    diffs = [data[idx] - data[idx + 1 - period]
             for idx in range(period - 1, len(data))]
    return fill_for_noncomputable_vals(data, diffs)
|
def FilterItems(self, filterFn):
    """Filter items in a ReservoirBucket, using a filtering function.

    Filtering items from the reservoir bucket must update the internal
    state variable self._num_items_seen, which is used for determining the
    rate of replacement in reservoir sampling. Ideally self._num_items_seen
    would count exactly the items ever seen that satisfy filterFn, but only
    the sampled subset (self.items) is available, so it is estimated by
    scaling with the fraction of items kept.

    Args:
      filterFn: A function that returns True for items to be kept.

    Returns:
      The number of items removed from the bucket.
    """
    with self._mutex:
        before = len(self.items)
        self.items = [item for item in self.items if filterFn(item)]
        removed = before - len(self.items)
        # Scale the seen-count by the kept fraction (0 when the bucket
        # was already empty).
        kept_ratio = len(self.items) / float(before) if before > 0 else 0
        self._num_items_seen = int(round(self._num_items_seen * kept_ratio))
        return removed
|
def search_schema_path(self, index, **options):
    """Builds a Yokozuna search Solr schema URL.

    :param index: a name of a yz solr schema
    :type index: string
    :param options: optional list of additional arguments
    :type options: dict
    :rtype: URL string
    """
    if not self.yz_wm_schema:
        raise RiakError("Yokozuna search is unsupported by this Riak node")
    encoded_index = quote_plus(index)
    return mkpath(self.yz_wm_schema, "schema", encoded_index, **options)
|
def daemons_check(self):
    """Manage the list of Alignak launched daemons

    Check if the daemon process is running

    :return: True if all daemons are running, else False
    """
    # First look if it's not too early to ping
    start = time.time()
    if self.daemons_last_check and self.daemons_last_check + self.conf.daemons_check_period > start:
        logger.debug("Too early to check daemons, check period is %.2f seconds", self.conf.daemons_check_period)
        return True
    logger.debug("Alignak launched daemons check")
    result = True
    # Start from our own process; each daemon's process is appended below.
    procs = [psutil.Process()]
    for daemon in list(self.my_daemons.values()):
        # Get only the daemon (not useful for its children processes...)
        # procs = daemon['process'].children()
        # NOTE(review): `procs` accumulates across daemon iterations, so
        # earlier processes are re-inspected for each subsequent daemon —
        # confirm this is intended and not a misplaced initialization.
        procs.append(daemon['process'])
        for proc in procs:
            try:
                logger.debug("Process %s is %s", proc.name(), proc.status())
                # logger.debug("Process listening:", proc.name(), proc.status())
                # for connection in proc.connections():
                #     l_addr, l_port = connection.laddr if connection.laddr else ('', 0)
                #     r_addr, r_port = connection.raddr if connection.raddr else ('', 0)
                #     logger.debug("- %s:%s <-> %s:%s, %s", l_addr, l_port, r_addr, r_port,
                #                  connection.status)
                # Reset the daemon connection if it got broked...
                if not daemon['satellite'].con:
                    if self.daemon_connection_init(daemon['satellite']):
                        # Set my satellite as alive :)
                        daemon['satellite'].set_alive()
            except psutil.NoSuchProcess:
                # Process disappeared between listing and inspection: ignore.
                pass
            except psutil.AccessDenied:
                # Probably stopping...
                if not self.will_stop and proc == daemon['process']:
                    # The daemon's own process is inaccessible and we are not
                    # shutting down: treat it as dead and try a restart.
                    logger.warning("Daemon %s/%s is not running!", daemon['satellite'].type, daemon['satellite'].name)
                    logger.debug("Access denied - Process %s is %s", proc.name(), proc.status())
                    if not self.start_daemon(daemon['satellite']):
                        # Set my satellite as dead :(
                        daemon['satellite'].set_dead()
                        result = False
                    else:
                        logger.info("I restarted %s/%s", daemon['satellite'].type, daemon['satellite'].name)
                        logger.info("Pausing %.2f seconds...", 0.5)
                        time.sleep(0.5)
                else:
                    logger.info("Child process %s is %s", proc.name(), proc.status())
    # Set the last check as now
    self.daemons_last_check = start
    logger.debug("Checking daemons duration: %.2f seconds", time.time() - start)
    return result
|
def template_cycles(self) -> int:
    """The number of cycles dedicated to template.

    Strips all non-digit characters from each template token and sums
    the resulting integers.
    """
    total = 0
    for token in self.template_tokens:
        total += int(re.sub(r'\D', '', token))
    return total
|
def _build_optml_volumes(self, host, subdirs):
    """Generate the :class:`~sagemaker.local_session.Volume` list the container needs.

    Maps each requested subdirectory under ``<container_root>/<host>`` to
    the corresponding ``/opt/ml/<subdir>`` path inside the container.

    Args:
        host (str): container for which the volumes will be generated.
        subdirs (list): subdirectories to map, e.g. ['input', 'output', 'model'].

    Returns:
        (list) List of :class:`~sagemaker.local_session.Volume`
    """
    return [
        _Volume(os.path.join(self.container_root, host, subdir),
                '/opt/ml/{}'.format(subdir))
        for subdir in subdirs
    ]
|
def close(self):
    """Commit and close the connection.

    If the connection was configured for delayed opening and never
    actually opened, only the internal state is (re)initialized.
    A missing/invalid connection is treated as already closed.

    .. seealso:: :py:meth:`sqlite3.Connection.close`
    """
    # Delayed-connection mode with no live connection: nothing to commit
    # or close. NOTE(review): __initialize_connection here appears to reset
    # the delayed-connection bookkeeping rather than open a connection —
    # confirm against its implementation.
    if self.__delayed_connection_path and self.__connection is None:
        self.__initialize_connection()
        return
    try:
        self.check_connection()
    except (SystemError, NullDatabaseConnectionError):
        # No usable connection: treat as already closed (best-effort close).
        return
    logger.debug("close connection to a SQLite database: path='{}'".format(self.database_path))
    self.commit()
    self.connection.close()
    self.__initialize_connection()
|
def _send_features(self, features):
    """Send a query to the backend api with a list of observed features in this log file.

    :param features: Features found in the log file
    :return: Parsed JSON response from the ThreshingFloor API
    :raises TFAPIUnavailable: if the API is unreachable or returns non-200
    """
    url = self.base_uri + self.api_endpoint
    try:
        response = requests.post(url, json=features,
                                 headers={'x-api-key': self.api_key})
    except requests.exceptions.ConnectionError:
        raise TFAPIUnavailable("The ThreshingFloor API appears to be unavailable.")
    if response.status_code != 200:
        sys.stderr.write("%s\n" % response.text)
        raise TFAPIUnavailable("Request failed and returned a status of: {STATUS_CODE}".format(STATUS_CODE=response.status_code))
    return json.loads(response.text)
|
def rgb(red, green, blue, content):
    """Colors content with a 6x6x6 ANSI color-cube entry.

    :param red: [0-5]
    :type red: int
    :param green: [0-5]
    :type green: int
    :param blue: [0-5]
    :type blue: int
    :param content: Whatever you want to say...
    :type content: unicode
    :return: ansi string
    :rtype: unicode
    """
    # 256-color cube index: 16 + 36r + 6g + b.
    index = 16 + (36 * red) + (6 * green) + blue
    prefix = encode('38;5;' + str(index))
    return prefix + content + encode(DEFAULT)
|
def load_random(self, num_bytes, offset=0):
    """Ask YubiHSM to generate random bytes into its internal buffer.

    The data is stored internally in the YubiHSM in temporary memory -
    this operation would typically be followed by one or more
    L{generate_aead} commands to actually retrieve the generated secret
    (in encrypted form).

    @param num_bytes: Number of bytes to generate
    @type num_bytes: integer
    @param offset: Offset into the internal buffer to write at
    @returns: Number of bytes in YubiHSM internal buffer after load
    @rtype: integer

    @see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Random_Load}
    """
    command = pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Random_Load(self.stick, num_bytes, offset)
    return command.execute()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.