signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _g(self, h, xp, s):
    """Density function for blow and hop moves.

    Looks like the negative log-density of an isotropic Gaussian with
    mean ``xp`` and scale ``s`` over ``sum(self.phi)`` dimensions —
    TODO confirm the sign convention against the sampler that calls it.
    """
    dim = sum(self.phi)
    residual = sum((h - xp) ** 2)
    normalizer = (dim / 2.0) * log(2 * pi) + dim * log(s)
    return normalizer + 0.5 * residual / (s ** 2)
def siblings(self):
    """Return all siblings of this element as a list.

    A sibling is any child of ``self.parent`` that is not this element
    itself (compared by identity, not equality).
    """
    return [child for child in self.parent.childs if id(child) != id(self)]
def send_messages(self, access_token, messages, timeout, current):
    """Send messages to server, along with user authentication.

    Posts the backup/submission payload, choosing the revision endpoint
    when a revision was requested and the backup endpoint otherwise.
    Raises ``requests.HTTPError`` on a non-2xx response.
    """
    wants_submit = current and self.args.submit and not self.args.revise
    wants_revision = current and self.args.revise
    payload = {
        'assignment': self.assignment.endpoint,
        'messages': messages,
        'submit': wants_submit,
    }
    if wants_revision:
        address = self.REVISION_ENDPOINT.format(server=self.assignment.server_url)
    else:
        address = self.BACKUP_ENDPOINT.format(server=self.assignment.server_url)
    query_params = {
        'client_name': 'ok-client',
        'client_version': client.__version__,
    }
    headers = {'Authorization': 'Bearer {}'.format(access_token)}
    log.info('Sending messages to %s', address)
    response = requests.post(address, headers=headers, params=query_params,
                             json=payload, timeout=timeout)
    response.raise_for_status()
    return response.json()
def _keys_via_value_nonrecur ( d , v ) :
'''# non - recursive
d = { 1 : ' a ' , 2 : ' b ' , 3 : ' a ' }
_ keys _ via _ value _ nonrecur ( d , ' a ' )''' | rslt = [ ]
for key in d :
if ( d [ key ] == v ) :
rslt . append ( key )
return ( rslt ) |
def get_build_container_tag(self):
    """Return the build container tag.

    The tag is ``<prefix>-<branch>-<version>`` when a prefix is set and
    ``<branch>-<version>`` otherwise.
    """
    if not self.__prefix:
        return "{0}-{1}".format(self.__branch, self.__version)
    return "{0}-{1}-{2}".format(self.__prefix, self.__branch, self.__version)
def verify(x, t, y, pi, errorOnFail=True):
    """Verifies a zero-knowledge proof where p \\in G1.

    :param x: public G1 element.
    :param t: value hashed into G2 to derive the pairing base.
    :param y: public Gt element.
    :param pi: the proof, a triple ``(p, c, u)`` of commitment, challenge
        and response.
    :param errorOnFail: Raise an exception if the proof does not hold.
    :return: True when the recomputed challenge matches ``c``; False when
        it does not and ``errorOnFail`` is falsy.
    :raises Exception: when verification fails and ``errorOnFail`` is set.
    """
    # Unpack the proof
    p, c, u = pi
    # Verify types
    assertType(x, G1Element)
    assertType(y, GtElement)
    assertType(p, G1Element)
    assertScalarType(c)
    assertScalarType(u)
    # TODO: beta can be pre-computed while waiting for a server response.
    Q = generatorG1()
    # Pairing base: beta = e(x, H2(t))
    beta = pair(x, hashG2(t))
    # Recompute c': rebuild both commitments from the response u and
    # challenge c, then hash the full transcript.
    t1 = Q * u + p * c
    t2 = beta ** u * y ** c
    t1.normalize()
    cPrime = hashZ(Q, p, beta, y, t1, t2)
    # Check computed @c' against server's value @c
    if cPrime == c:
        return True
    if errorOnFail:
        raise Exception("zero-knowledge proof failed verification.")
    else:
        return False
def multicore(function, cores, multiargs, **singleargs):
    """wrapper for multicore process execution

    Parameters
    ----------
    function
        individual function to be applied to each process item
    cores: int
        the number of subprocesses started / CPUs used;
        this value is reduced in case the number of subprocesses is smaller
    multiargs: dict
        a dictionary containing sub-function argument names as keys and lists
        of arguments to be distributed among the processes as values
    singleargs
        all remaining arguments which are invariant among the subprocesses

    Returns
    -------
    None or list
        the return of the function for all subprocesses

    Notes
    -----
    - all `multiargs` value lists must be of same length, i.e. all argument
      keys must be explicitly defined for each subprocess
    - all function arguments passed via `singleargs` must be provided with the
      full argument name and its value (i.e. argname=argval); default function
      args are not accepted
    - if the processes return anything else than None, this function will
      return a list of results
    - if all processes return None, this function will be of type void

    Examples
    --------
    >>> def add(x, y, z):
    >>>     return x + y + z
    >>> multicore(add, cores=2, multiargs={'x': [1, 2]}, y=5, z=9)
    [15, 16]
    >>> multicore(add, cores=2, multiargs={'x': [1, 2], 'y': [5, 6]}, z=9)
    [15, 17]

    See Also
    --------
    :mod:`pathos.multiprocessing`
    """
    # make tracebacks picklable so exceptions can cross process boundaries
    tblib.pickling_support.install()
    # compare the function arguments with the multi and single arguments and
    # raise errors if mismatches occur
    if sys.version_info >= (3, 0):
        check = inspect.getfullargspec(function)
        varkw = check.varkw
    else:
        check = inspect.getargspec(function)
        varkw = check.keywords
    if not check.varargs and not varkw:
        multiargs_check = [x for x in multiargs if x not in check.args]
        singleargs_check = [x for x in singleargs if x not in check.args]
        if len(multiargs_check) > 0:
            raise AttributeError('incompatible multi arguments: {0}'.format(', '.join(multiargs_check)))
        if len(singleargs_check) > 0:
            raise AttributeError('incompatible single arguments: {0}'.format(', '.join(singleargs_check)))
    # compare the list lengths of the multi arguments and raise errors if they
    # are of different length
    arglengths = list(set([len(multiargs[x]) for x in multiargs]))
    if len(arglengths) > 1:
        raise AttributeError('multi argument lists of different length')
    # prevent starting more threads than necessary
    cores = cores if arglengths[0] >= cores else arglengths[0]
    # create a list of dictionaries each containing the arguments for individual
    # function calls to be passed to the multicore processes
    processlist = [dictmerge(dict([(arg, multiargs[arg][i]) for arg in multiargs]), singleargs)
                   for i in range(len(multiargs[list(multiargs.keys())[0]]))]
    if platform.system() == 'Windows':
        # in Windows parallel processing needs to strictly be in a
        # "if __name__ == '__main__':" wrapper; it was thus necessary to
        # outsource this to a different script and try to serialize all input
        # for sharing objects
        # https://stackoverflow.com/questions/38236211/why-multiprocessing-process-behave-differently-on-windows-and-linux-for-global-o
        # a helper script to perform the parallel processing
        script = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'multicore_helper.py')
        # a temporary file to write the serialized function variables
        tmpfile = os.path.join(tempfile.gettempdir(), 'spatialist_dump')
        # check if everything can be serialized
        if not dill.pickles([function, cores, processlist]):
            raise RuntimeError('cannot fully serialize function arguments;\n'
                               ' see https://github.com/uqfoundation/dill for supported types')
        # write the serialized variables
        with open(tmpfile, 'wb') as tmp:
            dill.dump([function, cores, processlist], tmp, byref=False)
        # run the helper script
        proc = sp.Popen([sys.executable, script], stdin=sp.PIPE, stderr=sp.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError(err.decode())
        # retrieve the serialized output of the processing which was written to
        # the temporary file by the helper script
        with open(tmpfile, 'rb') as tmp:
            result = dill.load(tmp)
        return result
    else:
        results = None

        def wrapper(**kwargs):
            # run the target and smuggle any exception back to the parent
            try:
                return function(**kwargs)
            except Exception as e:
                return ExceptionWrapper(e)

        # block printing of the executed function
        with HiddenPrints():
            # start pool of processes and do the work
            try:
                pool = mp.Pool(processes=cores)
            except NameError:
                raise ImportError("package 'pathos' could not be imported")
            results = pool.imap(lambda x: wrapper(**x), processlist)
            pool.close()
            pool.join()
        i = 0
        out = []
        for item in results:
            if isinstance(item, ExceptionWrapper):
                # annotate the smuggled exception with the failing call's args
                item.ee = type(item.ee)(str(item.ee) + "\n(called function '{}' with args {})"
                                        .format(function.__name__, processlist[i]))
                raise (item.re_raise())
            out.append(item)
            i += 1
        # evaluate the return of the processing function;
        # if any value is not None then the whole list of results is returned
        # (NOTE: the local name 'eval' shadows the builtin within this scope)
        eval = [x for x in out if x is not None]
        if len(eval) == 0:
            return None
        else:
            return out
def hmac_md5(s, salt):
    """Return the salted HMAC-MD5 digest of a string.

    :param s: (string) the string to hash
    :param salt: (string) the HMAC key
    :return: (string) 32-character lowercase hexadecimal MD5 digest
    """
    mac = hmac.new(salt.encode('utf-8'), s.encode('utf-8'), digestmod=hashlib.md5)
    return mac.hexdigest()
def parse(argv):
    """Parse cli args.

    Parses ``argv`` against the module docopt usage string, then dispatches
    the command; parse/dispatch errors are printed and the process exits.
    """
    args = docopt(__doc__, argv=argv)
    try:
        # NOTE(review): dispatches on sys.argv[2] rather than on the parsed
        # ``args`` / the ``argv`` parameter -- this looks inconsistent when
        # ``argv`` differs from the real process arguments; confirm intended.
        call(sys.argv[2], args)
    except KytosException as exception:
        print("Error parsing args: {}".format(exception))
        exit()
def items_lower(self):
    '''Returns a generator iterating over keys and values, with the keys all
    being lowercase.'''
    # Presumably self._data maps a lowercased key to an
    # (original_key, value) pair, so ``key`` is already lowercase and
    # ``val[1]`` is the real value -- TODO confirm against how _data is
    # populated.
    return ((key, val[1]) for key, val in six.iteritems(self._data))
def start_delta_string(self):
    """A convenient MM:SS string of how long after the run started we started.

    :API: public
    """
    elapsed = int(self.start_time) - int(self.root().start_time)
    minutes = int(elapsed / 60)
    seconds = elapsed % 60
    return '{:02}:{:02}'.format(minutes, seconds)
def cancel_spot_requests(self, requests):
    """Cancel one or more EC2 spot instance requests.

    :param requests: List of EC2 spot instance request IDs.
    :type requests: list
    """
    # Look the request objects up (with retry), then cancel each one.
    spot_requests = self.retry_on_ec2_error(
        self.ec2.get_all_spot_instance_requests, request_ids=requests)
    for spot_request in spot_requests:
        spot_request.cancel()
def Copy_to_Clipboard(self, event=None):
    """Copy a bitmap of the canvas to the system clipboard.

    :param event: optional event object; unused, present so the method can
        serve directly as a wx event handler.
    """
    # Wrap the current canvas bitmap in a data object the clipboard accepts.
    bmp_obj = wx.BitmapDataObject()
    bmp_obj.SetBitmap(self.bitmap)
    # Only proceed if the clipboard is not already held by someone else;
    # close and flush so the data survives after this app exits.
    if not wx.TheClipboard.IsOpened():
        open_success = wx.TheClipboard.Open()
        if open_success:
            wx.TheClipboard.SetData(bmp_obj)
            wx.TheClipboard.Close()
            wx.TheClipboard.Flush()
def setValues(self, values):
    """Set the tuples in this set. Valid only for non-indexed sets.

    Args:
        values: A list of tuples or a :class:`~amplpy.DataFrame`.

    In the case of a :class:`~amplpy.DataFrame`, the number of indexing
    columns must be equal to the arity of the set. In the case of a list of
    tuples, the arity of each tuple must be equal to the arity of the set.

    For example, considering the following AMPL entities and corresponding
    Python objects:

    .. code-block:: ampl

        set A := 1..2;
        param p {i in A} := i + 10;
        set AA;

    The following is valid:

    .. code-block:: python

        A, AA = ampl.getSet('A'), ampl.getSet('AA')
        AA.setValues(A.getValues())  # AA has now the members {1, 2}
    """
    if isinstance(values, (list, set)):
        # NOTE(review): ``basestring`` implies a Python 2 compatibility shim
        # is in scope at module level -- confirm.
        if any(isinstance(value, basestring) for value in values):
            # If ANY member is a string, every member is coerced to str.
            values = list(map(str, values))
            self._impl.setValuesStr(values, len(values))
        elif all(isinstance(value, Real) for value in values):
            # All-numeric members go through the double-valued setter.
            values = list(map(float, values))
            self._impl.setValuesDbl(values, len(values))
        elif all(isinstance(value, tuple) for value in values):
            # Multi-arity members are handed over as a tuple array.
            self._impl.setValues(Utils.toTupleArray(values), len(values))
        else:
            raise TypeError
    else:
        if np is not None and isinstance(values, np.ndarray):
            # numpy arrays are converted via DataFrame and re-dispatched.
            self.setValues(DataFrame.fromNumpy(values).toList())
            return
        # Anything else (e.g. a DataFrame) is delegated to the base class.
        Entity.setValues(self, values)
def mouseMoveEvent(self, event):
    """Sets the value for the slider at the event position.

    :param event | <QMouseEvent>
    """
    x_position = event.pos().x()
    self.setValue(self.valueAt(x_position))
def get_variables_path(export_dir):
    """Returns the path for storing variables checkpoints.

    The result is ``<export_dir>/<VARIABLES_DIRECTORY>/<VARIABLES_FILENAME>``
    with every component coerced to bytes.
    """
    variables_dir = os.path.join(
        tf.compat.as_bytes(export_dir),
        tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_DIRECTORY))
    return os.path.join(
        variables_dir,
        tf.compat.as_bytes(tf_v1.saved_model.constants.VARIABLES_FILENAME))
def _parse_args(func, variables, annotations=None):
    """Return a list of arguments with the variable it reads.

    NOTE: Multiple arguments may read the same variable.
    """
    # Fall back to the function's own annotations when none are supplied;
    # the 'return' annotation is not an argument and is skipped.
    annotation_map = annotations or func.__annotations__
    pairs = []
    for arg_name, anno in annotation_map.items():
        if arg_name == 'return':
            continue
        var, read = _parse_arg(func, variables, arg_name, anno)
        pairs.append((Argument(name=arg_name, read=read), var))
    return pairs
def speed_of_gait(self, x, wavelet_type='db3', wavelet_level=6):
    """This method assesses the speed of gait following :cite:`g-MartinSB11`.

    It extracts the gait speed from the energies of the approximation
    coefficients of wavelet functions. Preferably you should use the
    magnitude of x, y and z (mag_acc_sum) here, as the time series.

    :param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
    :type x: pandas.Series
    :param wavelet_type: The type of wavelet to use. See
        https://pywavelets.readthedocs.io/en/latest/ref/wavelets.html for a full list ('db3' default).
    :type wavelet_type: str
    :param wavelet_level: The number of decomposition levels (6 default).
    :type wavelet_level: int
    :return: The speed of gait [measured in meters/second].
    :rtype: float
    """
    coeffs = wavedec(x.values, wavelet=wavelet_type, level=wavelet_level)
    # Mean energy of each coefficient band; indexing from the end of the
    # wavedec list (presumably the detail bands, finest first -- confirm
    # against pywt's [cA_n, cD_n, ..., cD1] ordering).
    energy = [sum(coeffs[wavelet_level - i] ** 2) / len(coeffs[wavelet_level - i]) for i in range(wavelet_level)]
    # NOTE(review): the weighting below is hard-coded for exactly six
    # levels; wavelet_level < 6 raises IndexError and other values are not
    # reflected in the weights -- confirm intended.
    WEd1 = energy[0] / (5 * np.sqrt(2))
    WEd2 = energy[1] / (4 * np.sqrt(2))
    WEd3 = energy[2] / (3 * np.sqrt(2))
    WEd4 = energy[3] / (2 * np.sqrt(2))
    WEd5 = energy[4] / np.sqrt(2)
    WEd6 = energy[5] / np.sqrt(2)
    gait_speed = 0.5 * np.sqrt(WEd1 + (WEd2 / 2) + (WEd3 / 3) + (WEd4 / 4) + (WEd5 / 5) + (WEd6 / 6))
    return gait_speed
def _close_and_clean(self, cleanup):
    """Closes the project, and cleanup the disk if cleanup is True.

    :param cleanup: Whether to delete the project directory
    """
    # NOTE(review): uses ``yield from`` and ``asyncio.async`` -- this
    # old-style coroutine requires Python < 3.7, where ``async`` was not
    # yet a reserved keyword.
    tasks = []
    # Schedule a close task for every node, then wait for all of them.
    for node in self._nodes:
        tasks.append(asyncio.async(node.manager.close_node(node.id)))
    if tasks:
        done, _ = yield from asyncio.wait(tasks)
        for future in done:
            try:
                future.result()
            except (Exception, GeneratorExit) as e:
                # A failed node close is logged but does not abort the
                # project shutdown.
                log.error("Could not close node {}".format(e), exc_info=1)
    if cleanup and os.path.exists(self.path):
        self._deleted = True
        try:
            # rmtree is blocking; run it in an executor to keep the loop free.
            yield from wait_run_in_executor(shutil.rmtree, self.path)
            log.info("Project {id} with path '{path}' deleted".format(path=self._path, id=self._id))
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(text="Could not delete the project directory: {}".format(e))
    else:
        log.info("Project {id} with path '{path}' closed".format(path=self._path, id=self._id))
    if self._used_tcp_ports:
        log.warning("Project {} has TCP ports still in use: {}".format(self.id, self._used_tcp_ports))
    if self._used_udp_ports:
        log.warning("Project {} has UDP ports still in use: {}".format(self.id, self._used_udp_ports))
    # clean the remaining ports that have not been cleaned by their respective node.
    port_manager = PortManager.instance()
    for port in self._used_tcp_ports.copy():
        port_manager.release_tcp_port(port, self)
    for port in self._used_udp_ports.copy():
        port_manager.release_udp_port(port, self)
def open(self, connection=None):
    """Open the client. The client can create a new Connection
    or an existing Connection can be passed in. This existing Connection
    may have an existing CBS authentication Session, which will be
    used for this client as well. Otherwise a new Session will be
    created.

    :param connection: An existing Connection that may be shared between
        multiple clients.
    :type connection: ~uamqp.connection.Connection
    """
    # pylint: disable=protected-access
    # A populated session means the client is already open.
    if self._session:
        return
    # already open.
    _logger.debug("Opening client connection.")
    if connection:
        _logger.debug("Using existing connection.")
        # Adopt the shared connection's auth; remember the connection is
        # externally owned so it is not torn down with this client.
        self._auth = connection.auth
        self._ext_connection = True
    self._connection = connection or self.connection_type(
        self._hostname,
        self._auth,
        container_id=self._name,
        max_frame_size=self._max_frame_size,
        channel_max=self._channel_max,
        idle_timeout=self._idle_timeout,
        properties=self._properties,
        remote_idle_timeout_empty_frame_send_ratio=self._remote_idle_timeout_empty_frame_send_ratio,
        error_policy=self._error_policy,
        debug=self._debug_trace,
        encoding=self._encoding)
    if not self._connection.cbs and isinstance(self._auth, authentication.CBSAuthMixin):
        # No CBS session yet and the auth supports it: create one and use
        # the auth's session for this client.
        self._connection.cbs = self._auth.create_authenticator(
            self._connection,
            debug=self._debug_trace,
            incoming_window=self._incoming_window,
            outgoing_window=self._outgoing_window,
            handle_max=self._handle_max,
            on_attach=self._on_attach)
        self._session = self._auth._session
    elif self._connection.cbs:
        # Reuse the CBS session already attached to the connection's auth.
        self._session = self._auth._session
    else:
        # Non-CBS auth: open a dedicated session on the connection.
        self._session = self.session_type(
            self._connection,
            incoming_window=self._incoming_window,
            outgoing_window=self._outgoing_window,
            handle_max=self._handle_max,
            on_attach=self._on_attach)
    if self._keep_alive_interval:
        self._keep_alive_thread = threading.Thread(target=self._keep_alive)
        self._keep_alive_thread.start()
def load(self, df, centerings):
    """Call ``load`` with ``centerings`` filtered to keys in ``self.filter_``."""
    allowed = self.filter_
    filtered_centerings = {name: centering
                          for name, centering in centerings.items()
                          if name in allowed}
    return super().load(df, filtered_centerings)
def items(self, prefix=None, delimiter=None):
    """Get an iterator for the items within this bucket.

    Args:
        prefix: an optional prefix to match items.
        delimiter: an optional string to simulate directory-like semantics.
            The returned items will be those whose names do not contain the
            delimiter after the prefix. For the remaining items, the names
            will be returned truncated after the delimiter with duplicates
            removed (i.e. as pseudo-directories).
    Returns:
        An iterable list of items within this bucket.
    """
    # Thin delegation: the Items helper performs the actual listing lazily.
    return _item.Items(self._name, prefix, delimiter, context=self._context)
def ncp_bcd(X, rank, random_state=None, init='rand', **options):
    """Fits nonnegative CP Decomposition using the Block Coordinate Descent
    (BCD) Method.

    Parameters
    ----------
    X : (I_1, ..., I_N) array_like
        A real array with nonnegative entries and ``X.ndim >= 3``.
    rank : integer
        The `rank` sets the number of components to be computed.
    random_state : integer, RandomState instance or None, optional (default ``None``)
        If integer, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by np.random.
    init : str, or KTensor, optional (default ``'rand'``).
        Specifies initial guess for KTensor factor matrices.
        If ``'randn'``, Gaussian random numbers are used to initialize.
        If ``'rand'``, uniform random numbers are used to initialize.
        If KTensor instance, a copy is made to initialize the optimization.
    options : dict, specifying fitting options.
        tol : float, optional (default ``tol=1E-5``)
            Stopping tolerance for reconstruction error.
        max_iter : integer, optional (default ``max_iter=500``)
            Maximum number of iterations to perform before exiting.
        min_iter : integer, optional (default ``min_iter=1``)
            Minimum number of iterations to perform before exiting.
        max_time : integer, optional (default ``max_time=np.inf``)
            Maximum computational time before exiting.
        verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
            Display progress.

    Returns
    -------
    result : FitResult instance
        Object which holds the fitted results. It provides the factor matrices
        in form of a KTensor, ``result.factors``.

    Notes
    -----
    This implementation is using the Block Coordinate Descent Method.

    References
    ----------
    Xu, Yangyang, and Wotao Yin. "A block coordinate descent method for
    regularized multiconvex optimization with applications to
    negative tensor factorization and completion."
    SIAM Journal on imaging sciences 6.3 (2013): 1758-1789.
    """
    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)
    # Store norm of X for computing objective function.
    N = X.ndim
    # Initialize problem.
    U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
    result = FitResult(U, 'NCP_BCD', **options)
    # Block coordinate descent
    Um = U.copy()  # Extrapolations of components
    extraw = 1  # Used for extrapolation weight update
    weights_U = np.ones(N)  # Extrapolation weights
    L = np.ones(N)  # Lipschitz constants
    obj_bcd = 0.5 * normX ** 2  # Initial objective value
    # Main optimization loop.
    while result.still_optimizing:
        obj_bcd_old = obj_bcd  # Old objective value
        U_old = U.copy()
        extraw_old = extraw
        for n in range(N):
            # Select all components, but U_n
            components = [U[j] for j in range(N) if j != n]
            # i) compute the N-1 gram matrices
            grams = sci.multiply.reduce([arr.T.dot(arr) for arr in components])
            # Update gradient Lipschitz constant.
            # NOTE(review): L0 aliases L (no copy), so the ratio L0[n]/L[n]
            # used for the extrapolation weights below is always 1 --
            # confirm whether a copy was intended.
            L0 = L  # Lipschitz constants
            L[n] = linalg.norm(grams, 2)
            # ii) Compute Khatri-Rao product
            kr = khatri_rao(components)
            p = unfold(X, n).dot(kr)
            # Compute Gradient.
            grad = Um[n].dot(grams) - p
            # Enforce nonnegativity (project onto nonnegative orthant).
            U[n] = sci.maximum(0.0, Um[n] - grad / L[n])
        # Compute objective function and update optimization result.
        # grams *= U[X.ndim - 1].T.dot(U[X.ndim - 1])
        # obj = np.sqrt(sci.sum(grams) - 2 * sci.sum(U[X.ndim - 1] * p) + normX ** 2) / normX
        obj = linalg.norm(X - U.full()) / normX
        result.update(obj)
        # Correction and extrapolation.
        grams *= U[N - 1].T.dot(U[N - 1])
        obj_bcd = 0.5 * (sci.sum(grams) - 2 * sci.sum(U[N - 1] * p) + normX ** 2)
        extraw = (1 + sci.sqrt(1 + 4 * extraw_old ** 2)) / 2.0
        if obj_bcd >= obj_bcd_old:
            # restore previous A to make the objective nonincreasing
            Um = sci.copy(U_old)
        else:
            # apply extrapolation
            w = (extraw_old - 1.0) / extraw  # Extrapolation weight
            for n in range(N):
                # choose smaller weights for convergence
                weights_U[n] = min(w, 1.0 * sci.sqrt(L0[n] / L[n]))
                Um[n] = U[n] + weights_U[n] * (U[n] - U_old[n])  # extrapolation
    # Finalize and return the optimization result.
    return result.finalize()
def _parse_deaths(self, rows):
    """Parses the character's recent deaths.

    Parameters
    ----------
    rows: :class:`list` of :class:`bs4.Tag`
        A list of all rows contained in the table.
    """
    for row in rows:
        cols = row.find_all('td')
        # First column: the death timestamp (non-breaking spaces normalized).
        death_time_str = cols[0].text.replace("\xa0", " ").strip()
        death_time = parse_tibia_datetime(death_time_str)
        # Second column: the death description, kept as raw HTML so the
        # regexes can see the character links.
        death = str(cols[1]).replace("\xa0", " ")
        death_info = death_regexp.search(death)
        if death_info:
            level = int(death_info.group("level"))
            killers_desc = death_info.group("killers")
        else:
            # Row doesn't match the expected death format; skip it.
            continue
        death = Death(self.name, level, time=death_time)
        assists_name_list = []
        # Check if the killers list contains assists
        assist_match = death_assisted.search(killers_desc)
        if assist_match:
            # Filter out assists
            killers_desc = assist_match.group("killers")
            # Split assists into a list.
            assists_name_list = self._split_list(assist_match.group("assists"))
        killers_name_list = self._split_list(killers_desc)
        for killer in killers_name_list:
            killer_dict = self._parse_killer(killer)
            death.killers.append(Killer(**killer_dict))
        for assist in assists_name_list:
            # Extract names from character links in assists list.
            assist_dict = {"name": link_content.search(assist).group(1), "player": True}
            death.assists.append(Killer(**assist_dict))
        try:
            self.deaths.append(death)
        except ValueError:
            # Some pvp deaths have no level, so they are raising a ValueError,
            # they will be ignored for now.
            continue
def qteKillMiniApplet(self):
    """Remove the mini applet.

    If a different applet is to be restored/focused then call
    ``qteMakeAppletActive`` for that applet *after* calling this method.

    |Args|

    * **None**

    |Returns|

    * **None**

    |Raises|

    * **None**
    """
    # Sanity check: is the handle valid?
    if self._qteMiniApplet is None:
        return
    # Sanity check: is it really a mini applet?
    if not self.qteIsMiniApplet(self._qteMiniApplet):
        msg = ('Mini applet does not have its mini applet flag set.' ' Ignored.')
        self.qteLogger.warning(msg)
    if self._qteMiniApplet not in self._qteAppletList:
        # Something is wrong because the mini applet is not part
        # of the applet list.
        msg = 'Custom mini applet not in applet list --> Bug.'
        self.qteLogger.warning(msg)
    else:
        # Inform the mini applet that it is about to be killed.
        try:
            self._qteMiniApplet.qteToBeKilled()
        except Exception:
            msg = 'qteToBeKilledRoutine is faulty'
            self.qteLogger.exception(msg, exc_info=True, stack_info=True)
        # Shorthands to calling window.
        win = self._qteMiniApplet._qteCallingWindow
        # We need to move the focus from the mini applet back to a
        # regular applet. Therefore, first look for the next
        # visible applet in the current window (ie. the last one
        # that was made active).
        app = self.qteNextApplet(windowObj=win)
        if app is not None:
            # Found another (visible or invisible) applet --> make
            # it active/visible.
            self.qteMakeAppletActive(app)
        else:
            # No visible applet available in this window --> look
            # for an invisible one.
            app = self.qteNextApplet(skipInvisible=False, skipVisible=True)
            if app is not None:
                # Found an invisible applet --> make it
                # active/visible.
                self.qteMakeAppletActive(app)
            else:
                # There is no other visible applet in this window.
                # The focus manager will therefore make a new applet
                # active.
                self._qteActiveApplet = None
        self._qteAppletList.remove(self._qteMiniApplet)
    # Close the mini applet applet and schedule it for deletion.
    self._qteMiniApplet.close()
    self._qteMiniApplet.deleteLater()
    # Clear the handle to the mini applet.
    self._qteMiniApplet = None
def search_prospects(self, search_type, query, offset=None, orgoffset=None):
    """Supports doing a search for prospects by city, region, or country.

    search_type should be one of 'city', 'region', 'country'.
    This method is intended to be called with one of the outputs from the
    get_options_for_query method above.
    """
    params = {'q': query}
    # NOTE(review): both offsets must be truthy for paging to apply; an
    # offset of 0 is treated the same as None -- confirm intended.
    if offset and orgoffset:
        params['orgOffset'] = orgoffset
        params['timeOffset'] = offset
    return self._call('search/%s' % search_type, params)
def list_domains(container_id=None):
    '''List domains that CertCentral knows about. You can filter by
    container_id (also known as "Division") by passing a container_id.

    CLI Example:

    .. code-block:: bash

        salt-run digicert.list_domains
    '''
    if container_id:
        # BUG FIX: the query previously rendered as '?<id>' with no
        # parameter name, so the API silently ignored the filter; the
        # endpoint expects 'container_id=<id>'.
        url = '{0}/domain?container_id={1}'.format(_base_url(), container_id)
    else:
        url = '{0}/domain'.format(_base_url())
    orgs = _paginate(
        url,
        "domains",
        method='GET',
        decode=True,
        decode_type='json',
        header_dict={
            'X-DC-DEVKEY': _api_key(),
            'Content-Type': 'application/json',
        })
    return {'domains': orgs}
def get_queue(self):
    """Build the candidate queue of news-title units.

    Return:
        queue -- list of units that pass the title filter
    """
    queue = []
    for idx in range(self.index):
        unit = self.unit_raw[idx]
        metrics = CDM(unit)
        # keep units with positive alpha whose PTN length lies within the
        # configured title bounds
        if metrics.get_alpha() > 0 and metrics.PTN in range(self.title_min, self.title_max):
            queue.append(unit)
    if queue:
        log('debug', '\n获取标题候选队列成功:【{}】\n'.format(queue))
    return queue
def _sendTo(self, proto):
    """When sent, call the C{startProtocol} method on the virtual transport
    object.

    @see: L{vertex.ptcp.PTCP.startProtocol}
    @see: L{vertex.q2q.VirtualTransport.startProtocol}

    @param proto: the AMP protocol that this is being sent on.
    """
    # XXX This is overriding a private interface
    super(ConnectionStartBox, self)._sendTo(proto)
    # Once the box is on the wire, start the virtual transport.
    self.virtualTransport.startProtocol()
def array_keys(self):
    """Return an iterator over member names for arrays only.

    Examples
    --------
    >>> import zarr
    >>> g1 = zarr.group()
    >>> g2 = g1.create_group('foo')
    >>> g3 = g1.create_group('bar')
    >>> d1 = g1.create_dataset('baz', shape=100, chunks=10)
    >>> d2 = g1.create_dataset('quux', shape=200, chunks=20)
    >>> sorted(g1.array_keys())
    ['baz', 'quux']
    """
    # Walk the store entries in sorted order, yielding only those whose
    # full path resolves to an array (groups are skipped).
    for member in sorted(listdir(self._store, self._path)):
        member_path = self._key_prefix + member
        if contains_array(self._store, member_path):
            yield member
def monitor_key_get(service, key):
    """Gets the value of an existing key in the monitor cluster.

    :param service: six.string_types. The Ceph user name to run the command under
    :param key: six.string_types. The key to search for.
    :return: Returns the value of that key or None if not found.
    """
    cmd = ['ceph', '--id', service, 'config-key', 'get', str(key)]
    try:
        return check_output(cmd).decode('UTF-8')
    except CalledProcessError as e:
        log("Monitor config-key get failed with message: {}".format(e.output))
        return None
def V_horiz_guppy(D, L, a, h, headonly=False):
    r'''Calculates volume of a tank with guppy heads, according to [1]_.

    .. math::
        V_f = A_fL + \frac{2aR^2}{3}\cos^{-1}\left(1 - \frac{h}{R}\right)
        + \frac{2a}{9R}\sqrt{2Rh - h^2}(2h - 3R)(h + R)

    .. math::
        A_f = R^2\cos^{-1}\frac{R-h}{R} - (R-h)\sqrt{2Rh - h^2}

    Parameters
    ----------
    D : float
        Diameter of the main cylindrical section, [m]
    L : float
        Length of the main cylindrical section, [m]
    a : float
        Distance the guppy head extends on one side, [m]
    h : float
        Height, as measured up to where the fluid ends, [m]
    headonly : bool, optional
        Function returns only the volume of a single head side if True

    Returns
    -------
    V : float
        Volume [m^3]

    Examples
    --------
    Matching example from [1]_, with inputs in inches and volume in gallons.

    >>> V_horiz_guppy(D=108., L=156., a=42., h=36)/231.
    1931.7208029476762

    References
    ----------
    .. [1] Jones, D. "Calculating Tank Volume." Text. Accessed December 22,
       2015. http://www.webcalc.com.br/blog/Tank_Volume.PDF
    '''
    radius = 0.5 * D
    # Partially-filled circular cross-section area of the cylinder.
    cross_area = (radius * radius * acos((radius - h) / radius)
                  - (radius - h) * (2. * radius * h - h * h) ** 0.5)
    # Combined fluid volume of both guppy heads.
    heads_volume = (2. * a * radius * radius / 3. * acos(1. - h / radius)
                    + 2. * a / 9. / radius * (2 * radius * h - h ** 2) ** 0.5
                    * (2 * h - 3 * radius) * (h + radius))
    if headonly:
        return heads_volume / 2.
    return heads_volume + cross_area * L
def get_field_names(self, declared_fields, info):
    """We override the parent to omit explicitly defined meta fields (such
    as SerializerMethodFields) from the list of declared fields.

    The meta fields are re-appended at the end of the returned list so they
    are still serialized, just not treated as declared fields by the parent.
    """
    meta_fields = getattr(self.Meta, 'meta_fields', [])
    declared = OrderedDict()
    # Iterate the declared fields directly instead of wrapping them in a
    # set(): the set randomized iteration order, which made the resulting
    # field ordering nondeterministic.
    for field_name, field in declared_fields.items():
        if field_name not in meta_fields:
            declared[field_name] = field
    fields = super(ModelSerializer, self).get_field_names(declared, info)
    return list(fields) + list(getattr(self.Meta, 'meta_fields', list()))
def init_write_line(self):
    """Initialize the fields relevant to output generation.

    Builds the joined output format string, the list of per-field generator
    formats, and the list of field widths from ``self._format_list``, then
    marks write-line initialization as done.
    """
    fmt_info = self.gen_output_fmt(self._format_list)
    parts = []
    gen_fmts = []
    widths = []
    for fmt_str, gen_fmt, width in fmt_info:
        parts.append(fmt_str)
        if gen_fmt is not None:
            gen_fmts.append(gen_fmt)
        if width is not None:
            widths.append(width)
    self._output_fmt = "".join(parts)
    self._out_gen_fmt = gen_fmts
    self._out_widths = widths
    self._write_line_init = True
def connectChunk(key, chunk):
    """Parse a CONNECT card chunk.

    The first line of ``chunk`` is split on whitespace; the tokens are, in
    order: the card name, the link id, the downstream link id, the number of
    uplinks, then the uplink ids themselves (all kept as strings).
    """
    tokens = chunk[0].strip().split()
    return {
        'link': tokens[1],
        'downLink': tokens[2],
        'numUpLinks': tokens[3],
        'upLinks': tokens[4:],
    }
def convert(schema):
    """Convert a voluptuous schema to a plain dict/list description.

    Mappings become a list of per-key description dicts (name, description,
    required/optional flag, default, plus the converted value description);
    individual validators become dicts describing their constraints.

    Raises:
        ValueError: when the schema contains something not handled here.
    """
    # pylint: disable=too-many-return-statements,too-many-branches
    if isinstance(schema, vol.Schema):
        # Unwrap the Schema wrapper to its underlying definition.
        schema = schema.schema
    if isinstance(schema, Mapping):
        val = []
        for key, value in schema.items():
            description = None
            if isinstance(key, vol.Marker):
                # Markers (Required/Optional/...) wrap the real key.
                pkey = key.schema
                description = key.description
            else:
                pkey = key
            pval = convert(value)
            pval['name'] = pkey
            if description is not None:
                pval['description'] = description
            if isinstance(key, (vol.Required, vol.Optional)):
                # Produces {'required': True} or {'optional': True}.
                pval[key.__class__.__name__.lower()] = True
                if key.default is not vol.UNDEFINED:
                    # voluptuous stores defaults as zero-arg callables.
                    pval['default'] = key.default()
            val.append(pval)
        return val
    if isinstance(schema, vol.All):
        # Merge the descriptions of every chained validator.
        val = {}
        for validator in schema.validators:
            val.update(convert(validator))
        return val
    if isinstance(schema, (vol.Clamp, vol.Range)):
        val = {}
        if schema.min is not None:
            val['valueMin'] = schema.min
        if schema.max is not None:
            val['valueMax'] = schema.max
        return val
    if isinstance(schema, vol.Length):
        val = {}
        if schema.min is not None:
            val['lengthMin'] = schema.min
        if schema.max is not None:
            val['lengthMax'] = schema.max
        return val
    if isinstance(schema, vol.Datetime):
        return {'type': 'datetime', 'format': schema.format, }
    if isinstance(schema, vol.In):
        if isinstance(schema.container, Mapping):
            # Mapping containers provide (value, label) pairs directly.
            return {'type': 'select', 'options': list(schema.container.items()), }
        # Plain containers: use the item itself as both value and label.
        return {'type': 'select', 'options': [(item, item) for item in schema.container]}
    if schema in (vol.Lower, vol.Upper, vol.Capitalize, vol.Title, vol.Strip):
        return {schema.__name__.lower(): True, }
    if isinstance(schema, vol.Coerce):
        # Describe the coercion target type via the TYPES_MAP lookup below.
        schema = schema.type
    if schema in TYPES_MAP:
        return {'type': TYPES_MAP[schema]}
    raise ValueError('Unable to convert schema: {}'.format(schema))
def setup_user_manager(app):
    """Set up the flask-user manager for *app* with the SQLAlchemy adapter."""
    from flask_user import SQLAlchemyAdapter
    from rio.models import User
    adapter = SQLAlchemyAdapter(db, User)
    user_manager.init_app(app, db_adapter=adapter)
def load_ply(file_obj, resolver=None, fix_texture=True, *args, **kwargs):
    """Load a PLY file from an open file object.

    Parameters
    ----------
    file_obj : an open file-like object
        Source data, ASCII or binary PLY
    resolver : trimesh.visual.resolvers.Resolver
        Object which can resolve assets (used to fetch the texture image)
    fix_texture : bool
        If True, will re-index vertices and faces so vertices with
        different UV coordinates are disconnected.

    Returns
    -------
    mesh_kwargs : dict
        Data which can be passed to the Trimesh constructor,
        eg: a = Trimesh(**mesh_kwargs)
    """
    # OrderedDict which is populated from the header
    elements, is_ascii, image_name = parse_header(file_obj)
    # functions will fill in elements from file_obj
    if is_ascii:
        ply_ascii(elements, file_obj)
    else:
        ply_binary(elements, file_obj)
    # try to load the referenced texture image, if any
    image = None
    if image_name is not None:
        try:
            data = resolver.get(image_name)
            image = PIL.Image.open(util.wrap_as_stream(data))
        except BaseException:
            # missing resolver / unreadable image: continue without texture
            log.warning('unable to load image!', exc_info=True)
    kwargs = elements_to_kwargs(elements, fix_texture=fix_texture, image=image)
    return kwargs
def message(self, text):
    """Publish *text* on the public (external) channel, prefixed by resource."""
    payload = '{}: {}'.format(self.resource, text)
    self.client.publish(self.keys.external, payload)
def _parse_from_table(html_chunk, what):
    """Go through table data in `html_chunk` and locate the content of the
    cell neighbouring the header cell containing `what`.

    Returns:
        str: Table data, or None when no matching row is found.
    """
    rows = html_chunk.find("tr", fn=must_contain("th", what, "td"))
    if not rows:
        return None
    return get_first_content(rows[0].find("td"))
def get_sonos_favorites(self, start=0, max_items=100):
    """Get Sonos favorites.

    See :meth:`get_favorite_radio_shows` for return type and remarks.
    """
    warnings.warn(
        'The output type of this method will probably change in '
        'the future to use SoCo data structures',
        stacklevel=2)
    return self.__get_favorites(SONOS_FAVORITES, start, max_items)
def derive_field_name(self, field_name):
    """Derive a new event from this one with the ``field_name`` attribute replaced.

    Args:
        field_name (Union[amazon.ion.symbols.SymbolToken, unicode]): The field name to set.

    Returns:
        IonEvent: The newly generated event.
    """
    event_cls = type(self)
    # Positional (ordinal) access avoids materializing lazy thunk attributes.
    return event_cls(self[0], self[1], self[2], field_name, self[4], self[5])
def send_heartbeats(heartbeats, args, configs, use_ntlm_proxy=False):
    """Send heartbeats to the WakaTime API, with user authentication.

    Returns ``SUCCESS`` when the heartbeats were sent (or queued for offline
    retry), otherwise an error code (``AUTH_ERROR`` / ``API_ERROR``).  On a
    proxy failure with a ``DOMAIN\\user`` style proxy string, retries once
    with NTLM proxy authentication.
    """
    if len(heartbeats) == 0:
        return SUCCESS
    api_url = args.api_url
    if not api_url:
        api_url = 'https://api.wakatime.com/api/v1/users/current/heartbeats.bulk'
    log.debug('Sending heartbeats to api at %s' % api_url)
    timeout = args.timeout
    if not timeout:
        timeout = 60
    data = [h.sanitize().dict() for h in heartbeats]
    log.debug(data)
    # setup api request
    request_body = json.dumps(data)
    api_key = u(base64.b64encode(str.encode(args.key) if is_py3 else args.key))
    auth = u('Basic {api_key}').format(api_key=api_key)
    headers = {
        'User-Agent': get_user_agent(args.plugin),
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': auth,
    }
    hostname = get_hostname(args)
    if hostname:
        headers['X-Machine-Name'] = u(hostname).encode('utf-8')
    # add Olson timezone to request
    try:
        tz = tzlocal.get_localzone()
    except:
        tz = None
    if tz:
        headers['TimeZone'] = u(tz.zone).encode('utf-8')
    session_cache = SessionCache()
    session = session_cache.get()
    should_try_ntlm = False
    proxies = {}
    if args.proxy:
        if use_ntlm_proxy:
            from .packages.requests_ntlm import HttpNtlmAuth
            # Split "user:password"; password defaults to empty.
            username = args.proxy.rsplit(':', 1)
            password = ''
            if len(username) == 2:
                password = username[1]
            username = username[0]
            session.auth = HttpNtlmAuth(username, password, session)
        else:
            # A backslash in the proxy string suggests DOMAIN\user, so an
            # NTLM retry is worth attempting on failure.
            should_try_ntlm = '\\' in args.proxy
            proxies['https'] = args.proxy
    ssl_verify = not args.nosslverify
    if args.ssl_certs_file and ssl_verify:
        ssl_verify = args.ssl_certs_file
    # send request to api
    response, code = None, None
    try:
        response = session.post(api_url, data=request_body, headers=headers,
                                proxies=proxies, timeout=timeout,
                                verify=ssl_verify)
    except RequestException:
        if should_try_ntlm:
            return send_heartbeats(heartbeats, args, configs, use_ntlm_proxy=True)
        else:
            exception_data = {
                sys.exc_info()[0].__name__: u(sys.exc_info()[1]),
            }
            if log.isEnabledFor(logging.DEBUG):
                exception_data['traceback'] = traceback.format_exc()
            if args.offline:
                # Queue for a later retry; only warn when debugging.
                queue = Queue(args, configs)
                queue.push_many(heartbeats)
                if log.isEnabledFor(logging.DEBUG):
                    log.warn(exception_data)
            else:
                log.error(exception_data)
    except:
        # delete cached session when requests raises unknown exception
        if should_try_ntlm:
            return send_heartbeats(heartbeats, args, configs, use_ntlm_proxy=True)
        else:
            exception_data = {
                sys.exc_info()[0].__name__: u(sys.exc_info()[1]),
                'traceback': traceback.format_exc(),
            }
            if args.offline:
                queue = Queue(args, configs)
                queue.push_many(heartbeats)
                log.warn(exception_data)
            # NOTE(review): unlike the RequestException branch above, the
            # non-offline path here appears to discard exception_data
            # without logging — confirm against upstream before changing.
    else:
        code = response.status_code if response is not None else None
        content = response.text if response is not None else None
        if _success(code):
            results = _get_results(response)
            _process_server_results(heartbeats, code, content, results, args, configs)
            session_cache.save(session)
            return SUCCESS
        else:
            log.debug({
                'response_code': code,
                'response_text': content,
            })
            if should_try_ntlm:
                return send_heartbeats(heartbeats, args, configs, use_ntlm_proxy=True)
            _handle_unsent_heartbeats(heartbeats, code, content, args, configs)
    session_cache.delete()
    return AUTH_ERROR if code == 401 else API_ERROR
def IFFT_filter(Signal, SampleFreq, lowerFreq, upperFreq, PyCUDA=False):
    """Filter data using fft -> zeroing out fft bins -> ifft.

    Parameters
    ----------
    Signal : ndarray
        Signal to be filtered
    SampleFreq : float
        Sample frequency of signal
    lowerFreq : float
        Lower frequency of bandpass to allow through filter
    upperFreq : float
        Upper frequency of bandpass to allow through filter
    PyCUDA : bool, optional
        If True, uses PyCUDA to accelerate the FFT and IFFT via your
        NVIDIA GPU.  If False, performs the FFT and IFFT with
        conventional scipy.fftpack.

    Returns
    -------
    FilteredData : ndarray
        Array containing the filtered data
    """
    if PyCUDA == True:
        Signalfft = calc_fft_with_PyCUDA(Signal)
    else:
        print("starting fft")
        Signalfft = scipy.fftpack.fft(Signal)
    print("starting freq calc")
    freqs = _np.fft.fftfreq(len(Signal)) * SampleFreq
    print("starting bin zeroing")
    # NOTE(review): for positive lowerFreq this also zeroes every negative
    # frequency bin, which is presumably why the result is doubled and the
    # real part taken below — confirm before changing.
    Signalfft[_np.where(freqs < lowerFreq)] = 0
    Signalfft[_np.where(freqs > upperFreq)] = 0
    if PyCUDA == True:
        FilteredSignal = 2 * calc_ifft_with_PyCUDA(Signalfft)
    else:
        print("starting ifft")
        FilteredSignal = 2 * scipy.fftpack.ifft(Signalfft)
    print("done")
    return _np.real(FilteredSignal)
def check(state_engine, nameop, block_id, checked_ops):
    """Revoke a name -- make it available for registration.

    * it must be well-formed
    * its namespace must be ready.
    * the name must be registered
    * it must be sent by the name owner

    NAME_REVOKE isn't allowed during an import, so the name's namespace
    must be ready.

    Return True if accepted (the nameop is mutated to apply the transition).
    Return False if not.
    """
    name = nameop['name']
    sender = nameop['sender']
    namespace_id = get_namespace_from_name(name)
    # name must be well-formed: the b40 alphabet, no '+', at most one '.'.
    # (b40 minus '+' and the restricted '.' is effectively 38 characters,
    # hence the "base-38" wording of the message.)
    if not is_b40(name) or "+" in name or name.count(".") > 1:
        log.warning("Malformed name '%s': non-base-38 characters" % name)
        return False
    # name must exist
    name_rec = state_engine.get_name(name)
    if name_rec is None:
        log.warning("Name '%s' does not exist" % name)
        return False
    # namespace must be ready
    if not state_engine.is_namespace_ready(namespace_id):
        log.warning("Namespace '%s' is not ready" % namespace_id)
        return False
    # name must not already be revoked
    if state_engine.is_name_revoked(name):
        log.warning("Name '%s' is revoked" % name)
        return False
    # name must not be expired as of *this* block
    if state_engine.is_name_expired(name, block_id):
        log.warning("Name '%s' is expired" % name)
        return False
    # name must not be in its renewal grace period in this block
    if state_engine.is_name_in_grace_period(name, block_id):
        log.warning("Name '{}' is in the renewal grace period. It can only be renewed at this time.".format(name))
        return False
    # the name must be registered
    if not state_engine.is_name_registered(name):
        log.warning("Name '%s' is not registered" % name)
        return False
    # the sender must own this name
    if not state_engine.is_name_owner(name, sender):
        log.warning("Name '%s' is not owned by %s" % (name, sender))
        return False
    # apply state transition: mark revoked and drop the value hash
    nameop['revoked'] = True
    nameop['value_hash'] = None
    return True
def requiv_contact_max(b, component, solve_for=None, **kwargs):
    """Create a constraint to determine the critical (at L2/3) value of
    requiv at which a contact will overflow.  This will only be used
    for contacts, for requiv_max.

    :parameter b: the :class:`phoebe.frontend.bundle.Bundle`
    :parameter str component: the label of the star in which this
        constraint should be built
    :parameter str solve_for: if 'requiv_max' should not be the
        derived/constrained parameter, provide which other parameter
        should be derived
    :returns: lhs (Parameter), rhs (ConstraintParameter), args (dict of
        arguments that were passed to this function)
    """
    hier = b.get_hierarchy()
    if not len(hier.get_value()):
        # TODO: change to custom error type to catch in bundle.add_component
        # TODO: check whether the problem is 0 hierarchies or more than 1
        raise NotImplementedError("constraint for requiv_contact_max requires hierarchy")
    component_ps = _get_system_ps(b, component)
    parentorbit = hier.get_parent_of(component)
    parentorbit_ps = _get_system_ps(b, parentorbit)
    requiv_max = component_ps.get_parameter(qualifier='requiv_max')
    q = parentorbit_ps.get_parameter(qualifier='q')
    sma = parentorbit_ps.get_parameter(qualifier='sma')
    if solve_for in [None, requiv_max]:
        lhs = requiv_max
        # Critical (L2/L3) equivalent radius from the Roche geometry;
        # the primary/secondary index selects the correct lobe.
        rhs = roche_requiv_contact_L23(q, sma, hier.get_primary_or_secondary(component, return_ind=True))
    else:
        raise NotImplementedError("requiv_contact_max can only be solved for requiv_max")
    return lhs, rhs, {'component': component}
def get_filters_params(self, params=None):
    """Return a copy of the query-string params minus IGNORED_PARAMS.

    Falls back to ``self.params`` when *params* is not given.
    """
    source = params if params else self.params
    lookup_params = source.copy()
    # Drop every parameter that is globally and systematically ignored.
    for ignored in IGNORED_PARAMS:
        lookup_params.pop(ignored, None)
    return lookup_params
def _save_cache ( self , filename , section_number_of_pages , page_references ) :
"""Save the current state of the page references to ` < filename > . rtc `""" | cache_path = Path ( filename ) . with_suffix ( self . CACHE_EXTENSION )
with cache_path . open ( 'wb' ) as file :
cache = ( section_number_of_pages , page_references )
pickle . dump ( cache , file ) |
def snyder_ac ( self , structure ) :
"""Calculates Snyder ' s acoustic sound velocity ( in SI units )
Args :
structure : pymatgen structure object
Returns : Snyder ' s acoustic sound velocity ( in SI units )""" | nsites = structure . num_sites
volume = structure . volume
natoms = structure . composition . num_atoms
num_density = 1e30 * nsites / volume
tot_mass = sum ( [ e . atomic_mass for e in structure . species ] )
avg_mass = 1.6605e-27 * tot_mass / natoms
return 0.38483 * avg_mass * ( ( self . long_v ( structure ) + 2. * self . trans_v ( structure ) ) / 3. ) ** 3. / ( 300. * num_density ** ( - 2. / 3. ) * nsites ** ( 1. / 3. ) ) |
def upload(self):
    """Perform the block-wise (resumable) upload of the input stream.

    (Docstring translated from the original Chinese: "upload operation".)

    Returns the (ret, info) pair of the final make_file call on success,
    or the failing (ret, info) pair when a block cannot be uploaded.
    """
    self.blockStatus = []
    # Pick the upload host: explicit config wins over token-based lookup.
    if config.get_default('default_zone').up_host:
        host = config.get_default('default_zone').up_host
    else:
        host = config.get_default('default_zone').get_up_host_by_token(self.up_token)
    # Resume from a previously recorded offset, if any.
    offset = self.recovery_from_record()
    for block in _file_iter(self.input_stream, config._BLOCK_SIZE, offset):
        length = len(block)
        crc = crc32(block)
        ret, info = self.make_block(block, length, host)
        if ret is None and not info.need_retry():
            # Hard failure: give up immediately.
            return ret, info
        if info.connect_failed():
            # Connection problem: switch to the backup host for the retry.
            if config.get_default('default_zone').up_host_backup:
                host = config.get_default('default_zone').up_host_backup
            else:
                host = config.get_default('default_zone').get_up_host_backup_by_token(self.up_token)
        if info.need_retry() or crc != ret['crc32']:
            # One retry per block; verify the CRC again afterwards.
            ret, info = self.make_block(block, length, host)
            if ret is None or crc != ret['crc32']:
                return ret, info
        self.blockStatus.append(ret)
        offset += length
        self.record_upload_progress(offset)
        if (callable(self.progress_handler)):
            # Bytes uploaded so far = full blocks before this one + this block.
            self.progress_handler(((len(self.blockStatus) - 1) * config._BLOCK_SIZE) + length, self.size)
    return self.make_file(host)
def labels(self):
    """Return (and lazily cache) the list of labels that represent this
    axis' information.

    :return: [<str>, ..]
    """
    if self._labels is None:
        # Materialize as a list: under Python 3 ``map`` returns a one-shot
        # iterator, and caching that iterator would make every access after
        # the first yield an empty sequence.
        self._labels = list(map(self.labelFormat().format, self.values()))
    return self._labels
def split_and_operate(self, mask, f, inplace):
    """Split the block per-column, and apply the callable f per-column,
    returning a new block for each.  Handle masking which will not
    change a block unless needed.

    Parameters
    ----------
    mask : 2-d boolean mask (None means "apply everywhere")
    f : callable accepting (1d-mask, 1d values, indexer)
    inplace : boolean

    Returns
    -------
    list of blocks
    """
    if mask is None:
        # No mask given: treat every element as selected.
        mask = np.ones(self.shape, dtype=bool)
    new_values = self.values

    def make_a_block(nv, ref_loc):
        # Normalize f's result (Block, list-of-Block, or raw array) to a Block.
        if isinstance(nv, Block):
            block = nv
        elif isinstance(nv, list):
            block = nv[0]
        else:
            # Put back the dimension that was taken from it and make
            # a block out of the result.
            try:
                nv = _block_shape(nv, ndim=self.ndim)
            except (AttributeError, NotImplementedError):
                pass
            block = self.make_block(values=nv, placement=ref_loc)
        return block

    # ndim == 1
    if self.ndim == 1:
        if mask.any():
            nv = f(mask, new_values, None)
        else:
            # Untouched data: share or copy depending on inplace.
            nv = new_values if inplace else new_values.copy()
        block = make_a_block(nv, self.mgr_locs)
        return [block]

    # ndim > 1
    new_blocks = []
    for i, ref_loc in enumerate(self.mgr_locs):
        m = mask[i]
        v = new_values[i]
        # need a new block
        if m.any():
            nv = f(m, v, i)
        else:
            nv = v if inplace else v.copy()
        block = make_a_block(nv, [ref_loc])
        new_blocks.append(block)
    return new_blocks
def current_timestamp(self) -> datetime:
    """Return the current state timestamp stored in the database hash."""
    raw = DB.get_hash_value(self._key, 'current_timestamp')
    return datetime_from_isoformat(raw)
def create_folder(self, uri, recursive=False):
    """Create folder.

    uri -- MediaFire URI

    Keyword arguments:
    recursive -- set to True to create intermediate folders.

    Returns the existing Folder resource when the uri already exists;
    raises NotAFolderError when the uri (or its parent) is not a folder,
    and ResourceNotFoundError when the parent is missing and recursive
    is False.
    """
    logger.info("Creating %s", uri)
    # check that folder exists already
    try:
        resource = self.get_resource_by_uri(uri)
        if isinstance(resource, Folder):
            return resource
        else:
            raise NotAFolderError(uri)
    except ResourceNotFoundError:
        # Does not exist yet -- proceed with creation below.
        pass
    location = self._parse_uri(uri)
    folder_name = posixpath.basename(location)
    parent_uri = 'mf://' + posixpath.dirname(location)
    try:
        parent_node = self.get_resource_by_uri(parent_uri)
        if not isinstance(parent_node, Folder):
            raise NotAFolderError(parent_uri)
        parent_key = parent_node['folderkey']
    except ResourceNotFoundError:
        if recursive:
            # Create the missing ancestors first, then use the new key.
            result = self.create_folder(parent_uri, recursive=True)
            parent_key = result['folderkey']
        else:
            raise
    # We specify exact location, so don't allow duplicates
    result = self.api.folder_create(folder_name, parent_key=parent_key, action_on_duplicate='skip')
    logger.info("Created folder '%s' [mf:%s]", result['name'], result['folder_key'])
    return self.get_resource_by_key(result['folder_key'])
def gbest_idx(swarm):
    """gbest neighbourhood topology function.

    Args:
        swarm: list: The list of particles.

    Returns:
        int: The index of the gbest particle.
    """
    best = 0
    # Comparator is built once from the initial best fitness.
    is_better = comparator(swarm[best].best_fitness)
    for idx, particle in enumerate(swarm):
        if is_better(particle.best_fitness, swarm[best].best_fitness):
            best = idx
    return best
def get_internal_ip():
    """Collect local network interface addresses.

    Despite the name, this returns a dict keyed by interface name; each
    value holds that interface's 'ipv4', 'ipv6' and 'link_layer' address
    lists from netifaces.  An interface missing *any* of those address
    families is omitted entirely (the KeyError aborts the whole entry).
    """
    nics = {}
    for interface_name in interfaces():
        addresses = ifaddresses(interface_name)
        try:
            nics[interface_name] = {
                'ipv4': addresses[AF_INET],
                'link_layer': addresses[AF_LINK],
                'ipv6': addresses[AF_INET6],
            }
        except KeyError:
            # Interface lacks at least one address family; skip it.
            pass
    return nics
def load_reader(reader_configs, **reader_kwargs):
    """Import and set up the reader described by *reader_configs*."""
    reader_info = read_reader_config(reader_configs)
    reader_cls = reader_info['reader']
    return reader_cls(config_files=reader_configs, **reader_kwargs)
def wait_for_close(raiden: 'RaidenService', payment_network_id: PaymentNetworkID, token_address: TokenAddress, channel_ids: List[ChannelID], retry_timeout: float, ) -> None:
    """Wait until all channels are closed.

    Thin wrapper around ``wait_for_channel_in_states`` with
    ``CHANNEL_AFTER_CLOSE_STATES`` as the target states.

    Note:
        This does not time out, use gevent.Timeout.
    """
    return wait_for_channel_in_states(
        raiden=raiden,
        payment_network_id=payment_network_id,
        token_address=token_address,
        channel_ids=channel_ids,
        retry_timeout=retry_timeout,
        target_states=CHANNEL_AFTER_CLOSE_STATES,
    )
def id_(reset=False):
    '''
    .. versionadded:: 2016.3.0

    Return monit unique id.

    reset : False
        Reset current id and generate a new id when it's True.

    CLI Example:

    .. code-block:: bash

        salt '*' monit.id [reset=True]
    '''
    if reset:
        id_pattern = re.compile(r'Monit id (?P<id>[^ ]+)')
        cmd = 'echo y|monit -r'
        out = __salt__['cmd.run_all'](cmd, python_shell=True)
        # Guard the regex search: the previous code called ``.group`` on
        # the raw search result, raising AttributeError when the output
        # did not contain an id instead of returning False.
        match = id_pattern.search(out['stdout'])
        return match.group('id') if match else False
    else:
        cmd = 'monit -i'
        out = __salt__['cmd.run'](cmd)
        # Output looks like "Monit ID: <id>"; keep the part after ':'.
        return out.split(':')[-1].strip()
def save_button_clicked(self, classification):
    """Action for save button clicked.

    Stores the currently edited classes into ``self.thresholds``
    (continuous layer mode) or ``self.value_maps`` (classified mode),
    marks the saved classification as the only active one for the
    active exposure, then returns to choose mode.

    :param classification: The classification that being edited.
    :type classification: dict
    """
    # Save current edit
    if self.layer_mode == layer_mode_continuous:
        thresholds = self.get_threshold()
        classification_class = {'classes': thresholds, 'active': True}
        if self.thresholds.get(self.active_exposure['key']):
            # Set every other classification for this exposure to not active
            for current_classification in list(self.thresholds.get(self.active_exposure['key']).values()):
                current_classification['active'] = False
        else:
            # First classification for this exposure
            self.thresholds[self.active_exposure['key']] = {}
        self.thresholds[self.active_exposure['key']][classification['key']] = classification_class
    else:
        value_maps = self.get_value_map()
        classification_class = {'classes': value_maps, 'active': True}
        if self.value_maps.get(self.active_exposure['key']):
            # Set every other classification for this exposure to not active
            for current_classification in list(self.value_maps.get(self.active_exposure['key']).values()):
                current_classification['active'] = False
        else:
            # First classification for this exposure
            self.value_maps[self.active_exposure['key']] = {}
        self.value_maps[self.active_exposure['key']][classification['key']] = classification_class
    # Back to choose mode
    self.cancel_button_clicked()
def saturation(self, value):
    """Setter: degree of saturation (volume of water to volume of voids).

    Cross-checks the new value against the saturation implied by the
    moist/dry unit weights when available, raising ModelError on
    inconsistency, and rolls back the assignment if recomputation fails.
    """
    value = clean_float(value)
    if value is None:
        return
    try:
        # Saturation implied by the unit weights; a TypeError (missing
        # inputs) silently skips this consistency check.
        unit_moisture_weight = self.unit_moist_weight - self.unit_dry_weight
        unit_moisture_volume = unit_moisture_weight / self._pw
        saturation = unit_moisture_volume / self._calc_unit_void_volume()
        if saturation is not None and not ct.isclose(saturation, value, rel_tol=self._tolerance):
            raise ModelError("New saturation (%.3f) is inconsistent "
                             "with calculated value (%.3f)" % (value, saturation))
    except TypeError:
        pass
    old_value = self.saturation
    self._saturation = value
    try:
        self.recompute_all_weights_and_void()
        self._add_to_stack("saturation", value)
    except ModelError as e:
        # Roll back on failure.
        # NOTE(review): re-raising as ModelError(e) discards the original
        # traceback; a bare ``raise`` would preserve it — confirm intent.
        self._saturation = old_value
        raise ModelError(e)
def snapshot_create(repository, snapshot, body=None, hosts=None, profile=None):
    '''
    .. versionadded:: 2017.7.0

    Create snapshot in specified repository by supplied definition.

    repository
        Repository name
    snapshot
        Snapshot name
    body
        Snapshot definition as in https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html

    CLI example::

        salt myminion elasticsearch.snapshot_create testrepo testsnapshot '{"indices":"index_1,index_2","ignore_unavailable":true,"include_global_state":false}'
    '''
    es = _get_instance(hosts, profile)
    try:
        response = es.snapshot.create(repository=repository, snapshot=snapshot, body=body)
    except elasticsearch.TransportError as e:
        raise CommandExecutionError("Cannot create snapshot {0} in repository {1}, server returned code {2} with message {3}".format(snapshot, repository, e.status_code, e.error))
    return response.get('accepted', False)
def _remote(self, name):
    """Return a remote for which 'name' matches the short_name or url.

    Tries an exact short_name match first, then falls back to matching
    the url.  Raises NotFoundError for an empty or unknown name; returns
    None (after logging) when multiple remotes match.
    """
    from ambry.orm import Remote
    from sqlalchemy import or_
    from ambry.orm.exc import NotFoundError
    from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
    if not name.strip():
        raise NotFoundError("Empty remote name")
    try:
        try:
            # First try an exact short-name match.
            r = self.database.session.query(Remote).filter(Remote.short_name == name).one()
        except NoResultFound as e:
            r = None
        if not r:
            # Fall back to matching the remote URL.
            r = self.database.session.query(Remote).filter(Remote.url == name).one()
    except NoResultFound as e:
        raise NotFoundError(str(e) + '; ' + name)
    except MultipleResultsFound as e:
        self.logger.error("Got multiple results for search for remote '{}': {}".format(name, e))
        return None
    return r
def validate_source(ident, comment=None):
    '''Validate a source for automatic harvesting'''
    source = get_source(ident)
    # Record who/when/why the source was accepted.
    validation = source.validation
    validation.on = datetime.now()
    validation.comment = comment
    validation.state = VALIDATION_ACCEPTED
    if current_user.is_authenticated:
        validation.by = current_user._get_current_object()
    source.save()
    # Schedule periodic harvesting and kick off an immediate run.
    schedule(ident, cron=current_app.config['HARVEST_DEFAULT_SCHEDULE'])
    launch(ident)
    return source
def inline_query(self):
    """The query ``str`` for :tl:`KeyboardButtonSwitchInline` objects."""
    button = self.button
    if not isinstance(button, types.KeyboardButtonSwitchInline):
        return None
    return button.query
def check_running(self, role, number):
    """Check that exactly ``number`` instances in ``role`` are running.

    Returns the list of running instances on success, or False (after
    printing a diagnostic) when the count does not match.
    """
    instances = self.get_instances_in_role(role, "running")
    if len(instances) != number:
        # print() with a single pre-formatted argument is valid under both
        # Python 2 and 3; the original py2-only print statement broke py3.
        print("Expected %s instances in role %s, but was %s %s" % (number, role, len(instances), instances))
        return False
    else:
        return instances
def scan_volumes(cryst, lo=0.98, hi=1.02, n=5, scale_volumes=True):
    '''Provide set of crystals along volume axis from lo to hi (inclusive).
    No volume cell optimization is performed.  Bounds are specified as
    fractions (1.10 = 10% increase).  If scale_volumes == False the scaling
    is applied to lattice vectors instead of volumes.

    :param lo: lower bound of the V/V_0 in the scan
    :param hi: upper bound of the V/V_0 in the scan
    :param n: number of volume sample points
    :param scale_volumes: If True scale the unit cell volume or,
        if False, scale the length of lattice axes.

    :returns: a list of deformed systems
    '''
    scale = linspace(lo, hi, num=n)
    if scale_volumes:
        # Convert volume scaling factors into linear lattice-vector factors.
        scale **= (1.0 / 3.0)
    uc = cryst.get_cell()
    systems = [Atoms(cryst) for _ in scale]
    # Use a fresh loop index: the original rebound the parameter ``n``,
    # shadowing the sample-count argument.
    for i, s in enumerate(scale):
        systems[i].set_cell(s * uc, scale_atoms=True)
    return systems
def show_firmware_version_output_show_firmware_version_control_processor_vendor(self, **kwargs):
    """Auto Generated Code.

    Build the XML payload carrying the control-processor-vendor leaf of a
    show-firmware-version output and pass it to the callback.
    """
    # The generated original created a throwaway <config> element and then
    # immediately rebound it; the RPC root is <show_firmware_version>.
    root = ET.Element("show_firmware_version")
    output = ET.SubElement(root, "output")
    inner = ET.SubElement(output, "show-firmware-version")
    vendor = ET.SubElement(inner, "control-processor-vendor")
    vendor.text = kwargs.pop('control_processor_vendor')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def get_all_groups(path_prefix='/', region=None, key=None, keyid=None, profile=None):
    '''
    Get and return all IAM group details, starting at the optional path.

    .. versionadded:: 2016.3.0

    CLI Example:

        salt-call boto_iam.get_all_groups
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if not conn:
        return None
    # First page (no marker), then follow pagination markers until exhausted.
    page = conn.get_all_groups(path_prefix=path_prefix)
    result = page.list_groups_response.list_groups_result
    groups = result.groups
    marker = getattr(result, 'marker', None)
    while marker:
        page = conn.get_all_groups(path_prefix=path_prefix, marker=marker)
        result = page.list_groups_response.list_groups_result
        groups = groups + result.groups
        marker = getattr(result, 'marker', None)
    return groups
def margin_area_LinesNumbers_widget(self, value):
    """Setter for **self.__margin_area_LinesNumbers_widget** attribute.

    :param value: Attribute value.
    :type value: LinesNumbers_QWidget
    """
    if value is not None:
        # The original assert message named the attribute "checked"
        # (copy-paste from another setter); report the correct attribute.
        assert type(value) is LinesNumbers_QWidget, \
            "'{0}' attribute: '{1}' type is not 'LinesNumbers_QWidget'!".format(
                "margin_area_LinesNumbers_widget", value)
    self.__margin_area_LinesNumbers_widget = value
def plot_signal_sum(ax, params, fname='LFPsum.h5', unit='mV', scaling_factor=1., ylabels=True, scalebar=True, vlimround=None, T=[800, 1000], ylim=[-1500, 0], color='k', fancy=False, label='', transient=200, clip_on=False, rasterized=True, **kwargs):
    '''On axes, plot the summed LFP contributions.

    NOTE(review): this function uses Python-2-only syntax
    (``raise Exception, ...`` and ``dict.iteritems``) and will not run
    under Python 3 as written.

    args:
        ax: matplotlib.axes.AxesSubplot object
        fname: str/np.ndarray, path to h5 file or ndim=2 numpy.ndarray
        unit: str, scalebar unit
        scaling_factor: float, scaling factor (e.g. to scale 10% data set up)
        ylabels: bool, show labels on y-axis
        scalebar: bool, show scalebar in plot
        vlimround: None/float, override autoscaling of data and scalebar
        T: list, [tstart, tstop], which time interval
        ylim: list of floats, see plt.gca().set_ylim
        color: str/colorspec tuple, color of shown lines
        fancy: bool, per-channel colors via phlp.get_colors
        label: str, line labels
        transient: float, time before which data is ignored for the mean
        rasterized: bool, rasterize line plots if true
        kwargs: additional keyword arguments passed to ax.plot()
    returns:
        vlimround: float, scalebar scaling factor, i.e., to match up plots
    '''
    if type(fname) == str and os.path.isfile(fname):
        f = h5py.File(fname)
        # load data
        data = f['data'].value
        tvec = np.arange(data.shape[1]) * 1000. / f['srate'].value
        # per-channel mean over the post-transient period, for mean subtraction
        datameanaxis1 = f['data'].value[:, tvec >= transient].mean(axis=1)
        # close dataset
        f.close()
    elif type(fname) == np.ndarray and fname.ndim == 2:
        data = fname
        tvec = np.arange(data.shape[1]) * params.dt_output
        datameanaxis1 = data[:, tvec >= transient].mean(axis=1)
    else:
        # Python-2-only raise syntax, preserved as-is
        raise Exception, 'type(fname)={} not str or numpy.ndarray'.format(type(fname))
    # slice out the requested time window
    slica = (tvec <= T[1]) & (tvec >= T[0])
    data = data[:, slica]
    # subtract the post-transient mean in each channel
    # dataT = data.T - data.mean(axis=1)
    dataT = data.T - datameanaxis1
    data = dataT.T
    # normalize
    data = data * scaling_factor
    zvec = np.r_[params.electrodeParams['z']]
    zvec = np.r_[zvec, zvec[-1] + np.diff(zvec)[-1]]
    vlim = abs(data).max()
    if vlimround is None:
        # round the scale to the nearest power of two
        vlimround = 2.**np.round(np.log2(vlim))
    else:
        pass
    yticklabels = []
    yticks = []
    if fancy:
        colors = phlp.get_colors(data.shape[0])
    else:
        colors = [color] * data.shape[0]
    for i, z in enumerate(params.electrodeParams['z']):
        # only the first line carries the legend label
        if i == 0:
            ax.plot(tvec[slica], data[i] * 100 / vlimround + z, color=colors[i], rasterized=rasterized, label=label, clip_on=clip_on, **kwargs)
        else:
            ax.plot(tvec[slica], data[i] * 100 / vlimround + z, color=colors[i], rasterized=rasterized, clip_on=clip_on, **kwargs)
        yticklabels.append('ch. %i' % (i + 1))
        yticks.append(z)
    if scalebar:
        # vertical scalebar at the right edge of the time window
        ax.plot([tvec[slica][-1], tvec[slica][-1]], [-1300, -1400], lw=2, color='k', clip_on=False)
        ax.text(tvec[slica][-1] + np.diff(T) * 0.02, -1350, r'%g %s' % (vlimround, unit), color='k', rotation='vertical', va='center')
    ax.axis(ax.axis('tight'))
    ax.yaxis.set_ticks(yticks)
    if ylabels:
        ax.yaxis.set_ticklabels(yticklabels)
    else:
        ax.yaxis.set_ticklabels([])
    # hide the right/top spines (py2-only iteritems, preserved as-is)
    for loc, spine in ax.spines.iteritems():
        if loc in ['right', 'top']:
            spine.set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.set_xlabel(r'$t$ (ms)', labelpad=0.1)
    ax.set_ylim(ylim)
    return vlimround
def getOverlayTransformType(self, ulOverlayHandle):
    """Return the transform type of the overlay identified by *ulOverlayHandle*.

    Invokes the native ``getOverlayTransformType`` entry from the function
    table and returns ``(result, peTransformType)`` where ``result`` is the
    API return code and ``peTransformType`` the filled-in output enum.
    """
    transform_type = VROverlayTransformType()
    error_code = self.function_table.getOverlayTransformType(
        ulOverlayHandle, byref(transform_type))
    return error_code, transform_type
def from_rollup_json(cls, stream, json_data):
    """Build a DataPoint from rollup JSON, whose layout differs slightly
    from regular stream data.

    :param DataStream stream: the :class:`~DataStream` this data belongs to
    :param dict json_data: deserialized JSON from Device Cloud
    :raises ValueError: if the data is malformed
    :return: (:class:`~DataPoint`) newly created :class:`~DataPoint`
    """
    point = cls.from_json(stream, json_data)
    # Rollup timestamps arrive as integers and need explicit conversion
    # (handled by dc_utc_timestamp_to_dt) before re-encoding as ISO format.
    raw_timestamp = int(json_data.get("timestamp"))
    point.set_timestamp(isoformat(dc_utc_timestamp_to_dt(raw_timestamp)))
    # All rollup data is float-typed on the wire; decode it back to the
    # stream's declared data type.
    decode = _get_decoder_method(stream.get_data_type())
    point.set_data(decode(float(json_data.get("data"))))
    return point
def create_mbed_detector(**kwargs):
    """! Factory creating a host-OS-specific board detector object.

    :param kwargs: keyword arguments forwarded to the detector constructor
    @return Returns an StlinkDetect* instance, or None if the host OS is
        not supported
    """
    system = platform.system()
    if system == "Windows":
        from .windows import StlinkDetectWindows
        return StlinkDetectWindows(**kwargs)
    if system == "Linux":
        from .linux import StlinkDetectLinuxGeneric
        return StlinkDetectLinuxGeneric(**kwargs)
    if system == "Darwin":
        from .darwin import StlinkDetectDarwin
        return StlinkDetectDarwin(**kwargs)
    # Unsupported host OS.
    return None
def repl_main(args):
    """Replacer entry point: apply ``args.pattern`` -> ``args.replacement``.

    Operates on ``args.paths`` when given, otherwise walks every file under
    the current working directory.
    """
    compiled = re.compile(args.pattern)
    replacement = args.replacement

    def apply_to(path):
        return replace_in_file(args, path, compiled, replacement)

    if args.paths:
        for path in args.paths:
            apply_to(path)
    else:
        # No explicit paths: recurse from the current directory.
        cwd = os.getcwd()
        walk_files(args, cwd, cwd, apply_to)
def dump(module, stream, cls=PVLEncoder, **kwargs):
    """Serialize ``module`` as a pvl module to the provided ``stream``.

    :param module: a ```PVLModule``` or ```dict``` like object to serialize
    :param stream: a ``.write()``-supporting file-like object to serialize
        the module to. If ``stream`` is a string it is treated as a filename
        and opened in binary write mode.
    :param cls: the encoder class used to serialize the pvl module; the
        default ``PVLEncoder`` or a custom ```PVLEncoder``` subclass.
    :param **kwargs: keyword arguments passed to the encoder class.
    """
    encoder = cls(**kwargs)
    if isinstance(stream, six.string_types):
        # String means "filename": open it ourselves, binary mode.
        with open(stream, 'wb') as fp:
            return encoder.encode(module, fp)
    encoder.encode(module, stream)
def addbusdays ( self , date , offset ) :
    """Add business days to a given date , taking holidays into consideration .
    Note :
        By definition , a zero offset causes the function to return the
        initial date , even it is not a business date . An offset of 1
        represents the next business date , regardless of date being a
        business date or not .
    Args :
        date ( date , datetime or str ) : Date to be incremented .
        offset ( integer ) : Number of business days to add . Positive values
            move the date forward and negative values move the date back .
    Returns :
        datetime : New incremented date .
    """
    date = parsefun ( date )
    # Zero offset returns the parsed input unchanged, by definition.
    if offset == 0 :
        return date
    # First jump by workdays only (weekend-aware); holidays crossed by that
    # jump are corrected for below.
    dateoffset = self . addworkdays ( date , offset )
    holidays = self . holidays
    # speed up
    if not holidays :
        return dateoffset
    weekdaymap = self . weekdaymap
    # speed up
    datewk = dateoffset . weekday ( )
    if offset > 0 : # i is the index of first holiday > date
        # we don ' t care if the start date is a holiday
        i = bisect . bisect_right ( holidays , date )
        if i == len ( holidays ) :
            # No holiday data beyond `date`; the result may undercount skips.
            warn ( 'Holiday list exhausted at end, ' 'addbusday(%s,%s) output may be incorrect.' % ( date , offset ) )
        else :
            # Every holiday landing on or before the current result pushes
            # the result forward by one more business day.
            while holidays [ i ] <= dateoffset :
                dateoffset += datetime . timedelta ( days = weekdaymap [ datewk ] . offsetnext )
                datewk = weekdaymap [ datewk ] . nextworkday
                i += 1
                if i == len ( holidays ) :
                    warn ( 'Holiday list exhausted at end, ' 'addbusday(%s,%s) output may be incorrect.' % ( date , offset ) )
                    break
    else : # i is the index of first holiday > = date
        # we don ' t care if the start date is a holiday
        i = bisect . bisect_left ( holidays , date ) - 1
        if i == - 1 :
            warn ( 'Holiday list exhausted at start, ' 'addbusday(%s,%s) output may be incorrect.' % ( date , offset ) )
        else :
            # Mirror of the forward case: walk backwards over holidays that
            # fall on or after the provisional result.
            while holidays [ i ] >= dateoffset :
                dateoffset += datetime . timedelta ( days = weekdaymap [ datewk ] . offsetprev )
                datewk = weekdaymap [ datewk ] . prevworkday
                i -= 1
                if i == - 1 :
                    warn ( 'Holiday list exhausted at start, ' 'addbusday(%s,%s) output may be incorrect.' % ( date , offset ) )
                    break
    return dateoffset
def facets(mesh, engine=None):
    """Find groups of parallel adjacent faces.

    Parameters
    ----------
    mesh : trimesh.Trimesh
    engine : str
        Which graph engine to use:
        ('scipy', 'networkx', 'graphtool')

    Returns
    -------
    facets : sequence of (n,) int
        Groups of face indexes of parallel adjacent faces.
    """
    # Radius of a circle through the perpendicular projection of the vector
    # between the two non-shared vertices onto the shared edge, using the
    # face normals of the two adjacent faces.
    radii = mesh.face_adjacency_radius
    # Span perpendicular to the shared edge.
    span = mesh.face_adjacency_span
    # A heuristic for declaring two adjacent faces parallel that is
    # hopefully (and anecdotally) robust to numeric error: very narrow
    # faces with a slight angle between them are a common failure mode,
    # so divide by the perpendicular span to penalize narrow faces.
    # FIX: `np.bool` was a deprecated alias removed in NumPy 1.24;
    # the builtin `bool` is the documented replacement.
    parallel = np.ones(len(radii), dtype=bool)
    # If span is zero we know faces are small / parallel.
    nonzero = np.abs(span) > tol.zero
    # Pairs whose (radius / span)^2 ratio exceeds the threshold are parallel.
    parallel[nonzero] = (radii[nonzero] / span[nonzero]) ** 2 > tol.facet_threshold
    # Run connected components on the parallel face pairs to group them.
    components = connected_components(
        mesh.face_adjacency[parallel],
        nodes=np.arange(len(mesh.faces)),
        min_len=2,
        engine=engine)
    return components
def _callable_once(func):
    """Wrap *func* so that only the first invocation runs.

    The first call returns func's result; every later call is a no-op
    returning None. The guard flag is exposed as the ``called`` attribute
    on the returned wrapper, exactly like the original implementation.
    """
    def wrapper(*args, **kwargs):
        if wrapper.called:
            return None
        wrapper.called = True
        return func(*args, **kwargs)

    wrapper.called = False
    return wrapper
def reboot(env, identifier, hard):
    """Reboot an active hardware server after user confirmation."""
    hw_service = env.client['Hardware_Server']
    manager = SoftLayer.HardwareManager(env.client)
    hw_id = helpers.resolve_id(manager.resolve_ids, identifier, 'hardware')
    confirmed = env.skip_confirmations or formatting.confirm(
        'This will power off the server with id %s. Continue?' % hw_id)
    if not confirmed:
        raise exceptions.CLIAbort('Aborted.')
    # `hard` is tri-state: True -> hard reboot, False -> soft reboot,
    # anything else (e.g. None) -> the service default.
    if hard is True:
        hw_service.rebootHard(id=hw_id)
    elif hard is False:
        hw_service.rebootSoft(id=hw_id)
    else:
        hw_service.rebootDefault(id=hw_id)
def create_product(cls, product, **kwargs):
    """Create a new Product.

    Synchronous by default. Pass ``async=True`` to get the request thread
    back instead; call ``.get()`` on it to retrieve the result.

    >>> thread = api.create_product(product, async=True)
    >>> result = thread.get()

    :param async bool
    :param Product product: Attributes of product to create (required)
    :return: Product, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    # Both sync and async paths simply forward to the generated helper:
    # with `async` set it returns the request thread, otherwise the
    # already-deserialized data — so a single return covers both cases.
    return cls._create_product_with_http_info(product, **kwargs)
def export_to_csv ( table , filename_or_fobj = None , encoding = "utf-8" , dialect = unicodecsv . excel , batch_size = 100 , callback = None , * args , ** kwargs ) :
    """Export a `rows.Table` to a CSV file .
    If a file - like object is provided it MUST be in binary mode , like in
    `open(filename, mode='wb')` .
    If not filename / fobj is provided , the function returns a string with CSV
    contents .

    Args:
        table: the `rows.Table` to export.
        filename_or_fobj: filename, binary-mode file object, or None
            (None returns the CSV bytes instead of writing them out).
        encoding: text encoding used by the CSV writer.
        dialect: csv dialect passed to the writer.
        batch_size: number of rows written per `writerows` call.
        callback: optional callable invoked with the running row count
            after each batch (header row not counted).
    """
    # TODO : will work only if table . fields is OrderedDict
    # TODO : should use fobj ? What about creating a method like json . dumps ?
    return_data , should_close = False , None
    if filename_or_fobj is None :
        # No destination given: write into an in-memory buffer and return
        # its contents at the end.
        filename_or_fobj = BytesIO ( )
        return_data = should_close = True
    source = Source . from_file ( filename_or_fobj , plugin_name = "csv" , mode = "wb" , encoding = encoding , should_close = should_close , )
    # TODO : may use ` io . BufferedWriter ` instead of ` ipartition ` so user can
    # choose the real size ( in Bytes ) when to flush to the file system , instead
    # number of rows
    writer = unicodecsv . writer ( source . fobj , encoding = encoding , dialect = dialect )
    if callback is None :
        # No progress reporting: stream all serialized rows in batches.
        for batch in ipartition ( serialize ( table , * args , ** kwargs ) , batch_size ) :
            writer . writerows ( batch )
    else :
        serialized = serialize ( table , * args , ** kwargs )
        # First, write the header (the first item yielded by serialize),
        # so it is excluded from the progress totals reported below.
        writer . writerow ( next ( serialized ) )
        total = 0
        for batch in ipartition ( serialized , batch_size ) :
            writer . writerows ( batch )
            total += len ( batch )
            callback ( total )
    if return_data :
        # In-memory mode: rewind and hand back the raw CSV bytes.
        source . fobj . seek ( 0 )
        result = source . fobj . read ( )
    else :
        source . fobj . flush ( )
        result = source . fobj
    if source . should_close :
        source . fobj . close ( )
    return result
def inflate_plugin_list(plugin_list, inflate_plugin):
    """Inflate a list of strings/dictionaries to a list of plugin instances.

    Args:
        plugin_list (list): a list of str / dict entries. A dict entry must
            have exactly one key (the identifier) mapping to its definition.
        inflate_plugin (method): the method used to inflate each plugin.

    Returns:
        list: a list of plugin instances (entries that fail to import are
        logged and skipped).

    Raises:
        ValueError: when a dictionary item contains more than one key.
    """
    instances = []
    for entry in plugin_list:
        if isinstance(entry, str):
            try:
                instances.append(inflate_plugin(entry))
            except PluginNotFoundError as e:
                logger.error('Could not import plugin identified by %s. Exception: %s.', entry, e)
        elif isinstance(entry, dict):
            if len(entry) > 1:
                raise ValueError('When using a plugin list, each dictionary item must contain only one key.')
            # Exactly one (identifier, definition) pair per dict entry.
            identifier, definition = next(iter(entry.items()))
            try:
                instances.append(inflate_plugin(identifier, definition))
            except PluginNotFoundError as e:
                logger.error('Could not import plugin identified by %s. Inflate method: %s. Exception: %s.', identifier, inflate_plugin, e)
    return instances
def fft_propagate(fftfield, d, nm, res, method="helmholtz", ret_fft=False):
    """Propagates a 1D or 2D Fourier transformed field

    Parameters
    ----------
    fftfield : 1-dimensional or 2-dimensional ndarray
        Fourier transform of the electric field component
    d : float
        Distance to be propagated in pixels (negative for backwards)
    nm : float
        Refractive index of medium
    res : float
        Wavelength in pixels
    method : str
        Method of propagation; one of
        - "helmholtz": the optical transfer function `exp(idkm(M-1))`
        - "fresnel": paraxial approximation `exp(idk2/km)`
    ret_fft : bool
        Do not perform an inverse Fourier transform and return the field
        in Fourier space.

    Returns
    -------
    Electric field at `d`. If `ret_fft` is True, the Fourier transform of
    the electric field is returned (faster).
    """
    ndims = len(fftfield.shape)
    assert ndims in [1, 2], "Dimension of `fftfield` must be 1 or 2."
    # A 1D field is dispatched to fft_propagate_2d, a 2D field to
    # fft_propagate_3d (the helper names count the propagation-space
    # dimension, one more than the field's).
    func = fft_propagate_2d if ndims == 1 else fft_propagate_3d
    # Forward exactly the arguments the chosen helper declares, pulled
    # from this function's local namespace by name.
    loc = locals()
    names = func.__code__.co_varnames[:func.__code__.co_argcount]
    return func(**{name: loc[name] for name in names})
def omgprepare ( args ) :
    """%prog omgprepare ploidy anchorsfile blastfile
    Prepare to run Sankoff's OMG algorithm to get orthologs .

    Reads the ploidy table, anchors and BLAST files, computes C-scores for
    the BLAST hits, and writes a `.weights` pair file for OMG.
    """
    from jcvi . formats . blast import cscore
    from jcvi . formats . base import DictFile
    p = OptionParser ( omgprepare . __doc__ )
    p . add_option ( "--norbh" , action = "store_true" , help = "Disable RBH hits [default: %default]" )
    p . add_option ( "--pctid" , default = 0 , type = "int" , help = "Percent id cutoff for RBH hits [default: %default]" )
    p . add_option ( "--cscore" , default = 90 , type = "int" , help = "C-score cutoff for RBH hits [default: %default]" )
    p . set_stripnames ( )
    p . set_beds ( )
    opts , args = p . parse_args ( args )
    if len ( args ) != 3 :
        sys . exit ( not p . print_help ( ) )
    ploidy , anchorfile , blastfile = args
    norbh = opts . norbh
    pctid = opts . pctid
    cs = opts . cscore
    qbed , sbed , qorder , sorder , is_self = check_beds ( anchorfile , p , opts )
    # Map each genome name (first column of the ploidy file) to its row index.
    fp = open ( ploidy )
    genomeidx = dict ( ( x . split ( ) [ 0 ] , i ) for i , x in enumerate ( fp ) )
    fp . close ( )
    ploidy = DictFile ( ploidy )
    # Emit per-gene info for both query and subject beds.
    geneinfo ( qbed , qorder , genomeidx , ploidy )
    geneinfo ( sbed , sorder , genomeidx , ploidy )
    # Compute C-scores for all BLAST hits (no cutoff; percentages kept).
    pf = blastfile . rsplit ( "." , 1 ) [ 0 ]
    cscorefile = pf + ".cscore"
    cscore ( [ blastfile , "-o" , cscorefile , "--cutoff=0" , "--pct" ] )
    ac = AnchorFile ( anchorfile )
    pairs = set ( ( a , b ) for a , b , i in ac . iter_pairs ( ) )
    logging . debug ( "Imported {0} pairs from `{1}`." . format ( len ( pairs ) , anchorfile ) )
    # Write the weights file: syntenic (anchor) pairs keep their full score;
    # non-anchor pairs are filtered by the RBH criteria and down-weighted.
    weightsfile = pf + ".weights"
    fp = open ( cscorefile )
    fw = open ( weightsfile , "w" )
    npairs = 0
    for row in fp :
        a , b , c , pct = row . split ( )
        c , pct = float ( c ) , float ( pct )
        c = int ( c * 100 )
        if ( a , b ) not in pairs :
            if norbh :
                continue
            if c < cs :
                continue
            if pct < pctid :
                continue
            c /= 10
            # This severely penalizes RBH against synteny
        print ( "\t" . join ( ( a , b , str ( c ) ) ) , file = fw )
        npairs += 1
    fw . close ( )
    logging . debug ( "Write {0} pairs to `{1}`." . format ( npairs , weightsfile ) )
def setdefault(obj, field, default):
    """Set ``obj.field`` to *default* unless the attribute already has a value.

    Mirrors ``dict.setdefault`` for object attributes: an existing value is
    re-assigned to itself, a missing one becomes *default*.
    """
    current = getattr(obj, field, default)
    setattr(obj, field, current)
def create_model(modelname, fields, indexes=None, basemodel=None, **props):
    """Create model dynamically.

    :param fields: list of field definitions formatted like
        [{'name': name, 'type': type, ...}, ...]; ``type`` is a string such
        as 'str' or 'int', and the remaining keys are passed to the field
        Property's ``__init__``.
    :param props: Model attributes, such as '__mapping_only__', '__replace__'.
    :param indexes: multi-field indexes (single-field indexes can be set
        directly with ``index=True`` on a field); format:
        [{'name': name, 'fields': [...], ...}, ...], e.g.
        [{'name': 'audit_idx', 'fields': ['table_id', 'obj_id']}]
        (extra kwargs may be omitted).
    :param basemodel: base class of the new Model (a dotted-path string or a
        class object) so the new Model inherits its methods.
    :return: the newly created Model class.
    """
    assert not props or isinstance(props, dict)
    assert not indexes or isinstance(indexes, list)
    props = SortedDict(props or {})
    props['__dynamic__'] = True
    props['__config__'] = False
    for p in fields:
        kwargs = p.copy()
        name = kwargs.pop('name')
        _type = kwargs.pop('type')
        # Drop private ('_'-prefixed) keys. FIX: iterate a snapshot of the
        # keys — mutating a dict while iterating its live key view raises
        # RuntimeError on Python 3.
        for k in list(kwargs.keys()):
            if k.startswith('_'):
                kwargs.pop(k, None)
        field_type = get_field_type(_type)
        prop = field_type(**kwargs)
        props[name] = prop
    if basemodel:
        model = import_attr(basemodel)
    else:
        model = Model
    cls = type(str(modelname.title()), (model,), props)
    tablename = props.get('__tablename__', modelname)
    set_model(cls, tablename, appname=__name__, model_path='')
    # Force a reload so the freshly created model replaces any cached one.
    get_model(modelname, signal=False, reload=True)
    indexes = indexes or []
    for x in indexes:
        kwargs = x.copy()
        name = kwargs.pop('name')
        index_fields = kwargs.pop('fields')
        # Same private-key filtering as above, again over a snapshot.
        for k in list(kwargs.keys()):
            if k.startswith('_'):
                kwargs.pop(k, None)
        if not isinstance(index_fields, (list, tuple)):
            raise ValueError("Index value format is not right, the value is %r" % indexes)
        columns = [cls.c[y] for y in index_fields]
        Index(name, *columns, **kwargs)
    return cls
def filter_step_asarray(G, covY, pred, yt):
    """Filtering step of Kalman filter: array version.

    Parameters
    ----------
    G : (dy, dx) numpy array
        mean of Y_t | X_t is G * X_t
    covY : (dy, dy) numpy array
        covariance of Y_t | X_t
    pred : MeanAndCov object
        predictive distribution at time t
    yt :
        observation at time t

    Returns
    -------
    filt : MeanAndCov object
        filtering distribution at time t
    logpyt : float
        log density of Y_t | Y_{0:t-1}

    Note
    ----
    This performs the filtering step for N distinct predictive means:
    pred.mean may be a (N, dx) or (N,) array; filt.mean in the output
    will have the same shape.
    """
    # Promote a 1D mean to a column (N, 1) so filter_step sees a 2D array.
    pm = pred.mean[:, np.newaxis] if pred.mean.ndim == 1 else pred.mean
    new_pred = MeanAndCov(mean=pm, cov=pred.cov)
    filt, logpyt = filter_step(G, covY, new_pred, yt)
    if pred.mean.ndim == 1:
        # FIX: ndarray.squeeze() returns a new array rather than operating
        # in place; the original call discarded the result, leaving
        # filt.mean with a trailing singleton axis. Assign it back so the
        # output shape matches the input shape as documented.
        filt.mean = filt.mean.squeeze()
    return filt, logpyt
def publish(self, message_type=ON_SEND, client_id=None, client_storage=None, *args, **kwargs):
    """Publish a message by delegating to the configured publisher.

    Pure pass-through: every argument is forwarded unchanged to
    ``self.publisher.publish``.
    """
    publisher = self.publisher
    publisher.publish(message_type, client_id, client_storage, *args, **kwargs)
def _get_bundles_by_type(self, type):
    """Build a mapping of bundle name -> matching asset file paths.

    Args:
        type: 'javascript' or 'css' — the config section to read.

    Returns:
        dict: bundle name mapped to the sorted list of files matching each
        of the bundle's path patterns (relative to ``self.basedir``).
    """
    definitions = self.config.get(type)
    if definitions is None:
        return {}
    result = {}
    # bundle_name e.g. 'common'; patterns e.g. ['static/js/vendor/*.js']
    for bundle_name, patterns in definitions.items():
        matched = []
        for rel_pattern in patterns:
            # Absolute glob-style pattern, e.g. /tmp/static/js/vendor/*.js
            abs_pattern = os.path.join(self.basedir, rel_pattern)
            # List the pattern's directory and keep only matching entries,
            # e.g. t1.js and t2.js but not index.html.
            directory = os.path.dirname(abs_pattern)
            candidates = [os.path.join(directory, entry)
                          for entry in os.listdir(directory)]
            matched.extend(sorted(fnmatch.filter(candidates, abs_pattern)))
        result[bundle_name] = matched
    return result
def distributeParams ( self , param_name , param_count , center , spread , dist_type ) :
    '''Distributes heterogeneous values of one parameter to the AgentTypes in self.agents .
    Parameters
    ----------
    param_name : string
        Name of the parameter to be assigned .
    param_count : int
        Number of different values the parameter will take on .
    center : float
        A measure of centrality for the distribution of the parameter .
    spread : float
        A measure of spread or diffusion for the distribution of the parameter .
    dist_type : string
        The type of distribution to be used . Can be "lognormal" or "uniform" ( can expand ) .
        NOTE(review): any other value leaves `param_dist` unassigned and
        raises NameError below — confirm callers only pass these two.
    Returns
    -------
    None
    '''
    # Get a list of discrete values for the parameter
    if dist_type == 'uniform' : # If uniform , center is middle of distribution , spread is distance to either edge
        param_dist = approxUniform ( N = param_count , bot = center - spread , top = center + spread )
    elif dist_type == 'lognormal' : # If lognormal , center is the mean and spread is the standard deviation ( in log )
        tail_N = 3
        param_dist = approxLognormal ( N = param_count - tail_N , mu = np . log ( center ) - 0.5 * spread ** 2 , sigma = spread , tail_N = tail_N , tail_bound = [ 0.0 , 0.9 ] , tail_order = np . e )
    # Distribute the parameters to the various types , assigning consecutive types the same
    # value if there are more types than values
    replication_factor = len ( self . agents ) // param_count
    # Note : the double division is intenger division in Python 3 and 2.7 , this makes it explicit
    # j walks over agents, b over the param_count discrete parameter values.
    j = 0
    b = 0
    while j < len ( self . agents ) :
        for n in range ( replication_factor ) :
            self . agents [ j ] ( AgentCount = int ( self . Population * param_dist [ 0 ] [ b ] * self . TypeWeight [ n ] ) )
            # HACK: dynamic attribute assignment via exec; param_name is
            # interpolated directly into the statement, so it must be a
            # plain identifier coming from trusted code.
            exec ( 'self.agents[j](' + param_name + '= param_dist[1][b])' )
            j += 1
        b += 1
def qn_to_qubo(expr):
    """Convert Sympy's expr to QUBO.

    Args:
        expr: Sympy quadratic expression (a sum of terms) over variables
            named `q0`, `q1`, ... Each term may involve at most two
            distinct variables.

    Returns:
        [[float]]: upper-triangular QUBO matrix.

    Raises:
        ImportError: if sympy is not installed.
        ValueError: if a term has an unexpected structure.
    """
    try:
        import sympy
    except ImportError:
        raise ImportError("This function requires sympy. Please install it.")
    assert type(expr) == sympy.Add
    # 'q13' -> 13
    to_i = lambda s: int(str(s)[1:])
    max_i = max(map(to_i, expr.free_symbols)) + 1
    qubo = [[0.] * max_i for _ in range(max_i)]
    for arg in expr.args:
        syms = arg.free_symbols
        assert len(syms) <= 2
        if len(syms) == 2:
            # Off-diagonal term: c * qi * qj with i < j after sorting.
            assert type(arg) == sympy.Mul
            i, j = sorted(map(to_i, syms))
            if i == j:
                # Degenerate: distinct symbols mapping to the same index.
                if len(arg.args) == 2:
                    qubo[i][i] = float(arg.args[0])
                elif len(arg.args) == 1:
                    qubo[i][i] = 1.0
                else:
                    raise ValueError(f"Too many args! arg.args = {arg.args}")
                continue
            if len(arg.args) == 3:
                # Mul(c, qi, qj): explicit coefficient.
                qubo[i][j] = float(arg.args[0])
            elif len(arg.args) == 2:
                # FIX: Mul(qi, qj) has an implicit coefficient of 1; the
                # original had a bare `qubo[i][j]` expression here (a no-op)
                # and silently dropped these terms.
                qubo[i][j] = 1.0
            else:
                raise ValueError(f"Too many args! arg.args = {arg.args}")
        elif len(syms) == 1:
            # Diagonal term: c * qi or a bare qi.
            # FIX: `i` was only assigned on the two-symbol path before, so
            # this branch previously read an undefined (or stale) index.
            i = to_i(next(iter(syms)))
            if len(arg.args) == 2:
                assert type(arg) == sympy.Mul
                qubo[i][i] = float(arg.args[0])
            elif len(arg.args) == 0:
                # FIX: a bare Symbol has no args and was silently skipped;
                # it carries an implicit coefficient of 1.
                qubo[i][i] = 1.0
            elif len(arg.args) == 1:
                qubo[i][i] = 1.0
            else:
                raise ValueError(f"Too many args! arg.args = {arg.args}")
    return qubo
def reindex_multifiles(portal):
    """Reindex Multifiles to be searchable by the catalog"""
    logger.info("Reindexing Multifiles ...")
    brains = api.search(dict(portal_type="Multifile"), "bika_setup_catalog")
    total = len(brains)
    for index, brain in enumerate(brains):
        # Log progress every 100 objects to keep the output readable.
        if index % 100 == 0:
            logger.info("Reindexing Multifile: {0}/{1}".format(index, total))
        api.get_object(brain).reindexObject()
def description(self):
    """string or None if unknown"""
    try:
        base = self._TYPE_NAMES[self.audioObjectType]
    except IndexError:
        # Object type index outside the known table.
        return None
    if base is None:
        # The table itself may hold None for unknown entries.
        return None
    # Append extension markers when their flags are set.
    suffix = ""
    if self.sbrPresentFlag == 1:
        suffix += "+SBR"
    if self.psPresentFlag == 1:
        suffix += "+PS"
    return text_type(base + suffix)
def find(self, path, all=False):
    '''Looks for files in the app directories.'''
    located = os.path.join(settings.STATIC_ROOT, path)
    # Finder API contract: return a list when `all` is requested,
    # otherwise a single path.
    return [located] if all else located
def _get_arch():
    """Determines the current processor architecture.

    @rtype: str
    @return:
        One of the C{ARCH_*} constants from L{_arch_map} — e.g.
        L{ARCH_I386} (C{"i386"}) or L{ARCH_AMD64} (C{"amd64"}) — or
        L{ARCH_UNKNOWN} (C{"unknown"}) when the reported processor
        architecture is not recognized. Other values (MIPS, Alpha, PPC,
        SHx, ARM, IA64, Alpha64, MSIL, Sparc) are possible on exotic
        platforms such as Wine.
    """
    try:
        sysinfo = GetNativeSystemInfo()
    except Exception:
        # GetNativeSystemInfo is not available everywhere; fall back to
        # the plain GetSystemInfo call.
        sysinfo = GetSystemInfo()
    return _arch_map.get(sysinfo.id.w.wProcessorArchitecture, ARCH_UNKNOWN)
def start_process ( self , lpCmdLine , ** kwargs ) :
    """Starts a new process for instrumenting ( or debugging ) .
    @ type lpCmdLine : str
    @ param lpCmdLine : Command line to execute . Can ' t be an empty string .
    @ type bConsole : bool
    @ keyword bConsole : True to inherit the console of the debugger .
        Defaults to C { False } .
    @ type bDebug : bool
    @ keyword bDebug : C { True } to attach to the new process .
        To debug a process it ' s best to use the L { Debug } class instead .
        Defaults to C { False } .
    @ type bFollow : bool
    @ keyword bFollow : C { True } to automatically attach to the child
        processes of the newly created process . Ignored unless C { bDebug } is
        C { True } . Defaults to C { False } .
    @ type bInheritHandles : bool
    @ keyword bInheritHandles : C { True } if the new process should inherit
        it ' s parent process ' handles . Defaults to C { False } .
    @ type bSuspended : bool
    @ keyword bSuspended : C { True } to suspend the main thread before any code
        is executed in the debugee . Defaults to C { False } .
    @ type dwParentProcessId : int or None
    @ keyword dwParentProcessId : C { None } if the debugger process should be
        the parent process ( default ) , or a process ID to forcefully set as
        the debugee ' s parent ( only available for Windows Vista and above ) .
    @ type iTrustLevel : int
    @ keyword iTrustLevel : Trust level .
        Must be one of the following values :
         - 0 : B { No trust } . May not access certain resources , such as
              cryptographic keys and credentials . Only available since
              Windows XP and 2003 , desktop editions .
         - 1 : B { Normal trust } . Run with the same privileges as a normal
              user , that is , one that doesn ' t have the I { Administrator } or
              I { Power User } user rights . Only available since Windows XP
              and 2003 , desktop editions .
         - 2 : B { Full trust } . Run with the exact same privileges as the
              current user . This is the default value .
    @ type bAllowElevation : bool
    @ keyword bAllowElevation : C { True } to allow the child process to keep
        UAC elevation , if the debugger itself is running elevated . C { False }
        to ensure the child process doesn ' t run with elevation . Defaults to
        C { True } .
        This flag is only meaningful on Windows Vista and above , and if the
        debugger itself is running with elevation . It can be used to make
        sure the child processes don ' t run elevated as well .
        This flag DOES NOT force an elevation prompt when the debugger is
        not running with elevation .
        Note that running the debugger with elevation ( or the Python
        interpreter at all for that matter ) is not normally required .
        You should only need to if the target program requires elevation
        to work properly ( for example if you try to debug an installer ) .
    @ rtype : L { Process }
    @ return : Process object ."""
    # Get the flags .
    bConsole = kwargs . pop ( 'bConsole' , False )
    bDebug = kwargs . pop ( 'bDebug' , False )
    bFollow = kwargs . pop ( 'bFollow' , False )
    bSuspended = kwargs . pop ( 'bSuspended' , False )
    bInheritHandles = kwargs . pop ( 'bInheritHandles' , False )
    dwParentProcessId = kwargs . pop ( 'dwParentProcessId' , None )
    iTrustLevel = kwargs . pop ( 'iTrustLevel' , 2 )
    bAllowElevation = kwargs . pop ( 'bAllowElevation' , True )
    # Anything left in kwargs is a typo or unsupported option.
    if kwargs :
        raise TypeError ( "Unknown keyword arguments: %s" % compat . keys ( kwargs ) )
    if not lpCmdLine :
        raise ValueError ( "Missing command line to execute!" )
    # Sanitize the trust level flag .
    if iTrustLevel is None :
        iTrustLevel = 2
    # The UAC elevation flag is only meaningful if we ' re running elevated .
    try :
        bAllowElevation = bAllowElevation or not self . is_admin ( )
    except AttributeError :
        # is_admin() unavailable: assume elevation handling is moot.
        bAllowElevation = True
        warnings . warn ( "UAC elevation is only available in Windows Vista and above" , RuntimeWarning )
    # Calculate the process creation flags .
    dwCreationFlags = 0
    dwCreationFlags |= win32 . CREATE_DEFAULT_ERROR_MODE
    dwCreationFlags |= win32 . CREATE_BREAKAWAY_FROM_JOB
    # # dwCreationFlags | = win32 . CREATE _ UNICODE _ ENVIRONMENT
    if not bConsole :
        dwCreationFlags |= win32 . DETACHED_PROCESS
        # dwCreationFlags | = win32 . CREATE _ NO _ WINDOW # weird stuff happens
    if bSuspended :
        dwCreationFlags |= win32 . CREATE_SUSPENDED
    if bDebug :
        dwCreationFlags |= win32 . DEBUG_PROCESS
        if not bFollow :
            dwCreationFlags |= win32 . DEBUG_ONLY_THIS_PROCESS
    # Change the parent process if requested .
    # May fail on old versions of Windows .
    lpStartupInfo = None
    if dwParentProcessId is not None :
        myPID = win32 . GetCurrentProcessId ( )
        if dwParentProcessId != myPID :
            if self . has_process ( dwParentProcessId ) :
                ParentProcess = self . get_process ( dwParentProcessId )
            else :
                ParentProcess = Process ( dwParentProcessId )
            ParentProcessHandle = ParentProcess . get_handle ( win32 . PROCESS_CREATE_PROCESS )
            AttributeListData = ( ( win32 . PROC_THREAD_ATTRIBUTE_PARENT_PROCESS , ParentProcessHandle . _as_parameter_ ) , )
            AttributeList = win32 . ProcThreadAttributeList ( AttributeListData )
            StartupInfoEx = win32 . STARTUPINFOEX ( )
            StartupInfo = StartupInfoEx . StartupInfo
            StartupInfo . cb = win32 . sizeof ( win32 . STARTUPINFOEX )
            StartupInfo . lpReserved = 0
            StartupInfo . lpDesktop = 0
            StartupInfo . lpTitle = 0
            StartupInfo . dwFlags = 0
            StartupInfo . cbReserved2 = 0
            StartupInfo . lpReserved2 = 0
            StartupInfoEx . lpAttributeList = AttributeList . value
            lpStartupInfo = StartupInfoEx
            dwCreationFlags |= win32 . EXTENDED_STARTUPINFO_PRESENT
    pi = None
    try : # Create the process the easy way .
        if iTrustLevel >= 2 and bAllowElevation :
            pi = win32 . CreateProcess ( None , lpCmdLine , bInheritHandles = bInheritHandles , dwCreationFlags = dwCreationFlags , lpStartupInfo = lpStartupInfo )
        # Create the process the hard way . . .
        else : # If we allow elevation , use the current process token .
            # If not , get the token from the current shell process .
            hToken = None
            try :
                if not bAllowElevation :
                    # These options require the straightforward CreateProcess
                    # path, which is unavailable when dropping elevation.
                    if bFollow :
                        msg = ( "Child processes can't be autofollowed" " when dropping UAC elevation." )
                        raise NotImplementedError ( msg )
                    if bConsole :
                        msg = ( "Child processes can't inherit the debugger's" " console when dropping UAC elevation." )
                        raise NotImplementedError ( msg )
                    if bInheritHandles :
                        msg = ( "Child processes can't inherit the debugger's" " handles when dropping UAC elevation." )
                        raise NotImplementedError ( msg )
                    try :
                        hWnd = self . get_shell_window ( )
                    except WindowsError :
                        hWnd = self . get_desktop_window ( )
                    shell = hWnd . get_process ( )
                    try :
                        hShell = shell . get_handle ( win32 . PROCESS_QUERY_INFORMATION )
                        with win32 . OpenProcessToken ( hShell ) as hShellToken :
                            hToken = win32 . DuplicateTokenEx ( hShellToken )
                    finally :
                        shell . close_handle ( )
                # Lower trust level if requested .
                if iTrustLevel < 2 :
                    if iTrustLevel > 0 :
                        dwLevelId = win32 . SAFER_LEVELID_NORMALUSER
                    else :
                        dwLevelId = win32 . SAFER_LEVELID_UNTRUSTED
                    with win32 . SaferCreateLevel ( dwLevelId = dwLevelId ) as hSafer :
                        hSaferToken = win32 . SaferComputeTokenFromLevel ( hSafer , hToken ) [ 0 ]
                        # Replace hToken with the restricted token, closing
                        # the old one; on failure close the new token instead.
                        try :
                            if hToken is not None :
                                hToken . close ( )
                        except :
                            hSaferToken . close ( )
                            raise
                        hToken = hSaferToken
                # If we have a computed token , call CreateProcessAsUser ( ) .
                if bAllowElevation :
                    pi = win32 . CreateProcessAsUser ( hToken = hToken , lpCommandLine = lpCmdLine , bInheritHandles = bInheritHandles , dwCreationFlags = dwCreationFlags , lpStartupInfo = lpStartupInfo )
                # If we have a primary token call CreateProcessWithToken ( ) .
                # The problem is , there are many flags CreateProcess ( ) and
                # CreateProcessAsUser ( ) accept but CreateProcessWithToken ( )
                # and CreateProcessWithLogonW ( ) don ' t , so we need to work
                # around them .
                else : # Remove the debug flags .
                    dwCreationFlags &= ~ win32 . DEBUG_PROCESS
                    dwCreationFlags &= ~ win32 . DEBUG_ONLY_THIS_PROCESS
                    # Remove the console flags .
                    dwCreationFlags &= ~ win32 . DETACHED_PROCESS
                    # The process will be created suspended .
                    dwCreationFlags |= win32 . CREATE_SUSPENDED
                    # Create the process using the new primary token .
                    pi = win32 . CreateProcessWithToken ( hToken = hToken , dwLogonFlags = win32 . LOGON_WITH_PROFILE , lpCommandLine = lpCmdLine , dwCreationFlags = dwCreationFlags , lpStartupInfo = lpStartupInfo )
                    # Attach as a debugger , if requested .
                    if bDebug :
                        win32 . DebugActiveProcess ( pi . dwProcessId )
                    # Resume execution , if requested .
                    if not bSuspended :
                        win32 . ResumeThread ( pi . hThread )
            # Close the token when we ' re done with it .
            finally :
                if hToken is not None :
                    hToken . close ( )
        # Wrap the new process and thread in Process and Thread objects ,
        # and add them to the corresponding snapshots .
        aProcess = Process ( pi . dwProcessId , pi . hProcess )
        aThread = Thread ( pi . dwThreadId , pi . hThread )
        aProcess . _add_thread ( aThread )
        self . _add_process ( aProcess )
    # Clean up on error .
    except :
        if pi is not None :
            try :
                win32 . TerminateProcess ( pi . hProcess )
            except WindowsError :
                pass
            pi . hThread . close ( )
            pi . hProcess . close ( )
        raise
    # Return the new Process object .
    return aProcess
def dump ( self , stream , progress = None , lower = None , upper = None , incremental = False , deltas = False ) :
    """Dump the repository to a dumpfile stream.

    :param stream: a file stream to which the dumpfile is written
    :param progress: a file stream to which progress is written; ``None``
        adds ``-q`` so svnadmin suppresses progress output entirely
    :param lower: must be a numeric version number; lowest revision to dump
        (``None`` dumps the whole history)
    :param upper: must be a numeric version number; highest revision to dump
        (only meaningful together with ``lower``)
    :param incremental: pass ``--incremental`` to svnadmin
    :param deltas: pass ``--deltas`` to svnadmin
    :raises subprocess.CalledProcessError: if svnadmin exits non-zero

    See ``svnadmin help dump`` for details on the flag arguments.
    """
    cmd = [ SVNADMIN , 'dump' , '.' ]
    if progress is None :
        cmd . append ( '-q' )
    if lower is not None :
        # -r N dumps a single revision; -r N:M dumps an inclusive range.
        cmd . append ( '-r' )
        if upper is None :
            cmd . append ( str ( int ( lower ) ) )
        else :
            cmd . append ( '%d:%d' % ( int ( lower ) , int ( upper ) ) )
    if incremental :
        cmd . append ( '--incremental' )
    if deltas :
        cmd . append ( '--deltas' )
    # check_call replaces the manual Popen/wait/returncode dance; it raises
    # the same subprocess.CalledProcessError(returncode, cmd) on failure.
    subprocess . check_call ( cmd , cwd = self . path , stdout = stream , stderr = progress )
def connect ( self , broker , port = 1883 , client_id = "" , clean_session = True ) :
    """Connect to an MQTT broker. This is a pre-requisite step for the
    publish and subscribe keywords.

    `broker` MQTT broker host

    `port` broker port (default 1883)

    `client_id` if not specified, a random id is generated

    `clean_session` specifies the clean session flag for the connection

    Examples:

    Connect with defaults, with explicit port/client id, or with the
    clean session flag cleared:

    | Connect | 127.0.0.1 |
    | Connect | 127.0.0.1 | 1883 | test.client |
    | Connect | 127.0.0.1 | clean_session=${false} |
    """
    logger . info ( 'Connecting to %s at port %s' % ( broker , port ) )
    # Reset connection-state flags that the callbacks below will flip.
    self . _connected = False
    self . _unexpected_disconnect = False
    client = mqtt . Client ( client_id , clean_session )
    client . on_connect = self . _on_connect
    client . on_disconnect = self . _on_disconnect
    self . _mqttc = client
    if self . _username :
        client . username_pw_set ( self . _username , self . _password )
    client . connect ( broker , int ( port ) )
    # Pump the network loop until connected, disconnected, or timed out.
    deadline = time . time ( ) + self . _loop_timeout
    while time . time ( ) < deadline :
        if self . _connected or self . _unexpected_disconnect :
            break
        client . loop ( )
    if self . _unexpected_disconnect :
        raise RuntimeError ( "The client disconnected unexpectedly" )
    logger . debug ( 'client_id: %s' % client . _client_id )
    return client
def download ( self , localfile : str , remotefile : str , overwrite : bool = True , ** kwargs ) :
    """Download a remote file from the SAS server's file system over the
    stdio bridge, streaming its bytes into a local file.

    localfile  - path to the local file to create or overwrite; if it names
                 a directory, the remote basename is appended to it
    remotefile - path to the remote file to download
    overwrite  - overwrite the output file if it exists?  (NOTE(review): the
                 flag is accepted but never consulted in this body; the file
                 is always opened 'wb')

    Returns a dict with 'Success' (bool) and 'LOG' (the SAS log slice for
    this request).
    """
    logf = ''
    # Per-request counter used to build a unique end-of-transfer marker that
    # SAS will echo into its log (and onto the data stream).
    logn = self . _logcnt ( )
    logcodei = "%put E3969440A681A24088859985" + logn + ";"
    logcodeo = "\nE3969440A681A24088859985" + logn
    logcodeb = logcodeo . encode ( )
    # Validate the remote path first: None => does not exist, {} => directory.
    valid = self . _sb . file_info ( remotefile , quiet = True )
    if valid is None :
        return { 'Success' : False , 'LOG' : "File " + str ( remotefile ) + " does not exist." }
    if valid == { } :
        return { 'Success' : False , 'LOG' : "File " + str ( remotefile ) + " is a directory." }
    if os . path . isdir ( localfile ) :
        # Target is a directory: keep the remote file's basename.
        locf = localfile + os . sep + remotefile . rpartition ( self . _sb . hostsep ) [ 2 ]
    else :
        locf = localfile
    try :
        # Probe writability up front: open() can succeed while write() fails,
        # so do a throwaway write, then reopen to truncate before streaming.
        fd = open ( locf , 'wb' )
        fd . write ( b'write can fail even if open worked, as it turns out' )
        fd . close ( )
        fd = open ( locf , 'wb' )
    except OSError as e :
        return { 'Success' : False , 'LOG' : "File " + str ( locf ) + " could not be opened or written to. Error was: " + str ( e ) }
    # Assign a binary fileref for the remote file on the SAS side.
    code = "filename _sp_updn '" + remotefile + "' recfm=F encoding=binary lrecl=4096;"
    ll = self . submit ( code , "text" )
    logf = ll [ 'LOG' ]
    # Switch the stdio protocol into download mode, then queue the %put that
    # will emit the end-of-transfer marker when the data has been sent.
    self . stdin [ 0 ] . send ( b'tom says EOL=DNLOAD \n' )
    self . stdin [ 0 ] . send ( b'\n' + logcodei . encode ( ) + b'\n' + b'tom says EOL=' + logcodeb + b'\n' )
    done = False
    datar = b''     # buffered file bytes not yet flushed to disk
    bail = False    # set once the marker shows up in the log stream
    while not done :
        while True :
            # Detect an unexpectedly dead SAS process (platform-specific).
            if os . name == 'nt' :
                try :
                    rc = self . pid . wait ( 0 )
                    self . pid = None
                    self . _sb . SASpid = None
                    return { 'Success' : False , 'LOG' : "SAS process has terminated unexpectedly. RC from wait was: " + str ( rc ) }
                except :
                    # wait(0) raises while the process is still running; that
                    # is the normal case, so swallow it and keep reading.
                    pass
            else :
                rc = os . waitpid ( self . pid , os . WNOHANG )
                if rc [ 1 ] :
                    self . pid = None
                    self . _sb . SASpid = None
                    return { 'Success' : False , 'LOG' : "SAS process has terminated unexpectedly. RC from wait was: " + str ( rc ) }
            if bail :
                # Log marker already seen; finish once the marker has also
                # arrived on the data stream so no file bytes are lost.
                if datar . count ( logcodeb ) >= 1 :
                    break
            # Non-blocking read of the file data socket.
            try :
                data = self . stdout [ 0 ] . recv ( 4096 )
            except ( BlockingIOError ) :
                data = b''
            if len ( data ) > 0 :
                datar += data
                # Flush in 8K chunks but always keep a tail buffered, so a
                # marker split across recv() calls is never written to disk.
                if len ( datar ) > 8300 :
                    fd . write ( datar [ : 8192 ] )
                    datar = datar [ 8192 : ]
            else :
                # No data pending: poll the log (stderr) side instead.
                sleep ( 0.1 )
                try :
                    log = self . stderr [ 0 ] . recv ( 4096 ) . decode ( self . sascfg . encoding , errors = 'replace' )
                except ( BlockingIOError ) :
                    # NOTE(review): bytes sentinel where str is expected; it is
                    # harmless because the len() guard below skips the concat.
                    log = b''
                if len ( log ) > 0 :
                    logf += log
                    if logf . count ( logcodeo ) >= 1 :
                        # End-of-transfer marker reached the log; start draining.
                        bail = True
        done = True
    # Write the remaining buffered bytes, dropping the trailing marker.
    fd . write ( datar . rpartition ( logcodeb ) [ 0 ] )
    fd . flush ( )
    fd . close ( )
    self . _log += logf
    # Trim the captured log down to just this download: cut at our %put
    # statement, then at the previous request's marker.
    final = logf . partition ( logcodei )
    z = final [ 0 ] . rpartition ( chr ( 10 ) )
    prev = '%08d' % ( self . _log_cnt - 1 )
    zz = z [ 0 ] . rpartition ( "\nE3969440A681A24088859985" + prev + '\n' )
    logd = zz [ 2 ] . replace ( ";*\';*\";*/;" , '' )
    # Clear the fileref and append that step's log as well.
    ll = self . submit ( "filename _sp_updn;" , 'text' )
    logd += ll [ 'LOG' ]
    return { 'Success' : True , 'LOG' : logd }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.