signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def unescape(cls, text: str) -> str:
    """Replace backslash escape sequences with their mapped characters.

    Scans *text* left to right; each ``\\<c>`` pair is replaced by
    ``cls.unescape_map[<c>]``.  Rewritten iteratively so inputs containing
    many escape sequences cannot exhaust the recursion limit (the original
    implementation recursed once per escape).

    Args:
        text: Text to unescape.

    Raises:
        InvalidArgument: if an escape character has no entry in
            ``cls.unescape_map`` (chained context suppressed, as before;
            the full original *text* is reported).
    """
    parts = []
    rest = text
    while True:
        head, sep, tail = rest.partition("\\")
        parts.append(head)
        if not sep:
            # No further backslash: done.
            return "".join(parts)
        try:
            # tail[0] is the escaped character; an IndexError from a
            # trailing lone backslash propagates unchanged, matching the
            # original behavior.
            parts.append(cls.unescape_map[tail[0]])
        except KeyError:
            raise InvalidArgument(text) from None
        rest = tail[1:]
def show_stats(self, verbose=False, **kwargs):
    """Show statistics on the found proxies.

    Useful for debugging, but you can also use it if you're interested.

    :param verbose: Flag indicating whether to print verbose stats

    .. deprecated:: 0.2.0
        Use :attr:`verbose` instead of :attr:`full`.
    """
    # Any legacy keyword argument (e.g. the old ``full`` flag) turns
    # verbose mode on and emits a deprecation warning.
    if kwargs:
        verbose = True
        warnings.warn('`full` in `show_stats` is deprecated, '
                      'use `verbose` instead.', DeprecationWarning, )
    found_proxies = self.unique_proxies.values()
    num_working_proxies = len([p for p in found_proxies if p.is_working])
    if not found_proxies:
        print('Proxy not found')
        return
    # Aggregate the per-proxy error counters into one Counter.
    errors = Counter()
    for p in found_proxies:
        errors.update(p.stat['errors'])
    proxies_by_type = {'SOCKS5': [], 'SOCKS4': [], 'HTTPS': [], 'HTTP': [],
                       'CONNECT:80': [], 'CONNECT:25': [], }
    stat = {'Wrong country': [], 'Wrong protocol/anonymity lvl': [],
            'Connection success': [], 'Connection timeout': [],
            'Connection failed': [], }
    for p in found_proxies:
        # The concatenated log messages are used for coarse
        # classification of each proxy's outcome.
        msgs = ' '.join([l[1] for l in p.get_log()])
        full_log = [p]
        for proto in p.types:
            proxies_by_type[proto].append(p)
        if 'Location of proxy' in msgs:
            stat['Wrong country'].append(p)
        elif 'Connection: success' in msgs:
            if 'Protocol or the level' in msgs:
                stat['Wrong protocol/anonymity lvl'].append(p)
            stat['Connection success'].append(p)
            if not verbose:
                continue
            # Verbose mode only: group log events by negotiator and
            # print the full per-proxy log with runtimes.
            events_by_ngtr = defaultdict(list)
            for ngtr, event, runtime in p.get_log():
                events_by_ngtr[ngtr].append((event, runtime))
            for ngtr, events in sorted(events_by_ngtr.items(),
                                       key=lambda item: item[0]):
                full_log.append('\t%s' % ngtr)
                for event, runtime in events:
                    if event.startswith('Initial connection'):
                        # Separator instead of a runtime line.
                        full_log.append('\t\t-------------------')
                    else:
                        full_log.append('\t\t{:<66} Runtime: {:.2f}'.format(event, runtime))
            for row in full_log:
                print(row)
        elif 'Connection: failed' in msgs:
            stat['Connection failed'].append(p)
        else:
            stat['Connection timeout'].append(p)
    if verbose:
        print('Stats:')
        pprint(stat)
    # Summary lines are printed regardless of verbosity.
    print('The number of working proxies: %d' % num_working_proxies)
    for proto, proxies in proxies_by_type.items():
        print('%s (%s): %s' % (proto, len(proxies), proxies))
    print('Errors:', errors)
def _get_path ( self , name ) :
"""Get the destination class path .
: param name : The name
: type name : str
: rtype : str""" | path = self . option ( "path" )
if path is None :
path = self . _get_seeders_path ( )
return os . path . join ( path , "%s.py" % name ) |
def _create_gate_variables(self, input_shape, dtype):
    """Initialize the variables used for the gates.

    :param input_shape: static shape of the input; must be rank 2
        (presumably ``[batch, input_size]`` — confirm against caller).
    :param dtype: dtype used for every created variable.
    :raises ValueError: if ``input_shape`` is not rank 2.
    """
    if len(input_shape) != 2:
        raise ValueError("Rank of shape must be {} not: {}".format(2, len(input_shape)))
    input_size = input_shape.dims[1].value
    # One bias vector covering all four stacked gate blocks
    # (hence the factor of 4; presumably the LSTM gates).
    b_shape = [4 * self._hidden_size]
    equiv_input_size = self._hidden_size + input_size
    initializer = basic.create_linear_initializer(equiv_input_size)
    if self._use_batch_norm_h or self._use_batch_norm_x:
        # With batch norm on hidden and/or input paths, separate weight
        # matrices are created so each projection can be handled
        # independently.
        self._w_h = tf.get_variable(self.W_GATES + "_H", shape=[self._hidden_size, 4 * self._hidden_size], dtype=dtype, initializer=self._initializers.get(self.W_GATES, initializer), partitioner=self._partitioners.get(self.W_GATES), regularizer=self._regularizers.get(self.W_GATES))
        self._w_x = tf.get_variable(self.W_GATES + "_X", shape=[input_size, 4 * self._hidden_size], dtype=dtype, initializer=self._initializers.get(self.W_GATES, initializer), partitioner=self._partitioners.get(self.W_GATES), regularizer=self._regularizers.get(self.W_GATES))
    else:
        # Without batch norm, a single fused weight matrix over the
        # concatenated [hidden, input] vector is used.
        self._w_xh = tf.get_variable(self.W_GATES, shape=[self._hidden_size + input_size, 4 * self._hidden_size], dtype=dtype, initializer=self._initializers.get(self.W_GATES, initializer), partitioner=self._partitioners.get(self.W_GATES), regularizer=self._regularizers.get(self.W_GATES))
    self._b = tf.get_variable(self.B_GATES, shape=b_shape, dtype=dtype, initializer=self._initializers.get(self.B_GATES, initializer), partitioner=self._partitioners.get(self.B_GATES), regularizer=self._regularizers.get(self.B_GATES))
def create(self, data):
    """Create object from the given data.

    The given data may or may not have been validated prior to calling
    this function. This function will try its best in creating the object.
    If the resulting object cannot be produced, raises ``ValidationError``.

    The spec can affect how individual fields will be created by
    implementing ``clean()`` for the fields needing customization.

    :param data: the data as a dictionary.
    :return: instance of ``klass`` or dictionary.
    :raises: ``ValidationError`` if factory is unable to create object.
    """
    # todo: copy-paste code from representation.validate -> refactor
    if data is None:
        return None
    prototype = {}
    errors = {}
    # Create and populate the prototype, one spec field at a time.
    for field_name, field_spec in self.spec.fields.items():
        try:
            value = self._create_value(data, field_name, self.spec)
        except ValidationError as e:  # py3-compatible (was `except X, e`)
            # A failed field is only an error when no default exists.
            if field_name not in self.default_create_values:
                if hasattr(e, 'message_dict'):
                    # Prefix error keys with the top level field name.
                    errors.update(dict(zip(
                        [field_name + '.' + key for key in e.message_dict.keys()],
                        e.message_dict.values())))
                else:
                    errors[field_name] = e.messages
        else:
            key_name = self.property_name_map[field_name]
            prototype[key_name] = value
    # Check extra fields.
    if self.prevent_extra_fields:
        extras = set(data.keys()) - set(self.property_name_map.keys())
        if extras:
            errors[', '.join(extras)] = ['field(s) not allowed']
    # If errors, raise ValidationError.
    if errors:
        raise ValidationError(errors)
    # Layer the populated prototype over the creation defaults.  (The
    # original computed this merged dict but then returned the bare
    # prototype, silently dropping ``default_create_values``.)
    _data = deepcopy(self.default_create_values)
    _data.update(prototype)
    # Return dict or object based on the prototype.
    if self.klass:
        instance = self.klass()
        instance.__dict__.update(_data)
        return instance
    return _data
def cluster_autocomplete(self, text, line, start_index, end_index):
    """Autocomplete for the use command; obtains the list of clusters first."""
    # Lazily fetch and cache the cluster names on first use.
    if not self.CACHED_CLUSTERS:
        self.CACHED_CLUSTERS = [cluster.name for cluster in api.get_all_clusters()]
    if not text:
        return self.CACHED_CLUSTERS
    return [name for name in self.CACHED_CLUSTERS if name.startswith(text)]
def homeautoswitch(self, cmd, ain=None, param=None):
    """Call a switch method.

    Should only be used by internal library functions.
    """
    assert self.sid, "Not logged in"
    # Build the query string; optional arguments are only sent when given.
    query = {'switchcmd': cmd, 'sid': self.sid}
    if param is not None:
        query['param'] = param
    if ain:
        query['ain'] = ain
    response = self.session.get(
        self.base_url + '/webservices/homeautoswitch.lua',
        params=query, timeout=10)
    response.raise_for_status()
    return response.text.strip().encode('utf-8')
def _spawn_redis_connection_thread ( self ) :
"""Spawns a redis connection thread""" | self . logger . debug ( "Spawn redis connection thread" )
self . redis_connected = False
self . _redis_thread = Thread ( target = self . _setup_redis )
self . _redis_thread . setDaemon ( True )
self . _redis_thread . start ( ) |
def values(self):
    """Returns a list of values for this field for this instance.

    It's a list so we can accommodate many-to-many fields.
    """
    # This import is deliberately inside the function because it causes
    # some settings to be imported, and we don't want to do that at the
    # module level.
    if self.field.rel:
        if isinstance(self.field.rel, models.ManyToOneRel):
            # FK: bind whatever the attribute access yields directly.
            objs = getattr(self.instance.instance, self.field.name)
        elif isinstance(self.field.rel, models.ManyToManyRel):  # ManyToManyRel
            # M2M: return every related object immediately.
            return list(getattr(self.instance.instance, self.field.name).all())
        # NOTE(review): a rel that is neither of the above would leave
        # ``objs`` unbound and raise at the final return — confirm that
        # cannot happen with the supported field types.
    elif self.field.choices:
        # Map the stored value through the field's choices, falling back
        # to EMPTY_VALUE for unknown raw values.
        objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
    elif isinstance(self.field, models.DateField) or isinstance(self.field, models.TimeField):
        if self.raw_value:
            # DateTimeField is checked before the generic date/time
            # formats since it is a subclass of DateField.
            if isinstance(self.field, models.DateTimeField):
                objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT'))
            elif isinstance(self.field, models.TimeField):
                objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT'))
            else:
                objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT'))
        else:
            objs = EMPTY_VALUE
    elif isinstance(self.field, models.BooleanField) or isinstance(self.field, models.NullBooleanField):
        # Human-readable rendering of the three boolean states.
        objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
    else:
        objs = self.raw_value
    return [objs]
def _run_transient(self, t):
    """Perform a transient simulation according to the specified settings,
    updating 'b' and calling '_t_run_reactive' at each time step.

    Stops after reaching the end time 't_final' or after achieving the
    specified tolerance 't_tolerance'.  Stores the initial and steady-state
    (if obtained) fields in addition to transient data (according to the
    specified 't_output').

    Parameters
    ----------
    t : scalar
        The time to start the simulation from.

    Notes
    -----
    Transient solutions are stored on the object under
    ``pore.quantity_timeStepIndex`` where *quantity* is specified in the
    ``settings`` attribute.  The initial field is stored as
    ``pore.quantity_initial``.  The steady-state solution (if reached) is
    stored as ``pore.quantity_steady``.  The current solution is stored as
    ``pore.quantity``.
    """
    tf = self.settings['t_final']
    dt = self.settings['t_step']
    to = self.settings['t_output']
    tol = self.settings['t_tolerance']
    t_pre = self.settings['t_precision']
    s = self.settings['t_scheme']
    # Initialize the residual
    res_t = 1e+06
    if not isinstance(to, list):
        # Make sure 'tf' and 'to' are multiples of 'dt' by rounding them
        # up to the next time-step boundary.
        tf = tf + (dt - (tf % dt)) * ((tf % dt) != 0)
        to = to + (dt - (to % dt)) * ((to % dt) != 0)
        self.settings['t_final'] = tf
        self.settings['t_output'] = to
        out = np.arange(t + to, tf, to)
    else:
        out = np.array(to)
    # Always export the final time, deduplicate and round to the
    # configured precision so membership tests below are reliable.
    out = np.append(out, tf)
    out = np.unique(out)
    out = np.around(out, decimals=t_pre)
    if (s == 'steady'):
        # If solver in steady mode, do one iteration
        logger.info(' Running in steady mode')
        x_old = self[self.settings['quantity']]
        self._t_run_reactive(x=x_old)
        x_new = self[self.settings['quantity']]
    else:
        # Do time iterations.
        # Export the initial field (t = t_initial).  ``n`` is the number
        # of decimal digits of the rounded time, used to build a key
        # suffix of the form '<int>e-<n>' (or just '<int>' when n == 0).
        n = int(-dc(str(round(t, t_pre))).as_tuple().exponent * (round(t, t_pre) != int(t)))
        t_str = (str(int(round(t, t_pre) * 10 ** n)) + ('e-' + str(n)) * (n != 0))
        quant_init = self[self.settings['quantity']]
        self[self.settings['quantity'] + '@' + t_str] = quant_init
        for time in np.arange(t + dt, tf + dt, dt):
            if (res_t >= tol):
                # Steady state not reached yet: advance one time step.
                logger.info(' Current time step: ' + str(time) + ' s')
                x_old = self[self.settings['quantity']]
                self._t_run_reactive(x=x_old)
                x_new = self[self.settings['quantity']]
                # Compute the residual
                res_t = np.sum(np.absolute(x_old ** 2 - x_new ** 2))
                logger.info(' Residual: ' + str(res_t))
                # Output transient solutions.  Round time to ensure every
                # value in outputs is exported.
                if round(time, t_pre) in out:
                    n = int(-dc(str(round(time, t_pre))).as_tuple().exponent * (round(time, t_pre) != int(time)))
                    t_str = (str(int(round(time, t_pre) * 10 ** n)) + ('e-' + str(n)) * (n != 0))
                    self[self.settings['quantity'] + '@' + t_str] = x_new
                    logger.info(' Exporting time step: ' + str(time) + ' s')
                # Update A and b and apply BCs
                self._t_update_A()
                self._t_update_b()
                self._apply_BCs()
                self._A_t = (self._A).copy()
                self._b_t = (self._b).copy()
            else:
                # Stop time iterations if residual < t_tolerance:
                # output the steady state solution and leave the loop.
                n = int(-dc(str(round(time, t_pre))).as_tuple().exponent * (round(time, t_pre) != int(time)))
                t_str = (str(int(round(time, t_pre) * 10 ** n)) + ('e-' + str(n)) * (n != 0))
                self[self.settings['quantity'] + '@' + t_str] = x_new
                logger.info(' Exporting time step: ' + str(time) + ' s')
                break
        if (round(time, t_pre) == tf):
            logger.info(' Maximum time step reached: ' + str(time) + ' s')
        else:
            logger.info(' Transient solver converged after: ' + str(time) + ' s')
def handle_profile_save(self, sender, instance, **kwargs):
    """Custom handler for user profile save: delegate to ``handle_save``
    with the profile's user and its class."""
    user = instance.user
    self.handle_save(user.__class__, user)
def create_manager(self, instance, superclass):
    """Dynamically create a RelatedManager to handle the back side of the (G)FK.

    :param instance: the rated object this manager is bound to.
    :param superclass: the manager class to subclass.
    :return: a ``RelatedManager`` instance whose queryset is restricted to
        ratings of ``instance``.
    """
    rel_model = self.rating_model
    rated_model = self.rated_model

    class RelatedManager(superclass):
        def get_query_set(self):
            qs = RatingsQuerySet(rel_model, rated_model=rated_model)
            return qs.filter(**(self.core_filters))

        def add(self, *objs):
            # Attach each rating object to ``instance`` and save it.
            lookup_kwargs = rel_model.lookup_kwargs(instance)
            for obj in objs:
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                # ``items()`` replaces the Python-2-only ``iteritems()``
                # and behaves identically on both Python versions.
                for (k, v) in lookup_kwargs.items():
                    setattr(obj, k, v)
                obj.save()
        add.alters_data = True

        def create(self, **kwargs):
            kwargs.update(rel_model.lookup_kwargs(instance))
            return super(RelatedManager, self).create(**kwargs)
        create.alters_data = True

        def get_or_create(self, **kwargs):
            kwargs.update(rel_model.lookup_kwargs(instance))
            return super(RelatedManager, self).get_or_create(**kwargs)
        get_or_create.alters_data = True

        def remove(self, *objs):
            for obj in objs:
                # Is obj actually part of this descriptor set?
                if obj in self.all():
                    obj.delete()
                else:
                    raise rel_model.DoesNotExist("%r is not related to %r." % (obj, instance))
        remove.alters_data = True

        def clear(self):
            self.all().delete()
        clear.alters_data = True

        def rate(self, user, score):
            # Create or update this user's rating; save only on change.
            rating, created = self.get_or_create(user=user)
            if created or score != rating.score:
                rating.score = score
                rating.save()
            return rating

        def unrate(self, user):
            return self.filter(user=user, **rel_model.lookup_kwargs(instance)).delete()

        def perform_aggregation(self, aggregator):
            score = self.all().aggregate(agg=aggregator('score'))
            return score['agg']

        def cumulative_score(self):
            # Simply the sum of all scores, useful for +1/-1.
            return self.perform_aggregation(models.Sum)

        def average_score(self):
            # The average of all the scores, useful for 1-5.
            return self.perform_aggregation(models.Avg)

        def standard_deviation(self):
            # The standard deviation of all the scores, useful for 1-5.
            return self.perform_aggregation(models.StdDev)

        def variance(self):
            # The variance of all the scores, useful for 1-5.
            return self.perform_aggregation(models.Variance)

        def similar_items(self):
            return SimilarItem.objects.get_for_item(instance)

    manager = RelatedManager()
    manager.core_filters = rel_model.lookup_kwargs(instance)
    manager.model = rel_model
    return manager
def get_query_uri(self):
    """Return the uri used for queries on time series data."""
    # The configured query URI carries an extra path component we don't
    # want, so reduce it to scheme://netloc here.
    parsed = urlparse(self.service.settings.data['query']['uri'])
    return '{}://{}'.format(parsed.scheme, parsed.netloc)
def item(self):
    """Return the exchange item defined by this XML variable element.

    (Doctest reconstructed from a garbled extraction — verify against the
    upstream HydPy sources.)

    >>> from hydpy.core.examples import prepare_full_example_1
    >>> prepare_full_example_1()
    >>> from hydpy import HydPy, TestIO, XMLInterface, pub
    >>> hp = HydPy('LahnH')
    >>> pub.timegrids = '1996-01-01', '1996-01-06', '1d'
    >>> with TestIO():
    ...     hp.prepare_everything()
    ...     interface = XMLInterface('multiple_runs.xml')
    >>> var = interface.exchange.itemgroups[0].models[0].subvars[0].vars[0]
    >>> item = var.item
    >>> item.value
    array(2.0)
    >>> hp.elements.land_dill.model.parameters.control.alpha
    alpha(1.0)
    >>> item.update_variables()
    >>> hp.elements.land_dill.model.parameters.control.alpha
    alpha(2.0)

    >>> var = interface.exchange.itemgroups[0].models[2].subvars[0].vars[0]
    >>> item = var.item
    >>> item.value
    array(5.0)
    >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag
    lag(0.0)
    >>> item.update_variables()
    >>> hp.elements.stream_dill_lahn_2.model.parameters.control.lag
    lag(5.0)

    >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[0]
    >>> item = var.item
    >>> item.name
    'sm_lahn_2'
    >>> item.value
    array(123.0)
    >>> hp.elements.land_lahn_2.model.sequences.states.sm
    sm(138.31396, 135.71124, 147.54968, 145.47142, 154.96405, 153.32805,
       160.91917, 159.62434, 165.65575, 164.63255)
    >>> item.update_variables()
    >>> hp.elements.land_lahn_2.model.sequences.states.sm
    sm(123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0)

    >>> var = interface.exchange.itemgroups[1].models[0].subvars[0].vars[1]
    >>> item = var.item
    >>> item.name
    'sm_lahn_1'
    >>> item.value
    array([ 110.,  120.,  130.,  140.,  150.,  160.,  170.,  180.,  190.,
            200.,  210.,  220.,  230.])
    >>> hp.elements.land_lahn_1.model.sequences.states.sm
    sm(99.27505, 96.17726, 109.16576, 106.39745, 117.97304, 115.56252,
       125.81523, 123.73198, 132.80035, 130.91684, 138.95523, 137.25983,
       142.84148)
    >>> from hydpy import pub
    >>> with pub.options.warntrim(False):
    ...     item.update_variables()
    >>> hp.elements.land_lahn_1.model.sequences.states.sm
    sm(110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0,
       206.0, 206.0, 206.0)

    >>> for element in pub.selections.headwaters.elements:
    ...     element.model.parameters.control.rfcf(1.1)
    >>> for element in pub.selections.nonheadwaters.elements:
    ...     element.model.parameters.control.rfcf(1.0)
    >>> for subvars in interface.exchange.itemgroups[2].models[0].subvars:
    ...     for var in subvars.vars:
    ...         var.item.update_variables()
    >>> for element in hp.elements.catchment:
    ...     print(element, repr(element.model.parameters.control.sfcf))
    land_dill sfcf(1.4)
    land_lahn_1 sfcf(1.4)
    land_lahn_2 sfcf(1.2)
    land_lahn_3 sfcf(field=1.1, forest=1.2)

    >>> var = interface.exchange.itemgroups[3].models[0].subvars[1].vars[0]
    >>> hp.elements.land_dill.model.sequences.states.sm = 1.0
    >>> for name, target in var.item.yield_name2value():
    ...     print(name, target)    # doctest: +ELLIPSIS
    land_dill_states_sm [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
    land_lahn_1_states_sm [110.0, 120.0, 130.0, 140.0, 150.0, 160.0, 170.0, 180.0, 190.0, 200.0, 206.0, 206.0, 206.0]
    land_lahn_2_states_sm [123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0, 123.0]
    land_lahn_3_states_sm [101.3124...]

    >>> vars_ = interface.exchange.itemgroups[3].models[0].subvars[0].vars
    >>> qt = hp.elements.land_dill.model.sequences.fluxes.qt
    >>> qt(1.0)
    >>> qt.series = 2.0
    >>> for var in vars_:
    ...     for name, target in var.item.yield_name2value():
    ...         print(name, target)    # doctest: +ELLIPSIS
    land_dill_fluxes_qt 1.0
    land_dill_fluxes_qt_series [2.0, 2.0, 2.0, 2.0, 2.0]

    >>> var = interface.exchange.itemgroups[3].nodes[0].vars[0]
    >>> hp.nodes.dill.sequences.sim.series = range(5)
    >>> for name, target in var.item.yield_name2value():
    ...     print(name, target)    # doctest: +ELLIPSIS
    dill_nodes_sim_series [0.0, 1.0, 2.0, 3.0, 4.0]
    >>> for name, target in var.item.yield_name2value(2, 4):
    ...     print(name, target)    # doctest: +ELLIPSIS
    dill_nodes_sim_series [2.0, 3.0]
    """
    target = f'{self.master.name}.{self.name}'
    # Node variables sit one level shallower in the XML hierarchy than
    # model variables, so the itemgroup name is found one ``master`` up.
    if self.master.name == 'nodes':
        master = self.master.name
        itemgroup = self.master.master.name
    else:
        master = self.master.master.name
        itemgroup = self.master.master.master.name
    itemclass = _ITEMGROUP2ITEMCLASS[itemgroup]
    # "getitems" describe read-only queries; everything else changes
    # model variables and needs the itemgroup passed along.
    if itemgroup == 'getitems':
        return self._get_getitem(target, master, itemclass)
    return self._get_changeitem(target, master, itemclass, itemgroup)
def cmd_fft(args):
    '''display fft from log'''
    from MAVProxy.modules.lib import mav_fft
    # An optional first argument restricts which log entries are used.
    condition = args[0] if len(args) > 0 else None
    child = multiproc.Process(target=mav_fft.mavfft_display,
                              args=[mestate.filename, condition])
    child.start()
def sample_radius(self, n):
    """Sample the radial distribution (deg) from the 2D stellar density.

    Output is elliptical radius in true projected coordinates.

    :param n: number of samples to draw.
    :return: array of ``n`` sampled radii (deg).
    """
    # Cap the sampling range at 20 times the profile extension.
    edge = min(self.edge, 20 * self.extension)
    # ``np.linspace`` requires an integer sample count; the original
    # passed the float literal ``1.e5``, which modern numpy rejects.
    radius = np.linspace(0, edge, int(1e5))
    # Weight the density profile by sin(radius) — presumably the
    # spherical area element for a radial coordinate in degrees.
    pdf = self._pdf(radius) * np.sin(np.radians(radius))
    cdf = np.cumsum(pdf)
    cdf /= cdf[-1]
    # Invert the CDF by interpolation and draw uniform samples from it.
    fn = scipy.interpolate.interp1d(cdf, list(range(0, len(cdf))))
    index = np.floor(fn(np.random.uniform(size=n))).astype(int)
    return radius[index]
def _symbol_extract ( self , regex , plus = True , brackets = False ) :
"""Extracts a symbol or full symbol from the current line ,
optionally including the character under the cursor .
: arg regex : the compiled regular expression to use for extraction .
: arg plus : when true , the character under the cursor * is * included .
: arg brackets : when true , matching pairs of brackets are first removed
before the regex is run .""" | charplus = self . pos [ 1 ] + ( 1 if plus else - 1 )
consider = self . current_line [ : charplus ] [ : : - 1 ]
# We want to remove matching pairs of brackets so that derived types
# that have arrays still get intellisense .
if brackets == True : # The string has already been reversed , just run through it .
rightb = [ ]
lastchar = None
for i in range ( len ( consider ) ) :
if consider [ i ] == ")" :
rightb . append ( i )
elif consider [ i ] == "(" and len ( rightb ) > 0 :
lastchar = i
rightb . pop ( )
if lastchar is not None :
consider = '%' + consider [ lastchar + 1 : ]
rematch = regex . match ( consider )
if rematch is not None :
return rematch . group ( "symbol" ) [ : : - 1 ]
else :
return "" |
def read_dict(self, dictionary, source='<dict>'):
    """Read configuration from a dictionary.

    Keys are section names, values are dictionaries with keys and values
    that should be present in the section. If the used dictionary type
    preserves order, sections and their keys will be added in order.

    All types held in the dictionary are converted to strings during
    reading, including section names, option names and keys.

    Optional second argument is the `source' specifying the name of the
    dictionary being read.
    """
    seen = set()
    for raw_section, options in dictionary.items():
        section = str(raw_section)
        try:
            self.add_section(section)
        except (DuplicateSectionError, ValueError):
            # In strict mode a section may only appear once per call.
            if self._strict and section in seen:
                raise
        seen.add(section)
        for raw_key, raw_value in options.items():
            key = self.optionxform(str(raw_key))
            value = str(raw_value) if raw_value is not None else raw_value
            if self._strict and (section, key) in seen:
                raise DuplicateOptionError(section, key, source)
            seen.add((section, key))
            self.set(section, key, value)
def predict_proba(self, X):
    """Probability estimates.

    The returned estimates for all classes are ordered by the label of
    classes.

    Parameters
    ----------
    X : list of ndarrays, one for each training example.
        Each training example's shape is
        (string1_len, string2_len, n_features), where string1_len and
        string2_len are the lengths of the two training strings and
        n_features the number of features.

    Returns
    -------
    T : array-like, shape = [n_samples, n_classes]
        Returns the probability of the sample for each class in the model,
        where classes are ordered as they are in ``self.classes_``.
    """
    parameters = np.ascontiguousarray(self.parameters.T)
    rows = []
    for x in X:
        prediction = _Model(self._state_machine, x).predict(parameters, self.viterbi)
        # Order class probabilities by class label.
        rows.append([probability for _, probability in sorted(prediction.items())])
    return np.array(rows)
def _merge_includes ( self ) :
"""If " include " option exists in " default . cfg " ,
read the file ( glob - match ) in the directory .""" | raw_include_path = self . get_global_include ( )
if raw_include_path :
abs_include_path = self . _get_global_include_abs_path ( raw_include_path )
self . _validate_global_include ( abs_include_path )
self . set_global_include ( abs_include_path )
for infile in glob . glob ( abs_include_path ) :
self . config . merge ( self . _configobj_factory ( infile = infile ) ) |
def _set_replicator(self, v, load=False):
    """Setter method for replicator, mapped from YANG variable
    /tunnel_settings/system/tunnel/replicator (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_replicator is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_replicator() directly.
    """
    # Coerce through the declared union type helper when present.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG container type; most
        # arguments mirror the YANG schema metadata verbatim.
        t = YANGDynClass(v, base=replicator.replicator, is_container='container', presence=False, yang_name="replicator", rest_name="replicator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NSX replicator tunnel related settings', u'hidden': u'debug', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as ValueError carrying the generated-type description
        # so callers can report the expected type.
        raise ValueError({'error-string': """replicator must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=replicator.replicator, is_container='container', presence=False, yang_name="replicator", rest_name="replicator", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'NSX replicator tunnel related settings', u'hidden': u'debug', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='container', is_config=True)""", })
    self.__replicator = t
    if hasattr(self, '_set'):
        self._set()
def project_tags(self):
    """List all git tags made to the project.

    :return: the list of tags reported by the API.
    """
    url = "{}git/tags".format(self.create_basic_url())
    return self._call_api(url)['tags']
def upper(self):
    """Returns a copy of this query with the Upper function added to its
    function list.

    :return <Query>
    """
    clone = self.copy()
    clone.addFunction(Query.Function.Upper)
    return clone
def selection(self):
    """A complete |Selection| object of all "supplying" and "routing"
    elements and required nodes.

    (Doctest reconstructed from a garbled extraction — verify against the
    upstream HydPy sources.)

    >>> from hydpy import RiverBasinNumbers2Selection
    >>> rbns2s = RiverBasinNumbers2Selection(
    ...     (111, 113, 1129, 11269, 1125, 11261,
    ...      11262, 1123, 1124, 1122, 1121))
    >>> rbns2s.selection
    Selection("complete",
              nodes=("node_1123", "node_1125", "node_11269", "node_1129",
                     "node_113", "node_outlet"),
              elements=("land_111", "land_1121", "land_1122", "land_1123",
                        "land_1124", "land_1125", "land_11261",
                        "land_11262", "land_11269", "land_1129",
                        "land_113", "stream_1123", "stream_1125",
                        "stream_11269", "stream_1129", "stream_113"))

    Besides the possible modifications on the names of the different
    nodes and elements, the name of the selection can be set differently:

    >>> rbns2s.selection_name = 'sel'
    >>> from hydpy import pub
    >>> with pub.options.ellipsis(1):
    ...     print(repr(rbns2s.selection))
    Selection("sel",
              nodes=("node_1123", ...,"node_outlet"),
              elements=("land_111", ...,"stream_113"))
    """
    return selectiontools.Selection(self.selection_name, self.nodes, self.elements)
def setCompactProtocol(self):
    """Set the compact protocol."""
    self._compact = True
    # The baud-detect byte sequence announces the protocol switch.
    self._serial.write(bytes(self._BAUD_DETECT))
    if self._log:
        self._log.debug("Compact protocol has been set.")
def simplify(self):
    """Simplify the Expr by merging terms with identical ops and dropping
    terms whose coefficients cancel to zero."""
    coeffs = defaultdict(float)
    for term in self.terms:
        reduced = term.simplify()
        coeffs[reduced.ops] += reduced.coeff
    return Expr.from_terms_iter(
        Term.from_ops_iter(ops, coeffs[ops])
        for ops in sorted(coeffs, key=repr)
        if coeffs[ops])
def decode_cli_arg(arg):
    """Turn a bytestring provided by `argparse` into unicode.

    :param arg: The bytestring to decode.
    :return: The argument as a unicode object.
    :raises ValueError: If arg is None.
    """
    if arg is None:
        raise ValueError('Argument cannot be None')
    if sys.version_info.major != 3:
        # Python 2: argparse hands over raw bytes.
        return arg.decode(sys.getfilesystemencoding())
    # Python 3 arguments are already decoded.
    return arg
def to_range_strings(seglist):
    """Turn a segment list into a list of range strings as could be parsed
    by from_range_strings().  A typical use for this function is in
    machine-generating configuration files or command lines for other
    programs.

    Example:

    >>> from pycbc_glue.segments import *
    >>> segs = segmentlist([segment(0, 10), segment(35, 35), segment(100, infinity())])
    >>> ",".join(to_range_strings(segs))
    '0:10,35,100:'
    """
    ranges = []
    for seg in seglist:
        if not seg:
            # Zero-length segment: render as a single point.
            ranges.append(str(seg[0]))
            continue
        # Open bounds are rendered by omitting the respective side.
        open_low = seg[0] is segments.NegInfinity
        open_high = seg[1] is segments.PosInfinity
        if open_low and open_high:
            ranges.append(":")
        elif open_low:
            ranges.append(":%s" % str(seg[1]))
        elif open_high:
            ranges.append("%s:" % str(seg[0]))
        else:
            ranges.append("%s:%s" % (str(seg[0]), str(seg[1])))
    return ranges
def _compute_probabilities(miner_data, wait_blocks, sample_size):
    """Computes the probabilities that a txn will be accepted at each of the
    gas prices accepted by the miners.

    Yields one ``Probability(low_percentile_gas_price, probability)`` per
    miner datum, ordered from highest gas price to lowest.
    """
    by_price_desc = sorted(
        miner_data,
        key=operator.attrgetter('low_percentile_gas_price'),
        reverse=True,
    )
    for idx, datum in enumerate(by_price_desc):
        # Miners at or beyond this index accept at least this gas price.
        accepting_blocks = sum(m.num_blocks for m in by_price_desc[idx:])
        # Chance a single block is mined by someone who rejects the price.
        miss_chance_per_block = (sample_size - accepting_blocks) / sample_size
        acceptance_probability = 1 - miss_chance_per_block ** wait_blocks
        yield Probability(datum.low_percentile_gas_price, acceptance_probability)
def get_drop_index_sql(self, index, table=None):
    """Returns the SQL to drop an index from a table.

    :param index: The index
    :type index: Index or str
    :param table: The table (accepted for interface parity; unused here)
    :type table: Table or str or None
    :rtype: str
    """
    name = index.get_quoted_name(self) if isinstance(index, Index) else index
    return "DROP INDEX %s" % name
def _from_rest_reject_update ( model ) :
"""Reject any field updates not allowed on POST
This is done on fields with ` reject _ update = True ` .""" | dirty = model . dirty_fields
fields = model . get_fields_by_prop ( 'reject_update' , True )
reject = [ ]
for field in fields :
if field in dirty :
reject . append ( field )
if reject :
mod_fail ( 'These fields cannot be updated: %s' % ', ' . join ( reject ) ) |
def div(self, y):
    r"""Compute the divergence of a signal defined on the edges.

    The divergence of an edge signal :math:`y` is

    .. math:: z = \operatorname{div}_\mathcal{G} y = D y,

    where :math:`D` is the differential operator :attr:`D`.  For undirected
    graphs only half the edges are kept; see
    :meth:`compute_differential_operator` for details.

    Parameters
    ----------
    y : array_like
        Signal of length :attr:`n_edges` living on the edges.

    Returns
    -------
    z : ndarray
        Divergence signal of length :attr:`n_vertices` living on the
        vertices.

    See Also
    --------
    compute_differential_operator
    grad : compute the gradient of a vertex signal

    Examples
    --------
    >>> graph = graphs.Path(4, directed=False, lap_type='combinatorial')
    >>> graph.compute_differential_operator()
    >>> graph.div([2, -2, 0])
    array([-2.,  4., -2.,  0.])
    """
    signal = np.asanyarray(y)
    if signal.shape[0] != self.Ne:
        raise ValueError('First dimension must be the number of edges '
                         'G.Ne = {}, got {}.'.format(self.Ne, signal.shape))
    return self.D.dot(signal)
def get_id_from_cstring(name):
    """Return the variable type id whose C-storage name matches ``name``.

    :raises KeyError: when no registered type uses that C name.
    """
    for type_id, entry in LogTocElement.types.items():
        # entry[0] holds the C-storage name for this type id.
        if entry[0] == name:
            return type_id
    raise KeyError('Type [%s] not found in LogTocElement.types!' % name)
def apply_flat(self, config, namespace_separator='_', prefix=''):
    # type: (Dict[str, Any], str, str) -> None
    """Apply additional configuration from a flattened dictionary.

    Looks for dictionary items whose keys match flattened keys from the base
    configuration and applies their values to the current configuration
    object.  Useful for environment variables and flat formats such as INI
    files.
    """
    self._init_flat_pointers()
    for key_stack, (container, orig_key) in self._flat_pointers.items():
        # Rebuild the flattened key, e.g. ('db', 'host') -> 'db_host'.
        flat_key = '{}{}'.format(prefix, namespace_separator.join(key_stack))
        if flat_key in config:
            container[orig_key] = config[flat_key]
def record(self, tags, measurement_map, timestamp, attachments=None):
    """records stats with a set of tags"""
    # Measurement values must be non-negative.
    assert all(vv >= 0 for vv in measurement_map.values())
    for measure, value in measurement_map.items():
        # Abort the whole batch if any measure is not the instance that was
        # registered under its name.
        if measure != self._registered_measures.get(measure.name):
            return
        # Collect every ViewData aggregating this measure ...
        view_datas = []
        for measure_name, view_data_list in self._measure_to_view_data_list_map.items():
            if measure_name == measure.name:
                view_datas.extend(view_data_list)
        # ... record the value against each of them, then export the batch.
        for view_data in view_datas:
            view_data.record(context=tags, value=value, timestamp=timestamp, attachments=attachments)
        self.export(view_datas)
def minizinc_version():
    """Returns the version of the found minizinc executable.

    :raises RuntimeError: when no version string can be parsed from the
        ``minizinc --version`` output (executable missing or unexpected).
    """
    output = _run_minizinc('--version')
    found = re.findall(r'version ([\d\.]+)', output)
    if not found:
        raise RuntimeError('MiniZinc executable not found.')
    return found[0]
def read_pattern(text_str, patterns, terminate_on_match=False, postprocess=str):
    """General pattern reading on an input string.

    Args:
        text_str (str): the input string to search for patterns
        patterns (dict): A dict of patterns, e.g.,
            {"energy": r"energy\\(sigma->0\\)\\s+=\\s+([\\d\\-.]+)"}.
        terminate_on_match (bool): Whether to stop after the first match of
            each individual pattern.
        postprocess (callable): A post processing function applied to every
            captured group. Defaults to str, i.e., no change.

    Returns:
        dict mapping each key to a list of lists (one inner list per match,
        because multiple items can be grepped on one line), with each group
        run through ``postprocess``.
    """
    matches = defaultdict(list)
    for key, raw_pattern in patterns.items():
        regex = re.compile(raw_pattern, re.MULTILINE | re.DOTALL)
        for found in regex.finditer(text_str):
            matches[key].append([postprocess(group) for group in found.groups()])
            if terminate_on_match:
                break
    return matches
def __crawler_stop(self):
    """Mark the crawler as stopped.

    Note:
        If :attr:`__stopped` is True, the main thread will be stopped. Every
        piece of code that gets executed after :attr:`__stopped` is True
        could cause Thread exceptions and or race conditions.
    """
    # Guard against re-entrant stop requests.
    if self.__stopping:
        return
    self.__stopping = True
    # Let in-flight worker threads drain before cancelling what remains.
    self.__wait_for_current_threads()
    self.queue.move_bulk([QueueItem.STATUS_QUEUED, QueueItem.STATUS_IN_PROGRESS], QueueItem.STATUS_CANCELLED)
    self.__crawler_finish()
    # Only set the final flag after all shutdown work is complete.
    self.__stopped = True
def PushTask(self, task):
    """Pushes a task onto the heap.

    Args:
        task (Task): task.

    Raises:
        ValueError: if the size of the storage file is not set in the task.
    """
    storage_file_size = getattr(task, 'storage_file_size', None)
    if not storage_file_size:
        raise ValueError('Task storage file size not set.')
    # Directories get the smallest weight so they are merged first.
    is_directory = task.file_entry_type == dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY
    weight = 1 if is_directory else storage_file_size
    task.merge_priority = weight
    heapq.heappush(self._heap, (weight, task))
    self._task_identifiers.add(task.identifier)
def toXml(cls, data, xparent=None):
    """Converts the inputted element to a Python object by looking through
    the IO addons for the element's tag.

    :param data: <variant> value to serialize (None yields None)
    :param xparent: <xml.etree.ElementTree.Element> || None
    :return: <xml.etree.ElementTree.Element> or None
    :raises RuntimeError: when no addon handles the value's type
    """
    if data is None:
        return None
    # XmlObject instances share the generic 'object' tag; everything else
    # dispatches on its concrete type name.
    tag = 'object' if isinstance(data, XmlObject) else type(data).__name__
    addon = cls.byName(tag)
    if not addon:
        raise RuntimeError('{0} is not a supported XML tag'.format(tag))
    return addon.save(data, xparent)
def encrypt(payload, public_key):
    """Encrypt a payload using an encrypted JSON wrapper.

    See: https://diaspora.github.io/diaspora_federation/federation/encryption.html

    :param payload: Payload document as a string.
    :param public_key: Public key of recipient as an RSA object.
    :return: Encrypted JSON wrapper as dict.
    """
    # Fresh AES key/IV for this message; the returned encrypter uses them.
    iv, key, encrypter = EncryptedPayload.get_iv_key_encrypter()
    aes_key_json = EncryptedPayload.get_aes_key_json(iv, key)
    # Wrap the AES key document with the recipient's RSA public key.
    cipher = PKCS1_v1_5.new(public_key)
    aes_key = b64encode(cipher.encrypt(aes_key_json))
    # AES needs the plaintext padded to the block size (PKCS#7).
    padded_payload = pkcs7_pad(payload.encode("utf-8"), AES.block_size)
    encrypted_me = b64encode(encrypter.encrypt(padded_payload))
    return {
        "aes_key": aes_key.decode("utf-8"),
        "encrypted_magic_envelope": encrypted_me.decode("utf8"),
    }
def B4PB(self):
    '''Return True when the minute-level GLI check passes and any of the
    four major buy points (B1-B4) fires.'''
    return self.ckMinsGLI and (self.B1 or self.B2 or self.B3 or self.B4)
def count(self):
    """count: get number of nodes in tree

    Args: None

    Returns:
        int: the number of descendant nodes below this one
    """
    # Direct children plus, recursively, each child's own descendants.
    return len(self.children) + sum(child.count() for child in self.children)
def _execute_task(self, *, dependency, input_args, intermediate_results, monitor):
    """Executes a task of the workflow.

    :param dependency: A workflow dependency
    :type dependency: Dependency
    :param input_args: External task parameters.
    :type input_args: dict
    :param intermediate_results: The dictionary containing intermediate
        results, including the results of all tasks that the current task
        depends on.
    :type intermediate_results: dict
    :return: The result of the task in dependency
    :rtype: object
    """
    task = dependency.task
    # Positional inputs are the intermediate results of the tasks this one
    # depends on, looked up through their private uuid.
    inputs = tuple(intermediate_results[self.uuid_dict[input_task.private_task_config.uuid]] for input_task in dependency.inputs)
    kw_inputs = input_args.get(task, {})
    # A tuple of external args is treated as extra positional inputs rather
    # than keyword arguments.
    if isinstance(kw_inputs, tuple):
        inputs += kw_inputs
        kw_inputs = {}
    LOGGER.debug("Computing %s(*%s, **%s)", str(task), str(inputs), str(kw_inputs))
    return task(*inputs, **kw_inputs, monitor=monitor)
def optimize_no(self):
    '''Reset every optimisation option to its default (disabled) state.'''
    self.optimization = 0
    # All boolean optimisation flags are switched off.
    for flag in ('relax', 'gc_sections', 'ffunction_sections',
                 'fdata_sections', 'fno_inline_small_functions'):
        setattr(self, flag, False)
def tag_details(tag, nodenames):
    """Used in media and graphics to extract data from their parent tags.

    :param tag: parsed tag to describe
    :param nodenames: node names forwarded to the component-DOI extraction
    :return: dict with 'type' and 'ordinal' keys, plus optional
        'sibling_ordinal', 'asset' and 'component_doi' keys when available
    """
    details = {}
    details['type'] = tag.name
    details['ordinal'] = tag_ordinal(tag)
    # Ordinal value — compute once instead of calling the helper twice
    sibling_ordinal = tag_details_sibling_ordinal(tag)
    if sibling_ordinal:
        details['sibling_ordinal'] = sibling_ordinal
    # Asset name — likewise computed a single time
    asset = tag_details_asset(tag)
    if asset:
        details['asset'] = asset
    object_id_tag = first(raw_parser.object_id(tag, pub_id_type="doi"))
    if object_id_tag:
        details['component_doi'] = extract_component_doi(tag, nodenames)
    return details
def add_project(self, ):
    """Add the project currently selected in the project table view and
    store it in self.projects.

    :returns: None
    :rtype: None
    :raises: None
    """
    i = self.prj_tablev.currentIndex()
    item = i.internalPointer()
    # Nothing selected -> no-op.
    if item:
        project = item.internal_data()
        # Attach the project to whichever context this dialog is editing.
        if self._atype:
            self._atype.projects.add(project)
        elif self._dep:
            self._dep.projects.add(project)
        else:
            project.users.add(self._user)
        self.projects.append(project)
        # Detach the item so it disappears from the selectable model.
        item.set_parent(None)
def get_text(self):
    """Get the text in its current state.

    Each stored element is formatted to a string and concatenated.
    """
    parts = [format(element) for element in self.text]
    return u''.join(parts)
def ordered(start, edges, predicate=None, inverse=False):
    """Depth first edges from a SciGraph response.

    :param start: node id to start the walk from
    :param edges: iterable of edge dicts with 'sub', 'obj' and 'pred' keys
    :param predicate: if given, only follow edges with this predicate
    :param inverse: walk edges from object to subject instead
    """
    s, o = 'sub', 'obj'
    if inverse:
        s, o = o, s
    for edge in edges:
        # Skip edges that do not carry the requested predicate.
        # (A stray debug print was removed here.)
        if predicate is not None and edge['pred'] != predicate:
            continue
        if edge[s] == start:
            yield edge
            # Propagate inverse so the traversal direction stays consistent
            # at every recursion depth (it was previously dropped).
            yield from Graph.ordered(edge[o], edges, predicate=predicate, inverse=inverse)
def build_scope(resource, method):
    """Compute the name of the scope for oauth.

    :param Resource resource: the resource manager
    :param str method: an http method
    :return str: the name of the scope
    """
    mro = inspect.getmro(resource)
    # GET on a list resource is 'list'; everything else maps per HTTP verb.
    if ResourceList in mro and method == 'GET':
        prefix = 'list'
    else:
        prefix = {'GET': 'get', 'POST': 'create',
                  'PATCH': 'update', 'DELETE': 'delete'}[method]
    if ResourceRelationship in mro:
        prefix = prefix + '_relationship'
    return '_'.join([prefix, resource.schema.opts.type_])
def _read_name(self, bufr, idx, strings_offset):
    """Return a (platform_id, name_id, name) 3-tuple like (0, 1, 'Arial')
    for the name at *idx* position in *bufr*. *strings_offset* is the
    index into *bufr* where actual name strings begin. The returned name
    is a unicode string.
    """
    # Decode the record's fixed-size header fields first ...
    platform_id, encoding_id, lang_id, name_id, length, str_offset = (self._name_header(bufr, idx))
    # ... then pull the actual string out of the string storage area.
    name = self._read_name_text(bufr, platform_id, encoding_id, strings_offset, str_offset, length)
    return platform_id, name_id, name
def clearFixedEffect(self):
    """Erase all fixed effects and invalidate any cached results."""
    # An (N, 0) design matrix means "no fixed-effect columns".
    self.F_any = np.zeros((self.N, 0))
    self.A = []
    self.F = []
    self.clear_cache()
def get_transaction_details(tx_hash, coin_symbol='btc', limit=None, tx_input_offset=None, tx_output_offset=None, include_hex=False, show_confidence=False, confidence_only=False, api_key=None):
    """Takes a tx_hash, coin_symbol, and limit and returns the transaction
    details.

    Optional:
        - limit: # inputs/outputs to include (applies to both)
        - tx_input_offset: input offset
        - tx_output_offset: output offset
        - include_hex: include the raw TX hex
        - show_confidence: adds confidence information to unconfirmed TXRefs.
        - confidence_only: show only the confidence statistics and don't
          return the rest of the endpoint details (faster)
    """
    assert is_valid_hash(tx_hash), tx_hash
    assert is_valid_coin_symbol(coin_symbol), coin_symbol
    # The confidence-only variant uses a dedicated sub-endpoint.
    added = 'txs/{}{}'.format(tx_hash, '/confidence' if confidence_only else '')
    url = make_url(coin_symbol, added)
    # Build the query string only from the parameters actually supplied.
    params = {}
    if api_key:
        params['token'] = api_key
    if limit:
        params['limit'] = limit
    if tx_input_offset:
        params['inStart'] = tx_input_offset
    if tx_output_offset:
        params['outStart'] = tx_output_offset
    if include_hex:
        params['includeHex'] = 'true'
    if show_confidence and not confidence_only:
        params['includeConfidence'] = 'true'
    r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
    response_dict = get_valid_json(r)
    # Post-process timestamps only for full (non-confidence) success replies.
    if 'error' not in response_dict and not confidence_only:
        if response_dict['block_height'] > 0:
            response_dict['confirmed'] = parser.parse(response_dict['confirmed'])
        else:
            response_dict['block_height'] = None
            # Blockcypher reports fake times if it's not in a block
            response_dict['confirmed'] = None
        # format this string as a datetime object
        response_dict['received'] = parser.parse(response_dict['received'])
    return response_dict
def _read_tags ( self ) :
"""Fill in the _ tags dict from the tags file .
Args :
None
Returns :
True
Todo :
Figure what could go wrong and at least acknowledge the
the fact that Murphy was an optimist .""" | tags = self . _config . get ( 'tags' , { } )
logging . info ( 'Tags:' )
for tag_name in tags . keys ( ) :
tag = { }
tag [ 'Key' ] = tag_name
tag [ 'Value' ] = tags [ tag_name ]
self . _tags . append ( tag )
logging . info ( '{} = {}' . format ( tag_name , tags [ tag_name ] ) )
logging . debug ( json . dumps ( self . _tags , indent = 2 , sort_keys = True ) )
return True |
def geost_1d(*args, **kwargs):  # (lon, lat, nu) : OR (dst, nu)
    """GEOST_1D: Compute geostrophic speeds from a sea level dataset.

    Reference: Powell, B.S., et R.R. Leben (2004), An Optimal Filter for
    Geostrophic Mesoscale Currents from Along-Track Satellite Altimetry,
    Journal of Atmospheric and Oceanic Technology, 21(10), 1633-1642.

    :param lon: longitude in degrees
    :param lat: latitude in degrees
    :param dst: along-track distance (may be passed as a 3rd positional arg
        when 4 positionals are given)
    :param z: sea level surface; absolute (SSH) or relative (SLA), in METERS
    :keyword strict: if True, compute gradient at mid-distance
    :keyword pl04: if True, use the Powell & Leben 2004 method
    :keyword filter: optional loess filter width in km
    :keyword verbose: verbosity flag forwarded to the PL04 filter
    :returns: Geostrophic velocity component, positive eastward; with
        ``strict=True`` returns ``(lon, lat, ug)`` at mid-distances.

    Author: Renaud DUSSURGET, LEGOS/CTOH. Created Sep. 2009 from
    genweights.m (Brian Powell (c) 2004, University of Colorado, Boulder).

    NOTE(review): the advertised 2-argument ``(dst, nu)`` call form does not
    appear to be handled by the argument parsing below (it would treat dst
    as lon and nu as lat) — confirm with callers.
    """
    lon = args[0]
    lat = args[1]
    # With 4 positionals the 3rd is the along-track distance; otherwise it
    # is derived from lon/lat (km -> m).
    dst = args[2] if len(args) == 4 else calcul_distance(lat, lon) * 1e3
    # distance in meters
    nu = args[3] if len(args) == 4 else args[2]
    isVector = len(np.shape(nu)) == 1
    # Reshape nu if vector: internally everything is (npoints, ntimes).
    if isVector:
        nu = np.reshape(nu, (len(nu), 1))
    nt = np.shape(nu)[1] if not isVector else 1
    sh = nu.shape
    nufilt = np.ma.array(np.empty(sh), mask=True, dtype=nu.dtype)
    pl04 = kwargs.pop('pl04', False)
    filter = kwargs.pop('filter', None)
    strict = kwargs.pop('strict', False)
    verbose = kwargs.pop('verbose', False)
    # Optional along-track loess smoothing before differentiation.
    if filter is not None:
        for t in np.arange(nt):
            nufilt[:, t] = loess(nu[:, t], dst, filter * 1e3)
        nu = nufilt
    # Powell & Leben (2004) optimal-filter branch: returns early.
    if pl04:
        ug = np.ma.array(np.empty(sh), mask=True, dtype=nu.dtype)
        for t in np.arange(nt):
            ug[:, t] = powell_leben_filter_km(lon, lat, nu[:, t], verbose=verbose, **kwargs)
        if isVector:
            ug = ug.flatten()
        return ug
    # If strict option is set to True, compute gradients at mid-distance
    # between points.
    if strict:
        lon = (lon[1:] - lon[:-1]) / 2. + lon[0:-1]
        lat = (lat[1:] - lat[:-1]) / 2. + lat[0:-1]
    # Compute gravitational & coriolis forces (one column per time step).
    if strict:
        sh = (sh[0] - 1, sh[1])
    g = np.repeat(gravity(lat), nt).reshape(sh)
    f = np.repeat(coriolis(lat), nt).reshape(sh)
    # Compute SSH 1st derivative: finite differences at mid-points in
    # strict mode, otherwise the deriv() helper.
    # dh = deriv(dst, nu)  # (deriv is very bad...)
    dh = np.ma.array(np.empty(sh), mask=True, dtype=nu.dtype)
    for t in np.arange(nt):
        dh[:, t] = (nu[1:, t] - nu[:-1, t]) / (dst[1:] - dst[:-1]) if strict else deriv(dst, nu[:, t])
    # Compute geostrophy: ug = -(g/f) * dh/dx.
    ug = -(g * dh) / (f)
    # Inverse sign of ug for descending tracks as Coriolis is oriented to
    # the right northward.
    if (not track_orient(lon, lat)):  # descending tracks
        ug *= -1
    if isVector:
        ug = ug.flatten()
    return (lon, lat, ug) if strict else ug
def make_image(imagesize, voxval=0, spacing=None, origin=None, direction=None, has_components=False, pixeltype='float'):
    """Make an image with given size and voxel value or given a mask and
    vector.

    ANTsR function: `makeImage`

    Arguments
    ---------
    imagesize : tuple/ANTsImage
        input image size or mask
    voxval : scalar or array
        input image value or vector, size of mask
    spacing : tuple/list
        image spatial resolution
    origin : tuple/list
        image spatial origin
    direction : list/ndarray
        direction matrix to convert from index to physical space
    has_components : boolean
        whether there are components per pixel or not
    pixeltype : string
        data type of image values

    Returns
    -------
    ANTsImage

    Raises
    ------
    ValueError
        if a voxel vector's length does not match the mask's positive count
    """
    if isinstance(imagesize, iio.ANTsImage):
        img = imagesize.clone()
        sel = imagesize > 0
        # Only flatten genuine multi-dimensional arrays; the previous
        # unconditional `voxval.ndim` raised AttributeError for the scalar
        # default and for plain lists.
        if isinstance(voxval, np.ndarray) and voxval.ndim > 1:
            voxval = voxval.flatten()
        if np.isscalar(voxval):
            # Scalar fill value applied to every voxel in the mask
            # (previously this path crashed on len(voxval)).
            img[sel] = voxval
        elif (len(voxval) == int((sel > 0).sum())) or (len(voxval) == 0):
            img[sel] = voxval
        else:
            raise ValueError('Num given voxels %i not same as num positive values %i in `imagesize`' % (len(voxval), int((sel > 0).sum())))
        return img
    else:
        if isinstance(voxval, (tuple, list, np.ndarray)):
            array = np.asarray(voxval).astype('float32').reshape(imagesize)
        else:
            array = np.full(imagesize, voxval, dtype='float32')
        image = from_numpy(array, origin=origin, spacing=spacing, direction=direction, has_components=has_components)
        return image.clone(pixeltype)
def DELETE_SLICE_0(self, instr):
    'obj [ : ] = expr'
    # Rebuild a `del obj[:]` statement from the DELETE_SLICE+0 bytecode:
    # the object being sliced is on top of the AST stack.
    value = self.ast_stack.pop()
    kw = dict(lineno=instr.lineno, col_offset=0)
    # A bare [:] slice has no lower/upper/step bounds.
    slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
    # Del context marks the subscript as a deletion target.
    subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Del(), **kw)
    delete = _ast.Delete(targets=[subscr], **kw)
    self.ast_stack.append(delete)
def on_created(self, event):
    """Function called everytime a new file is created.

    Args:
        event: Event to process.
    """
    self._logger.debug('Detected create event on watched path: %s', event.src_path)
    # Delegate to the shared event pipeline.
    self._process_event(event)
def effect_repertoire(self, mechanism, purview):
    """Return the effect repertoire of a mechanism over a purview.

    Args:
        mechanism (tuple[int]): The mechanism for which to calculate the
            effect repertoire.
        purview (tuple[int]): The purview over which to calculate the
            effect repertoire.

    Returns:
        np.ndarray: The effect repertoire of the mechanism over the
        purview.

    .. note::
        The returned repertoire is a distribution over purview node states,
        not the states of the whole network.
    """
    # If the purview is empty, the distribution is empty, so return the
    # multiplicative identity.
    if not purview:
        return np.array([1.0])
    # Use a frozenset so the arguments to `_single_node_effect_repertoire`
    # can be hashed and cached.
    mechanism = frozenset(mechanism)
    # Preallocate the repertoire with the proper shape, so that
    # probabilities are broadcasted appropriately.
    joint = np.ones(repertoire_shape(purview, self.tpm_size))
    # The effect repertoire is the product of the effect repertoires of the
    # individual nodes.
    return joint * functools.reduce(np.multiply, [self._single_node_effect_repertoire(mechanism, p) for p in purview])
def write(self, data, timeout_s=None):
    '''Write to serial port.

    Waits for serial connection to be established before writing.

    Parameters
    ----------
    data : str or bytes
        Data to write to serial port.
    timeout_s : float, optional
        Maximum number of seconds to wait for serial connection to be
        established.  By default, block until serial connection is ready.
    '''
    # Block until the connection event is set (or the timeout elapses).
    # NOTE(review): a timeout does not raise here — the write below is
    # attempted regardless of whether the wait succeeded; confirm intent.
    self.connected.wait(timeout_s)
    self.protocol.transport.write(data)
def sync():
    '''Sync portage/overlay trees and update the eix database

    CLI Example:

    .. code-block:: bash

        salt '*' eix.sync
    '''
    # Funtoo patches eix to use 'ego sync'
    if __grains__['os'] == 'Funtoo':
        cmd = 'eix-sync -q'
    else:
        # Non-interactive: answer "n" to eix-sync's "--ask" prompt.
        cmd = 'eix-sync -q -C "--ask" -C "n"'
    if 'makeconf.features_contains' in __salt__ and __salt__['makeconf.features_contains']('webrsync-gpg'):
        # GPG sign verify is supported only for "webrsync"
        if salt.utils.path.which('emerge-delta-webrsync'):
            # We prefer 'delta-webrsync' to 'webrsync'
            cmd += ' -W'
        else:
            cmd += ' -w'
        return __salt__['cmd.retcode'](cmd) == 0
    else:
        if __salt__['cmd.retcode'](cmd) == 0:
            return True
        # We fall back to "webrsync" if "rsync" fails for some reason
        if salt.utils.path.which('emerge-delta-webrsync'):
            # We prefer 'delta-webrsync' to 'webrsync'
            cmd += ' -W'
        else:
            cmd += ' -w'
        return __salt__['cmd.retcode'](cmd) == 0
def writeTypes(self, fd):
    """write out types module to file descriptor.

    Emits a banner comment identifying the generated module, the common
    header, then one schema description per namespace actually used.
    (Python 2 print-to-file syntax; this module targets Python 2.)
    """
    print >> fd, '#' * 50
    print >> fd, '# file: %s.py' % self.getTypesModuleName()
    print >> fd, '#'
    print >> fd, '# schema types generated by "%s"' % self.__class__
    print >> fd, '# %s' % ' '.join(sys.argv)
    print >> fd, '#'
    print >> fd, '#' * 50
    print >> fd, TypesHeaderContainer()
    # Collect the namespaces referenced by the schemas before emitting.
    self.gatherNamespaces()
    for l in self.usedNamespaces.values():
        sd = SchemaDescription(do_extended=self.do_extended, extPyClasses=self.extPyClasses)
        for schema in l:
            sd.fromSchema(schema)
        sd.write(fd)
def change_text(self, text, fname, pattern=None, expect=None, shutit_pexpect_child=None, before=False, force=False, delete=False, note=None, replace=False, line_oriented=True, create=True, loglevel=logging.DEBUG):
    """Change text in a file.

    Returns None if there was no match for the regexp, True if it was
    matched and replaced, and False if the file did not exist or there was
    some other problem.

    @param text:                  Text to insert.
    @param fname:                 Filename to insert text to
    @param pattern:               Regexp for a line to match and insert after/before/replace.
                                  If none, put at end of file.
    @param expect:                See send()
    @param shutit_pexpect_child:  See send()
    @param before:                Whether to place the text before or after the matched text.
    @param force:                 Force the insertion even if the text is in the file.
    @param delete:                Delete text from file rather than insert
    @param replace:               Replace matched text with passed-in text. If nothing matches, then append.
    @param note:                  See send()
    @param line_oriented:         Consider the pattern on a per-line basis (default True).
                                  Can match any continuous section of the line, eg 'b.*d' will match the line: 'abcde'.
                                  If not line_oriented, the regexp is considered with the flags re.DOTALL, re.MULTILINE enabled
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # Fall back to the current session's pexpect child/expect when not given.
    shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    expect = expect or self.get_current_shutit_pexpect_session().default_expect
    shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
    # Delegate the actual file edit to the pexpect session object.
    return shutit_pexpect_session.change_text(text, fname, pattern=pattern, before=before, force=force, delete=delete, note=note, replace=replace, line_oriented=line_oriented, create=create, loglevel=loglevel)
def postSolve(self):
    '''This method adds consumption at m=0 to the list of stable arm
    points, then constructs the consumption function as a cubic
    interpolation over those points.  Should be run after the backshooting
    routine is complete.

    Parameters
    ----------
    none

    Returns
    -------
    none
    '''
    # Add bottom point to the stable arm points
    self.solution[0].mNrm_list.insert(0, 0.0)
    self.solution[0].cNrm_list.insert(0, 0.0)
    self.solution[0].MPC_list.insert(0, self.MPCmax)
    # Construct an interpolation of the consumption function from the stable arm points
    self.solution[0].cFunc = CubicInterp(self.solution[0].mNrm_list, self.solution[0].cNrm_list, self.solution[0].MPC_list, self.PFMPC * (self.h - 1.0), self.PFMPC)
    # Linear rule scaling market resources by the perfect-foresight MPC.
    self.solution[0].cFunc_U = lambda m: self.PFMPC * m
def weighted_average_to_nodes(x1, x2, data, interpolator):
    """Weighted average of scattered data to the nodal points of a
    triangulation using the barycentric coordinates as weightings.

    Parameters
    ----------
    x1, x2 : 1D arrays
        arrays of x, y or lon, lat (radians)
    data : 1D array
        data to be lumped to the node locations
    interpolator : stripy.Triangulation or stripy.sTriangulation
        defines the node locations and their triangulation

    Returns
    -------
    grid : 1D array containing the results of the weighted average
    norm : 1D array of the normalisation used to compute `grid`
    count : 1D int array of number of points that contribute anything to a
        given node
    """
    import numpy as np
    # Accumulators, one slot per triangulation node.
    # (Fix: the original allocated `gridded_data` but then wrote to the
    # undefined name `grid`, raising NameError on first use.)
    grid = np.zeros(interpolator.npoints)
    norm = np.zeros(interpolator.npoints)
    # `dtype=int` — the `np.int` alias was removed from modern numpy.
    count = np.zeros(interpolator.npoints, dtype=int)
    bcc, nodes = interpolator.containing_simplex_and_bcc(x1, x2)
    # Beware vectorising the reduction operation!!
    for i in range(0, len(data)):
        # Spread each datum over its triangle's three corners, weighted by
        # the barycentric coordinates.
        for corner in range(3):
            node = nodes[i][corner]
            weight = bcc[i][corner]
            grid[node] += weight * data[i]
            norm[node] += weight
            count[node] += 1
    # Normalise only nodes that actually received contributions.
    nonzero = np.where(norm > 0.0)
    grid[nonzero] /= norm[nonzero]
    return grid, norm, count
def curse_add_line(self, msg, decoration="DEFAULT", optional=False, additional=False, splittable=False):
    """Return one display line description as a dict.

    Where:
        msg: string to display
        decoration: one of DEFAULT, UNDERLINE, BOLD, TITLE, PROCESS,
            STATUS, NICE, CPU_TIME, OK[_LOG], CAREFUL[_LOG], WARNING[_LOG],
            CRITICAL[_LOG]
        optional: True if the stat is optional (display only if space is
            available)
        additional: True if the stat is additional (display only if space
            is available after optional)
        splittable: line can be splitted to fit on the screen (default is
            not)
    """
    return dict(msg=msg,
                decoration=decoration,
                optional=optional,
                additional=additional,
                splittable=splittable)
def _compare_vector(arr1, arr2, rel_tol):
    """Compare two vectors (python lists) for approximate equality.

    Elements are floats or strings convertible to floats.  Returns True when
    both vectors have the same length and every pair of elements is within
    the given relative tolerance (as computed by ``_reldiff``).
    """
    if len(arr1) != len(arr2):
        return False
    for raw1, raw2 in zip(arr1, arr2):
        value1 = float(raw1)
        value2 = float(raw2)
        # Exactly-equal magnitudes need no relative comparison.
        if abs(abs(value1) - abs(value2)) == 0.0:
            continue
        # For a basis set, a relatively coarse comparison should be acceptable.
        if _reldiff(value1, value2) > rel_tol:
            return False
    return True
def run_bootstrap_post_init(self, config):
    """Run the ``post_bootstrap`` (or legacy ``post_init``) script, if any,
    and wait for it to complete.

    :param config: bootstrap configuration mapping
    :return: True on success or when no script is configured, False on failure
    """
    cmd = config.get('post_bootstrap') or config.get('post_init')
    if not cmd:
        return True

    conn_kwargs = self._local_connect_kwargs
    if 'host' in conn_kwargs:
        # '/tmp' => '%2Ftmp' for a unix socket path
        host = quote_plus(conn_kwargs['host']) if conn_kwargs['host'].startswith('/') else conn_kwargs['host']
    else:
        host = ''
    # https://www.postgresql.org/docs/current/static/libpq-pgpass.html
    # A host name of localhost matches both TCP (host name localhost) and Unix
    # domain socket (pghost empty or default socket directory) connections
    # coming from the local machine.  Set it so it is written into pgpass.
    conn_kwargs['host'] = 'localhost'

    user = conn_kwargs['user'] + '@' if 'user' in conn_kwargs else ''
    if 'password' in conn_kwargs:
        import getpass
        conn_kwargs.setdefault('user', os.environ.get('PGUSER', getpass.getuser()))

    connstring = 'postgres://{0}{1}:{2}/{3}'.format(user, host, conn_kwargs['port'], conn_kwargs['database'])
    env = self.write_pgpass(conn_kwargs) if 'password' in conn_kwargs else None
    try:
        ret = self.cancellable_subprocess_call(shlex.split(cmd) + [connstring], env=env)
    except OSError:
        logger.error('post_init script %s failed', cmd)
        return False
    if ret != 0:
        logger.error('post_init script %s returned non-zero code %d', cmd, ret)
        return False
    return True
def json_engine(self, req):  # pylint: disable=R0201,W0613
    """Return torrent engine data, translating engine errors to HTTP 500."""
    try:
        return stats.engine_data(config.engine)
    except (error.LoggableError, xmlrpc.ERRORS) as cause:
        raise exc.HTTPInternalServerError(str(cause))
def sub_working_days(self, day, delta, extra_working_days=None, extra_holidays=None, keep_datetime=False):
    """Subtract ``delta`` working days from ``day``.

    Shortcut for ``add_working_days`` with a negated delta; a negative
    ``delta`` is converted to its absolute value first, so these calls are
    equivalent::

        cal.sub_working_days(my_date, -7)
        cal.sub_working_days(my_date, 7)
        cal.add_working_days(my_date, -7)

    The other parameters behave exactly as in ``add_working_days``; set
    ``keep_datetime`` to ``True`` to get a ``datetime`` back when ``day``
    is a ``datetime``.
    """
    return self.add_working_days(
        day, -abs(delta), extra_working_days, extra_holidays,
        keep_datetime=keep_datetime)
def get_queryset(self, request):
    """Limit the admin queryset to variables whose device uses this protocol."""
    base_qs = super(VISAVariableAdmin, self).get_queryset(request)
    # Only devices bound to the VISA protocol belong in this admin view.
    return base_qs.filter(device__protocol_id=PROTOCOL_ID)
def _wrapper_find_one(self, filter_=None, *args, **kwargs):
    """Proxy ``find_one`` to the wrapped collection.

    Returns whatever the underlying collection returns for ``filter_``
    (presumably later wrapped so missing keys do not raise -- the wrapping
    itself happens elsewhere).
    """
    return self.__collect.find_one(filter_, *args, **kwargs)
def _linear_predictor ( self , X = None , modelmat = None , b = None , term = - 1 ) :
"""linear predictor
compute the linear predictor portion of the model
ie multiply the model matrix by the spline basis coefficients
Parameters
at least 1 of ( X , modelmat )
and
at least 1 of ( b , feature )
X : array - like of shape ( n _ samples , m _ features ) or None , optional
containing the input dataset
if None , will attempt to use modelmat
modelmat : array - like or None , optional
contains the spline basis for each feature evaluated at the input
values for each feature , ie model matrix
if None , will attempt to construct the model matrix from X
b : array - like or None , optional
contains the spline coefficients
if None , will use current model coefficients
feature : int , optional
feature for which to compute the linear prediction
if - 1 , will compute for all features
Returns
lp : np . array of shape ( n _ samples , )""" | if modelmat is None :
modelmat = self . _modelmat ( X , term = term )
if b is None :
b = self . coef_ [ self . terms . get_coef_indices ( term ) ]
return modelmat . dot ( b ) . flatten ( ) |
def newton(self, start_x=None, tolerance=1.0e-6):
    """Optimise the value of x using Newton-Gauss.

    When ``start_x`` is not supplied, the analytical fitter provides the
    starting point.
    """
    initial = self._analytical_fitter.fit(self._c) if start_x is None else start_x
    return optimise_newton(initial, self._a, self._c, tolerance)
def write_data(self, write_finished_cb):
    """Serialize all trajectory polynomials and write them to the Crazyflie.

    Each Poly4D contributes 4 axes of 8 little-endian floats (x, y, z, yaw)
    followed by one float duration; the buffer is written at offset 0.
    """
    self._write_finished_cb = write_finished_cb
    payload = bytearray()
    for poly in self.poly4Ds:
        for axis in (poly.x, poly.y, poly.z, poly.yaw):
            payload += struct.pack('<ffffffff', *axis.values)
        payload += struct.pack('<f', poly.duration)
    self.mem_handler.write(self, 0x00, payload, flush_queue=True)
def substitute(script, submap):
    """Check for a template indicator in ``script`` and, if found, perform
    variable substitution based on the template type.

    :param script: script text, possibly containing a template marker
    :param submap: substitution mapping passed to the template handler
    :return: the substituted script, or ``script`` unchanged when no
        template marker is present
    :raises KeyError: when the template type has no registered handler
    """
    match = config.TEMPLATE_RE.search(script)
    if not match:
        return script
    template_type = match.groupdict()['type']
    try:
        return config.TEMPLATE_TYPEMAP[template_type](script, submap)
    except KeyError:
        # Lazy %-style args instead of eager formatting in the log call.
        logger.error('Unsupported template type: %s', template_type)
        raise
def _check_html_response(self, response):
    """Check that the request succeeded (HTTP 200) and that the API key was
    accepted.

    :param response: ``requests`` response object to validate
    :return: True when the response status is 200 (OK)
    :raises APIKeyMissingError: on HTTP 403 (invalid/missing API key)
    :raises LookupError: on any other non-200 status
    """
    # Dead locals removed: the previous revision defined two error-message
    # strings (error1/error2) that were never referenced.
    if response.status_code == requests.codes.ok:
        return True
    err_str = "HTTP Status Code: " + str(response.status_code) + " HTTP Response: " + str(response.text)
    self._logger.error(err_str)
    if response.status_code == 403:
        raise APIKeyMissingError
    raise LookupError(err_str)
def authorize_redirect(
    self,
    redirect_uri: str = None,
    client_id: str = None,
    client_secret: str = None,
    extra_params: Dict[str, Any] = None,
    scope: str = None,
    response_type: str = "code",
) -> None:
    """Redirect the user to obtain OAuth authorization for this service.

    Some providers require registering a redirect URL with your application
    instead of passing one here.  Call this method to log the user in, then
    call ``get_authenticated_user`` in the handler for your redirect URL to
    complete the authorization process.

    ``client_secret`` is accepted for interface compatibility but is not used
    when building the authorize URL.

    .. versionchanged:: 6.0
       The ``callback`` argument and returned awaitable were removed; this
       is now an ordinary synchronous function.
    """
    handler = cast(RequestHandler, self)
    params = {"response_type": response_type}
    if redirect_uri is not None:
        params["redirect_uri"] = redirect_uri
    if client_id is not None:
        params["client_id"] = client_id
    if extra_params:
        params.update(extra_params)
    if scope:
        params["scope"] = " ".join(scope)
    base_url = self._OAUTH_AUTHORIZE_URL  # type: ignore
    handler.redirect(url_concat(base_url, params))
def _callback_set_qs_value(self, key, val, success):
    """Push a state value to QSUSB, retrying with a linear backoff.

    Up to 5 attempts are made; on a confirmed reply the ``success`` callback
    is invoked and True returned, otherwise the failure is logged and False
    returned.
    """
    set_url = URL_SET.format(self._url, key, val)
    with self._lock:
        for attempt in range(1, 6):
            resp = requests.get(set_url)
            if resp.status_code == 200:
                reply = resp.json()
                # A real payload (anything but the sentinel) confirms the set.
                if reply.get('data', 'NO REPLY') != 'NO REPLY':
                    success()
                    return True
            # Back off a little longer on every retry.
            sleep(0.01 * attempt)
        _LOGGER.error("Unable to set %s", set_url)
        return False
def returner(load):
    '''Return data to the local job cache.'''
    serial = salt.payload.Serial(__opts__)
    # A minion returning a standalone job needs a jid allocated first.
    if load['jid'] == 'req':
        load['jid'] = prep_jid(nocache=load.get('nocache', False))
    jid_dir = salt.utils.jid.jid_dir(load['jid'], _job_dir(), __opts__['hash_type'])
    if os.path.exists(os.path.join(jid_dir, 'nocache')):
        return
    hn_dir = os.path.join(jid_dir, load['id'])
    try:
        os.makedirs(hn_dir)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # Minion has already returned this jid; drop it.
            log.error('An extra return was detected from minion %s, please verify '
                      'the minion, this could be a replay attack', load['id'])
            return False
        elif err.errno == errno.ENOENT:
            log.error('An inconsistency occurred, a job was received with a job id '
                      '(%s) that is not present in the local cache', load['jid'])
            return False
        raise
    # Use atomic open to avoid the file being read before it is completely
    # written. Refs #1935
    serial.dump(
        dict((key, load[key]) for key in ['return', 'retcode', 'success'] if key in load),
        salt.utils.atomicfile.atomic_open(os.path.join(hn_dir, RETURN_P), 'w+b'))
    if 'out' in load:
        # Same atomic-open rationale as above. Refs #1935
        serial.dump(
            load['out'],
            salt.utils.atomicfile.atomic_open(os.path.join(hn_dir, OUT_P), 'w+b'))
def add(self, doc):
    """Add one doc's annotations to the binder for serialization."""
    token_attrs = doc.to_array(self.attrs)
    # Always store a 2-D (n_tokens, n_attrs) array.
    if len(token_attrs.shape) == 1:
        token_attrs = token_attrs.reshape((token_attrs.shape[0], 1))
    self.tokens.append(token_attrs)
    spaces = doc.to_array(SPACY)
    assert token_attrs.shape[0] == spaces.shape[0]
    spaces = spaces.reshape((spaces.shape[0], 1))
    self.spaces.append(numpy.asarray(spaces, dtype=bool))
    self.strings.update(w.text for w in doc)
def build_model(self):
    '''Dispatch model construction according to the configured model type.'''
    model_type = self.model_config['model-type']
    # NOTE(review): both branches test the same truthy condition, so the
    # elif (and the else) are unreachable whenever 'model-type' is set.
    # Presumably these were meant to compare against specific type names
    # (e.g. 'red' / 'hred') -- confirm against callers before changing.
    if model_type:
        return self.build_red()
    elif model_type:
        # NOTE(review): 'buidl_hred' looks like a typo for 'build_hred';
        # the method is defined elsewhere, so the call is left as-is.
        return self.buidl_hred()
    else:
        raise Error("Unrecognized model type '{}'".format(model_type))
def readGyroRange(self):
    """Read the gyroscope full-scale range configuration.

    @return an int; expected to be one of GYRO_RANGE_250DEG,
    GYRO_RANGE_500DEG, GYRO_RANGE_1KDEG, GYRO_RANGE_2KDEG
    """
    config = self._readByte(self.REG_GYRO_CONFIG)
    # Keep only the range-select field: clear every bit in 0xE7.
    # Bit-identical to the (config | 0xE7) ^ 0xE7 idiom.
    return config & ~0xE7
def sent_tokenize ( context ) :
"""Cut the given context into sentences .
Avoid a linebreak in between paried symbols , float numbers , and some abbrs .
Nothing will be discard after sent _ tokeinze , simply ' ' . join ( sents ) will get the original context .
Evey whitespace , tab , linebreak will be kept .
> > > context = " I love you . Please don ' t leave . "
> > > sent _ tokenize ( context )
[ " I love you . " , " Please don ' t leave . " ]""" | # Define the regular expression
paired_symbols = [ ( "(" , ")" ) , ( "[" , "]" ) , ( "{" , "}" ) ]
paired_patterns = [ "%s.*?%s" % ( re . escape ( lt ) , re . escape ( rt ) ) for lt , rt in paired_symbols ]
number_pattern = [ '\d+\.\d+' ]
arr_pattern = [ '(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]|\svs\. |et al\.|Fig\. \d|approx\.|(?:Prof|Dr)\. (?:[A-Z]\.)?' ]
# Find the string which matches the above pattern , and remove than from the context , to get a stem string
escape_re = re . compile ( "|" . join ( paired_patterns + number_pattern + arr_pattern ) )
escapes = escape_re . findall ( context )
escaped_stem = escape_re . sub ( '{}' , context )
escaped_escaped_stem = escaped_stem . replace ( '{' , '{{' ) . replace ( '}' , '}}' )
# Find the linebreaks
sent_re = re . compile ( r'([A-Z0-9]..+?(?:[.!?]\s+|[\n$]+))' )
linebreaks = sent_re . findall ( escaped_escaped_stem )
sent_stem = sent_re . sub ( r'\1###linebreak###' , escaped_escaped_stem )
recovered_sent_stem = sent_stem . replace ( '{{}}' , '{}' )
result = recovered_sent_stem . format ( * escapes )
return [ r for r in result . split ( '###linebreak###' ) if r is not '' ] |
def ext_process(listname, hostname, url, filepath, msg):
    """Index a just-archived mailing-list message into Elasticsearch.

    Arguments are the list name, the host name, the URL to the just archived
    message, the file system path to the message, and the message object.
    These can be replaced or augmented as needed.
    """
    from pyes import ES
    from pyes.exceptions import ClusterBlockException, NoServerAvailable
    import datetime
    # CHANGE these settings to reflect your configuration
    _ES_SERVERS = ['127.0.0.1:9500']  # thrift is preferred
    _indexname = "mailman"
    _doctype = "mail"
    date = datetime.datetime.today()
    try:
        iconn = ES(_ES_SERVERS)
        status = None
        try:
            status = iconn.status(_indexname)
            logger.debug("Indexer status:%s" % status)
        except:
            # NOTE(review): bare except inherited from the original -- any
            # failure to read the status triggers index creation.
            iconn.create_index(_indexname)
            time.sleep(1)
            status = iconn.status(_indexname)
        mappings = {
            u'text': {'boost': 1.0, 'index': 'analyzed', 'store': 'yes', 'type': u'string', "term_vector": "with_positions_offsets"},
            u'url': {'boost': 1.0, 'index': 'not_analyzed', 'store': 'yes', 'type': u'string', "term_vector": "no"},
            u'title': {'boost': 1.0, 'index': 'analyzed', 'store': 'yes', 'type': u'string', "term_vector": "with_positions_offsets"},
            u'date': {'store': 'yes', 'type': u'date'},
        }
        time.sleep(1)
        status = iconn.put_mapping(_doctype, mappings, _indexname)
        data = dict(url=url, title=msg.get('subject'), date=date, text=str(msg))
        iconn.index(data, _indexname, _doctype)
        syslog('debug', 'listname: %s, hostname: %s, url: %s, path: %s, msg: %s', listname, hostname, url, filepath, msg)
    except ClusterBlockException:
        syslog('error', 'Cluster in revocery state: listname: %s, hostname: %s, url: %s, path: %s, msg: %s', listname, hostname, url, filepath, msg)
    except NoServerAvailable:
        syslog('error', 'No server available: listname: %s, hostname: %s, url: %s, path: %s, msg: %s', listname, hostname, url, filepath, msg)
    except:
        # NOTE(review): bare except inherited from the original; it at least
        # logs the full traceback before swallowing the error.
        import traceback
        syslog('error', 'Unknown: listname: %s, hostname: %s, url: %s, path: %s, msg: %s\nstacktrace: %s', listname, hostname, url, filepath, msg, repr(traceback.format_exc()))
    return
def plot_CI(ax, sampler, modelidx=0, sed=True, confs=None, e_unit=u.eV, label=None, e_range=None, e_npoints=100, threads=None, last_step=False):
    """Plot confidence interval.

    Parameters
    ----------
    ax : `matplotlib.Axes`
        Axes to plot on.
    sampler : `emcee.EnsembleSampler`
        Sampler.
    modelidx : int, optional
        Model index. Default is 0.
    sed : bool, optional
        Whether to plot SED or differential spectrum. If `None`, the units
        of the observed spectrum will be used.
    confs : list, optional
        Confidence levels (in sigma) for the bands. Default ``[3, 1, 0.5]``.
        The previous revision used a mutable default list and sorted the
        caller's list in place; the argument is now copied before sorting.
    e_unit : `~astropy.units.Unit` or str parseable to unit
        Unit in which to plot the energy axis.
    e_npoints : int, optional
        Points to compute for model samples and ML model when ``e_range``
        is set.
    threads : int, optional
        Parallel processing threads for computing the samples; defaults to
        the number of available cores.
    last_step : bool, optional
        Use only the final-step positions (True) or the whole chain (False).
    """
    if confs is None:
        confs = [3, 1, 0.5]
    # Sorted copy, widest interval first, without mutating the caller's list.
    confs = sorted(confs, reverse=True)
    modelx, CI = _calc_CI(sampler, modelidx=modelidx, confs=confs, e_range=e_range, e_npoints=e_npoints, last_step=last_step, threads=threads)
    # Pick first confidence-interval curve for units.
    f_unit, sedf = sed_conversion(modelx, CI[0][0].unit, sed)
    for (ymin, ymax), conf in zip(CI, confs):
        # Grey level scales with the confidence level.
        color = np.log(conf) / np.log(20) + 0.4
        ax.fill_between(
            modelx.to(e_unit).value,
            (ymax * sedf).to(f_unit).value,
            (ymin * sedf).to(f_unit).value,
            lw=0.001, color=(color,) * 3, alpha=0.6, zorder=-10)
    _plot_MLmodel(ax, sampler, modelidx, e_range, e_npoints, e_unit, sed)
    if label is not None:
        ax.set_ylabel("{0} [{1}]".format(label, f_unit.to_string("latex_inline")))
def _message_address_generate(self, beacon_config):
    """Generate the address part of a request/response message.

    :param beacon_config: server or client configuration (client for a
        request, server for a response)
    :return: bytes (empty when no public address is configured)
    """
    section = beacon_config['wasp-general::network::beacon']
    if section['public_address'] == '':
        return b''
    address = str(WIPV4SocketInfo.parse_address(section['public_address'])).encode('ascii')
    result = WBeaconGouverneurMessenger.__message_splitter__ + address
    if section['public_port'] != '':
        port = beacon_config.getint('wasp-general::network::beacon', 'public_port')
        result += WBeaconGouverneurMessenger.__message_splitter__ + str(port).encode('ascii')
    return result
def do_execute(self):
    """The actual execution of the actor: emit one Token per configured string.

    :return: None if successful, otherwise error message
    :rtype: str
    """
    for text in self.resolve_option("strings"):
        self._output.append(Token(text))
    return None
def angular_crossmatch_against_catalogue(self, objectList, searchPara=None, search_name="", brightnessFilter=False, physicalSearch=False, classificationType=False):
    """*Perform an angular-separation crossmatch against a catalogue in the
    database and annotate the matches with value-added parameters (distances,
    physical separations, transient sub-type, etc.).*

    **Key Arguments:**
        - ``objectList`` -- transient locations to match (dicts with at least
          ``ra``, ``dec`` and ``id`` keys)
        - ``searchPara`` -- search parameters for this individual search, as
          lifted from the search algorithm in the sherlock settings file
        - ``search_name`` -- name of the search in the settings file
        - ``brightnessFilter`` -- constrain by catalogue-source magnitude?
          Default *False*. [bright|faint|general]
        - ``physicalSearch`` -- is this angular search a sub-part of a
          physical separation search
        - ``classificationType`` -- synonym, association or annotation.
          Default *False*

    **Return:**
        - ``catalogueMatches`` -- annotated sources matched against the
          transients (ordered by angular separation)

    Fix over the previous revision: ``searchPara`` defaulted to a mutable
    ``{}`` that this method mutates (``searchPara["mag column"] = None``),
    leaking state across calls; the default is now ``None``.
    """
    if searchPara is None:
        searchPara = {}
    self.log.debug('starting the ``angular_crossmatch_against_catalogue`` method')
    self.log.info("STARTING %s SEARCH" % (search_name,))
    start_time = time.time()

    # DEFAULTS
    magnitudeLimitFilter = None
    upperMagnitudeLimit = False
    lowerMagnitudeLimit = False
    catalogueName = searchPara["database table"]
    if not "mag column" in searchPara:
        searchPara["mag column"] = None
    if brightnessFilter:
        if "mag column" in searchPara and searchPara["mag column"]:
            magnitudeLimitFilter = self.colMaps[catalogueName][searchPara["mag column"] + "ColName"]
        theseSearchPara = searchPara[brightnessFilter]
    else:
        theseSearchPara = searchPara

    # EXTRACT PARAMETERS FROM ARGUMENTS & SETTINGS FILE
    if classificationType == "synonym":
        radius = self.settings["synonym radius arcsec"]
        matchedType = theseSearchPara["synonym"]
    elif classificationType == "association":
        radius = theseSearchPara["angular radius arcsec"]
        matchedType = theseSearchPara["association"]
    elif classificationType == "annotation":
        radius = theseSearchPara["angular radius arcsec"]
        matchedType = theseSearchPara["annotation"]
    if brightnessFilter == "faint":
        upperMagnitudeLimit = theseSearchPara["mag limit"]
    elif brightnessFilter == "bright":
        lowerMagnitudeLimit = theseSearchPara["mag limit"]
    elif brightnessFilter == "general":
        if "faint" in searchPara:
            lowerMagnitudeLimit = searchPara["faint"]["mag limit"]
        if "bright" in searchPara:
            upperMagnitudeLimit = searchPara["bright"]["mag limit"]

    transRAs = [t['ra'] for t in objectList]
    transDecs = [t['dec'] for t in objectList]
    if len(transRAs) == 0:
        return []

    cs = catalogue_conesearch(
        log=self.log,
        ra=transRAs,
        dec=transDecs,
        radiusArcsec=radius,
        colMaps=self.colMaps,
        tableName=catalogueName,
        dbConn=self.dbConn,
        nearestOnly=False,
        physicalSearch=physicalSearch,
        upperMagnitudeLimit=upperMagnitudeLimit,
        lowerMagnitudeLimit=lowerMagnitudeLimit,
        magnitudeLimitFilter=magnitudeLimitFilter
    )
    # catalogueMatches ARE ORDERED BY ANGULAR SEPARATION
    indices, catalogueMatches = cs.search()

    annotatedcatalogueMatches = []
    for i, xm in zip(indices, catalogueMatches):
        # CALCULATE PHYSICAL PARAMETERS ... IF WE CAN
        if "cmSepArcsec" in xm:
            xm["separationArcsec"] = xm["cmSepArcsec"]
            # CALCULATE SEPARATION IN ARCSEC
            calculator = separations(
                log=self.log,
                ra1=objectList[i]["ra"],
                dec1=objectList[i]["dec"],
                ra2=xm["ra"],
                dec2=xm["dec"]
            )
            angularSeparation, north, east = calculator.get()
            xm["northSeparationArcsec"] = north
            xm["eastSeparationArcsec"] = east
            del xm["cmSepArcsec"]
        xm["association_type"] = matchedType
        xm["catalogue_view_name"] = catalogueName
        xm["transient_object_id"] = objectList[i]["id"]
        xm["catalogue_table_name"] = self.colMaps[catalogueName]["description"]
        xm["catalogue_table_id"] = self.colMaps[catalogueName]["table_id"]
        xm["catalogue_view_id"] = self.colMaps[catalogueName]["id"]
        if classificationType == "synonym":
            xm["classificationReliability"] = 1
        elif classificationType == "association":
            xm["classificationReliability"] = 2
        elif classificationType == "annotation":
            xm["classificationReliability"] = 3
        xm = self._annotate_crossmatch_with_value_added_parameters(
            crossmatchDict=xm,
            catalogueName=catalogueName,
            searchPara=theseSearchPara,
            search_name=search_name
        )
        annotatedcatalogueMatches.append(xm)
    catalogueMatches = annotatedcatalogueMatches

    # IF BRIGHT STAR SEARCH
    if brightnessFilter == "bright" and "star" in search_name:
        catalogueMatches = self._bright_star_match(
            matchedObjects=catalogueMatches,
            catalogueName=catalogueName,
            lowerMagnitudeLimit=lowerMagnitudeLimit,
            magnitudeLimitFilter=searchPara["mag column"]
        )
    if brightnessFilter == "general" and "galaxy" in search_name and "galaxy-like" not in search_name and "physical radius kpc" not in theseSearchPara:
        catalogueMatches = self._galaxy_association_cuts(
            matchedObjects=catalogueMatches,
            catalogueName=catalogueName,
            lowerMagnitudeLimit=lowerMagnitudeLimit,
            upperMagnitudeLimit=upperMagnitudeLimit,
            magnitudeLimitFilter=searchPara["mag column"]
        )
    # KEEP ONLY THE NEAREST MATCH PER TRANSIENT, IF REQUESTED
    if "match nearest source only" in theseSearchPara and theseSearchPara["match nearest source only"] == True and len(catalogueMatches):
        nearestMatches = []
        transList = []
        for c in catalogueMatches:
            if c["transient_object_id"] not in transList:
                transList.append(c["transient_object_id"])
                nearestMatches.append(c)
        catalogueMatches = nearestMatches

    self.log.debug('completed the ``angular_crossmatch_against_catalogue`` method')
    self.log.debug("FINISHED %s SEARCH IN %0.5f s" % (search_name, time.time() - start_time,))
    return catalogueMatches
def pack(header, s):
    """Pack a string into an MXImageRecord.

    Parameters
    ----------
    header : IRHeader
        Header of the image record. ``header.label`` can be a number or an
        array (see ``IRHeader``).
    s : str
        Raw image string to be packed.

    Returns
    -------
    s : str
        The packed string.

    Examples
    --------
    >>> label = 4  # label can also be a 1-D array, e.g. label = [1, 2, 3]
    >>> id = 2574
    >>> header = mx.recordio.IRHeader(0, label, id, 0)
    >>> with open(path, 'r') as file:
    ...     s = file.read()
    >>> packed_s = mx.recordio.pack(header, s)
    """
    header = IRHeader(*header)
    if isinstance(header.label, numbers.Number):
        # Scalar label travels inside the header itself.
        header = header._replace(flag=0)
    else:
        # Array label: prepend its float32 bytes and record its size in flag.
        label = np.asarray(header.label, dtype=np.float32)
        header = header._replace(flag=label.size, label=0)
        s = label.tostring() + s
    return struct.pack(_IR_FORMAT, *header) + s
def do_print(self, url_data):
    """Decide whether this URL entry should be logged.

    Log everything in verbose mode, warnings when warning-logging is on,
    and otherwise only invalid URLs.
    """
    if self.verbose or (self.warnings and url_data.warnings):
        return True
    return not url_data.valid
def onEnable(self):
    """The configuration containing this function has been enabled by host.

    Endpoints become working files, so submit some read operations.
    Call order matters: disable first, queue the block-list read, then
    re-enter the send path before flagging enabled.
    """
    trace('onEnable')
    self._disable()
    self._aio_context.submit(self._aio_recv_block_list)
    self._real_onCanSend()
    self._enabled = True
def calculate_dimensions(image_size, desired_size):
    """Return the tuple of arguments to pass to ``Image.crop``.

    Returns None when the image is smaller than ``desired_size`` in both
    dimensions.  Otherwise each axis is center-cropped independently:
    the crop origin is the (truncated) center minus half the target size
    (rounded up), clamped to 0 when the source is already small enough.
    """
    src_w, src_h = image_size
    dst_w, dst_h = desired_size
    if src_w < dst_w and src_h < dst_h:
        return None
    if src_w > dst_w:
        crop_x = floor(src_w / 2) - ceil(dst_w / 2)
        crop_w = dst_w
    else:
        crop_x, crop_w = 0, src_w
    if src_h > dst_h:
        crop_y = floor(src_h / 2) - ceil(dst_h / 2)
        crop_h = dst_h
    else:
        crop_y, crop_h = 0, src_h
    return (int(crop_x), int(crop_y), crop_w, crop_h)
def run_command(cmd, out, ignore_errors=False):
    """Run ``cmd`` streaming its output while also capturing stderr.

    We want to both send subprocess output to stdout (or another file
    descriptor) as the subprocess runs, *and* capture the actual error
    message on failure.  CalledProcessError does not reliably carry the
    underlying message, so stderr is teed to a temporary file which is read
    back when the command fails.

    :param cmd: command argument list
    :param out: file object/descriptor receiving stdout and the teed stderr
    :param ignore_errors: when True, non-zero exit codes do not raise
    :return: the subprocess return code
    :raises CommandError: on non-zero exit (unless ``ignore_errors``)
    """
    tempdir = tempfile.mkdtemp()
    output_file = os.path.join(tempdir, "stderr")
    original_cmd = " ".join(cmd)
    # try/finally so the temp dir is removed even if Popen/tee setup raises
    # (the previous revision leaked it on those paths).
    try:
        p = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)
        t = subprocess.Popen(["tee", output_file], stdin=p.stderr, stdout=out)
        t.wait()
        p.communicate()
        p.stderr.close()
        if p.returncode != 0 and not ignore_errors:
            with open(output_file, "r") as output:
                error = output.read()
            message = 'Command: "{}": Error: "{}"'.format(original_cmd, error.replace("\n", ""))
            raise CommandError(message)
        return p.returncode
    finally:
        shutil.rmtree(tempdir, ignore_errors=True)
def create_vip(self, vip_request_ids):
    """Deploy the given vip request(s).

    :param vip_request_ids: vip_request ids (single id or joined ids)
    """
    uri = 'api/v3/vip-request/deploy/{0}/'.format(vip_request_ids)
    return super(ApiVipRequest, self).post(uri)
def sameState(s1, s2):
    """Return True when two DFA states are equivalent.

    Note:
        state := [nfaclosure: Long, [arc], accept: Boolean]
        arc   := [label, arrow: Int, nfaClosure: Long]

    States match when they have the same accept flag and the same arc list,
    comparing each arc by label and arrow only (the trailing nfaClosure is
    deliberately ignored).
    """
    if len(s1[1]) != len(s2[1]) or s1[2] != s2[2]:
        return False
    # zip is safe: lengths were checked above.  arc[:-1] drops nfaClosure.
    return all(arc1[:-1] == arc2[:-1] for arc1, arc2 in zip(s1[1], s2[1]))
def checked(self, value):
    """Setter for the **self.__checked** attribute.

    :param value: Attribute value.
    :type value: bool
    """
    if value is not None:
        # Validation uses assert per this codebase's property-setter style.
        assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("checked", value)
    self.set_checked(value)
def _traverse_dict ( self , input_dict , resolution_data , resolver_method ) :
"""Traverse a dictionary to resolve intrinsic functions on every value
: param input _ dict : Input dictionary to traverse
: param resolution _ data : Data that the ` resolver _ method ` needs to operate
: param resolver _ method : Method that can actually resolve an intrinsic function , if it detects one
: return : Modified dictionary with values resolved""" | for key , value in input_dict . items ( ) :
input_dict [ key ] = self . _traverse ( value , resolution_data , resolver_method )
return input_dict |
def get_send_command(self, send):
    """Internal helper: return the command string that is really sent.

    For commands whose first word is md5sum/sed/head, the word is replaced
    with the platform-appropriate command looked up via ``get_command``.
    """
    shutit_global.shutit_global_object.yield_to_draw()
    if send is None:
        return send
    words = send.split()
    if words and words[0] in ('md5sum', 'sed', 'head'):
        replacement = self.get_command(words[0])
        send = send.replace(words[0], replacement)
    return send
def gstd(data, channels=None):
    """Calculate the geometric std. dev. of the events in an FCSData object.

    Parameters
    ----------
    data : FCSData or numpy array
        NxD flow cytometry data where N is the number of events and D is
        the number of parameters (aka channels).
    channels : int or str or list of int or list of str, optional
        Channels on which to calculate the statistic. If None, use all
        channels.

    Returns
    -------
    float or numpy array
        The geometric standard deviation of the events in the specified
        channels of ``data``.
    """
    # Slice out the requested channels, then exp(std(log(x))) per channel.
    selected = data if channels is None else data[:, channels]
    return np.exp(np.std(np.log(selected), axis=0))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.