signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def corrcoef(time, crossf, integration_window=0.):
    """Calculate the correlation coefficient for given auto- and
    crosscorrelation functions.

    Standard settings yield the zero-lag correlation coefficient. Setting
    integration_window > 0 yields the correlation coefficient of integrated
    auto- and crosscorrelation functions. The correlation coefficient
    between a zero signal and any other signal is defined as 0.

    Parameters
    ----------
    time : numpy.ndarray
        1-dim array of times corresponding to the signal.
    crossf : numpy.ndarray
        Crosscorrelation functions; 1st axis first unit, 2nd axis second
        unit, 3rd axis times.
    integration_window : float
        Size of the integration window.

    Returns
    -------
    cc : numpy.ndarray
        2-dim array of correlation coefficients between pairs of units.
    """
    N = len(crossf)
    cc = np.zeros(np.shape(crossf)[:-1])
    tbin = abs(time[1] - time[0])
    lim = int(integration_window / tbin)
    # Index of the zero-lag bin. BUG FIX: use integer floor division --
    # the original used '/ 2 - 1' and np.floor(), both of which produce
    # floats in Python 3, and floats are not valid slice indices.
    if len(time) % 2 == 0:
        mid = len(time) // 2 - 1
    else:
        mid = len(time) // 2
    for i in range(N):
        ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1])
        # Baseline estimated from the early (pre-zero-lag) part of the
        # autocorrelation function.
        offset_autoi = np.mean(crossf[i, i][:mid - 1])
        for j in range(N):
            cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1])
            offset_cross = np.mean(crossf[i, j][:mid - 1])
            aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1])
            offset_autoj = np.mean(crossf[j, j][:mid - 1])
            if ai > 0. and aj > 0.:
                cc[i, j] = (cij - offset_cross) / np.sqrt((ai - offset_autoi) * (aj - offset_autoj))
            else:
                # A zero (auto)signal has correlation 0 by definition.
                cc[i, j] = 0.
    return cc
def parse_path(path):
    """Parse an RFC 6901 (JSON pointer) path.

    A string path must start with "/" and is split into its reference
    tokens; a tuple or list is assumed to already be tokenized and is
    returned unchanged. Empty paths and the bare "/" are rejected.
    """
    if not path:
        raise ValueError("Invalid path")
    if isinstance(path, (tuple, list)):
        # Already tokenized -- pass through as-is.
        return path
    if isinstance(path, str):
        if path == "/" or not path.startswith("/"):
            raise ValueError("Invalid path")
        # Drop the empty token produced by the leading separator.
        return path.split(_PATH_SEP)[1:]
    raise ValueError("A path must be a string, tuple or list")
def ReadStoredProcedures(self, collection_link, options=None):
    """Reads all stored procedures in a collection.

    :param str collection_link: The link to the document collection.
    :param dict options: The request options for the request.
    :return: Query Iterable of Stored Procedures.
    :rtype: query_iterable.QueryIterable
    """
    # Reading is a query with no filter expression.
    effective_options = {} if options is None else options
    return self.QueryStoredProcedures(collection_link, None, effective_options)
def get_field_def(
    schema,       # type: GraphQLSchema
    parent_type,  # type: Union[GraphQLInterfaceType, GraphQLObjectType]
    field_ast,    # type: Field
):
    # type: (...) -> Optional[GraphQLField]
    """Not exactly the same as the executor's definition of get_field_def: in
    this statically evaluated environment we do not always have an Object
    type, and need to handle Interface and Union types.
    """
    field_name = field_ast.name.value
    # __schema and __type are only defined on the query root type.
    if schema.get_query_type() == parent_type:
        if field_name == "__schema":
            return SchemaMetaFieldDef
        if field_name == "__type":
            return TypeMetaFieldDef
    # __typename exists on every composite type.
    if field_name == "__typename" and isinstance(
        parent_type, (GraphQLObjectType, GraphQLInterfaceType, GraphQLUnionType)
    ):
        return TypeNameMetaFieldDef
    # Ordinary fields: only Object and Interface types carry a field map.
    if isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):
        return parent_type.fields.get(field_name)
    return None
def calculateSignature(privateSigningKey, message):
    """Sign a message with the given private key.

    :type privateSigningKey: ECPrivateKey
    :type message: bytearray
    """
    key_type = privateSigningKey.getType()
    if key_type != Curve.DJB_TYPE:
        raise InvalidKeyException("Unknown type: %s" % key_type)
    # 64 random bytes are required by the underlying signature primitive.
    random_bytes = os.urandom(64)
    return _curve.calculateSignature(random_bytes, privateSigningKey.getPrivateKey(), message)
def cut(self):
    """Cut the current selection via the base editor.

    The selection is first clamped so it cannot extend into the region
    before ``header_end_pos``; the base class cut is only invoked if a
    selection remains afterwards.
    """
    # Clamp the selection so it never includes the protected header region.
    self.truncate_selection(self.header_end_pos)
    if self.has_selected_text():
        CodeEditor.cut(self)
def nbody_separation(expr, qs):
    """Convert an n-body problem to a 2-body problem.

    Args:
        expr: sympy expression (a sum of terms) to be separated.
        qs: sympy symbols to be used as supplementary variables.

    Returns:
        new_expr (sympy expr), constraints (sympy expr),
        mapping (dict[(str, str) -> Symbol]):
        ``new_expr`` is the converted problem; ``constraints`` are the
        penalty constraints for the supplementary variables. You may use
        ``expr = new_expr + delta * constraints`` with a floating point
        ``delta``. ``mapping`` records which symbol pair each
        supplementary variable replaces.
    """
    try:
        import sympy
    except ImportError:
        raise ImportError("This function requires sympy. Please install it.")
    logging.debug(expr)
    free_symbols = expr.free_symbols
    logging.debug(free_symbols)
    # Only additive expressions are supported; each addend is reduced
    # independently below.
    assert type(expr) == sympy.Add
    logging.debug(expr.args)
    mapping = {}
    new_expr = sympy.expand(0)
    constraints = sympy.expand(0)
    i_var = 0  # index of the next unused supplementary symbol from qs
    for arg in expr.args:
        if isinstance(arg, sympy.Symbol):
            # A lone variable is already at most 1-body: keep as-is.
            new_expr += arg
            continue
        if not arg.free_symbols:
            # Constant term: keep as-is.
            new_expr += arg
            continue
        assert type(arg) == sympy.Mul
        syms = arg.free_symbols.copy()
        # Repeatedly replace pairs of variables with a single supplementary
        # variable until the term involves at most two variables.
        while len(syms) > 2:
            it = iter(syms)
            # zip(it, it) walks the symbols pairwise; with an odd count the
            # final unpaired symbol is carried into the next while pass.
            for v1, v2 in zip(it, it):
                if (str(v1), str(v2)) in mapping:
                    # Reuse the supplementary variable created earlier for
                    # this same product.
                    v = mapping[str(v1), str(v2)]
                    logging.debug(f"{v1}*{v2} -> {v} (Existed variable)")
                else:
                    v = qs[i_var]
                    i_var += 1
                    mapping[(str(v1), str(v2))] = v
                    logging.debug(f"{v1}*{v2} -> {v} (New variable)")
                # Penalty that is minimized exactly when v == v1*v2
                # (standard quadratization gadget for binary variables).
                constraints += 3 * v + v1 * v2 - 2 * v1 * v - 2 * v2 * v
                logging.debug(f"constraints: {constraints}")
                arg = arg.subs(v1 * v2, v)
            syms = arg.free_symbols.copy()
        new_expr += arg
        logging.debug(f"new_expr: {new_expr}")
    return new_expr, constraints, mapping
def parse_args(argv=None):
    """Parse command line options.

    :param argv: optional argument list; defaults to sys.argv[1:].
    :return: the parsed options namespace.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument('--replay-file', dest="replay_file", type=str, required=True)
    return arg_parser.parse_args(argv)
def get(self, build_record_id, **kwargs):
    """Get Build Record Push Result by Id.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, define a ``callback`` function to be invoked
    when receiving the response:

        >>> def callback_function(response):
        >>>     pprint(response)
        >>> thread = api.get(build_record_id, callback=callback_function)

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param int build_record_id: Build Record id (required)
    :return: BuildRecordPushResultRest; if called asynchronously, returns
        the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to get_with_http_info: with a
    # callback it returns the request thread, otherwise the response data.
    return self.get_with_http_info(build_record_id, **kwargs)
def set_default_moe_hparams(hparams):
    """Add necessary hyperparameters for mixture-of-experts."""
    hparams.moe_num_experts = 16
    hparams.moe_loss_coef = 1e-2
    # Register every MoE hyperparameter with its default value, preserving
    # the original registration order.
    moe_defaults = (
        ("moe_gating", "top_2"),
        # Experts have fixed capacity per batch. We need some extra capacity
        # in case gating is not perfectly balanced.
        # moe_capacity_factor_* should be set to a value >= 1.
        ("moe_capacity_factor_train", 1.25),
        ("moe_capacity_factor_eval", 2.0),
        ("moe_capacity_factor_second_level", 1.0),
        # Each expert has a hidden layer with this size.
        ("moe_hidden_size", 4096),
        # For gating, divide inputs into groups of this size before gating.
        # Each group sends the same number of inputs to each expert.
        # Ideally, the group size would be the whole batch, but this is
        # expensive due to our use of matrix multiplication for reordering.
        ("moe_group_size", 1024),
        # For top_2 gating, whether to impose an additional loss in order to
        # make the experts equally used as the second-place expert.
        ("moe_use_second_place_loss", 0),
        # In top_2 gating, policy for whether to use a second-place expert.
        # Legal values are:
        #   "all": always
        #   "none": never
        #   "threshold": if gate value > the given threshold
        #   "random": if gate value > threshold * random_uniform(0, 1)
        ("moe_second_policy_train", "random"),
        ("moe_second_policy_eval", "random"),
        ("moe_second_threshold_train", 0.2),
        ("moe_second_threshold_eval", 0.2),
    )
    for name, default in moe_defaults:
        hparams.add_hparam(name, default)
def height_to_geopotential(height):
    r"""Compute geopotential for a given height.

    Parameters
    ----------
    height : `pint.Quantity`
        Height above sea level (array_like)

    Returns
    -------
    `pint.Quantity`
        The corresponding geopotential value(s)

    Examples
    --------
    >>> import numpy as np
    >>> import metpy.calc
    >>> from metpy.units import units
    >>> height = np.linspace(0, 10000, num=11) * units.m
    >>> geopot = metpy.calc.height_to_geopotential(height)

    Notes
    -----
    Derived from the definition of geopotential in [Hobbs2006]_ pg. 14
    Eq. 1.8. The formula integrates Newtonian gravity G*me/r**2 from the
    planetary radius Re up to Re + height, rather than assuming a constant
    surface gravity.
    """
    # Calculate geopotential: G*me * (1/Re - 1/(Re + height))
    geopot = mpconsts.G * mpconsts.me * ((1 / mpconsts.Re) - (1 / (mpconsts.Re + height)))
    return geopot
def filter_leader_files(cluster_config, broker_files):
    """Given a list of broker files, filters out all the files that
    are in the replicas.

    :param cluster_config: the cluster
    :type cluster_config: kafka_utils.utils.config.ClusterConfig
    :param broker_files: the broker files
    :type broker_files: list of (b_id, host, [file_path, ...]) tuples
    :returns: the filtered list
    :rtype: list of (broker_id, host, [file_path, ...]) tuples
    """
    print("Filtering leaders")
    leader_of = get_partition_leaders(cluster_config)
    result = []
    for broker_id, host, file_paths in broker_files:
        kept = []
        for file_path in file_paths:
            topic_partition = get_tp_from_file(file_path)
            # Keep the file when this broker leads the partition, or when
            # the partition has no known leader.
            if leader_of.get(topic_partition, broker_id) == broker_id:
                kept.append(file_path)
        result.append((broker_id, host, kept))
        print("Broker: {broker}, leader of {l_count} over {f_count} files".format(broker=broker_id, l_count=len(kept), f_count=len(file_paths), ))
    return result
def connect(self, server, port=6667):
    """Connects to a given IRC server. After the connection is established,
    it calls the on_connect event handlers.

    :param server: host name or address of the IRC server
    :param port: TCP port, defaults to the conventional IRC port 6667
    """
    self.socket.connect((server, port))
    # Set up the incoming-line generator before notifying handlers, so
    # handlers can interact with the connection immediately.
    self.lines = self._read_lines()
    # Iterate over a copy so a handler may modify the handler list without
    # breaking iteration.
    for event_handler in list(self.on_connect):
        event_handler(self)
def _handle_duplicate_sources(self, vt, sources):
    """Handles duplicate sources generated by the given gen target by either
    failure or deletion.

    This method should be called after all dependencies have been injected
    into the graph, but before injecting the synthetic version of this
    target.

    Returns a boolean indicating whether it modified the underlying
    filesystem.

    NB(gm): Some code generators may re-generate code that their dependent
    libraries generate. This results in targets claiming to generate sources
    that they really don't, so we try to filter out sources that were
    actually generated by dependencies of the target. This causes the code
    generated by the dependencies to 'win' over the code generated by
    dependees. By default, this behavior is disabled, and duplication in
    generated sources will raise a TaskError. This is controlled by the
    --allow-dups flag.
    """
    target = vt.target
    target_workdir = vt.results_dir
    # Walk dependency gentargets and record any sources owned by those
    # targets that are also owned by this target.
    duplicates_by_target = OrderedDict()
    def record_duplicates(dep):
        # Skip the target itself and anything that is not a gen target.
        if dep == target or not self.is_gentarget(dep.concrete_derived_from):
            return False
        duped_sources = [s for s in dep.sources_relative_to_source_root() if s in sources.files and not self.ignore_dup(target, dep, s)]
        if duped_sources:
            duplicates_by_target[dep] = duped_sources
    target.walk(record_duplicates)
    # If there were no dupes, we're done.
    if not duplicates_by_target:
        return False
    # If there were duplicates warn or error.
    messages = ['{target} generated sources that had already been generated by dependencies.'.format(target=target.address.spec)]
    for dep, duped_sources in duplicates_by_target.items():
        messages.append('\t{} also generated:'.format(dep.concrete_derived_from.address.spec))
        messages.extend(['\t\t{}'.format(source) for source in duped_sources])
    message = '\n'.join(messages)
    if self.get_options().allow_dups:
        logger.warn(message)
    else:
        raise self.DuplicateSourceError(message)
    did_modify = False
    # Finally, remove duplicates from the workdir. This prevents us from
    # having to worry about them during future incremental compiles.
    for dep, duped_sources in duplicates_by_target.items():
        for duped_source in duped_sources:
            safe_delete(os.path.join(target_workdir, duped_source))
            did_modify = True
    if did_modify:
        # Invalidate the cached digest since the results dir changed.
        Digest.clear(vt.current_results_dir)
    return did_modify
def GET(self, *args, **kwargs):
    """Handle an HTTP GET request by dispatching to the generic API handler
    with the GET action constant."""
    return self._handle_api(self.API_GET, args, kwargs)
def report_stats(self):
    """Create the dict of stats data for the MCP stats queue.

    Returns the current counter values alongside the values from the
    previous report, then rolls the current counters into ``previous``.
    """
    if not self.previous:
        # First report ever: seed the baseline with zero for every counter.
        self.previous = {counter_key: 0 for counter_key in self.counters}
    snapshot = {
        'name': self.name,
        'consumer_name': self.consumer_name,
        'counts': dict(self.counters),
        'previous': dict(self.previous),
    }
    # Next report compares against the counters as they stand now.
    self.previous = dict(self.counters)
    return snapshot
def get_role(role_id, **kwargs):
    """Get a role by its ID.

    :param role_id: primary key of the Role row to fetch
    :raises HydraError: if no role with the given id exists
    :return: the matching Role ORM object
    """
    try:
        # .one() raises NoResultFound when there is no match.
        role = db.DBSession.query(Role).filter(Role.id == role_id).one()
        return role
    except NoResultFound:
        raise HydraError("Role not found (role_id={})".format(role_id))
def _setup_source_and_destination(self):
    """Use the base class to set up the source and destinations, but add to
    that setup the instantiation of the "new_crash_source".
    """
    super(FetchTransformSaveWithSeparateNewCrashSourceApp, self)._setup_source_and_destination()
    if self.config.new_crash_source.new_crash_source_class:
        # A dedicated new_crash_source class was configured: instantiate it.
        self.new_crash_source = self.config.new_crash_source.new_crash_source_class(self.config.new_crash_source, name=self.app_instance_name, quit_check_callback=self.quit_check)
    else:  # the configuration failed to provide a "new_crash_source", fall
        # back to tying the "new_crash_source" to the "source".
        self.new_crash_source = self.source
def to_vobject(self, filename=None, uid=None):
    """Return an iCal object of the Remind lines.

    If filename and uid are specified, the vObject only contains that one
    event. If only a filename is specified, the vObject contains all events
    in that file. Otherwise the vObject contains all events of all files
    associated with this Remind object.

    :param filename: the remind file
    :param uid: the UID of the Remind line
    """
    # Refresh the cached reminders before serializing.
    self._update()
    cal = iCalendar()
    if uid:
        # Single event lookup; assumes filename is also given when uid is.
        self._gen_vevent(self._reminders[filename][uid], cal.add('vevent'))
    elif filename:
        # All events from one file.
        for event in self._reminders[filename].values():
            self._gen_vevent(event, cal.add('vevent'))
    else:
        # All events from every known file.
        for filename in self._reminders:
            for event in self._reminders[filename].values():
                self._gen_vevent(event, cal.add('vevent'))
    return cal
def parse_definitions(definitions):
    """Parses a list of macro definitions and returns a "symbol table"
    as a dictionary.

    :param definitions:
        A list of command line macro definitions. Each item should be in
        one of these two formats:

        * <variable>=<value>
        * <variable>

        e.g. ``['DEBUG=1']`` -> ``{'DEBUG': 1}``; a bare ``['FOOBAR']``
        maps FOOBAR to None. A None or empty list yields an empty dict.
    :return:
        ``dict`` as symbol table, or raises whatever
        :func:`parse_definition_expr` raises for a malformed entry.
    """
    symbol_table = {}
    for definition in (definitions or []):
        name, value = parse_definition_expr(definition, default_value=None)
        symbol_table[name] = value
    return symbol_table
def from_config(cls, name, config):
    """Behaves like the base Configurable class's `from_config()` except this
    makes sure that the `Pluggable` subclass with the given name is actually
    a properly installed plugin first.

    :param name: registered name of the plugin to instantiate
    :param config: configuration passed to the plugin's validate/apply hooks
    :raises ValueError: if no installed plugin carries the given name
    :return: a configured instance of the plugin class
    """
    installed_classes = cls.get_installed_classes()
    if name not in installed_classes:
        raise ValueError("Unknown/unavailable %s" % cls.__name__.lower())
    pluggable_class = installed_classes[name]
    # Validate before instantiation so misconfiguration fails early.
    pluggable_class.validate_config(config)
    instance = pluggable_class()
    # Only fill in the name when the class did not set one itself.
    if not instance.name:
        instance.name = name
    instance.apply_config(config)
    return instance
def get(self, location_name):
    """Get a contact address by location name.

    :param str location_name: name of location
    :return: matching contact address element or None if not found
    :rtype: ContactAddress
    """
    # Resolve the name to a location reference without creating one.
    location_ref = location_helper(location_name, search_only=True)
    if location_ref:
        for location in self:
            if location.location_ref == location_ref:
                return location
    # Implicitly returns None when the name is unknown or has no entry.
def wait_stopped(self, timeout=None, force=False):
    """Wait for the thread to stop.

    You must have previously called signal_stop or this function will hang.

    Args:
        timeout (float): The maximum time to wait for the thread to stop
            before raising a TimeoutExpiredError.
        force (bool): If True and the thread does not exit within timeout
            seconds, no error is raised.

    NOTE(review): the original docstring also claims the thread is marked
    as a daemon when force is True, but this body does not set .daemon --
    confirm whether that happens elsewhere or the docstring overstates.
    """
    self.join(timeout)
    if self.is_alive() and force is False:
        raise TimeoutExpiredError("Error waiting for background thread to exit", timeout=timeout)
def sync_ldap_groups(self, ldap_groups):
    """Synchronize LDAP groups with the local group model.

    :param ldap_groups: iterable of (cname, ldap_attributes) pairs as
        returned by the LDAP search; attribute values are expected to be
        lists of bytes.
    """
    groupname_field = 'name'
    self.stats_group_total = len(ldap_groups)
    for cname, ldap_attributes in ldap_groups:
        defaults = {}
        try:
            # Map LDAP attribute names to local model fields; take the
            # first value of each (LDAP attributes are multi-valued).
            for name, attribute in ldap_attributes.items():
                defaults[self.conf_LDAP_SYNC_GROUP_ATTRIBUTES[name]] = attribute[0].decode('utf-8')
        except AttributeError:  # In some cases attrs is a list instead of a dict; skip these invalid groups
            continue
        try:
            groupname = defaults[groupname_field]
        except KeyError:
            logger.warning("Group is missing a required attribute '%s'" % groupname_field)
            self.stats_group_errors += 1
            continue
        # Case-insensitive match on the group name; defaults populate a
        # newly created row.
        kwargs = {groupname_field + '__iexact': groupname, 'defaults': defaults, }
        try:
            group, created = Group.objects.get_or_create(**kwargs)
        except (IntegrityError, DataError) as e:
            logger.error("Error creating group %s: %s" % (groupname, e))
            self.stats_group_errors += 1
        else:
            if created:
                self.stats_group_added += 1
                logger.debug("Created group %s" % groupname)
    logger.info("Groups are synchronized")
def position_target_global_int_encode(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate):
    '''Reports the current commanded vehicle position, velocity, and
    acceleration as specified by the autopilot. This should match the
    commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
    controlled this way.

    time_boot_ms      : Timestamp in ms since system boot; lets the receiver
                        compensate for transport and processing latency (uint32_t)
    coordinate_frame  : MAV_FRAME_GLOBAL_INT=5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT=6,
                        MAV_FRAME_GLOBAL_TERRAIN_ALT_INT=11 (uint8_t)
    type_mask         : Bitmask of dimensions to ignore; 0b00000 or
                        0b00000100000 ignores none. If bit 10 is set, afx/afy/afz
                        are force instead of acceleration. Bits 1-3: x/y/z,
                        4-6: vx/vy/vz, 7-9: ax/ay/az, 10: force setpoint,
                        11: yaw, 12: yaw rate (uint16_t)
    lat_int           : X position in WGS84 frame, 1e7 * degrees (int32_t)
    lon_int           : Y position in WGS84 frame, 1e7 * degrees (int32_t)
    alt               : Altitude in meters, AMSL if absolute/relative, above
                        terrain if GLOBAL_TERRAIN_ALT_INT (float)
    vx, vy, vz        : X/Y/Z velocity in NED frame in m/s (float)
    afx, afy, afz     : X/Y/Z acceleration (or force if bit 10 of type_mask
                        is set) in NED frame, m/s^2 or N (float)
    yaw               : yaw setpoint in rad (float)
    yaw_rate          : yaw rate setpoint in rad/s (float)
    '''
    # Pure constructor wrapper: packs the arguments into the message object.
    return MAVLink_position_target_global_int_message(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
def set_guid(self):
    """Parse the <guid> element from the parsed feed and store its text on
    ``self.guid``; falls back to None when the element is absent."""
    guid_elem = self.soup.find('guid')
    try:
        self.guid = guid_elem.string
    except AttributeError:
        # find() returned None: no <guid> tag present.
        self.guid = None
def QA_util_date_gap(date, gap, methods):
    '''Return the trading date ``gap`` trading days away from ``date``.

    :param date: start date string, e.g. '2018-11-11'
    :param gap: number of trading days between the two dates (int)
    :param methods: comparison mode: 'gt'/'>' strictly after, 'gte'/'>='
        at-or-after, 'lt'/'<' strictly before, 'lte'/'<=' at-or-before,
        '=='/'='/'eq' the date itself
    :return: date string, e.g. '2000-01-01'; 'wrong date' when the lookup
        fails; None for an unrecognized ``methods`` value (preserved
        original behavior)
    '''
    try:
        if methods in ['>', 'gt']:
            return trade_date_sse[trade_date_sse.index(date) + gap]
        elif methods in ['>=', 'gte']:
            return trade_date_sse[trade_date_sse.index(date) + gap - 1]
        elif methods in ['<', 'lt']:
            return trade_date_sse[trade_date_sse.index(date) - gap]
        elif methods in ['<=', 'lte']:
            return trade_date_sse[trade_date_sse.index(date) - gap + 1]
        elif methods in ['==', '=', 'eq']:
            return date
    except (ValueError, IndexError, TypeError):
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit. .index() raises ValueError for an
        # unknown date; an out-of-range offset raises IndexError.
        return 'wrong date'
def s_to_ev(offset_us, source_to_detector_m, array):
    """Convert neutron time-of-flight (s) to energy (eV).

    Parameters:
        array: numpy array of time in s
        offset_us: float, delay of detector in us
        source_to_detector_m: float, distance source to detector in m

    Returns:
        numpy array of energy in eV
    """
    # Wavelength (Angstrom) from time of flight, correcting the detector delay.
    wavelength_angstrom = 3956. * (array + offset_us * 1e-6) / source_to_detector_m
    # Energy from wavelength, scaled into eV.
    return (81.787 / wavelength_angstrom ** 2) / 1000.
def _init_groups ( self , string ) :
"""Extracts weather groups ( FM , PROB etc . ) and populates group list
Args :
TAF report string
Raises :
MalformedTAF : Group decoding error""" | taf_group_pattern = """
(?:FM|(?:PROB(?:\d{1,2})\s*(?:TEMPO)?)|TEMPO|BECMG|[\S\s])[A-Z0-9\+\-/\s$]+?(?=FM|PROB|TEMPO|BECMG|$)
"""
group_list = [ ]
groups = re . findall ( taf_group_pattern , string , re . VERBOSE )
if not groups :
raise MalformedTAF ( "No valid groups found" )
for group in groups :
group_list . append ( group )
return ( group_list ) |
def sample_lists(items_list, num=1, seed=None):
    r"""Draw up to ``num`` random items, without replacement, from each
    sublist of ``items_list``.

    Args:
        items_list (list): list of lists to sample from.
        num (int): samples per sublist (default = 1); capped at the
            sublist's length.
        seed (None): RNG seed for reproducible draws (default = None).

    Returns:
        list: samples_list, one (possibly empty) sample list per sublist.
    """
    rng = np.random if seed is None else np.random.RandomState(seed)

    def _draw(items):
        # Empty sublists stay empty; otherwise sample without replacement.
        if not items:
            return []
        return rng.choice(items, min(len(items), num), replace=False).tolist()

    return [_draw(items) for items in items_list]
def reset(self) -> None:
    """Clear all data in file storage.

    Closes the storage, deletes every file in the data directory, then
    re-opens the latest (now fresh) chunk.
    """
    # Close first so no handle still points at a file being removed.
    self.close()
    for f in os.listdir(self.dataDir):
        os.remove(os.path.join(self.dataDir, f))
    self._useLatestChunk()
def _get_style_of_faulting_term(self, C, rake):
    """Returns the style-of-faulting factor.

    The normal (f_n) and reverse (f_r) dummy variables derived from the
    rake angle weight the corresponding model coefficients C6 and C7.
    """
    f_n, f_r = self._get_fault_type_dummy_variables(rake)
    return C['C6'] * f_n + C['C7'] * f_r
def return_search_summary(start_time=0, end_time=0, nevents=0, ifos=None, **kwargs):
    """Create a SearchSummary object where all columns are populated but all
    are set to values that test False (strings to '', floats/ints to 0, ...).

    This avoids errors when you try to create a table containing columns you
    don't care about, but which still need populating. NOTE: this will also
    produce a process_id with 0 values; for most applications these should
    be set to their correct values. It then populates columns if given them
    as options.

    Returns
    -------
    lsctables.SearchSummary
        The "empty" SearchSummary object.
    """
    if ifos is None:
        ifos = []
    # create an empty search summary
    search_summary = lsctables.SearchSummary()
    cols = lsctables.SearchSummaryTable.validcolumns
    # Give every valid column a False-y default of the appropriate type.
    for entry in cols.keys():
        if cols[entry] in ['real_4', 'real_8']:
            setattr(search_summary, entry, 0.)
        elif cols[entry] == 'int_4s':
            setattr(search_summary, entry, 0)
        elif cols[entry] == 'lstring':
            setattr(search_summary, entry, '')
        elif entry == 'process_id':
            search_summary.process_id = ilwd.ilwdchar("process:process_id:0")
        else:
            raise ValueError("Column %s not recognized" % (entry))
    # fill in columns
    if len(ifos):
        search_summary.ifos = ','.join(ifos)
    if nevents:
        search_summary.nevents = nevents
    if start_time and end_time:
        # Split each GPS time into integer seconds and nanoseconds parts.
        search_summary.in_start_time = int(start_time)
        search_summary.in_start_time_ns = int(start_time % 1 * 1e9)
        search_summary.in_end_time = int(end_time)
        search_summary.in_end_time_ns = int(end_time % 1 * 1e9)
        search_summary.out_start_time = int(start_time)
        search_summary.out_start_time_ns = int(start_time % 1 * 1e9)
        search_summary.out_end_time = int(end_time)
        search_summary.out_end_time_ns = int(end_time % 1 * 1e9)
    return search_summary
def get_sentence(self, offset: int) -> "BioCSentence or None":
    """Gets the sentence with the specified offset.

    Args:
        offset: sentence offset
    Return:
        the sentence with the specified offset, or None if absent
    """
    # First match wins; None when no sentence carries this offset.
    return next((s for s in self.sentences if s.offset == offset), None)
def _process_elem_text ( elem , dic , subdic , text = "@text" , ** options ) :
""": param elem : ET Element object which has elem . text
: param dic : < container > ( dict [ - like ] ) object converted from elem
: param subdic : Sub < container > object converted from elem
: param options :
Keyword options , see the description of : func : ` elem _ to _ container ` for
more details .
: return : None but updating elem . text , dic and subdic as side effects""" | elem . text = elem . text . strip ( )
if elem . text :
etext = _parse_text ( elem . text , ** options )
if len ( elem ) or elem . attrib :
subdic [ text ] = etext
else :
dic [ elem . tag ] = etext |
def wait_for_jobs(self, job_ids, timeout, delay):
    """Waits until the jobs appear in the completed job queue.

    :param job_ids: iterable of job IDs to wait for
    :param timeout: total seconds to wait before giving up
    :param delay: seconds to sleep between polls
    :return: list of matched job dicts when all complete in time; None on
        timeout (falls through after logging an error)
    """
    if self.skip:
        # Waiting disabled for this instance.
        return
    logger.debug("Waiting up to %d sec for completion of the job IDs %s", timeout, job_ids)
    remaining_job_ids = set(job_ids)
    found_jobs = []
    countdown = timeout
    while countdown > 0:
        matched_jobs = self.find_jobs(remaining_job_ids)
        if matched_jobs:
            # Stop polling for the jobs we already found.
            remaining_job_ids.difference_update({job["id"] for job in matched_jobs})
            found_jobs.extend(matched_jobs)
        if not remaining_job_ids:
            return found_jobs
        time.sleep(delay)
        countdown -= delay
    logger.error("Timed out while waiting for completion of the job IDs %s. Results not updated.", list(remaining_job_ids), )
def recognize_byte(self, image, timeout=10):
    """Process a byte image buffer with the external alpr tool.

    :param image: raw image bytes piped to the alpr subprocess stdin
    :param timeout: seconds to wait for the subprocess before killing it
    :return: list of dicts mapping plate string -> confidence, or None if
        the subprocess timed out
    """
    result = []
    alpr = subprocess.Popen(self._cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    # send image
    try:  # pylint: disable=unused-variable
        # BUG FIX: honor the caller-supplied timeout instead of the
        # hard-coded 10 seconds the original passed to communicate().
        stdout, stderr = alpr.communicate(input=image, timeout=timeout)
        stdout = io.StringIO(str(stdout, 'utf-8'))
    except subprocess.TimeoutExpired:
        _LOGGER.error("Alpr process timeout!")
        alpr.kill()
        return None
    tmp_res = {}
    while True:
        line = stdout.readline()
        if not line:
            # End of output: flush the last plate's collected results.
            if len(tmp_res) > 0:
                result.append(tmp_res)
            break
        new_plate = self.__re_plate.search(line)
        new_result = self.__re_result.search(line)
        # found a new plate
        if new_plate and len(tmp_res) > 0:
            result.append(tmp_res)
            tmp_res = {}
            continue
        # found plate result
        if new_result:
            try:
                tmp_res[new_result.group(1)] = float(new_result.group(2))
            except ValueError:
                # Non-numeric confidence: skip this line.
                continue
    _LOGGER.debug("Process alpr with result: %s", result)
    return result
def listDatasetChildren(self, dataset):
    """Takes the required dataset parameter and returns only the children
    dataset names.

    :param dataset: parent dataset name (must be non-empty)
    """
    if (dataset == ""):
        dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/listDatasetChildren. Parent Dataset name is required.")
    conn = self.dbi.connection()
    try:
        result = self.datasetchildlist.execute(conn, dataset)
        return result
    finally:
        # Always release the DB connection, even if execute() raises.
        if conn:
            conn.close()
def _minion_event(self, load):
    '''Receive an event from the minion and fire it on the master event
    interface.

    :param dict load: The minion payload; must authenticate with valid
        'id' and 'tok' fields or an empty dict is returned.
    '''
    # Authentication/validation; returns False on failure.
    load = self.__verify_load(load, ('id', 'tok'))
    if load is False:
        return {}
    # Route to master event bus
    self.masterapi._minion_event(load)
    # Process locally
    self._handle_minion_event(load)
def retrieve(self):
    """Retrieve all data for this document, cache it on ``self.data`` and
    return it."""
    fetched = self.resource(self.id).get()
    self.data = fetched
    return fetched
def make_asset_zip(asset_dir_path, destination_directory=None):
    """Given an asset directory path, creates an asset zip file in the provided
    destination directory.

    :param asset_dir_path: (str) path to the directory containing the asset
    :param destination_directory: (str) path to the destination directory for
        the asset; defaults to ``~/Downloads`` when not provided
    :return: (str) Path to the asset zip file
    :raises: AssetZipCreationError
    """
    log = logging.getLogger(mod_logger + '.make_asset_zip')
    log.info('Attempting to create an asset zip from directory: {d}'.format(d=asset_dir_path))
    # Ensure the path is a directory
    if not os.path.isdir(asset_dir_path):
        raise AssetZipCreationError('Provided asset_dir_path is not a directory: {d}'.format(d=asset_dir_path))
    # Determine a destination directory if not provided
    if destination_directory is None:
        destination_directory = os.path.join(os.path.expanduser('~'), 'Downloads')
        mkdir_p(destination_directory)
    # Ensure the destination is a directory
    if not os.path.isdir(destination_directory):
        raise AssetZipCreationError('Provided destination_directory is not a directory: {d}'.format(d=destination_directory))
    # Validate the asset structure
    try:
        asset_name = validate_asset_structure(asset_dir_path=asset_dir_path)
    except Cons3rtAssetStructureError as exc:
        # BUG FIX: the original used Python-2 `raise E, msg, trace` syntax,
        # a SyntaxError on Python 3. `raise ... from` keeps the cause chained.
        msg = 'Cons3rtAssetStructureError: Problem found in the asset structure: {d}\n{e}'.format(
            d=asset_dir_path, e=str(exc))
        raise AssetZipCreationError(msg) from exc
    # Determine the asset zip file name (same as asset name without spaces)
    zip_file_name = 'asset-' + asset_name.replace(' ', '') + '.zip'
    log.info('Using asset zip file name: {n}'.format(n=zip_file_name))
    # Determine the zip file path
    zip_file_path = os.path.join(destination_directory, zip_file_name)
    # Remove existing zip file if it exists
    if os.path.isfile(zip_file_path):
        log.info('Removing existing asset zip file: {f}'.format(f=zip_file_path))
        os.remove(zip_file_path)
    # Attempt to create the zip
    log.info('Attempting to create asset zip file: {f}'.format(f=zip_file_path))
    try:
        with contextlib.closing(zipfile.ZipFile(zip_file_path, 'w', allowZip64=True)) as zip_w:
            for root, dirs, files in os.walk(asset_dir_path):
                for f in files:
                    skip = False
                    file_path = os.path.join(root, f)
                    # Skip files in the ignore directories list
                    for ignore_dir in ignore_dirs:
                        if ignore_dir in file_path:
                            skip = True
                            break
                    # Skip file in the ignore files list
                    for ignore_file in ignore_files:
                        if f.startswith(ignore_file):
                            skip = True
                            break
                    # Skip if the file ends with the specified extension
                    if ignore_by_extension(item_path=file_path):
                        skip = True
                    if skip:
                        log.info('Skipping file: {f}'.format(f=file_path))
                        continue
                    log.info('Adding file to zip: {f}'.format(f=file_path))
                    # Archive names are relative to the asset directory root.
                    archive_name = os.path.join(root[len(asset_dir_path):], f)
                    if archive_name.startswith('/'):
                        log.debug('Trimming the leading char: [/]')
                        archive_name = archive_name[1:]
                    log.info('Adding to archive as: {a}'.format(a=archive_name))
                    zip_w.write(file_path, archive_name)
    except Exception as exc:
        # BUG FIX: Python-3 re-raise with chained cause (was Py2 3-arg raise).
        msg = 'Unable to create zip file: {f}\n{e}'.format(f=zip_file_path, e=str(exc))
        raise AssetZipCreationError(msg) from exc
    log.info('Successfully created asset zip file: {f}'.format(f=zip_file_path))
    return zip_file_path
def is_equal(self, other):
    """Equality checker with message.

    Compares this impact function with ``other`` property by property and
    stops at the first mismatch.

    :param other: Other Impact Function to be compared.
    :type other: ImpactFunction

    :returns: True if both are the same IF, other wise False and the
        message.
    :rtype: bool, str
    """
    # Properties compared, in order. Commented-out entries are deliberately
    # excluded (see inline notes).
    properties = [
        'debug_mode',
        'use_rounding',
        'requested_extent',
        'crs',
        'analysis_extent',
        'datastore',
        'name',
        'title',
        'start_datetime',
        'end_datetime',
        'duration',
        'earthquake_function',
        # 'performance_log',  # I don't think need we need this one
        'hazard',
        'exposure',
        'aggregation',
        # Output layers on new IF object will have a different provenance
        # data with the one from original IF.
        # 'impact',
        # 'exposure_summary',
        # 'aggregate_hazard_impacted',
        # 'aggregation_summary',
        # 'analysis_impacted',
        # 'exposure_summary_table',
        'profiling',
    ]
    for if_property in properties:
        # Skip if it's debug mode for profiling
        if self.debug_mode:
            if if_property == 'profiling':
                continue
        try:
            property_a = getattr(self, if_property)
            property_b = getattr(other, if_property)
            # Types must match before any value comparison is attempted.
            if not isinstance(property_a, type(property_b)):
                message = (
                    'Different type of property %s.\nA: %s\nB: %s' % (
                        if_property, type(property_a), type(property_b)))
                return False, message
            # Map layers of any kind: compare their keyword dictionaries
            # (byteified to normalise str/unicode differences).
            if isinstance(property_a, QgsMapLayer):
                if byteify(property_a.keywords) != byteify(property_b.keywords):
                    message = ('Keyword Layer is not equal is %s' % if_property)
                    return False, message
            # NOTE: this is a separate `if`; the elif-chain below attaches to
            # it, so non-vector map layers fall through to the generic `else`.
            if isinstance(property_a, QgsVectorLayer):
                # Vector layers: field names and feature counts must agree.
                fields_a = [f.name() for f in property_a.fields()]
                fields_b = [f.name() for f in property_b.fields()]
                if fields_a != fields_b:
                    message = ('Layer fields is not equal for %s' % if_property)
                    return False, message
                if (property_a.featureCount() != property_b.featureCount()):
                    message = ('Feature count is not equal for %s' % if_property)
                    return False, message
            elif isinstance(property_a, QgsGeometry):
                # Geometries compared geometrically, reported as WKT.
                if not property_a.equals(property_b):
                    string_a = property_a.asWkt()
                    string_b = property_b.asWkt()
                    message = (
                        '[Non Layer] The not equal property is %s.\n'
                        'A: %s\nB: %s' % (if_property, string_a, string_b))
                    return False, message
            elif isinstance(property_a, DataStore):
                # Datastores compared by their URI path only.
                if property_a.uri_path != property_b.uri_path:
                    string_a = property_a.uri_path
                    string_b = property_b.uri_path
                    message = (
                        '[Non Layer] The not equal property is %s.\n'
                        'A: %s\nB: %s' % (if_property, string_a, string_b))
                    return False, message
            else:
                # Everything else: plain equality.
                if property_a != property_b:
                    string_a = property_a
                    string_b = property_b
                    message = (
                        '[Non Layer] The not equal property is %s.\n'
                        'A: %s\nB: %s' % (if_property, string_a, string_b))
                    return False, message
        except AttributeError as e:
            message = (
                'Property %s is not found. The exception is %s' % (
                    if_property, e))
            return False, message
        except IndexError as e:
            # 'impact' may legitimately be empty; anything else is an error.
            if if_property == 'impact':
                continue
            else:
                message = (
                    'Property %s is out of index. The exception is %s' % (
                        if_property, e))
                return False, message
        except Exception as e:
            message = (
                'Error on %s with error message %s' % (if_property, e))
            return False, message
    return True, ''
def _duplicate_example(self, request):
    """Duplicates the specified example.

    Args:
      request: A request that should contain 'index'.
    Returns:
      An empty response on success, or a 400 error for an invalid index.
    """
    index = int(request.args.get('index'))
    if index >= len(self.examples):
        return http_util.Respond(
            request, {'error': 'invalid index provided'},
            'application/json', code=400)
    duplicate = self.example_class()
    duplicate.CopyFrom(self.examples[index])
    self.examples.append(duplicate)
    # Record that the appended example is new/updated.
    self.updated_example_indices.add(len(self.examples) - 1)
    # Regenerate the sprite image to include the duplicate.
    self.generate_sprite([ex.SerializeToString() for ex in self.examples])
    return http_util.Respond(request, {}, 'application/json')
def closing(input_rasterfilename, times):
    """Do morphological closing: dilate first, then erode.

    Args:
        input_rasterfilename: input original raster image filename.
        times: number of dilation and erosion passes.
    Returns:
        Raster image after the closing operation.
    """
    raster = RasterUtilClass.read_raster(input_rasterfilename)
    for _ in range(times):
        raster = RasterUtilClass.raster_dilation(raster)
    for _ in range(times):
        raster = RasterUtilClass.raster_erosion(raster)
    return raster
def random_tickers(length, n_tickers, endswith=None, letters=None,
                   slicer=itertools.islice):
    """Generate a length-`n_tickers` list of unique random ticker symbols.

    Parameters
    ----------
    length : int
        The length of each ticker string.
    n_tickers : int
        Number of tickers to generate.
    endswith : str, default None
        Specify the ending element(s) of each ticker (for example, 'X').
    letters : sequence, default None
        Sequence of possible letters to choose from. If None, defaults to
        `string.ascii_uppercase`.
    slicer : callable, default itertools.islice
        Callable with the islice signature used to take the first
        `n_tickers` unique tickers from the generator.

    Returns
    -------
    list of str

    Examples
    --------
    >>> from pyfinance import utils
    >>> utils.random_tickers(length=5, n_tickers=4, endswith='X')
    ['UZTFX', 'ROYAX', 'ZBVIX', 'IUWYX']
    >>> utils.random_tickers(3, 8)
    ['SBW', 'GDF', 'FOG', 'PWO', 'QDH', 'MJJ', 'YZD', 'QST']
    """
    # The trick here is that we need uniqueness. That defeats the
    # purpose of using NumPy because we need to generate 1x1.
    # (Although the alternative is just to generate a "large
    # enough" duplicated sequence and prune from it.)
    if letters is None:
        letters = string.ascii_uppercase
    if endswith:
        # Only generate substrings up to `endswith`
        length = length - len(endswith)
    join = "".join

    def yield_ticker(rand=random.choices):
        if endswith:
            while True:
                yield join(rand(letters, k=length)) + endswith
        else:
            while True:
                yield join(rand(letters, k=length))

    # BUG FIX: honor the ``slicer`` parameter; the original accepted it but
    # always called itertools.islice directly, making the parameter dead.
    tickers = slicer(unique_everseen(yield_ticker()), n_tickers)
    return list(tickers)
def _safe_match_list(inner_type, argument_value):
    """Represent the list of "inner_type" objects in MATCH form."""
    element_type = strip_non_null_from_type(inner_type)
    # Nested lists cannot be represented in MATCH syntax.
    if isinstance(element_type, GraphQLList):
        raise GraphQLInvalidArgumentError(
            u'MATCH does not currently support nested lists, '
            u'but inner type was {}: '
            u'{}'.format(inner_type, argument_value))
    if not isinstance(argument_value, list):
        raise GraphQLInvalidArgumentError(
            u'Attempting to represent a non-list as a list: '
            u'{}'.format(argument_value))
    parts = [
        _safe_match_argument(element_type, element)
        for element in argument_value
    ]
    return u'[' + u','.join(parts) + u']'
def release(self):
    """Release the lock.

    Decrements the appropriate (shared or exclusive) hold counter for the
    calling thread and, once the lock becomes free, hands it off to queued
    waiters: all pending shared waiters at once, or the single oldest
    exclusive waiter.

    :raises RuntimeError: if the calling thread does not hold the lock.
    """
    # This decrements the appropriate lock counters, and if the lock
    # becomes free, it looks for a queued thread to hand it off to.
    # By doing the handoff here we ensure fairness.
    me = currentThread()
    with self._lock:
        if self.is_exclusive:
            if self._exclusive_owner is not me:
                raise RuntimeError("release() called on unheld lock")
            # Exclusive locks are re-entrant: only a full unwind frees it.
            self.is_exclusive -= 1
            if not self.is_exclusive:
                self._exclusive_owner = None
                # If there are waiting shared locks, issue it to them
                # all and then wake everyone up.
                if self._shared_queue:
                    for (thread, waiter) in self._shared_queue:
                        self.is_shared += 1
                        self._shared_owners[thread] = 1
                        waiter.notify()
                    del self._shared_queue[:]
                # Otherwise, if there are waiting exclusive locks,
                # they get first dibbs on the lock.
                elif self._exclusive_queue:
                    (thread, waiter) = self._exclusive_queue.pop(0)
                    self._exclusive_owner = thread
                    self.is_exclusive += 1
                    waiter.notify()
        elif self.is_shared:
            try:
                # Decrement this thread's share count; drop the entry when
                # it reaches zero.
                self._shared_owners[me] -= 1
                if self._shared_owners[me] == 0:
                    del self._shared_owners[me]
            except KeyError:
                # This thread never held a shared lock.
                raise RuntimeError("release() called on unheld lock")
            self.is_shared -= 1
            if not self.is_shared:
                # If there are waiting exclusive locks,
                # they get first dibbs on the lock.
                if self._exclusive_queue:
                    (thread, waiter) = self._exclusive_queue.pop(0)
                    self._exclusive_owner = thread
                    self.is_exclusive += 1
                    waiter.notify()
                else:
                    # No shared waiters should be queued here: they would
                    # have been granted immediately while shares were held.
                    assert not self._shared_queue
        else:
            raise RuntimeError("release() called on unheld lock")
def radius_server_host_key ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
radius_server = ET . SubElement ( config , "radius-server" , xmlns = "urn:brocade.com:mgmt:brocade-aaa" )
host = ET . SubElement ( radius_server , "host" )
hostname_key = ET . SubElement ( host , "hostname" )
hostname_key . text = kwargs . pop ( 'hostname' )
key = ET . SubElement ( host , "key" )
key . text = kwargs . pop ( 'key' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def parse_int(value, base_unit=None):
    """>>> parse_int('1') == 1
    True
    >>> parse_int('0x400 MB', '16384kB') == 64
    True
    >>> parse_int('1MB', 'kB') == 1024
    True
    >>> parse_int('1000 ms', 's') == 1
    True
    >>> parse_int('1GB', 'MB') is None
    True
    >>> parse_int(0) == 0
    True"""
    # Conversion factors from a base unit to a given unit. A negative
    # factor means "divide by its absolute value" (e.g. ms -> s).
    convert = {
        'kB': {'kB': 1, 'MB': 1024, 'GB': 1024 * 1024,
               'TB': 1024 * 1024 * 1024},
        'ms': {'ms': 1, 's': 1000, 'min': 1000 * 60,
               'h': 1000 * 60 * 60, 'd': 1000 * 60 * 60 * 24},
        's': {'ms': -1000, 's': 1, 'min': 60, 'h': 60 * 60,
              'd': 60 * 60 * 24},
        'min': {'ms': -1000 * 60, 's': -60, 'min': 1, 'h': 60,
                'd': 60 * 24},
    }
    value, unit = strtol(value)
    if value is None:
        return None
    if not unit:
        # Plain number without a unit: return as-is.
        return value
    if base_unit and base_unit not in convert:
        # Base unit carries its own magnitude, e.g. '16384kB'.
        base_value, base_unit = strtol(base_unit, False)
    else:
        base_value = 1
    if base_unit not in convert or unit not in convert[base_unit]:
        # Incompatible or unknown unit combination.
        return None
    factor = convert[base_unit][unit]
    if factor < 0:
        value /= -factor
    else:
        value *= factor
    return int(value / base_value)
def role_list(auth=None, **kwargs):
    '''List roles

    CLI Example:

    .. code-block:: bash

        salt '*' keystoneng.role_list
        salt '*' keystoneng.role_list domain_id=b62e76fbeeff4e8fb77073f591cf211e
    '''
    cloud = get_operator_cloud(auth)
    # Strip salt-internal keys before passing through to the SDK.
    cleaned = _clean_kwargs(**kwargs)
    return cloud.list_roles(**cleaned)
def inj_mass_pdf(key, mass1, mass2, lomass, himass, lomass_2=0, himass_2=0):
    '''Estimate the probability density based on the injection strategy.

    Parameters
    ----------
    key : string
        Injection strategy
    mass1 : array
        First mass of the injections
    mass2 : array
        Second mass of the injections
    lomass : float
        Lower value of the mass distributions
    himass : float
        higher value of the mass distribution

    Returns
    -------
    pdf : array
        Probability density of the injections
    '''
    mass1 = np.array(mass1)
    mass2 = np.array(mass2)
    if key == 'totalMass':
        # Total mass uniformly distributed; both component masses share
        # the same distribution. A sample is in bounds only when every
        # sign term below is +1 (sum == 3).
        in_bounds = np.sign((lomass + himass) - (mass1 + mass2))
        in_bounds += np.sign((himass - mass1) * (mass1 - lomass))
        in_bounds += np.sign((himass - mass2) * (mass2 - lomass))
        outside = np.where(in_bounds != 3)
        pdf = 1. / (himass - lomass) / (mass1 + mass2 - 2 * lomass)
        pdf[outside] = 0
        return pdf
    if key == 'componentMass':
        # Component masses uniformly and independently distributed over
        # [lomass, himass] x [lomass_2, himass_2].
        in_bounds = np.sign((himass - mass1) * (mass1 - lomass))
        in_bounds += np.sign((himass_2 - mass2) * (mass2 - lomass_2))
        outside = np.where(in_bounds != 2)
        pdf = np.ones_like(mass1) / (himass - lomass) / (himass_2 - lomass_2)
        pdf[outside] = 0
        return pdf
    if key == 'log':
        # Component masses independent and uniform in log-mass.
        in_bounds = np.sign((himass - mass1) * (mass1 - lomass))
        in_bounds += np.sign((himass_2 - mass2) * (mass2 - lomass_2))
        outside = np.where(in_bounds != 2)
        pdf = 1 / (log(himass) - log(lomass)) / (log(himass_2) - log(lomass_2))
        pdf /= (mass1 * mass2)
        pdf[outside] = 0
        return pdf
def create_replication_instance(ReplicationInstanceIdentifier=None, AllocatedStorage=None, ReplicationInstanceClass=None, VpcSecurityGroupIds=None, AvailabilityZone=None, ReplicationSubnetGroupIdentifier=None, PreferredMaintenanceWindow=None, MultiAZ=None, EngineVersion=None, AutoMinorVersionUpgrade=None, Tags=None, KmsKeyId=None, PubliclyAccessible=None):
    """Creates the replication instance using the specified parameters.

    AWS DMS ``CreateReplicationInstance`` operation stub.
    See also: AWS API Documentation.

    :type ReplicationInstanceIdentifier: string
    :param ReplicationInstanceIdentifier: [REQUIRED] The replication instance
        identifier, stored as a lowercase string. Must contain 1 to 63
        alphanumeric characters or hyphens, start with a letter, and must not
        end with a hyphen or contain two consecutive hyphens.
    :type AllocatedStorage: integer
    :param AllocatedStorage: Storage (in gigabytes) initially allocated for
        the replication instance.
    :type ReplicationInstanceClass: string
    :param ReplicationInstanceClass: [REQUIRED] Compute/memory capacity class,
        e.g. dms.t2.micro through dms.c4.4xlarge.
    :type VpcSecurityGroupIds: list
    :param VpcSecurityGroupIds: VPC security groups to use; must belong to the
        VPC containing the replication instance.
    :type AvailabilityZone: string
    :param AvailabilityZone: EC2 Availability Zone to create the instance in.
        Default: a random, system-chosen AZ in the endpoint's region.
    :type ReplicationSubnetGroupIdentifier: string
    :param ReplicationSubnetGroupIdentifier: Subnet group to associate with
        the replication instance.
    :type PreferredMaintenanceWindow: string
    :param PreferredMaintenanceWindow: Weekly UTC maintenance window in
        ``ddd:hh24:mi-ddd:hh24:mi`` format; minimum 30 minutes.
    :type MultiAZ: boolean
    :param MultiAZ: Whether this is a Multi-AZ deployment. Incompatible with
        the AvailabilityZone parameter.
    :type EngineVersion: string
    :param EngineVersion: Engine version number of the replication instance.
    :type AutoMinorVersionUpgrade: boolean
    :param AutoMinorVersionUpgrade: Apply minor engine upgrades automatically
        during the maintenance window. Default: true.
    :type Tags: list
    :param Tags: Tags (dicts with 'Key' and 'Value') to associate with the
        replication instance.
    :type KmsKeyId: string
    :param KmsKeyId: KMS key identifier used to encrypt instance content;
        defaults to the account's default DMS encryption key.
    :type PubliclyAccessible: boolean
    :param PubliclyAccessible: Whether the instance gets a public IP address.
        Default: true.
    :rtype: dict
    :return: {'ReplicationInstance': {...}} describing the created instance
        (identifier, class, status, storage, endpoints, subnet group, etc.).
    """
    # Auto-generated client stub: the real request marshalling is supplied by
    # the service client at runtime; this placeholder only documents the API.
    pass
def is_done(self, submissionid_or_submission, user_check=True):
    """Tells if a submission is done and its result is available"""
    # TODO: not a very nice way to avoid too many database call.
    # Should be refactored.
    if isinstance(submissionid_or_submission, dict):
        submission = submissionid_or_submission
    else:
        submission = self.get_submission(submissionid_or_submission, False)
    if user_check and not self.user_is_submission_owner(submission):
        return None
    return submission["status"] in ("done", "error")
def rollback(self, dt):
    """Roll provided date backward to next offset only if not on offset."""
    timestamp = as_timestamp(dt)
    if self.onOffset(timestamp):
        # Already on an offset boundary: nothing to do.
        return timestamp
    return timestamp - self.__class__(1, normalize=self.normalize, **self.kwds)
async def execute(self, keys=None, args=None, client=None):
    """Execute the script, passing any required ``keys`` and ``args``.

    :param keys: iterable of Redis keys the script operates on
    :param args: iterable of additional script arguments
    :param client: Redis client/pipeline to run against; defaults to the
        client this script was registered with
    :return: the script's result from EVALSHA
    """
    # BUG FIX: replaced mutable default arguments ([]) with None sentinels.
    keys = () if keys is None else tuple(keys)
    args = () if args is None else tuple(args)
    if client is None:
        client = self.registered_client
    call_args = keys + args
    # make sure the Redis server knows about the script
    if isinstance(client, BasePipeline):
        # make sure this script is good to go on pipeline
        client.scripts.add(self)
    try:
        return await client.evalsha(self.sha, len(keys), *call_args)
    except NoScriptError:
        # Maybe the client is pointed to a different server than the client
        # that created this instance?
        # Overwrite the sha just in case there was a discrepancy.
        self.sha = await client.script_load(self.script)
        return await client.evalsha(self.sha, len(keys), *call_args)
def handle(self, *args, **options):
    """Reset the database for this project.

    Drops and recreates the configured database for sqlite, MySQL or
    PostgreSQL backends, optionally asking for interactive confirmation.

    Note: Transaction wrappers are in reverse as a work around for
    autocommit, anybody know how to do this the right way?
    """
    router = options['router']
    dbinfo = settings.DATABASES.get(router)
    if dbinfo is None:
        raise CommandError("Unknown database router %s" % router)
    engine = dbinfo.get('ENGINE')
    # Connection defaults; possibly overridden below by the MySQL cnf file,
    # Django settings, and command-line options (in increasing priority).
    user = password = database_name = database_host = database_port = ''
    if engine == 'mysql':
        (user, password, database_name, database_host, database_port) = parse_mysql_cnf(dbinfo)
    user = options['user'] or dbinfo.get('USER') or user
    password = options['password'] or dbinfo.get('PASSWORD') or password
    owner = options['owner'] or user
    database_name = options['dbname'] or dbinfo.get('NAME') or database_name
    if database_name == '':
        raise CommandError("You need to specify DATABASE_NAME in your Django settings file.")
    database_host = dbinfo.get('HOST') or database_host
    database_port = dbinfo.get('PORT') or database_port
    verbosity = options["verbosity"]
    if options['interactive']:
        confirm = input("""
You have requested a database reset.
This will IRREVERSIBLY DESTROY
ALL data in the database "%s".
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % (database_name,))
    else:
        confirm = 'yes'
    if confirm != 'yes':
        print("Reset cancelled.")
        return
    # Engine lists are overridable via settings so custom/forked backends
    # can opt in to the matching reset strategy.
    SQLITE_ENGINES = getattr(settings, 'DJANGO_EXTENSIONS_RESET_DB_SQLITE_ENGINES', ('django.db.backends.sqlite3', 'django.db.backends.spatialite',))
    MYSQL_ENGINES = getattr(settings, 'DJANGO_EXTENSIONS_RESET_DB_MYSQL_ENGINES', ('django.db.backends.mysql',))
    POSTGRESQL_ENGINES = getattr(settings, 'DJANGO_EXTENSIONS_RESET_DB_POSTGRESQL_ENGINES', ('django.db.backends.postgresql', 'django.db.backends.postgresql_psycopg2', 'django.db.backends.postgis', 'psqlextra.backend',))
    if engine in SQLITE_ENGINES:
        # SQLite: the database is a single file; deleting it is the reset.
        try:
            logging.info("Unlinking %s database", engine)
            os.unlink(database_name)
        except OSError:
            # Missing file means the database is already gone.
            pass
    elif engine in MYSQL_ENGINES:
        import MySQLdb as Database
        kwargs = {
            'user': user,
            'passwd': password,
        }
        # A leading slash denotes a UNIX socket path, not a hostname.
        if database_host.startswith('/'):
            kwargs['unix_socket'] = database_host
        else:
            kwargs['host'] = database_host
        if database_port:
            kwargs['port'] = int(database_port)
        connection = Database.connect(**kwargs)
        drop_query = 'DROP DATABASE IF EXISTS `%s`' % database_name
        utf8_support = '' if options['no_utf8_support'] else 'CHARACTER SET utf8'
        create_query = 'CREATE DATABASE `%s` %s' % (database_name, utf8_support)
        logging.info('Executing... "%s"', drop_query)
        connection.query(drop_query)
        logging.info('Executing... "%s"', create_query)
        connection.query(create_query.strip())
    elif engine in POSTGRESQL_ENGINES:
        import psycopg2 as Database  # NOQA
        # Connect to template1 because the target database is being dropped.
        conn_params = {'database': 'template1'}
        if user:
            conn_params['user'] = user
        if password:
            conn_params['password'] = password
        if database_host:
            conn_params['host'] = database_host
        if database_port:
            conn_params['port'] = database_port
        connection = Database.connect(**conn_params)
        connection.set_isolation_level(0)  # autocommit false
        cursor = connection.cursor()
        if options['close_sessions']:
            # Terminate other sessions so the DROP below can succeed.
            close_sessions_query = """
            SELECT pg_terminate_backend(pg_stat_activity.pid)
            FROM pg_stat_activity
            WHERE pg_stat_activity.datname = '%s';
            """ % database_name
            logging.info('Executing... "%s"', close_sessions_query.strip())
            try:
                cursor.execute(close_sessions_query)
            except Database.ProgrammingError as e:
                logging.exception("Error: %s", str(e))
        drop_query = "DROP DATABASE \"%s\";" % database_name
        logging.info('Executing... "%s"', drop_query)
        try:
            cursor.execute(drop_query)
        except Database.ProgrammingError as e:
            logging.exception("Error: %s", str(e))
        create_query = "CREATE DATABASE \"%s\"" % database_name
        if owner:
            create_query += " WITH OWNER = \"%s\" " % owner
        create_query += " ENCODING = 'UTF8'"
        if settings.DEFAULT_TABLESPACE:
            create_query += ' TABLESPACE = %s;' % settings.DEFAULT_TABLESPACE
        else:
            create_query += ';'
        logging.info('Executing... "%s"', create_query)
        cursor.execute(create_query)
    else:
        raise CommandError("Unknown database engine %s" % engine)
    if verbosity >= 2 or options['interactive']:
        print("Reset successful.")
def __filename_to_modname(self, pathname):
    """@type  pathname: str
    @param pathname: Pathname to a module.

    @rtype:  str
    @return: Module name."""
    filename = PathOperations.pathname_to_filename(pathname)
    if not filename:
        # No filename component: fall back to the full pathname.
        return pathname
    filename = filename.lower()
    base, extension = PathOperations.split_extension(filename)
    # Strip the extension only when both parts are present.
    if base and extension:
        return base
    return filename
def range(self):
    """A tuple containing the numeric range for this Slot.

    The Python equivalent of the CLIPS slot-range function."""
    data = clips.data.DataObject(self._env)
    lib.EnvSlotRange(self._env, self._cls, self._name, data.byref)
    value = data.value
    # A non-list value means no numeric range is defined for this slot.
    return tuple(value) if isinstance(value, list) else ()
def buildmod(*modules):
    '''Build module using znc-buildmod

    CLI Example:

    .. code-block:: bash

        salt '*' znc.buildmod module.cpp [...]
    '''
    # Check if module files are missing
    missing = [module for module in modules if not os.path.exists(module)]
    if missing:
        return 'Error: The file ({0}) does not exist.'.format(', '.join(missing))
    cmd = ['znc-buildmod', *modules]
    out = __salt__['cmd.run'](cmd, python_shell=False).splitlines()
    # Only the final line of the build output is reported.
    return out[-1]
def advect(f, vx, vy):
    """Move field f according to x and y velocities (u and v)
    using an implicit Euler integrator."""
    rows, cols = f.shape
    cell_xs, cell_ys = np.meshgrid(np.arange(cols), np.arange(rows))
    # Trace each cell back along the velocity field to its source point.
    src_xs = (cell_xs - vx).ravel()
    src_ys = (cell_ys - vy).ravel()
    # Integer indices of the upper-left source cell.
    row0 = np.floor(src_ys).astype(int)
    col0 = np.floor(src_xs).astype(int)
    # Fractional offsets: interpolation weights toward row0+1 / col0+1.
    wy = src_ys - row0
    wx = src_xs - col0
    # Wrap around edges of simulation (toroidal domain).
    row0 = np.mod(row0, rows)
    row1 = np.mod(row0 + 1, rows)
    col0 = np.mod(col0, cols)
    col1 = np.mod(col0 + 1, cols)
    # Bilinear interpolation of the 4 surrounding cells.
    interpolated = ((1 - wy) * ((1 - wx) * f[row0, col0] + wx * f[row0, col1])
                    + wy * ((1 - wx) * f[row1, col0] + wx * f[row1, col1]))
    return np.reshape(interpolated, (rows, cols))
def run(self, node, expr=None, lineno=None, with_raise=True):
    """Execute parsed Ast representation for an expression.

    :param node: AST node (or source string) to evaluate; None returns None
    :param expr: original expression text, used for error reporting
    :param lineno: line number to record for error reporting
    :param with_raise: re-raise evaluation errors via raise_exception
    :return: the handler's result, or None on error/empty input
    """
    # Note: keep the 'node is None' test: internal code here may run
    # run(None) and expect a None in return.
    if time.time() - self.start_time > self.max_time:
        raise RuntimeError(ERR_MAX_TIME.format(self.max_time))
    out = None
    if len(self.error) > 0:
        return out
    if node is None:
        return out
    if isinstance(node, str):
        node = self.parse(node)
    if lineno is not None:
        self.lineno = lineno
    if expr is not None:
        self.expr = expr
    # get handler for this node:
    # on_xxx with handle nodes of type 'xxx', etc
    try:
        handler = self.node_handlers[node.__class__.__name__.lower()]
    except KeyError:
        return self.unimplemented(node)
    # run the handler: this will likely generate
    # recursive calls into this run method.
    try:
        ret = handler(node)
        if isinstance(ret, enumerate):
            ret = list(ret)
        return ret
    except Exception:
        # BUG FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed when with_raise is False.
        if with_raise:
            self.raise_exception(node, expr=expr)
def load_from_directory(list_name):
    """Load a point-in-time restricted list from disk.

    Restricted lists are kept as a directory tree:
        SECURITY_LISTS_DIR/<list_name>/<knowledge_date>/<lookup_date>/<file>
    where each file (e.g. add.txt / delete.txt) holds one symbol per line.
    New symbols should be entered under a new knowledge date entry.

    :param list_name: name of the list directory to read.
    :return: dict mapping knowledge date -> lookup date ->
        {file name: [symbol list]}.
    """
    result = {}
    list_dir = os.path.join(SECURITY_LISTS_DIR, list_name)
    for knowledge_name in listdir(list_dir):
        knowledge_date = datetime.strptime(
            knowledge_name, DATE_FORMAT).replace(tzinfo=pytz.utc)
        knowledge_path = os.path.join(list_dir, knowledge_name)
        per_lookup = {}
        for lookup_name in listdir(knowledge_path):
            lookup_date = datetime.strptime(
                lookup_name, DATE_FORMAT).replace(tzinfo=pytz.utc)
            lookup_path = os.path.join(knowledge_path, lookup_name)
            file_map = {}
            for file_name in listdir(lookup_path):
                with open(os.path.join(lookup_path, file_name)) as handle:
                    file_map[file_name] = handle.read().splitlines()
            per_lookup[lookup_date] = file_map
        result[knowledge_date] = per_lookup
    return result
def cli(env):
    """List firewalls: dedicated VLAN firewalls plus the standard
    per-guest and per-server ones on shared VLANs."""
    mgr = SoftLayer.FirewallManager(env.client)
    table = formatting.Table(
        ['firewall id', 'type', 'features', 'server/vlan id'])
    fwvlans = mgr.get_firewalls()
    # Dedicated VLAN firewalls first.
    for vlan in (fw for fw in fwvlans if fw['dedicatedFirewallFlag']):
        features = []
        if vlan['highAvailabilityFirewallFlag']:
            features.append('HA')
        feature_list = (formatting.listing(features, separator=',')
                        if features else formatting.blank())
        table.add_row([
            'vlan:%s' % vlan['networkVlanFirewall']['id'],
            'VLAN - dedicated',
            feature_list,
            vlan['id'],
        ])
    # Standard firewalls on shared VLANs: per virtual server, then per
    # hardware server.
    for vlan in (fw for fw in fwvlans if not fw['dedicatedFirewallFlag']):
        for firewall in (guest
                         for guest in vlan['firewallGuestNetworkComponents']
                         if has_firewall_component(guest)):
            table.add_row([
                'vs:%s' % firewall['id'],
                'Virtual Server - standard',
                '-',
                firewall['guestNetworkComponent']['guest']['id'],
            ])
        for firewall in (server
                         for server in vlan['firewallNetworkComponents']
                         if has_firewall_component(server)):
            table.add_row([
                'server:%s' % firewall['id'],
                'Server - standard',
                '-',
                utils.lookup(firewall, 'networkComponent',
                             'downlinkComponent', 'hardwareId'),
            ])
    env.fout(table)
def generate_colours(n):
    """Return a list of ``n`` distinct colours, each represented as an
    RGB string suitable for use in CSS.

    Hues are stepped around the colour wheel by the golden-ratio
    conjugate, which keeps successive colours visually far apart.
    Based on the code at
    http://martin.ankerl.com/2009/12/09/how-to-create-random-colors-programmatically/

    :param n: number of colours to generate
    :type n: `int`
    :rtype: `list` of `str`
    """
    GOLDEN_RATIO_CONJUGATE = 0.618033988749895
    hue = 0.8          # initial hue
    saturation = 0.7   # fixed saturation
    value = 0.95       # fixed value
    colours = []
    for _ in range(n):
        hue = (hue + GOLDEN_RATIO_CONJUGATE) % 1
        colours.append(hsv_to_rgb(hue, saturation, value))
    return colours
def rectangle(self, x0, y0, x1, y1):
    """Draw a rectangle by tracing its four edges as a closed polyline."""
    x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1)
    corners = [[x0, y0], [x1, y0], [x1, y1], [x0, y1], [x0, y0]]
    self.polyline(corners)
def add_transcript(self, tx):
    """Add a transcript to the locus graph.

    The transcript becomes a new node; edges are added to every existing
    node whose transcripts overlap it according to the merge rules
    (exon overlap and/or junction overlap).

    Fixes a NameError in the junction 'is_subset' branch, which
    referenced ``Seqtools`` (capital S) instead of ``seqtools``.

    :param tx: transcript to add
    :type tx: Transcript
    :return: True (also when tx was already present)
    """
    # Already present? Each node payload is a list of transcripts.
    for payload in [node.payload for node in self.g.get_nodes()]:
        if tx.id in [member.id for member in payload]:
            sys.stderr.write("WARNING tx is already in graph\n")
            return True
    # Transcript isn't part of the graph yet: create its node.
    n = seqtools.graph.Node([tx])
    other_nodes = self.g.get_nodes()
    self.g.add_node(n)

    def _link_both(a, b):
        # Connect two nodes in both directions.
        self.g.add_edge(seqtools.graph.Edge(a, b), verbose=False)
        self.g.add_edge(seqtools.graph.Edge(b, a), verbose=False)

    def _apply(n2, overlap, any_overlap_hit):
        # Translate an overlap result into graph edges per the
        # configured merge type. any_overlap_hit tells whether the
        # overlap counts as a hit for 'is_any_overlap'.
        merge_type = self.merge_rules.get_merge_type()
        if merge_type == 'is_compatible':
            if overlap.is_compatible():
                _link_both(n, n2)
        elif merge_type == 'is_subset':
            r = overlap.is_subset()
            # 1 = mutual subset, 2 = n subset of n2, 3 = n2 subset of n.
            if r == 2 or r == 1:
                self.g.add_edge(seqtools.graph.Edge(n, n2), verbose=False)
            if r == 3 or r == 1:
                self.g.add_edge(seqtools.graph.Edge(n2, n), verbose=False)
        elif merge_type == 'is_full_overlap':
            if overlap.is_full_overlap():
                _link_both(n, n2)
        elif merge_type == 'is_any_overlap':
            if any_overlap_hit(overlap):
                _link_both(n, n2)

    # Now see if the new node connects to any existing node.
    for n2 in other_nodes:
        for tx2 in n2.payload:
            # Exon overlap (if enabled for this transcript pair).
            er = self.merge_rules.get_exon_rules()
            single_ok = (self.merge_rules.get_use_single_exons()
                         and (tx.get_exon_count() == 1
                              or tx2.get_exon_count() == 1))
            multi_ok = (self.merge_rules.get_use_multi_exons()
                        and (tx.get_exon_count() > 1
                             and tx2.get_exon_count() > 1))
            if single_ok or multi_ok:
                eo = tx.exon_overlap(
                    tx2,
                    multi_minover=er['multi_minover'],
                    multi_endfrac=er['multi_endfrac'],
                    multi_midfrac=er['multi_midfrac'],
                    single_minover=er['single_minover'],
                    single_frac=er['single_frac'])
                _apply(n2, eo, lambda ov: ov.match_exon_count() > 0)
            # Junction overlap.
            if self.merge_rules.get_use_junctions():
                jo = tx.junction_overlap(tx2, self.merge_rules.get_juntol())
                _apply(n2, jo, lambda ov: ov.match_junction_count() > 0)
    return True
def is_reference_target(resource, rtype, label):
    """Return True if the resource has a reference of type ``rtype``
    carrying this ``label``, False otherwise.

    The original implicitly returned None when rtype was absent; this
    always returns a bool (None was falsy anyway, so truthiness-based
    callers are unaffected).
    """
    prop = resource.props.references.get(rtype, False)
    if not prop:
        return False
    return label in prop
def _matches_patterns(self, matches, context):
    """Run every effective pattern against the input string, appending
    the resulting matches (and markers) to ``matches``.

    :param matches: matches list to append to; also provides the
        input_string being scanned and the separate markers list.
    :type matches: Matches
    :param context: context to use
    :type context: dict
    :return:
    :rtype:
    """
    if not self.disabled(context):
        patterns = self.effective_patterns(context)
        for pattern in patterns:
            if not pattern.disabled(context):
                pattern_matches = pattern.matches(matches.input_string, context)
                if pattern_matches:
                    log(pattern.log_level, "Pattern has %s match(es). (%s)", len(pattern_matches), pattern)
                else:
                    pass
                    # log(pattern.log_level, "Pattern doesn't match. (%s)" % (pattern,))
                for match in pattern_matches:
                    if match.marker:
                        # Markers are kept apart from regular matches.
                        log(pattern.log_level, "Marker found. (%s)", match)
                        matches.markers.append(match)
                    else:
                        log(pattern.log_level, "Match found. (%s)", match)
                        matches.append(match)
            else:
                log(pattern.log_level, "Pattern is disabled. (%s)", pattern)
def add_xmlid(ctx, record, xmlid, noupdate=False):
    """Attach an XMLID to an existing record.

    Returns the ir.model.data entry: the existing one when the xmlid is
    already known, otherwise a freshly created one.
    """
    try:
        ref_id, __, __ = ctx.env['ir.model.data'].xmlid_lookup(xmlid)
    except ValueError:
        # Unknown xmlid: fall through and create it below.
        pass
    else:
        return ctx.env['ir.model.data'].browse(ref_id)
    if '.' in xmlid:
        module, name = xmlid.split('.')
    else:
        module, name = '', xmlid
    values = {
        'name': name,
        'module': module,
        'model': record._name,
        'res_id': record.id,
        'noupdate': noupdate,
    }
    return ctx.env['ir.model.data'].create(values)
def list_supported_types(category_name):
    """Print the supported external account type names for the given
    category. For example, "AWS_ACCESS_KEY_AUTH" is a supported type
    name for the external account category "AWS".
    """
    supported = get_supported_types(api, category_name)
    names = [account_type.name for account_type in supported]
    print("Supported account types by name for '{0}': [{1}]".format(
        category_name, COMMA_WITH_SPACE.join(map(str, names))))
def hazard_notes(self):
    """Collect the hazard-specific notes from the definitions metadata.

    Combines the hazard's generic notes with the notes matching the
    layer mode (classified/continuous) and the hazard category
    (single/multiple event) recorded in the hazard keywords.

    .. versionadded:: 3.5

    :returns: A list like e.g. safe.definitions.hazard_land_cover['notes']
    :rtype: list, None
    """
    notes = []
    hazard = definition(self.hazard.keywords.get('hazard'))
    if 'notes' in hazard:
        notes += hazard['notes']
    layer_mode = self.hazard.keywords['layer_mode']
    if layer_mode == 'classified' and 'classified_notes' in hazard:
        notes += hazard['classified_notes']
    if layer_mode == 'continuous' and 'continuous_notes' in hazard:
        notes += hazard['continuous_notes']
    category = self.hazard.keywords['hazard_category']
    if category == 'single_event' and 'single_event_notes' in hazard:
        notes += hazard['single_event_notes']
    if category == 'multiple_event' and 'multi_event_notes' in hazard:
        notes += hazard['multi_event_notes']
    return notes
def generate_k(order, secexp, hash_func, data):
    '''Generate a deterministic nonce ``k`` per RFC 6979.

    order - order of the DSA generator used in the signature
    secexp - secure exponent (private key) in numeric form
    hash_func - reference to the same hash function used for generating hash
    data - hash in binary form of the signing data
    '''
    qlen = bit_length(order)
    holen = hash_func().digest_size
    # rolen is an octet count: ceil(qlen / 8). '/' yields a float on
    # Python 3; use integer floor division.
    rolen = (qlen + 7) // 8
    bx = number_to_string(secexp, order) + bits2octets(data, order)
    # Step B
    v = b('\x01') * holen
    # Step C
    k = b('\x00') * holen
    # Step D
    k = hmac.new(k, v + b('\x00') + bx, hash_func).digest()
    # Step E
    v = hmac.new(k, v, hash_func).digest()
    # Step F
    k = hmac.new(k, v + b('\x01') + bx, hash_func).digest()
    # Step G
    v = hmac.new(k, v, hash_func).digest()
    # Step H: produce candidates until one lies in [1, order).
    while True:
        # Step H1
        t = b('')
        # Step H2: concatenate HMAC output until rolen octets gathered.
        while len(t) < rolen:
            v = hmac.new(k, v, hash_func).digest()
            t += v
        # Step H3
        secret = bits2int(t, qlen)
        if secret >= 1 and secret < order:
            return secret
        # Candidate out of range: re-key and try again.
        k = hmac.new(k, v + b('\x00'), hash_func).digest()
        v = hmac.new(k, v, hash_func).digest()
def levenshtein_distance(str_a, str_b):
    """Calculate the Levenshtein distance between string a and b.

    Uses the classic two-row dynamic programming scheme; the shorter
    string is kept as str_a so the rows stay as small as possible.

    :param str_a: String - input string a
    :param str_b: String - input string b
    :return: Number - Levenshtein Distance between string a and b
    """
    short_len, long_len = len(str_a), len(str_b)
    if short_len > long_len:
        str_a, str_b = str_b, str_a
        short_len, long_len = long_len, short_len
    previous_row = list(range(short_len + 1))
    for i in range(1, long_len + 1):
        current_row = [i] + [0] * short_len
        for j in range(1, short_len + 1):
            insert_cost = previous_row[j] + 1
            delete_cost = current_row[j - 1] + 1
            # Substitution costs 1 only when the characters differ.
            substitute_cost = previous_row[j - 1] + (str_a[j - 1] != str_b[i - 1])
            current_row[j] = min(insert_cost, delete_cost, substitute_cost)
        previous_row = current_row
    return previous_row[short_len]
def run(args, ff=''):
    """Run setup.py with the egg_info walk_revctrl monkey patch applied."""
    import setuptools.command.egg_info
    # Choose the replacement for walk_revctrl before importing setup.
    if ff == 'none':
        patched = no_walk_revctrl
    else:
        patched = partial(walk_revctrl, ff=ff)
    setuptools.command.egg_info.walk_revctrl = patched
    sys.argv = ['setup.py'] + args
    import setup
    cleanup_pycache()
def lookupListener(self, listenID):
    """(internal)

    Pop and return the waiting connection registered under ``listenID``,
    cancelling its pending timeout call. Returns None when nothing is
    waiting under that identifier.
    """
    if listenID not in self.inboundConnections:
        return None
    # cwait is the _ConnectionWaiter instance; call is its timeout.
    cwait, call = self.inboundConnections.pop(listenID)
    call.cancel()
    return cwait
def write_generator_data(self, file):
    """Writes generator data in MATPOWER format."""
    # NOTE(review): despite the docstring, the comma-separated columns
    # and the trailing "END OF GENERATOR DATA" sentinel look like PSS/E
    # raw format -- confirm against the caller before relying on it.
    for generator in self.case.generators:
        vals = []
        vals.append(generator.bus._i)      # bus number
        vals.append("1 ")
        # ID
        vals.append(generator.p)           # active power output
        vals.append(generator.q)           # reactive power output
        vals.append(generator.q_max)
        vals.append(generator.q_min)
        vals.append(generator.v_magnitude)
        vals.append(0)
        # IREG
        vals.append(generator.base_mva)
        # Five zero/default placeholder fields (presumably machine
        # impedance / transformer fields -- confirm column meaning).
        vals.extend([0., 1., 0., 0., 0.])
        vals.append(generator.online)      # status flag
        vals.append(100.0)
        # RMPCT
        vals.append(generator.p_max)
        vals.append(generator.p_min)
        vals.extend([1, 1.0])
        # O1, F1
        # Fixed-width record: 20 fields must match the vals list exactly.
        file.write("%6d,'%s',%10.3f,%10.3f,%10.3f,%10.3f,%10.5f,%6d,%10.3f," "%10.5f,%10.5f,%10.5f,%10.5f,%7.5f,%d,%7.1f,%10.3f," "%10.3f,%4d,%6.4f\n" % tuple(vals))
    file.write(" 0 / END OF GENERATOR DATA, BEGIN NON-TRANSFORMER BRANCH DATA\n")
def getPlatformInfo():
    """Identify the platform.

    :return: one of 'linux', 'darwin' or 'windows'.
    :raises Exception: for any other platform.
    """
    plat = sys.platform
    if "linux" in plat:
        return "linux"
    if "darwin" in plat:
        return "darwin"
    if plat.startswith("win"):
        # Covers win32 and friends.
        return "windows"
    raise Exception("Platform '%s' is unsupported!" % plat)
def newRegion(self, loc, width, height):
    """Create a new region on the current screen, offset from the
    screen's top-left corner by ``loc``, with the given width and
    height."""
    top_left = self.getTopLeft().offset(loc)
    return Region.create(top_left, width, height)
def get_inline_func(inline_str, modules=None, **stream_kwargs):
    """Build a function decorated by the `cbox.stream` decorator from an
    inline expression.

    :param str inline_str: the inline function to execute; may use the
        local variable `s` as the input line/char/raw (according to the
        `input_type` param).
    :param str modules: comma separated list of modules to import before
        running the inline function.
    :param dict stream_kwargs: optional arguments to `cbox.stream` decorator
    :rtype: callable
    :raises ValueError: if the expression does not compile.
    """
    if not _is_compilable(inline_str):
        raise ValueError(
            'cannot compile the inline expression - "%s"' % inline_str)
    inline_globals = _import_inline_modules(modules)
    return _inline2func(inline_str, inline_globals, **stream_kwargs)
def rolling_update(config=None, name=None, image=None, container_name=None, rc_new=None):
    """Performs a simple rolling update of a ReplicationController.

    See https://github.com/kubernetes/kubernetes/blob/master/docs/design/simple-rolling-update.md
    for algorithm details. We have modified it slightly to allow for keeping the same RC name
    between updates, which is not supported by default by kubectl.

    :param config: An instance of K8sConfig. If omitted, reads from ~/.kube/config.
    :param name: The name of the ReplicationController we want to update.
    :param image: The updated image version we want applied.
    :param container_name: The name of the container we're targeting for the update.
        Required if more than one container is present.
    :param rc_new: An instance of K8sReplicationController with the new configuration to apply.
        Mutually exclusive with [image, container_name] if specified.
    :raises SyntaxError: on missing or conflicting arguments.
    :return:
    """
    if name is None:
        raise SyntaxError('K8sReplicationController: name: [ {0} ] cannot be None.'.format(name))
    if image is None and rc_new is None:
        raise SyntaxError("K8sReplicationController: please specify either 'image' or 'rc_new'")
    # rc_new is mutually exclusive with (image, container_name). The
    # previous check only fired when all three were given, letting an
    # image+rc_new combination slip through despite the documented contract.
    if rc_new is not None and (image is not None or container_name is not None):
        raise SyntaxError('K8sReplicationController: rc_new is mutually exclusive with an (container_name, image) pair.')
    return K8sReplicationController._rolling_update_init(
        config=config, name=name, image=image,
        container_name=container_name, rc_new=rc_new)
def refresh_if_needed(self):
    """Refresh the status of the task from the server if required.

    Only tasks still in the PENDING or STARTED state are refreshed. A
    server-reported error sets the state to FAILURE and raises
    ServerError; otherwise state and result are taken from the response.
    """
    if self.state in (self.PENDING, self.STARTED):
        try:
            response, = self._fetch_result()['tasks']
        except (KeyError, ValueError):
            raise Exception("Unable to find results for task.")
        if 'error' in response:
            # The original used '==' here, comparing and discarding the
            # value instead of recording the failure state.
            self.state = self.FAILURE
            raise ServerError(response['error'])
        if 'state' in response:
            self.state = response['state']
            self.result = response['result']
def remove_style(self):
    """Remove every XSL run rStyle element whose value matches this
    instance's style."""
    xpath = './/w:rStyle[@w:val="%s"]' % self.style
    for node in self.root.xpath(xpath, namespaces=self.namespaces):
        node.getparent().remove(node)
def INIT_LIST_EXPR(self, cursor):
    """Return the literal values of an initializer list, parsing each
    child cursor in order."""
    return [self.parse_cursor(child) for child in cursor.get_children()]
def snakescan(xi, yi, xf, yf):
    """Scan pixels in a snake pattern along the x-coordinate then
    y-coordinate: each row is traversed in the opposite direction to
    the previous one.

    :param xi: Initial x-coordinate
    :type xi: int
    :param yi: Initial y-coordinate
    :type yi: int
    :param xf: Final x-coordinate
    :type xf: int
    :param yf: Final y-coordinate
    :type yf: int
    :returns: Coordinate generator
    :rtype: function
    """
    step_x = 1 if xf >= xi else -1
    step_y = 1 if yf >= yi else -1
    start, stop = xi, xf
    for y in range(yi, yf + step_y, step_y):
        for x in range(start, stop + step_x, step_x):
            yield x, y
        # Reverse the x-direction for the next row.
        start, stop = stop, start
        step_x = -step_x
def _read ( self , n ) :
"""It seems that SSL Objects read ( ) method may not supply as much
as you ' re asking for , at least with extremely large messages .
somewhere > 16K - found this in the test _ channel . py test _ large
unittest .""" | result = self . sslobj . read ( n )
while len ( result ) < n :
s = self . sslobj . read ( n - len ( result ) )
if not s :
raise IOError ( 'Socket closed' )
result += s
return result |
def parse_files(self, req, name, field):
    """Pull a file upload from the request (POST items that expose a
    ``file`` attribute)."""
    uploads = ((key, value) for key, value in req.POST.items()
               if hasattr(value, "file"))
    return core.get_value(MultiDict(uploads), name, field)
def _sync_from_disk(self):
    """Read any changes made on disk to this Refpkg.

    This is necessary if other programs are making changes to the
    Refpkg on disk and your program must be synchronized to them.

    :raises ValueError: if the manifest file is missing or the refpkg
        path is not a directory.
    """
    try:
        fobj = self.open_manifest('r')
    except IOError as e:
        # Translate the two expected filesystem failures into clearer
        # ValueErrors; re-raise anything else untouched.
        if e.errno == errno.ENOENT:
            raise ValueError("couldn't find manifest file in %s" % (self.path,))
        elif e.errno == errno.ENOTDIR:
            raise ValueError("%s is not a directory" % (self.path,))
        else:
            raise
    with fobj:
        self.contents = json.load(fobj)
        self._set_defaults()
        self._check_refpkg()
def aspectMalefics(self):
    """Returns a list with the bad aspects the object
    makes to the malefics (Mars and Saturn).

    Aspects considered bad here: conjunction (0), square (90) and
    opposition (180) degrees.
    """
    malefics = [const.MARS, const.SATURN]
    return self.__aspectLists(malefics, aspList=[0, 90, 180])
def cache_status(db, aid, force=False):
    """Calculate and cache status for given anime.

    Don't do anything if status already exists and force is False.

    :param db: sqlite database connection.
    :param aid: anime id.
    :param force: recompute even if a cached row already exists.
    :raises ValueError: if aid does not exist in the anime table.
    """
    with db:
        cur = db.cursor()
        if not force:
            # We don't do anything if we already have this aid in our
            # cache.
            cur.execute('SELECT 1 FROM cache_anime WHERE aid=?', (aid,))
            if cur.fetchone() is not None:
                return
        # Retrieve information for determining complete.
        cur.execute('SELECT episodecount, enddate FROM anime WHERE aid=?', (aid,))
        row = cur.fetchone()
        if row is None:
            raise ValueError('aid provided does not exist')
        episodecount, enddate = row
        # Select all regular episodes in ascending order.
        cur.execute("""
            SELECT number, user_watched FROM episode
            WHERE aid=? AND type=?
            ORDER BY number ASC
            """, (aid, get_eptype(db, 'regular').id))
        # We find the last consecutive episode that is user_watched.
        number = 0
        for number, watched in cur:
            # Once we find the first unwatched episode, we set the last
            # consecutive watched episode to the previous episode (or 0).
            if watched == 0:
                number -= 1
                break
        # We store this in the cache. The anime counts as complete when
        # it has an enddate and all episodes up to episodecount are
        # watched consecutively.
        set_status(db, aid, enddate and episodecount <= number, number)
def _get_parent_remote_paths ( self ) :
"""Get list of remote folders based on the list of all file urls
: return : set ( [ str ] ) : set of remote folders ( that contain files )""" | parent_paths = set ( [ item . get_remote_parent_path ( ) for item in self . file_urls ] )
if '' in parent_paths :
parent_paths . remove ( '' )
return parent_paths |
def usermacro_create(macro, value, hostid, **kwargs):
    '''Create new host usermacro.

    :param macro: name of the host usermacro
    :param value: value of the host usermacro
    :param hostid: hostid or templateid
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)

    return: ID of the created host usermacro.

    CLI Example:

    .. code-block:: bash

        salt '*' zabbix.usermacro_create '{$SNMP_COMMUNITY}' 'public' 1
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            params = {}
            method = 'usermacro.create'
            if macro:
                # Python mistakenly interprets macro names starting and
                # ending with '{' and '}' as a dict.
                if isinstance(macro, dict):
                    # dict.keys() is not indexable on Python 3; take the
                    # first key via iteration instead.
                    macro = "{" + six.text_type(next(iter(macro))) + "}"
                # NOTE(review): this only wraps when BOTH braces are
                # missing; a half-wrapped name like '{FOO' passes through
                # unchanged -- confirm before tightening the check.
                if not macro.startswith('{') and not macro.endswith('}'):
                    macro = "{" + macro + "}"
            params['macro'] = macro
            params['value'] = value
            params['hostid'] = hostid
            params = _params_extend(params, _ignore_name=True, **kwargs)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['hostmacroids'][0]
        else:
            raise KeyError
    except KeyError:
        return ret
def dropDuplicates(self, subset=None):
    """Return a new :class:`DataFrame` with duplicate rows removed,
    optionally only considering certain columns.

    For a static batch :class:`DataFrame`, it just drops duplicate rows.
    For a streaming :class:`DataFrame`, it will keep all data across
    triggers as intermediate state to drop duplicates rows. You can use
    :func:`withWatermark` to limit how late the duplicate data can be
    and the system will accordingly limit the state; too-late data older
    than the watermark is dropped to avoid any possibility of duplicates.

    :func:`drop_duplicates` is an alias for :func:`dropDuplicates`.

    :param subset: optional list of column names to consider; when None,
        all columns are used.
    """
    jdf = (self._jdf.dropDuplicates() if subset is None
           else self._jdf.dropDuplicates(self._jseq(subset)))
    return DataFrame(jdf, self.sql_ctx)
def p_type_ref(self, p):
    'type_ref : ID args nullable'
    # PLY uses the docstring above as the grammar production for this
    # rule -- do not edit it except to change the grammar itself.
    # p[1..3] are the matched ID, args and nullable symbols.
    p[0] = AstTypeRef(
        path=self.path,
        lineno=p.lineno(1),
        lexpos=p.lexpos(1),
        name=p[1],
        args=p[2],
        nullable=p[3],
        ns=None,
    )
def _fill_function ( func , globals , defaults , dict , module , closure_values ) :
"""Fills in the rest of function data into the skeleton function object
that were created via _ make _ skel _ func ( ) .""" | func . __globals__ . update ( globals )
func . __defaults__ = defaults
func . __dict__ = dict
func . __module__ = module
cells = func . __closure__
if cells is not None :
for cell , value in zip ( cells , closure_values ) :
if value is not _empty_cell_value :
cell_set ( cell , value )
return func |
def _pc_decode(self, msg):
    """PC: PLC (lighting) change.

    Decodes the housecode (message chars 4-6) and the light level
    (chars 7-8) from the raw message.
    """
    housecode = msg[4:7]
    level = int(msg[7:9])
    return {
        'housecode': housecode,
        'index': housecode_to_index(housecode),
        'light_level': level,
    }
def _GetTimeElementsTuple ( self , timestamp ) :
"""Retrieves a time elements tuple from the timestamp .
A Symantec log timestamp consist of six hexadecimal octets , that represent :
First octet : Number of years since 1970
Second octet : Month , where January is represented by 0
Third octet : Day of the month
Fourth octet : Number of hours
Fifth octet : Number of minutes
Sixth octet : Number of seconds
For example , 200A13080122 represents November 19 , 2002 , 8:01:34 AM .
Args :
timestamp ( str ) : hexadecimal encoded date and time values .
Returns :
tuple : containing :
year ( int ) : year .
month ( int ) : month , where 1 represents January .
day _ of _ month ( int ) : day of month , where 1 is the first day of the month .
hours ( int ) : hours .
minutes ( int ) : minutes .
seconds ( int ) : seconds .""" | year , month , day_of_month , hours , minutes , seconds = ( int ( hexdigit [ 0 ] + hexdigit [ 1 ] , 16 ) for hexdigit in zip ( timestamp [ : : 2 ] , timestamp [ 1 : : 2 ] ) )
return ( year + 1970 , month + 1 , day_of_month , hours , minutes , seconds ) |
def update(self, vts):
    """Mark a changed or invalidated VersionedTargetSet as successfully
    processed.

    Each not-yet-valid versioned target (and finally the set itself) is
    legality-checked, recorded with the invalidator, flagged valid and
    handed to the artifact-write callback.
    """
    for vt in vts.versioned_targets:
        vt.ensure_legal()
        if vt.valid:
            continue
        self._invalidator.update(vt.cache_key)
        vt.valid = True
        self._artifact_write_callback(vt)
    if not vts.valid:
        vts.ensure_legal()
        self._invalidator.update(vts.cache_key)
        vts.valid = True
        self._artifact_write_callback(vts)
def ok(self, event=None):
    """Validate and apply the dialog, then close it.

    This method is identical to tkinter.simpledialog.Dialog.ok(),
    but with 'self.withdraw()' commented out.

    :param event: optional Tk event (when bound to e.g. <Return>); unused.
    """
    if not self.validate():
        self.initial_focus.focus_set()
        # put focus back
        return
    # NOTE(amin): Using self.withdraw() here causes the
    # ui to hang until the window loses and regains
    # focus. There must be some blocking operation going
    # on, but after some digging, I haven't been able to
    # get any leads.
    # NOTE(amin): We must clear the main window's entry
    # before returning focus to it. Otherwise, rapid
    # pressing of the enter key will open multiple dialogs.
    self.entry_to_clear.delete(0, 'end')
    self.update_idletasks()
    try:
        self.apply()
    finally:
        # cancel() closes the dialog even if apply() raised.
        self.cancel()
def to_bitarray(data, width=8):
    '''Convert data (list of integers, bytearray or integer) to a list
    of booleans, most significant bit first, zero-padded to ``width``.'''
    if isinstance(data, (list, bytearray)):
        data = combine_hex(data)
    bits = bin(data)[2:].zfill(width)
    return [digit == '1' for digit in bits]
def _times ( t0 , hours ) :
"""Return a ( list of ) datetime ( s ) given an initial time and an ( list of ) hourly offset ( s ) .
Arguments :
t0 - - initial time
hours - - hourly offsets from t0""" | if not isinstance ( hours , Iterable ) :
return Tide . _times ( t0 , [ hours ] ) [ 0 ]
elif not isinstance ( hours [ 0 ] , datetime ) :
return np . array ( [ t0 + timedelta ( hours = h ) for h in hours ] )
else :
return np . array ( hours ) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.