signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def logger(name='BHMM',
           pattern='%(asctime)s %(levelname)s %(name)s: %(message)s',
           date_format='%H:%M:%S',
           handler=None):
    """Retrieve the logger instance associated with the given name.

    :param name: The name of the logger instance.
    :type name: str
    :param pattern: The associated pattern.
    :type pattern: str
    :param date_format: The date format to be used in the pattern.
    :type date_format: str
    :param handler: The logging handler; by default console output.
    :type handler: FileHandler or StreamHandler or NullHandler
    :return: The logger.
    :rtype: Logger
    """
    # BUG FIX: the original default `handler=logging.StreamHandler(sys.stdout)`
    # was evaluated once at import time, so every call without an explicit
    # handler shared (and reconfigured) the same handler object. Create a
    # fresh one per call instead.
    if handler is None:
        handler = logging.StreamHandler(sys.stdout)
    _logger = logging.getLogger(name)
    _logger.setLevel(config.log_level())
    # Only attach a handler the first time this named logger is configured.
    if not _logger.handlers:
        formatter = logging.Formatter(pattern, date_format)
        handler.setFormatter(formatter)
        handler.setLevel(config.log_level())
        _logger.addHandler(handler)
        # Prevent messages from propagating to the root logger twice.
        _logger.propagate = False
    return _logger
def strip_bewit(url):
    """Strip the bewit query parameter out of *url*.

    Returns ``(encoded_bewit, stripped_url)``.

    :param url: The url containing a bewit parameter
    :type url: str
    :raises InvalidBewit: if no bewit parameter is found
    """
    match = re.search('[?&]bewit=([^&]+)', url)
    if match is None:
        raise InvalidBewit('no bewit data found')
    start, end = match.span()
    # Remove the whole matched "?bewit=..."/"&bewit=..." span from the url.
    return match.group(1), url[:start] + url[end:]
def cib(args):
    """%prog cib bamfile samplekey

    Convert BAM to CIB (a binary storage of int8 per base)."""
    p = OptionParser(cib.__doc__)
    p.add_option("--prefix", help="Report seqids with this prefix only")
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    bamfile, samplekey = args
    mkdir(samplekey)
    bam = pysam.AlignmentFile(bamfile, "rb")
    # One conversion task per reference sequence in the BAM header.
    refs = [x for x in bam.header["SQ"]]
    prefix = opts.prefix
    if prefix:
        refs = [x for x in refs if x["SN"].startswith(prefix)]
    task_args = [(bamfile, r, samplekey) for r in refs]
    cpus = min(opts.cpus, len(task_args))
    logging.debug("Use {} cpus".format(cpus))
    # BUG FIX: the original never closed/joined the Pool, leaking worker
    # processes. Ensure cleanup even if a task raises.
    pool = Pool(processes=cpus)
    try:
        for _ in pool.imap(bam_to_cib, task_args):
            continue
    finally:
        pool.close()
        pool.join()
def func_call(self, t):
    """For function calls e.g. TEXT(tostring([area], "%.2f"))."""
    func, params = t
    # Wrap the original callable name and its parameters in parentheses.
    original_name = func.value
    func.value = "({}({}))".format(original_name, params)
    return func
def create(self, name, kind, **kwargs):
    """Creates an input of a specific kind in this collection, with any
    arguments you specify.

    :param `name`: The input name.
    :type name: ``string``
    :param `kind`: The kind of input:
        - "ad": Active Directory
        - "monitor": Files and directories
        - "registry": Windows Registry
        - "script": Scripts
        - "splunktcp": TCP, processed
        - "tcp": TCP, unprocessed
        - "udp": UDP
        - "win-event-log-collections": Windows event log
        - "win-perfmon": Performance monitoring
        - "win-wmi-collections": WMI
    :type kind: ``string``
    :param `kwargs`: Additional arguments (optional). For more about the
        available parameters, see `Input parameters
        <http://dev.splunk.com/view/SP-CAAAEE6#inputparams>`_ on Splunk
        Developer Portal.
    :type kwargs: ``dict``
    :return: The new :class:`Input`.
    """
    kindpath = self.kindpath(kind)
    self.post(kindpath, name=name, **kwargs)
    # If we created an input with restrictToHost set, then
    # its path will be <restrictToHost>:<name>, not just <name>,
    # and we have to adjust accordingly.
    # Url encodes the name of the entity.
    name = UrlEncoded(name, encode_slash=True)
    path = _path(
        self.path + kindpath,
        '%s:%s' % (kwargs['restrictToHost'], name)
        if 'restrictToHost' in kwargs else name)
    return Input(self.service, path, kind)
def _purge_jobs(timestamp):
    '''Purge records older than *timestamp* from the returner tables.

    :param timestamp: purge rows whose ``alter_time`` is older than this
    :return: True on success
    '''
    # The original repeated the same try/except/commit/rollback block three
    # times; run the same error-handling once per statement instead.
    # Order matters: jids are resolved via salt_returns before its rows go.
    purge_statements = [
        'delete from jids where jid in (select distinct jid from salt_returns where alter_time < %s)',
        'delete from salt_returns where alter_time < %s',
        'delete from salt_events where alter_time < %s',
    ]
    with _get_serv() as cursor:
        for sql in purge_statements:
            try:
                cursor.execute(sql, (timestamp,))
                cursor.execute('COMMIT')
            except psycopg2.DatabaseError as err:
                # Log the failure, roll the transaction back, then re-raise.
                error = err.args
                sys.stderr.write(six.text_type(error))
                cursor.execute("ROLLBACK")
                raise err
    return True
def login(self, username, password='', login_key=None, auth_code=None,
          two_factor_code=None, login_id=None):
    """Login as a specific user.

    :param username: username
    :type username: :class:`str`
    :param password: password
    :type password: :class:`str`
    :param login_key: login key, instead of password
    :type login_key: :class:`str`
    :param auth_code: email authentication code
    :type auth_code: :class:`str`
    :param two_factor_code: 2FA authentication code
    :type two_factor_code: :class:`str`
    :param login_id: number used for identifying logon session
    :type login_id: :class:`int`
    :return: logon result, see ``CMsgClientLogonResponse.eresult``
    :rtype: :class:`.EResult`

    .. note::
        Failure to login will result in the server dropping the connection;
        the ``error`` event is fired.

        The ``auth_code_required`` event is fired when a 2FA or email code
        is needed. Codes are required every time a user logs in if sentry
        is not set up. See :meth:`set_credential_location`.
    """
    self._LOG.debug("Attempting login")
    # Abort early if the connection is not ready for a logon attempt.
    if not self._pre_login():
        return EResult.TryAnotherCM
    self.username = username
    # Build the ClientLogon protobuf message.
    message = MsgProto(EMsg.ClientLogon)
    message.header.steamid = SteamID(type='Individual', universe='Public')
    message.body.protocol_version = 65579
    message.body.client_package_version = 1771
    message.body.client_os_type = EOSType.Windows10
    message.body.client_language = "english"
    message.body.should_remember_password = True
    message.body.supports_rate_limit_response = True
    # The obfuscated private IP doubles as the logon-session identifier;
    # derive it from the local address unless an explicit login_id is given.
    if login_id is None:
        message.body.obfustucated_private_ip = ip_to_int(self.connection.local_address) ^ 0xF00DBAAD
    else:
        message.body.obfustucated_private_ip = login_id
    message.body.account_name = username
    # Authenticate with a login key when available, otherwise the password.
    if login_key:
        message.body.login_key = login_key
    else:
        message.body.password = password
    # Attach the sentry-file hash if one is stored for this account.
    sentry = self.get_sentry(username)
    if sentry is None:
        message.body.eresult_sentryfile = EResult.FileNotFound
    else:
        message.body.eresult_sentryfile = EResult.OK
        message.body.sha_sentryfile = sha1_hash(sentry)
    if auth_code:
        message.body.auth_code = auth_code
    if two_factor_code:
        message.body.two_factor_code = two_factor_code
    self.send(message)
    resp = self.wait_msg(EMsg.ClientLogOnResponse, timeout=30)
    # Brief pause after a successful logon before handing control back.
    if resp and resp.body.eresult == EResult.OK:
        self.sleep(0.5)
    return EResult(resp.body.eresult) if resp else EResult.Fail
def p_object_const_list(self, p):
    '''object_const_list : object_const_list COMMA IDENT
                         | IDENT'''
    if len(p) == 2:
        # Base case: a single IDENT starts a new list.
        p[0] = [p[1]]
    elif len(p) == 4:
        # Recursive case: append the new IDENT to the accumulated list.
        p[1].append(p[3])
        p[0] = p[1]
def stage_job(self, credentials, job_details, input_files, vm_instance_name):
    """Request that a job be staged on a worker (i.e. download some files).

    :param credentials: jobapi.Credentials: user's credentials used to download input_files
    :param job_details: object: details about job (id, name, created date, workflow version)
    :param input_files: [InputFile]: list of files to download
    :param vm_instance_name: str: name of the instance lando_worker is running on
        (this is passed back in the response)
    """
    self._send(JobCommands.STAGE_JOB,
               StageJobPayload(credentials, job_details, input_files, vm_instance_name))
def device_count(self):
    """Find the amount of CUE devices.

    :returns: amount of CUE devices
    :rtype: int
    """
    count = get_device_count(self.corsair_sdk)
    # The SDK signals failure with -1; translate that into an exception.
    if count == -1:
        self._raise_corsair_error()
    return count
def setup_privnet(self, host=None):
    """Load settings from the privnet JSON config file.

    Args:
        host (string, optional): if supplied, uses this IP or domain as neo
            nodes. The host must use these standard ports: P2P 20333,
            RPC 30333.
    """
    self.setup(FILENAME_SETTINGS_PRIVNET)
    # Override seed/RPC lists only when a bare host string is supplied.
    if isinstance(host, str):
        if ":" in host:
            raise Exception("No protocol prefix or port allowed in host, use just the IP or domain.")
        print("Using custom privatenet host:", host)
        self.SEED_LIST = ["%s:20333" % host]
        self.RPC_LIST = ["http://%s:30333" % host]
        print("- P2P:", ", ".join(self.SEED_LIST))
        print("- RPC:", ", ".join(self.RPC_LIST))
    self.check_privatenet()
def multi_raw(query, params, models, model_to_fields):
    """Scoop multiple model instances out of the DB at once, given a query
    that returns all fields of each.

    Return an iterable of sequences of model instances parallel to the
    ``models`` sequence of classes. For example::

        [(<User such-and-such>, <Watch such-and-such>), ...]
    """
    # Route the read through the DB alias of the first model.
    cursor = connections[router.db_for_read(models[0])].cursor()
    cursor.execute(query, params)
    rows = cursor.fetchall()
    for row in rows:
        # Consume the flat row left to right, one model's field list at a
        # time, building each model instance from its slice of columns.
        row_iter = iter(row)
        yield [model_class(**dict((a, next(row_iter))
                                  for a in model_to_fields[model_class]))
               for model_class in models]
def scrub(data):
    """Verify and clean data. Raise error if input fails."""
    # Blanks, Nones, and empty strings can stay as is.
    if not data:
        return data
    if isinstance(data, (int, float)):
        return data
    if isinstance(data, list):
        return [scrub(item) for item in data]
    if isinstance(data, dict):
        return {scrub(k): scrub(v) for k, v in data.items()}
    try:
        # JSON-escape the value; drop the surrounding quotes the encoder adds.
        return json.encoder.encode_basestring(data)[1:-1]
    except TypeError as exc:
        raise ValidationError("Input '%s' is not a permitted type: %s" % (data, exc))
    except Exception as exc:
        raise ValidationError("Input '%s' not permitted: %s" % (data, exc))
def get_environ(keys):
    """Get environment variables from :data:`os.environ`.

    :type keys: [str]
    :rtype: dict

    Some additional features:

    * If 'HOST' is not in :data:`os.environ`, this function automatically
      fetches it using :meth:`platform.node`.
    * If 'TTY' is not in :data:`os.environ`, this function automatically
      fetches it using :func:`get_tty`.
    * Sets 'RASH_SPENV_TERMINAL' if needed.
    """
    subenv = {}
    for key in keys:
        value = os.environ.get(key)
        if value is not None:
            subenv[key] = value

    def needs(key):
        # Requested but missing or empty so far.
        return key in keys and not subenv.get(key)

    def set_if_nonempty(key, value):
        if value:
            subenv[key] = value

    if needs('HOST'):
        import platform
        subenv['HOST'] = platform.node()
    if needs('TTY'):
        set_if_nonempty('TTY', get_tty())
    if needs('RASH_SPENV_TERMINAL'):
        from .utils.termdetection import detect_terminal
        set_if_nonempty('RASH_SPENV_TERMINAL', detect_terminal())
    return subenv
def noop(*layers):
    """Transform a sequence of layers into a null operation."""
    def begin_update(X, drop=0.0):
        # Forward pass is identity; backward pass passes gradients through.
        def finish_update(D, *args, **kwargs):
            return D
        return X, finish_update
    return begin_update
def set_Y(self, Y):
    """Set the output data of the model.

    :param Y: output observations
    :type Y: np.ndarray or ObsArray
    """
    assert isinstance(Y, (np.ndarray, ObsAr))
    # Remember whether model updates were enabled, then suspend them while
    # the data is swapped out.
    state = self.update_model()
    self.update_model(False)
    if self.normalizer is not None:
        # Fit the normalizer to the new data, store both raw and normalized.
        self.normalizer.scale_by(Y)
        self.Y_normalized = ObsAr(self.normalizer.normalize(Y))
        self.Y = Y
    else:
        self.Y = ObsAr(Y) if isinstance(Y, np.ndarray) else Y
        self.Y_normalized = self.Y
    # Restore the previous update state (recomputes if updates were enabled).
    self.update_model(state)
def set_value(self, visual_property, value):
    """Set a single Visual Property Value.

    :param visual_property: Visual Property ID
    :param value: New value for the VP
    :return: None
    """
    if visual_property is None or value is None:
        raise ValueError('Both VP and value are required.')
    # The REST endpoint expects a list of property/value mappings.
    payload = [{'visualProperty': visual_property, "value": value}]
    requests.put(self.url, data=json.dumps(payload), headers=HEADERS)
def dump_stacks(self):
    '''Dump the stack of all threads. This function is meant for debugging;
    useful when a deadlock happens.

    Borrowed from:
    http://blog.ziade.org/2012/05/25/zmq-and-gevent-debugging-nightmares/
    '''
    known_threads = {th.ident: th.name for th in threading.enumerate()}
    chunks = []
    for ident, frame in sys._current_frames().items():
        # Skip frames for threads that are no longer registered.
        if ident not in known_threads:
            continue
        chunks.append('Thread 0x%x (%s)\n' % (ident, known_threads[ident]))
        chunks.append(''.join(traceback.format_stack(frame)))
        chunks.append('\n')
    return ''.join(chunks)
def fit_from_cfg(cls, df, cfgname, debug=False, outcfgname=None):
    """Fit a regression model described by a yaml configuration file.

    Parameters
    ----------
    df : DataFrame
        The dataframe which contains the columns to use for the estimation.
    cfgname : string
        The name of the yaml config file which describes the hedonic model.
    debug : boolean, optional (default False)
        Whether to generate debug information on the model.
    outcfgname : string, optional (default cfgname)
        The name of the output yaml config file where estimation results
        are written into.

    Returns
    -------
    RegressionModel which was used to fit
    """
    logger.debug('start: fit from configuration {}'.format(cfgname))
    hm = cls.from_yaml(str_or_buffer=cfgname)
    ret = hm.fit(df, debug=debug)
    print(ret.summary())
    # Write the estimation results back; defaults to overwriting the input
    # config file when no separate output name is given.
    outcfgname = outcfgname or cfgname
    hm.to_yaml(str_or_buffer=outcfgname)
    logger.debug('finish: fit into configuration {}'.format(outcfgname))
    return hm
def stop(self, devices):
    """Power off one or more running devices."""
    for dev in devices:
        self.logger.info('Stopping: %s', dev.id)
        try:
            dev.power_off()
        except packet.baseapi.Error:
            # Wrap the provider error in the manager's own exception type.
            raise PacketManagerException('Unable to stop instance "{}"'.format(dev.id))
def parse_sync_points(names, tests):
    """Slice a list of test names on sync points.

    If a test is a test file, find the full path to the file.

    Returns:
        A list of test file sets and sync point strings.

    Examples:
        ['test_hard_reboot']
        [set('test1', 'test2')]
        [set('test1', 'test2'), 'test_soft_reboot']
        [set('test1', 'test2'), 'test_soft_reboot', set('test3')]
    """
    result = []
    current_section = set()
    for name in names:
        if name not in SYNC_POINTS:
            current_section.add(find_test_file(name, tests))
            continue
        # Hit a sync point: flush the accumulated section, then record the
        # sync point itself as a plain string.
        if current_section:
            result.append(current_section)
        result.append(name)
        current_section = set()
    # Flush any trailing section after the last sync point.
    if current_section:
        result.append(current_section)
    return result
def arc(self, x, y, radius, start_angle, end_angle):
    """Draw an arc going counter-clockwise from start_angle to end_angle."""
    # Recorded as a deferred drawing instruction rather than drawn directly.
    self._add_instruction("arc", x, y, radius, start_angle, end_angle)
def get_all(self) -> List[Commodity]:
    """Loads all non-currency commodities, assuming they are stocks."""
    # Deterministic ordering: by namespace first, then mnemonic.
    query = (self.query.order_by(Commodity.namespace, Commodity.mnemonic))
    return query.all()
def interpret(self):
    """Generic entrypoint of `Interpreter` class."""
    # Register built-ins first, then user-defined functions, then walk the AST.
    self.load_builtins()
    self.load_functions(self.tree)
    self.visit(self.tree)
def post(self, request):
    """Respond to POSTed username/password with token."""
    serializer = AuthTokenSerializer(data=request.data)
    if not serializer.is_valid():
        return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
    user = serializer.validated_data['user']
    token, _ = ExpiringToken.objects.get_or_create(user=user)
    if token.expired():
        # If the token is expired, generate a new one.
        token.delete()
        token = ExpiringToken.objects.create(user=user)
    return Response({'token': token.key})
def git_tag_to_semver(git_tag: str) -> SemVer:
    """Search a Git tag's string representation for a SemVer and return it
    as a SemVer object.

    :git_tag: A string representation of a Git tag.
    :raises InvalidTagFormatException: if the tag contains no SemVer.
    """
    # The SemVer must appear at the very end of the tag (anchored with $).
    match = re.search(r'[0-9]+\.[0-9]+\.[0-9]+$', git_tag)
    if not match:
        raise InvalidTagFormatException('Tag passed contains no SemVer.')
    return SemVer.from_str(match.group(0))
def get_file_size(fileobj):
    """Returns the size of a file-like object."""
    # Remember the current position so the caller's cursor is undisturbed.
    saved_position = fileobj.tell()
    fileobj.seek(0, 2)  # seek relative to end-of-file
    size = fileobj.tell()
    fileobj.seek(saved_position)
    return size
def toggle_rules(self, *args):
    """Display or hide the view for constructing rules out of cards."""
    # Retarget the rules view only when it is not already showing and the
    # current selection is not a character stat.
    if self.app.manager.current != 'rules' and not isinstance(
            self.app.selected_proxy, CharStatProxy):
        self.app.rules.entity = self.app.selected_proxy
        self.app.rules.rulebook = self.app.selected_proxy.rulebook
    if isinstance(self.app.selected_proxy, CharStatProxy):
        # Character stats use the dedicated character-rules view instead.
        self.app.charrules.character = self.app.selected_proxy
        self.app.charrules.toggle()
    else:
        self.app.rules.toggle()
def get_abi_alignment(self, target_data, context=None):
    """Get the minimum ABI alignment of this type according to data layout
    *target_data*.
    """
    # Alignment is queried through an LLVM pointer to this type.
    llty = self._get_ll_pointer_type(target_data, context)
    return target_data.get_pointee_abi_alignment(llty)
def _connect(self, timeout=None):
    '''Connect to a running IPCServer.

    Retries once per second until connected, the client is closing, or the
    optional *timeout* (in seconds) has elapsed. Written as a tornado
    coroutine-style generator.
    '''
    # An int socket_path means a local TCP port; otherwise it is treated as
    # a filesystem path to a unix domain socket.
    if isinstance(self.socket_path, int):
        sock_type = socket.AF_INET
        sock_addr = ('127.0.0.1', self.socket_path)
    else:
        sock_type = socket.AF_UNIX
        sock_addr = self.socket_path
    self.stream = None
    if timeout is not None:
        timeout_at = time.time() + timeout
    while True:
        if self._closing:
            break
        # (Re)create the stream on the configured io_loop when needed.
        if self.stream is None:
            with salt.utils.asynchronous.current_ioloop(self.io_loop):
                self.stream = IOStream(socket.socket(sock_type, socket.SOCK_STREAM), )
        try:
            log.trace('IPCClient: Connecting to socket: %s', self.socket_path)
            yield self.stream.connect(sock_addr)
            self._connecting_future.set_result(True)
            break
        except Exception as e:
            if self.stream.closed():
                self.stream = None
            # With no timeout, fail on the first error; otherwise give up
            # once the deadline has passed.
            if timeout is None or time.time() > timeout_at:
                if self.stream is not None:
                    self.stream.close()
                    self.stream = None
                self._connecting_future.set_exception(e)
                break
            yield tornado.gen.sleep(1)
def generate_rest_view(config, model_cls, attrs=None, es_based=True,
                       attr_view=False, singular=False):
    """Generate a REST view for a model class.

    :param model_cls: Generated DB model class.
    :param attrs: List of strings that represent names of view methods the
        newly generated view should support. Unsupported methods are
        replaced with a property that raises AttributeError to display a
        MethodNotAllowed error.
    :param es_based: Boolean indicating if the generated view should read
        from elasticsearch. If True, collection reads are performed from
        elasticsearch; the database is used for reads otherwise.
        Defaults to True.
    :param attr_view: Boolean indicating if ItemAttributeView should be
        used as a base class for the generated view.
    :param singular: Boolean indicating if ItemSingularView should be
        used as a base class for the generated view.
    """
    valid_attrs = (list(collection_methods.values()) +
                   list(item_methods.values()))
    # BUG FIX: `attrs` defaults to None, and the original called set(None),
    # which raises TypeError whenever the argument was omitted.
    missing_attrs = set(valid_attrs) - set(attrs or [])
    if singular:
        bases = [ItemSingularView]
    elif attr_view:
        bases = [ItemAttributeView]
    elif es_based:
        bases = [ESCollectionView]
    else:
        bases = [CollectionView]
    # When ACLs are enabled, sandwich the view between the ACL mixins.
    if config.registry.database_acls:
        from nefertari_guards.view import ACLFilterViewMixin
        bases = [SetObjectACLMixin] + bases + [ACLFilterViewMixin]
    bases.append(NefertariBaseView)
    RESTView = type('RESTView', tuple(bases), {'Model': model_cls})

    def _attr_error(*args, **kwargs):
        raise AttributeError

    # Replace unsupported methods so they surface as MethodNotAllowed.
    for attr in missing_attrs:
        setattr(RESTView, attr, property(_attr_error))
    return RESTView
def get_config_from_env(cls):
    """.. deprecated:: 2.5.3

    Gets configuration out of environment.

    Returns list of dicts - list of namenode representations
    """
    # Requires HADOOP_HOME in the environment; reads the standard Hadoop
    # XML config files from its conf directory.
    core_path = os.path.join(os.environ['HADOOP_HOME'], 'conf', 'core-site.xml')
    core_configs = cls.read_core_config(core_path)
    hdfs_path = os.path.join(os.environ['HADOOP_HOME'], 'conf', 'hdfs-site.xml')
    hdfs_configs = cls.read_hdfs_config(hdfs_path)
    if (not core_configs) and (not hdfs_configs):
        raise Exception("No config found in %s nor in %s" % (core_path, hdfs_path))
    # hdfs-site.xml values take precedence over core-site.xml values.
    configs = {
        'use_trash': hdfs_configs.get('use_trash', core_configs.get('use_trash', False)),
        'use_sasl': core_configs.get('use_sasl', False),
        'hdfs_namenode_principal': hdfs_configs.get('hdfs_namenode_principal', None),
        'namenodes': hdfs_configs.get('namenodes', []) or core_configs.get('namenodes', [])
    }
    return configs
def apply(self, context, clear, split, **kwargs):
    """Extract candidates from the given Context.

    :param context: A document to process.
    :param clear: Whether or not to clear the existing database entries.
    :param split: Which split to use.
    """
    logger.debug(f"Document: {context}")
    # Iterate over each candidate class
    for i, candidate_class in enumerate(self.candidate_classes):
        logger.debug(f" Relation: {candidate_class.__name__}")
        # Generates and persists candidates
        candidate_args = {"split": split}
        candidate_args["document_id"] = context.id
        # Cartesian product of all mentions of each component class in this
        # document, ordered by mention id for determinism; enumerate keeps
        # the positional index used later for the symmetry check.
        cands = product(
            *[
                enumerate(
                    self.session.query(mention)
                    .filter(mention.document_id == context.id)
                    .order_by(mention.id)
                    .all()
                )
                for mention in candidate_class.mentions
            ]
        )
        for cand in cands:  # Apply throttler if one was given.
            # Accepts a tuple of Mention objects
            # (throttler returns whether or not proposed candidate
            # passes throttling condition)
            if self.throttlers[i]:
                if not self.throttlers[i](
                    tuple(cand[j][1] for j in range(self.arities[i]))
                ):
                    continue
            # TODO: Make this work for higher-order relations
            if self.arities[i] == 2:
                ai, a = (cand[0][0], cand[0][1].context)
                bi, b = (cand[1][0], cand[1][1].context)
                # Check for self-joins, "nested" joins (joins from context to
                # its subcontext), and flipped duplicate "symmetric" relations
                if not self.self_relations and a == b:
                    logger.debug(f"Skipping self-joined candidate {cand}")
                    continue
                if not self.nested_relations and (a in b or b in a):
                    logger.debug(f"Skipping nested candidate {cand}")
                    continue
                if not self.symmetric_relations and ai > bi:
                    logger.debug(f"Skipping symmetric candidate {cand}")
                    continue
            # Assemble candidate arguments
            for j, arg_name in enumerate(candidate_class.__argnames__):
                candidate_args[arg_name + "_id"] = cand[j][1].id
            # Checking for existence
            if not clear:
                q = select([candidate_class.id])
                for key, value in list(candidate_args.items()):
                    q = q.where(getattr(candidate_class, key) == value)
                candidate_id = self.session.execute(q).first()
                if candidate_id is not None:
                    continue
            # Add Candidate to session
            yield candidate_class(**candidate_args)
def validate_q_time(self, value):
    """Validate/normalize a date range for Solr.

    Input would be for example ``[2013-03-01 TO 2013-04-01T00:00:00]``
    and/or ``[* TO *]``. Returns a valid Solr value, e.g.
    ``[2013-03-01T00:00:00Z TO 2013-04-01T00:00:00Z]`` and/or ``[* TO *]``.
    """
    if value:
        try:
            # Renamed from `range`, which shadowed the builtin.
            solr_range = utils.parse_datetime_range_to_solr(value)
            return solr_range
        except Exception as e:
            # BUG FIX: exceptions have no `.message` attribute on Python 3;
            # use str(e) instead.
            raise serializers.ValidationError(str(e))
    return value
def schema_keys(schema):
    """Get the string values of keys in a dict-based schema.

    Non-string keys are ignored.

    Returns:
        Set of string keys of a schema which is in the form (eg):

            schema = Schema({Required("foo"): int,
                             Optional("bah"): basestring})
    """
    def _get_leaf(value):
        # Unwrap nested Schema markers until a plain key value remains.
        if isinstance(value, Schema):
            return _get_leaf(value._schema)
        return value

    keys = set()
    dict_ = schema._schema
    assert isinstance(dict_, dict)
    # NOTE(review): iterkeys/basestring are Python 2 only — this block
    # presumably targets Python 2; confirm before running under Python 3.
    for key in dict_.iterkeys():
        key_ = _get_leaf(key)
        if isinstance(key_, basestring):
            keys.add(key_)
    return keys
def append_varint32(self, value):
    """Appends a signed 32-bit integer to the internal buffer, encoded as a
    varint. (Note that a negative varint32 will always require 10 bytes of
    space.)
    """
    if not wire_format.INT32_MIN <= value <= wire_format.INT32_MAX:
        raise errors.EncodeError('Value out of range: %d' % value)
    # Negative int32 values are sign-extended to 64 bits, hence the
    # delegation to the 64-bit encoder.
    self.append_varint64(value)
def send_order(self, acc_id, amount, symbol, _type, price=0, site='Pro', _async=False):
    """Create and submit an order.

    :param acc_id: account id
    :param amount: order amount
    :param source: to trade with borrowed (margin) assets, set
        'margin-api' in the request's ``source`` field
    :param symbol: trading pair symbol
    :param _type: one of {buy-market, sell-market, buy-limit, sell-limit,
        buy-ioc, sell-ioc}
    :param price: limit price (ignored for market orders)
    :return: API response
    """
    assert site in ['Pro', 'HADAX']
    assert _type in u.ORDER_TYPE
    params = {'account-id': acc_id, 'amount': amount, 'symbol': symbol,
              'type': _type, 'source': 'api'}
    # Only limit/IOC orders carry a price.
    if price:
        params['price'] = price
    path = f'/v1{"/" if site == "Pro" else "/hadax/"}order/orders/place'
    return api_key_post(params, path, _async=_async)
def tick(self, d):
    """Ticks come in on every block."""
    if self.test_blocks:
        # Run the periodic test every `test_blocks` blocks (including the
        # very first block, when the counter is zero/unset).
        blocks_seen = self.counter["blocks"] or 0
        if blocks_seen % self.test_blocks == 0:
            self.test()
    self.counter["blocks"] += 1
def get_instance(self, payload):
    """Build an instance of ApplicationInstance.

    :param dict payload: Payload response from the API
    :returns: twilio.rest.api.v2010.account.application.ApplicationInstance
    :rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
    """
    return ApplicationInstance(self._version, payload,
                               account_sid=self._solution['account_sid'], )
def generate(env):
    """Add Builders and construction variables for lib to an Environment."""
    SCons.Tool.createStaticLibBuilder(env)
    # Set-up ms tools paths
    msvc_setup_env_once(env)
    # Configure the MSVC librarian (lib.exe) and its command line.
    env['AR'] = 'lib'
    env['ARFLAGS'] = SCons.Util.CLVar('/nologo')
    env['ARCOM'] = "${TEMPFILE('$AR $ARFLAGS /OUT:$TARGET $SOURCES','$ARCOMSTR')}"
    # Windows static libraries have no prefix and a .lib suffix.
    env['LIBPREFIX'] = ''
    env['LIBSUFFIX'] = '.lib'
def deletescript(self, name):
    """Delete a script from the server.

    See MANAGESIEVE specifications, section 2.10.

    :param name: script's name
    :rtype: boolean
    """
    code, data = self.__send_command("DELETESCRIPT", [name.encode("utf-8")])
    # The server acknowledges a successful deletion with "OK".
    return code == "OK"
def month_interval(year, month, return_str=False):
    """Return the (start, end) datetimes spanning the whole given month.

    Usage Example::

        >>> timewrapper.day_interval(2014, 12)
        (datetime(2014, 12, 1, 0, 0, 0), datetime(2014, 12, 31, 23, 59, 59))
    """
    start = datetime(year, month, 1)
    # End is one second before the first instant of the following month;
    # December rolls over into January of the next year.
    if month == 12:
        next_month_start = datetime(year + 1, 1, 1)
    else:
        next_month_start = datetime(year, month + 1, 1)
    end = next_month_start - timedelta(seconds=1)
    if return_str:
        return str(start), str(end)
    return start, end
def _call(self, f, out):
    """Implement ``self(vf, out)``."""
    for vfi, oi, ran_wi, dom_wi in zip(self.vecfield, out,
                                       self.__ran_weights, self.weights):
        vfi.multiply(f, out=oi)
        # Rescale only when range and domain weights differ measurably.
        if not np.isclose(ran_wi, dom_wi):
            oi *= dom_wi / ran_wi
def execute(self, cmd, block_size=DEFAULT_EXECUTE_READ_BLOCK_SIZE):
    """Execute a remote command. This functionality does not support more
    than one command to be executed on the same channel, so we create a
    dedicated channel at the session level than allowing direct access at
    the channel level.

    Yields chunks of the command's output as they are read.
    """
    with SshChannel(self) as sc:
        self.__log.debug("Executing command: %s" % (cmd))
        sc.open_session()
        sc.request_exec(cmd)
        buffer_ = bytearray()  # NOTE(review): appears unused — confirm
        while 1:
            bytes = sc.read(block_size)
            yield bytes
            # A short read signals the end of the output stream.
            if len(bytes) < block_size:
                break
def from_json(cls, data):
    """Create Wea from a json dictionary.

    Expected schema::

        "location": {},  // ladybug location schema
        "direct_normal_irradiance": [],  // List of hourly direct normal
                                         // irradiance data points
        "diffuse_horizontal_irradiance": [],  // List of hourly diffuse
                                              // horizontal irradiance data
        "timestep": float  // timestep between measurements, default is 1
    """
    required_keys = ('location', 'direct_normal_irradiance',
                     'diffuse_horizontal_irradiance')
    optional_keys = ('timestep', 'is_leap_year')
    for key in required_keys:
        assert key in data, 'Required key "{}" is missing!'.format(key)
    # Default missing optional keys to None so the constructor applies its
    # own defaults. Note this mutates the caller's dict.
    for key in optional_keys:
        if key not in data:
            data[key] = None
    location = Location.from_json(data['location'])
    direct_normal_irradiance = HourlyContinuousCollection.from_json(
        data['direct_normal_irradiance'])
    diffuse_horizontal_irradiance = HourlyContinuousCollection.from_json(
        data['diffuse_horizontal_irradiance'])
    timestep = data['timestep']
    is_leap_year = data['is_leap_year']
    return cls(location, direct_normal_irradiance, diffuse_horizontal_irradiance,
               timestep, is_leap_year)
def restore(self, fade=False):
    """Restore the state of a device to that which was previously saved.

    For coordinator devices restore everything. For slave devices only
    restore volume etc., not transport info (transport info comes from
    the slave's coordinator).

    Args:
        fade (bool): Whether volume should be faded up on restore.
    """
    try:
        if self.is_coordinator:
            self._restore_coordinator()
    finally:
        # Volume is restored even if restoring the coordinator state failed.
        self._restore_volume(fade)
    # Now everything is set, see if we need to be playing, stopped
    # or paused (only for coordinators)
    if self.is_coordinator:
        if self.transport_state == 'PLAYING':
            self.device.play()
        elif self.transport_state == 'STOPPED':
            self.device.stop()
def RGBA(self, val):
    """Set the color using an Nx4 array of RGBA uint8 values."""
    # Normalize 0-255 uint8 values to float32 in [0, 1].
    normalized = np.atleast_1d(val).astype(np.float32) / 255
    self.rgba = normalized
def reset_job(self, description):
    """Remove the named job from the saved state.

    This means that next time we run, the job will start over from
    scratch.

    Raises:
        JobNotFoundError: If ``description`` matches no registered job.
    """
    ordered = self._reorder_class_list(self.config.crontabber.jobs.class_list)
    for class_name, job_class in ordered:
        dotted_name = job_class.__module__ + '.' + job_class.__name__
        if description not in (job_class.app_name, dotted_name):
            continue
        if job_class.app_name in self.job_state_database:
            self.config.logger.info('App reset')
            self.job_state_database.pop(job_class.app_name)
        else:
            self.config.logger.warning('App already reset')
        return
    raise JobNotFoundError(description)
def generate(self, state):
    """Return a random existing variable, or create a new default variable."""
    threshold = random.randint(DharmaConst.VARIABLE_MIN, DharmaConst.VARIABLE_MAX)
    if self.count >= threshold:
        # Reuse one of the already-generated variables.
        return "%s%d" % (self.var, random.randint(1, self.count))
    choice = random.choice(self)
    prefix = self.eval(choice[0], state)
    suffix = self.eval(choice[1], state)
    self.count += 1
    element_name = "%s%d" % (self.var, self.count)
    self.default += "%s%s%s\n" % (prefix, element_name, suffix)
    return element_name
def _GetAttributeContainerByIndex(self, container_type, index):
    """Retrieves a specific attribute container.

    Args:
        container_type (str): attribute container type.
        index (int): attribute container index.

    Returns:
        AttributeContainer: attribute container or None if not available.

    Raises:
        IOError: when there is an error querying the storage file.
        OSError: when there is an error querying the storage file.
    """
    sequence_number = index + 1
    query = 'SELECT _data FROM {0:s} WHERE rowid = {1:d}'.format(
        container_type, sequence_number)
    try:
        self._cursor.execute(query)
    except sqlite3.OperationalError as exception:
        raise IOError(
            'Unable to query storage file with error: {0!s}'.format(exception))

    row = self._cursor.fetchone()
    if row:
        identifier = identifiers.SQLTableIdentifier(
            container_type, sequence_number)
        if self.compression_format == definitions.COMPRESSION_FORMAT_ZLIB:
            serialized_data = zlib.decompress(row[0])
        else:
            serialized_data = row[0]
        if self._storage_profiler:
            self._storage_profiler.Sample(
                'read', container_type, len(serialized_data), len(row[0]))
        attribute_container = self._DeserializeAttributeContainer(
            container_type, serialized_data)
        attribute_container.SetIdentifier(identifier)
        return attribute_container

    # Not in the table: fall back to the serialized, not-yet-committed
    # containers, re-indexing past the stored count.
    count = self._CountStoredAttributeContainers(container_type)
    index -= count
    serialized_data = self._GetSerializedAttributeContainerByIndex(
        container_type, index)
    attribute_container = self._DeserializeAttributeContainer(
        container_type, serialized_data)
    if attribute_container:
        identifier = identifiers.SQLTableIdentifier(
            container_type, sequence_number)
        attribute_container.SetIdentifier(identifier)
    return attribute_container
def retrieve(self, request, project, pk=None):
    """Return a job_log_url object given its ID."""
    job_log = JobLog.objects.get(id=pk)
    return Response(self._log_as_dict(job_log))
def rate_of_change(data, period):
    """Rate of Change.

    Formula:
        (Close - Close n periods ago) / (Close n periods ago) * 100
    """
    catch_errors.check_for_period_error(data, period)
    span = period - 1
    rocs = [
        ((data[idx] - data[idx - span]) / data[idx - span]) * 100
        for idx in range(span, len(data))
    ]
    # Pad the leading positions that cannot be computed.
    return fill_for_noncomputable_vals(data, rocs)
def _extractAssociation(self, assoc_response, assoc_session):
    """Attempt to extract an association from the response, given the
    association response message and the established association session.

    @param assoc_response: The association response message from
        the server
    @type assoc_response: openid.message.Message

    @param assoc_session: The association session object that was
        used when making the request
    @type assoc_session: depends on the session type of the request

    @raises ProtocolError: when data is malformed
    @raises KeyError: when a field is missing

    @rtype: openid.association.Association
    """
    # Extract the common fields from the response, raising an
    # exception if they are not found
    assoc_type = assoc_response.getArg(OPENID_NS, 'assoc_type', no_default)
    assoc_handle = assoc_response.getArg(OPENID_NS, 'assoc_handle', no_default)

    # expires_in is a base-10 string. The Python parsing will
    # accept literals that have whitespace around them and will
    # accept negative values. Neither of these are really in-spec,
    # but we think it's OK to accept them.
    expires_in_str = assoc_response.getArg(OPENID_NS, 'expires_in', no_default)
    try:
        expires_in = int(expires_in_str)
    except ValueError as why:
        # NOTE: "except E as v" with "v.args[0]" replaces the Python 2-only
        # "except E, v" / "v[0]" forms; behavior is identical on Python 2.6+
        # and this version also parses on Python 3.
        raise ProtocolError('Invalid expires_in field: %s' % (why.args[0],))

    # OpenID 1 has funny association session behaviour.
    if assoc_response.isOpenID1():
        session_type = self._getOpenID1SessionType(assoc_response)
    else:
        session_type = assoc_response.getArg(
            OPENID2_NS, 'session_type', no_default)

    # Session type mismatch
    if assoc_session.session_type != session_type:
        if (assoc_response.isOpenID1() and session_type == 'no-encryption'):
            # In OpenID 1, any association request can result in a
            # 'no-encryption' association response. Setting
            # assoc_session to a new no-encryption session should
            # make the rest of this function work properly for
            # that case.
            assoc_session = PlainTextConsumerSession()
        else:
            # Any other mismatch, regardless of protocol version
            # results in the failure of the association session
            # altogether.
            fmt = 'Session type mismatch. Expected %r, got %r'
            message = fmt % (assoc_session.session_type, session_type)
            raise ProtocolError(message)

    # Make sure assoc_type is valid for session_type
    if assoc_type not in assoc_session.allowed_assoc_types:
        fmt = 'Unsupported assoc_type for session %s returned: %s'
        raise ProtocolError(fmt % (assoc_session.session_type, assoc_type))

    # Delegate to the association session to extract the secret
    # from the response, however is appropriate for that session
    # type.
    try:
        secret = assoc_session.extractSecret(assoc_response)
    except ValueError as why:
        fmt = 'Malformed response for %s session: %s'
        raise ProtocolError(fmt % (assoc_session.session_type, why.args[0]))

    return Association.fromExpiresIn(
        expires_in, assoc_handle, secret, assoc_type)
def image_id(self):
    """ID of the image this container was created from."""
    try:
        # docker >= 1.9 exposes the ID directly.
        return self.data["ImageID"]
    except KeyError:
        # docker <= 1.8 keeps it under "Image".
        return self.metadata_get(["Image"])
def bitop_not(self, dest, key):
    """Perform bitwise NOT operations between strings."""
    command = (b'BITOP', b'NOT', dest, key)
    return self.execute(*command)
def _game_image_from_screen(self, game_type):
    """Return the image of the given game type cropped from the screen.

    Returns None if no game is found.
    """
    screen_img = self._screen_shot()
    game_rect = self._game_finders[game_type].locate_in(screen_img)
    if game_rect is None:
        return None
    top, left, bottom, right = game_rect
    return screen_img[top:bottom, left:right]
def gdsii_hash(filename, engine=None):
    """Calculate a hash value for a GDSII file.

    The hash is generated based only on the contents of the cells in the
    GDSII library, ignoring any timestamp records present in the file
    structure.

    Parameters
    ----------
    filename : string
        Full path to the GDSII file.
    engine : hashlib-like engine
        The engine that executes the hashing algorithm.  It must provide
        the methods ``update`` and ``hexdigest`` as defined in the
        hashlib module.  If ``None``, the default ``hashlib.sha1()`` is
        used.

    Returns
    -------
    out : string
        The hash corresponding to the library contents in hex format.
    """
    with open(filename, 'rb') as fin:
        data = fin.read()
    contents = []
    start = pos = 0
    while pos < len(data):
        size, rec = struct.unpack('>HH', data[pos:pos + 4])
        if rec == 0x0502:  # BGNSTR: cell starts after the 28-byte header
            start = pos + 28
        elif rec == 0x0700:  # ENDSTR: capture the cell contents
            contents.append(data[start:pos])
        pos += size
    h = hashlib.sha1() if engine is None else engine
    # Sort so the hash is independent of cell order within the library.
    for x in sorted(contents):
        h.update(x)
    return h.hexdigest()
def parse_named_type(lexer: Lexer) -> NamedTypeNode:
    """NamedType: Name"""
    start = lexer.token
    name = parse_name(lexer)
    return NamedTypeNode(name=name, loc=loc(lexer, start))
def OnRowSize(self, event):
    """Row size event handler."""
    row = event.GetRowOrCol()
    tab = self.grid.current_table
    zoom = self.grid.grid_renderer.zoom
    rowsize = self.grid.GetRowSize(row) / zoom

    # Resizing may apply to a whole group of selected rows.
    rows = self.grid.GetSelectedRows()
    if not rows:
        rows = [row]

    # Rows inside selections that span every column are resized too.
    selection = self.grid.selection
    num_cols = self.grid.code_array.shape[1] - 1
    for box in zip(selection.block_tl, selection.block_br):
        leftmost_col = box[0][1]
        rightmost_col = box[1][1]
        if leftmost_col == 0 and rightmost_col == num_cols:
            rows += range(box[0][0], box[1][0] + 1)

    # All row resizing is undone in one click.
    with undo.group(_("Resize Rows")):
        for target_row in rows:
            self.grid.code_array.set_row_height(target_row, tab, rowsize)
            self.grid.SetRowSize(target_row, rowsize * zoom)

    # Mark content as changed.
    post_command_event(self.grid.main_window, self.grid.ContentChangedMsg)
    event.Skip()
    self.grid.ForceRefresh()
def get_block_overview(block_representation, coin_symbol='btc', txn_limit=None,
                       txn_offset=None, api_key=None):
    """Get an overview of a block, including up to X transaction ids.

    Note that ``block_representation`` may be the block number or the
    block hash.
    """
    assert is_valid_coin_symbol(coin_symbol)
    assert is_valid_block_representation(
        block_representation=block_representation, coin_symbol=coin_symbol)

    url = make_url(coin_symbol, **dict(blocks=block_representation))

    params = {}
    if api_key:
        params['token'] = api_key
    if txn_limit:
        params['limit'] = txn_limit
    if txn_offset:
        params['txstart'] = txn_offset

    r = requests.get(url, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS)
    response_dict = get_valid_json(r)
    if 'error' in response_dict:
        return response_dict
    return _clean_block(response_dict=response_dict)
def objectMD5(obj):
    '''Get md5 of an object.

    Targets identify themselves via ``target_name``; any other object is
    hashed through its pickle representation.  Objects that cannot be
    pickled yield the empty string.
    '''
    if hasattr(obj, 'target_name'):
        return obj.target_name()
    try:
        return textMD5(pickle.dumps(obj))
    except Exception:
        # Was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; only ordinary failures should be silenced.
        return ''
def nvmlDeviceGetCurrPcieLinkWidth(handle):
    r"""Retrieve the current PCIe link width.

    For Fermi or newer fully supported devices.

    @param handle: The identifier of the target device
    @return: the current PCIe link width

    Raises on NVML errors (uninitialized library, invalid argument,
    unsupported device, lost GPU, or unknown error) via _nvmlCheckReturn.

    Wraps: nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth
    """
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkWidth")
    width = c_uint()
    status = fn(handle, byref(width))
    _nvmlCheckReturn(status)
    return bytes_to_str(width.value)
def replace_category_by_id(cls, category_id, category, **kwargs):
    """Replace Category.

    Replace all attributes of Category.  This method makes a synchronous
    HTTP request by default.  To make an asynchronous HTTP request, pass
    ``async=True``::

        >>> thread = api.replace_category_by_id(category_id, category, async=True)
        >>> result = thread.get()

    :param async bool
    :param str category_id: ID of category to replace (required)
    :param Category category: Attributes of category to replace (required)
    :return: Category.  If the method is called asynchronously, returns
        the request thread.
    """
    kwargs['_return_http_data_only'] = True
    result = cls._replace_category_by_id_with_http_info(
        category_id, category, **kwargs)
    # For async calls the helper already returns the request thread.
    return result
def create_mutation_file(self, list_of_tuples):
    """Create the FoldX file 'individual_list.txt' to run BuildModel upon.

    Args:
        list_of_tuples (list): A list of tuples indicating mutation groups
            to carry out BuildModel upon. Example::

                (('N', 'A', 308, 'S'), ('S', 'A', 320, 'T'), ('S', 'A', 321, 'H')),  # Mutation group 1
                (('S', 'A', 321, 'R'), ('T', 'A', 345, 'S'))  # Mutation group 2
    """
    self.mutation_infile = op.join(self.foldx_dir, 'individual_list.txt')
    with open(self.mutation_infile, 'w') as handle:
        for idx, mutant_group in enumerate(list_of_tuples, start=1):
            # One semicolon-terminated mutation string per line.
            mutstring = ''.join(
                '{}{}{}{};'.format(*mutation) for mutation in mutant_group)
            handle.write(mutstring + '\n')
            # Keep track of the index used for this mutation group.
            self.mutation_index_to_group[idx] = mutant_group
def unstage_signature(vcs, signature):
    """Remove ``signature`` from the list of staged signatures.

    Args:
        vcs (easyci.vcs.base.Vcs)
        signature (basestring)

    Raises:
        NotStagedError
    """
    evidence_path = _get_staged_history_path(vcs)
    staged = get_staged_signatures(vcs)
    if signature not in staged:
        raise NotStagedError
    staged.remove(signature)
    with open(evidence_path, 'w') as handle:
        handle.write('\n'.join(staged))
def avl_insert_dir(root, new_node, direction=1):
    """Insert a single node at the extreme left (direction=0) or right
    (direction=1) of the tree rooted at ``root``.

    Returns the (possibly new) root after AVL rebalancing.

    CommandLine:
        python -m utool.experimental.euler_tour_tree_avl avl_insert_dir --show
        python -m utool.experimental.euler_tour_tree_avl avl_insert_dir
    """
    if root is None:
        return new_node
    assert new_node.parent is None, str((new_node, new_node.parent))
    assert new_node.left is None
    assert new_node.right is None
    assert root.parent is None

    # Walk all the way in the requested direction, recording the path.
    path = []
    node = root
    while True:
        path.append(node)
        if node[direction] is None:
            break
        node = node[direction]
    extreme_node = node

    # Insert the new node at the bottom of the tree.
    extreme_node.set_child(direction, new_node)

    new_root = root
    other_side = 1 - direction
    done = False
    depth = len(path) - 1

    # Walk back up the search path, rebalancing as necessary.
    while depth >= 0 and not done:
        top_node = path[depth]
        dir_height = height(top_node[direction])
        other_height = height(top_node[other_side])
        if dir_height - other_height == 0:
            done = True
        if dir_height - other_height >= 2:
            # Determine which rotation is required.
            inner = top_node[direction]
            if height(inner[direction]) >= height(inner[other_side]):
                path[depth] = avl_rotate_single(top_node, other_side)
            else:
                path[depth] = avl_rotate_double(top_node, other_side)
            # Fix the parent link, or the root if we rotated at the top.
            if depth != 0:
                path[depth - 1].set_child(direction, path[depth])
            else:
                new_root = path[0]
                new_root.parent = None
                done = True
        # Update the balance factor of the (possibly rotated) node.
        top_node = path[depth]
        dir_height = height(top_node[direction])
        other_height = height(top_node[other_side])
        top_node.balance = max(dir_height, other_height) + 1
        depth -= 1

    assert new_root.parent is None
    return new_root
def load_product_class(self, mode):
    """Load recipe object, according to observing mode."""
    return self._get_base_class(self.products[mode])
def normal(nmr_distributions, nmr_samples, mean=0, std=1, ctype='float', seed=None):
    """Draw random samples from the Gaussian distribution.

    Args:
        nmr_distributions (int): the number of unique continuous_distributions to create
        nmr_samples (int): The number of samples to draw
        mean (float or ndarray): The mean of the distribution
        std (float or ndarray): The standard deviation or the distribution
        ctype (str): the C type of the output samples
        seed (float): the seed for the RNG

    Returns:
        ndarray: A two dimensional numpy array as (nmr_distributions, nmr_samples).
    """
    # Broadcast scalar parameters to one value per distribution.
    if is_scalar(mean):
        mean = np.ones((nmr_distributions, 1)) * mean
    if is_scalar(std):
        std = np.ones((nmr_distributions, 1)) * std

    kernel_data = {
        'mean': Array(mean, as_scalar=True),
        'std': Array(std, as_scalar=True),
    }
    kernel_source = '''
        void compute(double mean, double std, global uint* rng_state, global ''' + ctype + '''* samples){
            rand123_data rand123_rng_data = rand123_initialize_data((uint[]){
                rng_state[0], rng_state[1], rng_state[2], rng_state[3],
                rng_state[4], rng_state[5], 0});
            void* rng_data = (void*)&rand123_rng_data;

            for(uint i = 0; i < ''' + str(nmr_samples) + '''; i++){
                double4 randomnr = randn4(rng_data);
                samples[i] = (''' + ctype + ''')(mean + randomnr.x * std);
            }
        }
    '''
    kernel = SimpleCLFunction.from_string(kernel_source, dependencies=[Rand123()])
    return _generate_samples(kernel, nmr_distributions, nmr_samples, ctype,
                             kernel_data, seed=seed)
def info(self, text=None):
    """Show and persist the info symbol and text, then exit.

    Parameters
        text : None, optional
            Text to be shown alongside the info symbol.

    Returns
        self
    """
    symbol = LogSymbols.INFO.value
    return self.stop_and_persist(symbol=symbol, text=text)
def AtMaximumDepth(self, search_depth):
    """Determine if the find specification is at maximum depth.

    Args:
        search_depth (int): number of key path segments to compare.

    Returns:
        bool: True if at maximum depth, False if not.
    """
    if self._key_path_segments is None:
        # No key path means there is no depth limit to hit.
        return False
    return search_depth >= self._number_of_key_path_segments
def path_param(name, ns):
    """Build a path parameter definition."""
    # Only uuid identifiers carry an explicit string format.
    param_format = "uuid" if ns.identifier_type == "uuid" else None
    kwargs = {
        "name": name,
        "in": "path",
        "required": True,
        "type": "string",
    }
    if param_format:
        kwargs["format"] = param_format
    return swagger.PathParameterSubSchema(**kwargs)
def use(wcspkg, raise_err=True):
    """Choose WCS package."""
    global coord_types, wcs_configured, WCS
    if wcspkg not in common.custom_wcs:
        # Try to dynamically load the WCS module from the wcs home dir.
        modname = 'wcs_%s' % (wcspkg)
        path = os.path.join(wcs_home, '%s.py' % (modname))
        try:
            my_import(modname, path)
        except ImportError:
            return False
    if wcspkg not in common.custom_wcs:
        return False
    bnch = common.custom_wcs[wcspkg]
    WCS = bnch.wrapper_class
    coord_types = bnch.coord_types
    wcs_configured = True
    return True
def processing_blocks(self):
    """Return a JSON dict encoding the PBs known to SDP."""
    pb_list = ProcessingBlockList()
    # TODO(BMo) realtime, offline etc.
    state = dict(
        active=pb_list.active,
        completed=pb_list.completed,
        aborted=pb_list.aborted,
    )
    return json.dumps(state)
def find_sum_of_first_even_and_odd(array):
    """Calculate the sum of the first even and first odd number in a list.

    >>> find_sum_of_first_even_and_odd([1, 3, 5, 7, 4, 1, 6, 8])
    5
    >>> find_sum_of_first_even_and_odd([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    3
    >>> find_sum_of_first_even_and_odd([1, 5, 7, 9, 10])
    11

    Args:
        array: List of integers in which to search.

    Returns:
        Sum of first even and first odd integer found in the list.  If
        either of them isn't present, we consider its value as -1.
    """
    first_even = next((num for num in array if num % 2 == 0), -1)
    first_odd = next((num for num in array if num % 2 != 0), -1)
    return first_even + first_odd
def safe_walk(top, topdown=True, onerror=None, followlinks=True, _seen=None):
    '''A clone of the python os.walk function with some checks for
    recursive symlinks.  Unlike os.walk this follows symlinks by default.
    '''
    if _seen is None:
        _seen = set()

    # We may not have read permission for top, in which case we can't get
    # a list of the files the directory contains.  os.path.walk always
    # suppressed the exception then, rather than blow up for a minor
    # reason when (say) a thousand readable directories are still left to
    # visit.  That logic is copied here.
    try:
        names = os.listdir(top)
    except os.error as err:
        if onerror is not None:
            onerror(err)
        return

    if followlinks:
        status = os.stat(top)
        # st_ino is always 0 on some filesystems (FAT, NTFS); ignore them.
        if status.st_ino != 0:
            node = (status.st_dev, status.st_ino)
            if node in _seen:
                # Already visited: a symlink loop.
                return
            _seen.add(node)

    dirs, nondirs = [], []
    for name in names:
        if os.path.isdir(os.path.join(top, name)):
            dirs.append(name)
        else:
            nondirs.append(name)

    if topdown:
        yield top, dirs, nondirs
    for name in dirs:
        new_path = os.path.join(top, name)
        if followlinks or not os.path.islink(new_path):
            for entry in safe_walk(new_path, topdown, onerror, followlinks, _seen):
                yield entry
    if not topdown:
        yield top, dirs, nondirs
def add_ylabel(self, text=None):
    """Add a label to the y-axis."""
    dependent = self.fit.meta['dependent']
    if not text:
        # Compose a TeX label with siunitx units from the fit metadata.
        text = ('$' + dependent['tex_symbol'] + r'$ $(\si{'
                + dependent['siunitx'] + r'})$')
    self.plt.set_ylabel(text)
def getGroupContentItems(self, groupName):
    """Get all the items owned by a group (or groups).

    Args:
        groupName (list): The name of the group(s) from which to get items.
    Returns:
        list: A list of items belonging to the group(s).
    Notes:
        If you want to get items from a single group, ``groupName`` can be
        passed as a :py:obj:`str`.
    See Also:
        :py:func:`getGroupContent` for retrieving all content, not just items.
    """
    admin = None
    userCommunity = None
    groupIds = None
    groupId = None
    groupContent = None
    result = None
    item = None
    items = []
    try:
        admin = arcrest.manageorg.Administration(
            securityHandler=self._securityHandler)
        userCommunity = admin.community
        groupIds = userCommunity.getGroupIDs(groupNames=groupName)
        if groupIds is not None:
            for groupId in groupIds:
                groupContent = admin.content.groupContent(groupId=groupId)
                if 'error' in groupContent:
                    print(groupContent)
                    continue
                for result in groupContent['items']:
                    item = admin.content.getItem(itemId=result['id'])
                    items.append(item)
        return items
    except:
        line, filename, synerror = trace()
        raise common.ArcRestHelperError({
            "function": "getGroupContent",
            "line": line,
            "filename": filename,
            "synerror": synerror,
        })
    finally:
        # Drop all references and force a collection pass.
        admin = None
        userCommunity = None
        groupIds = None
        groupId = None
        groupContent = None
        result = None
        item = None
        del admin
        del userCommunity
        del groupIds
        del groupId
        del groupContent
        del result
        del item
        gc.collect()
def gaussian2d(xy, sx, sy, mx=0, my=0, rho=0, amp=1, offs=0):
    '''Bivariate normal probability density of a vector [x, y].

    See http://en.wikipedia.org/wiki/Multivariate_normal_distribution

    sx, sy -> sigma (standard deviation)
    mx, my: mue (mean position)
    rho: correlation between x and y
    '''
    x, y = xy
    one_minus_rho2 = 1 - rho ** 2
    norm = 1 / (2 * np.pi * sx * sy * one_minus_rho2 ** 0.5)
    quad = (((x - mx) ** 2 / sx ** 2)
            + ((y - my) ** 2 / sy ** 2)
            - (2 * rho * (x - mx) * (y - my)) / (sx * sy))
    return offs + amp * norm * np.exp(-quad / (2 * one_minus_rho2))
def org_set_member_access(object_id, input_params={}, always_retry=True, **kwargs):
    """Invokes the /org-xxxx/setMemberAccess API method.

    For more info, see:
    https://wiki.dnanexus.com/API-Specification-v1.0.0/Organizations#API-method%3A-%2Forg-xxxx%2FsetMemberAccess
    """
    resource = '/%s/setMemberAccess' % object_id
    return DXHTTPRequest(resource, input_params, always_retry=always_retry, **kwargs)
def del_host_downtime(self, downtime_id):
    """Delete a host downtime.

    Format of the line that triggers function call::

        DEL_HOST_DOWNTIME;<downtime_id>

    :param downtime_id: downtime id to delete
    :type downtime_id: int
    :return: None
    """
    broks = []
    for host in self.daemon.hosts:
        if downtime_id in host.downtimes:
            broks.extend(host.downtimes[downtime_id].cancel(
                self.daemon.timeperiods, self.daemon.hosts,
                self.daemon.services))
            break
    else:
        # No host carries this downtime: log a warning instead.
        self.send_an_element(make_monitoring_log(
            'warning',
            'DEL_HOST_DOWNTIME: downtime id: %s does not exist '
            'and cannot be deleted.' % downtime_id))
    for brok in broks:
        self.send_an_element(brok)
def add_link(self, referenced, dependent):
    """Add a link between two relations to the database.  Both the old
    and new entries must already exist in the database.

    The dependent model refers _to_ the referenced model.  So, given
    arguments of (jake_test, bar, jake_test, foo): both values are in the
    schema jake_test and foo is a view that refers to bar, so
    "drop bar cascade" will drop foo and all of foo's dependents.

    :param BaseRelation referenced: The referenced model.
    :param BaseRelation dependent: The dependent model.
    :raises InternalError: If either entry does not exist.
    """
    referenced = _make_key(referenced)
    if (referenced.database, referenced.schema) not in self:
        # If we have not cached the referenced schema at all, we must be
        # referring to a table outside our control.  There's no need to
        # make a link - we will never drop the referenced relation
        # during a run.
        logger.debug(
            '{dep!s} references {ref!s} but {ref.database}.{ref.schema} '
            'is not in the cache, skipping assumed external relation'
            .format(dep=dependent, ref=referenced))
        return
    dependent = _make_key(dependent)
    logger.debug(
        'adding link, {!s} references {!s}'.format(dependent, referenced))
    with self.lock:
        self._add_link(referenced, dependent)
def get_tensor_info(self):
    """See base class for details."""
    def add_length_dim(tensor_info):
        # Prepend the sequence-length dimension to every feature shape.
        return feature_lib.TensorInfo(
            shape=(self._length,) + tensor_info.shape,
            dtype=tensor_info.dtype,
        )

    base_info = super(SequenceDict, self).get_tensor_info()
    return utils.map_nested(add_length_dim, base_info)
def deep_merge_dicts(base, incoming):
    """Performs an *in-place* deep-merge of key-values from :attr:`incoming`
    into :attr:`base`.  No attempt is made to preserve the original state
    of the objects passed in as arguments.

    :param dict base: The target container for the merged values.  This
        will be modified *in-place*.
    :type base: Any :class:`dict`-like object
    :param dict incoming: The container from which incoming values will
        be copied.  Nested dicts in this will be modified.
    :type incoming: Any :class:`dict`-like object
    :rtype: None
    """
    # ``iteritems()`` and ``collections.MutableMapping`` were Python 2
    # only; ``items()`` / ``collections.abc.MutableMapping`` work on
    # Python 3 (the bare alias was removed in 3.10).
    for key, value in incoming.items():
        if (key in base
                and isinstance(value, collections.abc.MutableMapping)
                and isinstance(base[key], collections.abc.MutableMapping)):
            deep_merge_dicts(base[key], value)
        else:
            base[key] = value
def getPersonPluginWidget(self, name):
    """Return the L{PersonPluginView} for the named person.

    @type name: C{unicode}
    @param name: A value which corresponds to the I{name} attribute of an
        extant L{Person}.

    @rtype: L{PersonPluginView}
    """
    plugins = self.organizer.getOrganizerPlugins()
    person = self.organizer.personByName(name)
    fragment = PersonPluginView(plugins, person)
    fragment.setFragmentParent(self)
    return fragment
def in_32(library, session, space, offset, extended=False):
    """Reads in an 32-bit value from the specified memory space and offset.

    Corresponds to viIn32* function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register from which to read.
    :param extended: Use 64 bits offset independent of the platform.
    :return: Data read from memory, return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    value_32 = ViUInt32()
    # The *Ex variant takes a 64-bit offset regardless of platform.
    reader = library.viIn32Ex if extended else library.viIn32
    ret = reader(session, space, offset, byref(value_32))
    return value_32.value, ret
def _is_indirect_jump ( _ , sim_successors ) :
"""Determine if this SimIRSB has an indirect jump as its exit""" | if sim_successors . artifacts [ 'irsb_direct_next' ] : # It ' s a direct jump
return False
default_jumpkind = sim_successors . artifacts [ 'irsb_default_jumpkind' ]
if default_jumpkind not in ( 'Ijk_Call' , 'Ijk_Boring' , 'Ijk_InvalICache' ) : # It ' s something else , like a ret of a syscall . . . we don ' t care about it
return False
return True |
def interpret(self):
    """Interprets the console on the loaded code.

    RETURNS:
        bool; True if the code passes, False otherwise.
    """
    if not self._interpret_lines(self._setup):
        return False
    passed = self._interpret_lines(self._code, compare_all=True)
    # Teardown must also succeed for the run to pass.
    passed &= self._interpret_lines(self._teardown)
    return passed
def register(cls, use_admin=True):
    """Register with the API a :class:`sandman.model.Model` class and
    associated endpoint.

    :param cls: User-defined class derived from :class:`sandman.model.Model` to
        be registered with the endpoint returned by :func:`endpoint()`
    :type cls: :class:`sandman.model.Model` or tuple
    """
    with app.app_context():
        # Lazily create the registry the first time anything registers.
        if getattr(current_app, 'class_references', None) is None:
            current_app.class_references = {}
        # Accept either a single model class or a collection of them.
        models = cls if isinstance(cls, (list, tuple)) else (cls,)
        for model in models:
            register_internal_data(model)
            model.use_admin = use_admin
def unhold(self):
    '''Turn off any active document hold and apply any collected events.

    Returns:
        None
    '''
    # Nothing to do when no hold is active.
    if self._hold is None:
        return
    self._hold = None
    # Snapshot and clear before triggering, so callbacks that queue new
    # events do not end up in the batch being replayed here.
    pending, self._held_events = list(self._held_events), []
    for ev in pending:
        self._trigger_on_change(ev)
def release(ctx, deploy=False, test=False, version=''):
    """Tag release, run Travis-CI, and deploy to PyPI"""
    if test:
        # Dry-run upload to validate packaging without publishing.
        run("python setup.py check")
        run("python setup.py register sdist upload --dry-run")
    if deploy:
        run("python setup.py check")
        if version:
            # Tag, push, build and publish in one pass.
            for cmd in (
                "git checkout master",
                "git tag -a v{ver} -m 'v{ver}'".format(ver=version),
                "git push",
                "git push origin --tags",
                "python setup.py sdist bdist_wheel",
                "twine upload --skip-existing dist/*",
            ):
                run(cmd)
        else:
            # No version supplied: print the pre-release checklist instead.
            checklist = (
                "- Have you updated the version?",
                "- Have you updated CHANGELOG.md, README.md, and AUTHORS.md?",
                "- Have you fixed any last minute bugs?",
                "- Have you merged changes for release into the master branch?",
                "If you answered yes to all of the above questions,",
                "then run `inv release --deploy -vX.YY.ZZ` to:",
                "- Checkout master",
                "- Tag the git release with provided vX.YY.ZZ version",
                "- Push the master branch and tags to repo",
            )
            for line in checklist:
                print(line)
def avail_sizes(call=None):
    '''Return a list of sizes from Azure'''
    if call == 'action':
        raise SaltCloudSystemExit(
            'The avail_sizes function must be called with '
            '-f or --function, or with the --list-sizes option'
        )
    sizes = get_conn().list_role_sizes()
    # Key each role size by name, serialized to a plain dict.
    return {size.name: object_to_dict(size) for size in sizes.role_sizes}
def on_transmit(self, broker):
    """Transmit buffered messages.

    Writes at most one queued buffer per call; the broker re-invokes this
    handler while the stream remains registered for transmit.
    """
    _vv and IOLOG.debug('%r.on_transmit()', self)
    if self._output_buf:
        buf = self._output_buf.popleft()
        written = self.transmit_side.write(buf)
        if not written:
            # A zero/None result from write() is treated as the peer having
            # gone away; tear the stream down.
            _v and LOG.debug('%r.on_transmit(): disconnection detected', self)
            self.on_disconnect(broker)
            return
        elif written != len(buf):
            # Partial write: requeue at the head so ordering is preserved.
            # NOTE(review): presumably BufferType(buf, written) yields a view
            # of the unwritten tail starting at offset `written` — confirm.
            self._output_buf.appendleft(BufferType(buf, written))
        _vv and IOLOG.debug('%r.on_transmit() -> len %d', self, written)
        # Track the remaining queued byte count.
        self._output_buf_len -= written
    if not self._output_buf:
        # Everything flushed; stop watching the descriptor for writability.
        broker._stop_transmit(self)
def parse_email ( self , email_info ) :
"""Allows passing emails as " Example Name < example @ example . com > "
: param email _ info : Allows passing emails as
" Example Name < example @ example . com > "
: type email _ info : string""" | name , email = rfc822 . parseaddr ( email_info )
# more than likely a string was passed here instead of an email address
if "@" not in email :
name = email
email = None
if not name :
name = None
if not email :
email = None
self . name = name
self . email = email
return name , email |
def to_protobuf(self):
    """Convert object to a protobuf message"""
    self._validate()
    # Recursively serialize every declared parameter before constructing
    # the protobuf message class.
    fields = {}
    for param in self._get_params():
        fields[param] = _convert(getattr(self, param), 'to_protobuf')
    return self._protobuf_cls(**fields)
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant Apple Account entries.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    accounts = match.get('Accounts', {})
    for name_account, account in iter(accounts.items()):
        first_name = account.get('FirstName', '<FirstName>')
        last_name = account.get('LastName', '<LastName>')
        general_description = '{0:s} ({1:s} {2:s})'.format(
            name_account, first_name, last_name)

        event_data = plist_event.PlistTimeEventData()
        event_data.key = name_account
        event_data.root = '/Accounts'

        # Each timestamp key maps to the description template of the
        # activity it records; emit one event per key that is present.
        for plist_key, description in (
                ('CreationDate', 'Configured Apple account {0:s}'),
                ('LastSuccessfulConnect', 'Connected Apple account {0:s}'),
                ('ValidationDate', 'Last validation Apple account {0:s}')):
            datetime_value = account.get(plist_key, None)
            if not datetime_value:
                continue
            event_data.desc = description.format(general_description)
            event = time_events.PythonDatetimeEvent(
                datetime_value, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
def get_pip_options(args=None, sources=None, pip_command=None):
    """Build a pip command from a list of sources

    :param args: positional arguments passed through to the pip parser,
        defaults to None (treated as an empty list)
    :param sources: A list of pipfile-formatted sources, defaults to None
    :type sources: list[dict], optional
    :param pip_command: A pre-built pip command instance
    :type pip_command: :class:`~pip._internal.cli.base_command.Command`
    :return: An instance of pip_options using the supplied arguments plus sane defaults
    :rtype: :class:`~pip._internal.cli.cmdoptions`
    """
    # BUGFIX: the previous default of ``args=[]`` was a shared mutable
    # default; prepare_pip_source_args extends the list it is given, so
    # source arguments leaked between calls. Use a None sentinel instead.
    if args is None:
        args = []
    if not pip_command:
        pip_command = get_pip_command()
    if not sources:
        sources = [{"url": "https://pypi.org/simple", "name": "pypi", "verify_ssl": True}]
    _ensure_dir(CACHE_DIR)
    pip_args = prepare_pip_source_args(sources, args)
    pip_options, _ = pip_command.parser.parse_args(pip_args)
    pip_options.cache_dir = CACHE_DIR
    return pip_options
def get_user_profile_photos(self, user_id, offset=None, limit=None):
    """Use this method to get a list of profile pictures for a user.

    https://core.telegram.org/bots/api#getuserprofilephotos

    Parameters:

    :param user_id: Unique identifier of the target user
    :type  user_id: int

    Optional keyword parameters:

    :param offset: Sequential number of the first photo to be returned. By default, all photos are returned.
    :type  offset: int

    :param limit: Limits the number of photos to be retrieved. Values between 1-100 are accepted. Defaults to 100.
    :type  limit: int

    Returns:

    :return: Returns a UserProfilePhotos object
    :rtype:  pytgbot.api_types.receivable.media.UserProfilePhotos
    """
    assert_type_or_raise(user_id, int, parameter_name="user_id")
    assert_type_or_raise(offset, None, int, parameter_name="offset")
    assert_type_or_raise(limit, None, int, parameter_name="limit")
    result = self.do("getUserProfilePhotos", user_id=user_id, offset=offset, limit=limit)
    if not self.return_python_objects:
        # Caller asked for the raw API payload.
        return result
    logger.debug("Trying to parse {data}".format(data=repr(result)))
    from pytgbot.api_types.receivable.media import UserProfilePhotos
    try:
        return UserProfilePhotos.from_array(result)
    except TgApiParseException:
        logger.debug("Failed parsing as api_type UserProfilePhotos", exc_info=True)
    # No valid parsing succeeded; see debug log for details.
    raise TgApiParseException("Could not parse result.")
def walk_skip_hidden(top, onerror=None, followlinks=False):
    """A wrapper for `os.walk` that skips hidden files and directories.

    This function does not have the parameter `topdown` from `os.walk`:
    the directories must always be recursed top-down when using this
    function.

    See also
    --------
    os.walk : For a description of the parameters
    """
    walker = os.walk(top, topdown=True, onerror=onerror, followlinks=followlinks)
    for root, dirs, files in walker:
        # Prune in place: os.walk only honors removals when the same
        # `dirs` list object is modified, so assign via slicing.
        dirs[:] = [name for name in dirs if not is_path_hidden(name)]
        files[:] = [name for name in files if not is_path_hidden(name)]
        yield root, dirs, files
def reversed(self):
    '''Returns a copy of this arc, with the direction flipped.

    >>> Arc((0, 0), 1, 0, 360, True).reversed()
    Arc([0.000, 0.000], 1.000, 360.000, 0.000, False, degrees=360.000)
    >>> Arc((0, 0), 1, 175, -175, True).reversed()
    Arc([0.000, 0.000], 1.000, -175.000, 175.000, False, degrees=10.000)
    >>> Arc((0, 0), 1, 0, 370, True).reversed()
    Arc([0.000, 0.000], 1.000, 370.000, 0.000, False, degrees=360.000)
    '''
    # Swap the endpoint angles and invert the winding direction.
    flipped = not self.direction
    return Arc(self.center, self.radius, self.to_angle, self.from_angle, flipped)
def _set_member_vlan(self, v, load=False):
    """Setter method for member_vlan, mapped from YANG variable
    /topology_group/member_vlan (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_member_vlan is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_member_vlan() directly.
    """
    # Unwrap values that carry their original user type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in the generated YANG container class; this
        # raises when v is not compatible with the container schema.
        t = YANGDynClass(v, base=member_vlan.member_vlan, is_container='container', presence=False, yang_name="member-vlan", rest_name="member-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member VLANs for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface the generated-type description so the caller can see the
        # exact schema constraints that were violated.
        raise ValueError({'error-string': """member_vlan must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=member_vlan.member_vlan, is_container='container', presence=False, yang_name="member-vlan", rest_name="member-vlan", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Member VLANs for this topology group', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-topology-group', defining_module='brocade-topology-group', yang_type='container', is_config=True)""", })
    self.__member_vlan = t
    # Notify any registered change hook, when present.
    if hasattr(self, '_set'):
        self._set()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.