signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
async def on_raw_error(self, message):
    """Handle a server ERROR message; the server will now close the connection."""
    combined = ' '.join(message.params)
    await self.on_data_error(protocol.ServerError(combined))
def system_procRet(str_command, b_echoCommand=0):
    """Run the <str_command> on the underlying shell. Any stderr stream is lost.

    :param str_command: shell command to execute
    :param b_echoCommand: if truthy, echo the command via printf()
    :return: tuple (retcode, str_stdout)
        o retcode: the system return code (None on success, per os.popen)
        o str_stdout: the standard output stream
    """
    if b_echoCommand:
        printf('<p>str_command = %s</p>', str_command)
    # Open the pipe ONCE: the original called os.popen() twice (once for
    # stdout, once for the return code), executing the command twice.
    pipe = os.popen(str_command)
    str_stdout = pipe.read()
    retcode = pipe.close()
    return retcode, str_stdout
def fundb(self, fields=None, min_volume=0, min_discount=0, forever=False):
    """Return graded-B fund data as a dict.

    :param fields: coupon-rate descriptors to keep, e.g. ['+3.0%', '6.0%']
    :param min_volume: minimum trade volume (unit: 10k CNY)
    :param min_discount: minimum discount rate (unit: %)
    :param forever: if True keep only perpetual products (default False)
    """
    if fields is None:
        fields = []
    # Stamp the request URL with the current time.
    self.__fundb_url = self.__fundb_url.format(ctime=int(time.time()))
    # Fetch and parse the JSON payload, then normalize it.
    raw = requests.get(self.__fundb_url)
    data = self.formatfundbjson(json.loads(raw.text))
    # Apply the optional filters one by one.
    if min_volume:
        data = {k: v for k, v in data.items()
                if float(v["fundb_volume"]) > min_volume}
    if len(fields):
        data = {k: v for k, v in data.items()
                if v["coupon_descr_s"] in "".join(fields)}
    if forever:
        data = {k: v for k, v in data.items()
                if v["fundb_left_year"].find("永续") != -1}
    if min_discount:
        data = {k: v for k, v in data.items()
                if float(v["fundb_discount_rt"][:-1]) > min_discount}
    self.__fundb = data
    return self.__fundb
def get_response(sock, buffer_size=4096):
    """Retrieve a single line response from *sock*.

    Returns a tuple ``(response, extra)``: the text up to the first newline,
    and any data received after that newline.

    :param sock: socket-like object exposing ``recv``
    :param buffer_size: maximum bytes to read per ``recv`` call
    """
    response = ""
    extra = ""
    while True:
        closed = False
        try:
            chunk = sock.recv(buffer_size)
            if chunk:
                response += chunk
            else:
                # Peer closed the connection. The original implementation
                # spun forever here when a partial (newline-less) line had
                # already been buffered.
                closed = True
        except socket.error as e:
            # Retryable conditions: no data yet / interrupted system call.
            if e.errno not in [errno.EAGAIN, errno.EINTR]:
                raise
        if not response:
            break
        if "\n" in response:
            response, extra = response.split("\n", 1)
            break
        if closed:
            # Return the partial line instead of looping forever.
            break
    return response, extra
def draw_text ( self , text : str , x : float , y : float , * , font_name : str , font_size : float , fill : Color ) -> None : """Draws the given text at x , y ."""
pass
def info(self):
    """Retrieve the design document view information data.

    GET databasename/_design/{ddoc}/_info

    :returns: view information as a dictionary
    """
    url = '/'.join([self.document_url, '_info'])
    resp = self.r_session.get(url)
    resp.raise_for_status()
    return response_to_json_dict(resp)
def section_lengths(neurites, neurite_type=NeuriteType.all):
    '''Return the length of every section in a collection of neurites.'''
    return map_sections(_section_length, neurites,
                        neurite_type=neurite_type)
def convertToNative(self, aVal):
    """Convert to native bool; interpret certain strings."""
    if aVal is None:
        return None
    if isinstance(aVal, bool):
        return aVal
    # Any other value is stringified; these spellings count as true.
    truthy = ('1', 'on', 'yes', 'true')
    return str(aVal).lower() in truthy
def on_purchase_completed(self, mapping=None, convert=None, default=None):
    """Decorator that routes a Connections.Response to the wrapped function.

    The request is sent when Alexa completes the purchase flow. See
    https://developer.amazon.com/docs/in-skill-purchase/add-isps-to-a-skill.html#handle-results

    The wrapped view function may accept parameters from the Request
    (in addition to locale, requestId, timestamp, and type)::

        @ask.on_purchase_completed(
            mapping={'payload': 'payload', 'name': 'name',
                     'status': 'status', 'token': 'token'})
        def completed(payload, name, status, token):
            logger.info(payload)

    :param mapping: request-field -> view-arg mapping; None selects the
        standard payload/name/status/token map (mutable defaults avoided)
    :param convert: optional per-argument conversion callables
    :param default: optional per-argument default values
    """
    # None sentinels replace the original mutable default arguments,
    # which were shared across every call of this method.
    if mapping is None:
        mapping = {'payload': 'payload', 'name': 'name',
                   'status': 'status', 'token': 'token'}
    if convert is None:
        convert = {}
    if default is None:
        default = {}

    def decorator(f):
        self._intent_view_funcs['Connections.Response'] = f
        self._intent_mappings['Connections.Response'] = mapping
        self._intent_converts['Connections.Response'] = convert
        self._intent_defaults['Connections.Response'] = default

        @wraps(f)
        def wrapper(*args, **kwargs):
            self._flask_view_func(*args, **kwargs)
        # Note: the original returns the undecorated view function.
        return f
    return decorator
def forwards(self, orm):
    "Write your forwards methods here."
    Project = orm['samples.Project']
    Cohort = orm['samples.Cohort']
    Sample = orm['samples.Sample']
    # One autocreated cohort per non-default project.
    for project in Project.objects.exclude(name=DEFAULT_PROJECT_NAME):
        cohort = Cohort(name=project.label, autocreated=True)
        cohort.project = project
        cohort.save()
        cohort_bulk_add(orm, cohort,
                        Sample.objects.filter(project=project).distinct())
    # One autocreated cohort per batch.
    for batch in orm['samples.Batch'].objects.select_related('project'):
        cohort = Cohort(name=batch.label, autocreated=True)
        cohort.project = batch.project
        cohort.save()
        cohort_bulk_add(orm, cohort,
                        Sample.objects.filter(batch=batch).distinct())
    # Populate the default cohort with all samples
    default_cohort = Cohort.objects.get(name=DEFAULT_COHORT_NAME)
    cohort_bulk_add(orm, default_cohort, Sample.objects.all())
def palette(hues, saturations, values):
    """Generate a palette for ``PIL.Image.putpalette``.

    Parameters
    ----------
    hues : int
        Number of hues.
    saturations : int
        Number of saturations.
    values : int
        Number of values.

    Raises
    ------
    ValueError
        If ``hues * saturations * values > 256`` or
        ``min(hues, saturations, values) < 1``.

    Returns
    -------
    list of int
        Flat RGB palette padded to 256 entries.
    """
    size = hues * saturations * values
    if size > 256:
        raise ValueError('palette size > 256: {0}'.format(size))
    if min(hues, saturations, values) < 1:
        raise ValueError('invalid palette size: {0} {1} {2}'.format(
            hues, saturations, values))
    ret = []
    if hues == 1 and saturations == 1:
        # Grayscale ramp; a single value yields an empty palette body.
        if values == 1:
            size = 0
        else:
            step = values - 1
            for value in range(values):
                gray = value * 255 // step
                ret.extend((gray, gray, gray))
    else:
        for saturation in range(1, saturations + 1):
            sat = saturation / saturations
            for hue in range(1, hues + 1):
                h = hue / hues
                for value in range(1, values + 1):
                    v = value / values
                    rgb = hsv_to_rgb(h, sat, v)
                    ret.extend(floor(channel * 255) for channel in rgb)
    # Pad the remainder of the 256-color table with zeros.
    ret.extend(0 for _ in range((256 - size) * 3))
    return ret
def getIndicesFromBigIndex(self, bigIndex):
    """Decompose a flat (big) index into an index set.

    @param bigIndex flat index
    @return index set as a numpy integer array
    @note no checks are performed to ensure the given big index is valid
    """
    return numpy.array(
        [bigIndex // self.dimProd[axis] % self.dims[axis]
         for axis in range(self.ndims)])
def encode_arc(self, coords):
    """Append commands to _geometry that draw an arc.

    - Returns False if nothing was added.
    - Returns True (and advances _last_x/_last_y) if points were added.
    """
    last_x, last_y = self._last_x, self._last_y
    # The first coordinate becomes a MOVE_TO relative to the previous point.
    first_x, first_y = next(coords)
    x, y = self.coords_on_grid(first_x, first_y)
    commands = [
        encode_cmd_length(CMD_MOVE_TO, 1),
        zigzag(x - last_x),
        zigzag(y - last_y),
        CMD_FAKE,  # placeholder, patched below with the real LINE_TO header
    ]
    last_x, last_y = x, y
    pairs_added = 0
    for fx, fy in coords:
        x, y = self.coords_on_grid(fx, fy)
        dx, dy = x - last_x, y - last_y
        if dx == 0 and dy == 0:
            # Points that collapse onto the same grid cell are skipped.
            continue
        commands.extend((zigzag(dx), zigzag(dy)))
        last_x, last_y = x, y
        pairs_added += 1
    if pairs_added == 0:
        return False
    commands[3] = encode_cmd_length(CMD_LINE_TO, pairs_added)
    self._geometry.extend(commands)
    self._last_x, self._last_y = last_x, last_y
    return True
def setup(self, **kwargs):
    '''Called during production de-trending, prior to calling the
    :py:obj:`Detrender.run()` method.

    :param str parent_model: The name of the model to operate on. \
           Default `nPLD`
    '''
    # Load the parent model
    self.parent_model = kwargs.get('parent_model', 'nPLD')
    if not self.load_model(self.parent_model):
        raise Exception('Unable to load parent model.')
    # Save static copies of the de-trended flux, the outlier mask
    # and the lambda array
    self._norm = np.array(self.flux)
    self.recmask = np.array(self.mask)
    self.reclam = np.array(self.lam)
    # Now reset the model params
    self.optimize_gp = False
    nseg = len(self.breakpoints)
    self.lam_idx = -1
    self.lam = [[1e5] + [None] * (self.pld_order - 1) for _ in range(nseg)]
    self.cdpp_arr = np.full(nseg, np.nan)
    self.cdppr_arr = np.full(nseg, np.nan)
    self.cdppv_arr = np.full(nseg, np.nan)
    self.cdpp = np.nan
    self.cdppr = np.nan
    self.cdppv = np.nan
    self.cdppg = np.nan
    self.model = np.zeros_like(self.time)
    self.loaded = True
def clone(self):
    """Return an exact copy of self.

    The ``_unused`` member is duplicated via its own ``clone()`` and the
    alarms container is shallow-copied, so the copy does not share them.
    """
    duplicate = copy.copy(self)
    duplicate._unused = duplicate._unused.clone()
    duplicate.alarms = copy.copy(self.alarms)
    return duplicate
def populateFromFile(self, dataUrl):
    """Populate the instance variables of this RnaQuantificationSet from
    the specified data URL."""
    self._dbFilePath = dataUrl
    # Open the sqlite backend on the same path and load the quantifications.
    self._db = SqliteRnaBackend(dataUrl)
    self.addRnaQuants()
def get_hashes(self, all_hashes=False):
    """Return a list of hashes in hashcat-friendly format for tickets with
    encryption type 23 (RC4).

    :param all_hashes: overrides the encryption-type filtering and returns
        a hash for every ticket.
    :returns: list of hash strings
    """
    hashes = []
    for cred in self.credentials:
        res = Ticket.load(cred.ticket.to_asn1()).native
        # The original compared `all_hashes == True`, which silently
        # rejected truthy non-True values; plain truthiness is the
        # intended contract.
        if all_hashes or int(res['enc-part']['etype']) == 23:
            hashes.append(cred.to_hash())
    return hashes
def put(self, session):
    """Return a session to the pool.

    Never blocks: if the pool is full, raises.

    :type session: :class:`~google.cloud.spanner_v1.session.Session`
    :param session: the session being returned.

    :raises: :exc:`six.moves.queue.Full` if the queue is full.
    """
    # Sessions are queued with their expiry time so stale ones can be
    # recognized on checkout.
    expire_at = _NOW() + self._delta
    self._sessions.put_nowait((expire_at, session))
def publish_server_opened(self, server_address, topology_id):
    """Publish a ServerOpeningEvent to all server listeners.

    :Parameters:
      - `server_address`: The address (host/port pair) of the server.
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = ServerOpeningEvent(server_address, topology_id)
    for listener in self.__server_listeners:
        try:
            listener.opened(event)
        except Exception:
            # A failing subscriber must never break event publication.
            _handle_exception()
def mnist_extract_labels(filename, num_images):
    """Extract the labels into a 1-hot matrix [image index, label index]."""
    print('Extracting', filename)
    with gzip.open(filename) as bytestream:
        bytestream.read(8)  # skip the 8-byte header before the label bytes
        raw = bytestream.read(1 * num_images)
    labels = np.frombuffer(raw, dtype=np.uint8)
    # Dense 1-hot: row i carries a 1.0 in column labels[i].
    return (np.arange(10) == labels[:, None]).astype(np.float32)
def _find_func(self, operation):
    """Resolve the configure function for the given operation.

    The input might be an `Operation` enum member or a string.
    """
    name = (operation.name.lower() if isinstance(operation, Operation)
            else operation.lower())
    return getattr(self, "configure_{}".format(name))
def md5sum_is_current(src_file):
    """Check whether *src_file* has the same md5 hash as the one on disk."""
    current_md5 = get_md5sum(src_file)
    checksum_path = src_file + '.md5'
    if not os.path.exists(checksum_path):
        # No recorded checksum means we cannot confirm currency.
        return False
    with open(checksum_path, 'r') as fh:
        recorded_md5 = fh.read()
    return current_md5 == recorded_md5
def read_pixel_register(self, pix_regs=None, dcs=range(40), overwrite_config=False):
    '''Read the pixel register, interpret the data and return masked numpy
    arrays with the data for the chosen pixel registers.

    Pixels without any data are masked.

    Parameters
    ----------
    pix_regs : iterable of str, optional
        Pixel registers to read (e.g. Enable, C_High, ...). If None, all
        are read: "EnableDigInj", "Imon", "Enable", "C_High", "C_Low",
        "TDAC", "FDAC".
    dcs : iterable of int
        Double columns to read.
    overwrite_config : bool
        If True, the read values overwrite the config in RAM.

    Returns
    -------
    list of masked numpy.ndarray
    '''
    if pix_regs is None:
        pix_regs = ["EnableDigInj", "Imon", "Enable", "C_High", "C_Low", "TDAC", "FDAC"]
    # Put the front end into configuration mode before register reads.
    self.register_utils.send_commands(self.register.get_commands("ConfMode"))
    result = []
    for pix_reg in pix_regs:
        # the result pixel array, only pixel with data are not masked
        pixel_data = np.ma.masked_array(np.zeros(shape=(80, 336), dtype=np.uint32), mask=True)
        for dc in dcs:
            with self.readout(fill_buffer=True, callback=None, errback=None):
                self.register_utils.send_commands(self.register.get_commands("RdFrontEnd", name=[pix_reg], dcs=[dc]))
            # NOTE(review): read_data() is assumed to drain the buffer filled
            # during the readout context above -- confirm against the readout
            # implementation.
            data = self.read_data()
            # "EnableDigInj" appears to be the only register read without
            # bit inversion.
            interpret_pixel_data(data, dc, pixel_data, invert=False if pix_reg == "EnableDigInj" else True)
        if overwrite_config:
            self.register.set_pixel_register(pix_reg, pixel_data.data)
        result.append(pixel_data)
    return result
def sqlvm_group_create ( client , cmd , sql_virtual_machine_group_name , resource_group_name , location , sql_image_offer , sql_image_sku , domain_fqdn , cluster_operator_account , sql_service_account , storage_account_url , storage_account_key , cluster_bootstrap_account = None , file_share_witness_path = None , ou_path = None , tags = None ) : '''Creates a SQL virtual machine group .'''
tags = tags or { } # Create the windows server failover cluster domain profile object . wsfc_domain_profile_object = WsfcDomainProfile ( domain_fqdn = domain_fqdn , ou_path = ou_path , cluster_bootstrap_account = cluster_bootstrap_account , cluster_operator_account = cluster_operator_account , sql_service_account = sql_service_account , file_share_witness_path = file_share_witness_path , storage_account_url = storage_account_url , storage_account_primary_key = storage_account_key ) sqlvm_group_object = SqlVirtualMachineGroup ( sql_image_offer = sql_image_offer , sql_image_sku = sql_image_sku , wsfc_domain_profile = wsfc_domain_profile_object , location = location , tags = tags ) # Since it ' s a running operation , we will do the put and then the get to display the instance . LongRunningOperation ( cmd . cli_ctx ) ( sdk_no_wait ( False , client . create_or_update , resource_group_name , sql_virtual_machine_group_name , sqlvm_group_object ) ) return client . get ( resource_group_name , sql_virtual_machine_group_name )
def pss(self):
    """Peirce (Hansen-Kuipers, True) Skill Score: (ad - bc) / ((a+c)(b+d))."""
    # Contingency-table cells, named for readability.
    a = self.table[0, 0]
    b = self.table[0, 1]
    c = self.table[1, 0]
    d = self.table[1, 1]
    return (a * d - b * c) / ((a + c) * (b + d))
def _elbv2_load_balancer(self, lookup):
    """Describe a V2 ELB by friendly name.

    Args:
        lookup: the friendly name of the V2 elb to look up

    Returns:
        A dict with the load balancer description

    Raises:
        botocore.exceptions.ClientError: no such load-balancer
    """
    client = EFAwsResolver.__CLIENTS['elbv2']
    response = client.describe_load_balancers(Names=[lookup])
    # Only one name was requested, so return the first description.
    return response['LoadBalancers'][0]
def _delete(self, ):
    """Internal implementation for deleting a reftrack.

    This will just delete the reftrack, set the children to None, update
    the status, and the rootobject. If the object is an alien, it will
    also set the parent to None, so it disappears from the model.

    :returns: None
    :rtype: None
    :raises: None
    """
    # Delete the underlying reference object via the interface, then
    # clear our handle to it (without touching the parent linkage).
    refobjinter = self.get_refobjinter()
    refobjinter.delete(self.get_refobj())
    self.set_refobj(None, setParent=False)
    if self.alien():
        # it should not be in the scene
        # so also remove it from the model
        # so we cannot load it again
        parent = self.get_parent()
        if parent:
            parent.remove_child(self)
            self._treeitem.parent().remove_child(self._treeitem)
    else:
        # only remove all children from the model and set their parent to None
        for c in self.get_all_children():
            c._parent = None
            self._treeitem.remove_child(c._treeitem)
        # this should not have any children anymore
        self._children = []
    self.set_status(None)
def grid(metadata, layout, params):
    """Bin layout points into square tiles of grid cells.

    :param metadata: user-defined numpy arrays with metadata (passed to the
        optional ``params["filter"]`` callback)
    :param layout: dict of numpy arrays ``x`` and ``y``
    :param params: dict with
        ``n_layer`` -- number of cells in the layer (squared)
        ``n_tile``  -- number of cells in the tile (squared)
        ``filter``  -- optional ``f(i, metadata=...) -> bool`` point filter
    :returns: dict mapping ``(tile_i, tile_j)`` to per-tile point lists
    """
    x = layout["x"]
    y = layout["y"]
    x_min, x_max = np.min(x), np.max(x)
    y_min, y_max = np.min(y), np.max(y)
    # this creates the grid: digitize maps each coordinate to a cell index
    xd = np.digitize(x, np.linspace(x_min, x_max, params["n_layer"] - 1))
    yd = np.digitize(y, np.linspace(y_min, y_max, params["n_layer"] - 1))
    # the number of tiles is the number of cells divided by the number of
    # cells in each tile
    num_tiles = int(params["n_layer"] / params["n_tile"])
    print("num tiles", num_tiles)
    # we will save the tiles in an array indexed by the tile coordinates
    tiles = {}
    for ti in range(num_tiles):
        for tj in range(num_tiles):
            tiles[(ti, tj)] = {
                "x": [],
                "y": [],
                "ci": [],  # cell-space x coordinate
                "cj": [],  # cell-space y coordinate
                "gi": [],  # global index
            }
    # Hoisted out of the loop; the original also shadowed the builtin
    # `filter` and re-fetched this callback on every point.
    filter_fn = params.get("filter", lambda i, metadata: True)
    for i, xi in enumerate(x):
        if i % 1000 == 0 or i + 1 == len(x):
            print("point", i + 1, "/", len(x), end="\r")
        # layout-space coordinates
        yi = y[i]
        # grid-space cell coordinates
        ci = xd[i]
        cj = yd[i]
        # tile coordinate
        ti = math.floor(ci / params["n_tile"])
        tj = math.floor(cj / params["n_tile"])
        if filter_fn(i, metadata=metadata):
            tile = tiles[(ti, tj)]
            tile["x"].append(xi)
            tile["y"].append(yi)
            tile["ci"].append(ci)
            tile["cj"].append(cj)
            tile["gi"].append(i)
    return tiles
def waitForNetworkCoverage(self, timeout=None):
    """Block until the modem has GSM network coverage.

    This method blocks until the modem is registered with the network and
    the signal strength is greater than 0, optionally timing out if a
    timeout was specified.

    :param timeout: Maximum time to wait for network coverage, in seconds
    :type timeout: int or float

    :raise TimeoutException: if a timeout was specified and reached
    :raise InvalidStateException: if the modem is not going to receive
        network coverage (SIM blocked, etc)

    :return: the current signal strength
    :rtype: int
    """
    # block[0] is flipped to False by the timer thread to end the loop.
    block = [True]
    if timeout != None:
        # Set up a timeout mechanism
        # NOTE(review): the timer is never cancelled on early return/raise,
        # so its thread may outlive this call by up to `timeout` seconds --
        # confirm this is acceptable.
        def _cancelBlock():
            block[0] = False
        t = threading.Timer(timeout, _cancelBlock)
        t.start()
    ss = -1
    checkCreg = True
    while block[0]:
        if checkCreg:
            cregResult = lineMatching(r'^\+CREG:\s*(\d),(\d)$', self.write('AT+CREG?', parseError=False))  # example result: +CREG: 0,1
            if cregResult:
                status = int(cregResult.group(2))
                if status in (1, 5):
                    # 1: registered, home network, 5: registered, roaming
                    # Now simply check and return network signal strength
                    checkCreg = False
                elif status == 3:
                    raise InvalidStateException('Network registration denied')
                elif status == 0:
                    raise InvalidStateException('Device not searching for network operator')
            else:
                # Disable network registration check; only use signal strength
                self.log.info('+CREG check disabled due to invalid response or unsupported command')
                checkCreg = False
        else:
            # Check signal strength
            ss = self.signalStrength
            if ss > 0:
                return ss
        time.sleep(1)
    else:
        # If this is reached, the timer task has triggered
        raise TimeoutException()
def cli(env, identifier, hardware_identifier, virtual_identifier):
    """Attach devices to a ticket."""
    tickets = SoftLayer.TicketManager(env.client)
    if hardware_identifier and virtual_identifier:
        raise exceptions.ArgumentError("Cannot attach hardware and a virtual server at the same time")
    if hardware_identifier:
        # Resolve the friendly hardware identifier, then attach it.
        hw_manager = SoftLayer.HardwareManager(env.client)
        resolved = helpers.resolve_id(hw_manager.resolve_ids, hardware_identifier, 'hardware')
        tickets.attach_hardware(identifier, resolved)
    elif virtual_identifier:
        # Resolve the friendly VS identifier, then attach it.
        vs_manager = SoftLayer.VSManager(env.client)
        resolved = helpers.resolve_id(vs_manager.resolve_ids, virtual_identifier, 'VS')
        tickets.attach_virtual_server(identifier, resolved)
    else:
        raise exceptions.ArgumentError("Must have a hardware or virtual server identifier to attach")
def _makeJobSuccessorReadyToRun ( self , jobGraph , jobNode ) : """make a successor job ready to run , returning False if they should not yet be run"""
successorJobStoreID = jobNode . jobStoreID # Build map from successor to predecessors . if successorJobStoreID not in self . toilState . successorJobStoreIDToPredecessorJobs : self . toilState . successorJobStoreIDToPredecessorJobs [ successorJobStoreID ] = [ ] self . toilState . successorJobStoreIDToPredecessorJobs [ successorJobStoreID ] . append ( jobGraph ) if jobNode . predecessorNumber > 1 : return self . _checkSuccessorReadyToRunMultiplePredecessors ( jobGraph , jobNode , successorJobStoreID ) else : return True
def init_parser(cls, parser):
    """Initialize argument parser"""
    subparsers = parser.add_subparsers(title='Search domain')
    # Compound subcommand
    compound = subparsers.add_parser('compound', help='Search in compounds')
    compound.set_defaults(which='compound')
    compound.add_argument(
        '--id', '-i', dest='id', metavar='id',
        action=FilePrefixAppendAction, type=text_type, default=[],
        help='Compound ID')
    compound.add_argument(
        '--name', '-n', dest='name', metavar='name',
        action=FilePrefixAppendAction, type=text_type, default=[],
        help='Name of compound')
    # Reaction subcommand
    reaction = subparsers.add_parser('reaction', help='Search in reactions')
    reaction.set_defaults(which='reaction')
    reaction.add_argument(
        '--id', '-i', dest='id', metavar='id',
        action=FilePrefixAppendAction, type=str, default=[],
        help='Reaction ID')
    reaction.add_argument(
        '--compound', '-c', dest='compound', metavar='compound',
        action=FilePrefixAppendAction, type=str, default=[],
        help='Comma-separated list of compound IDs')
def _make_set ( var ) : '''Force var to be a set'''
if var is None : return set ( ) if not isinstance ( var , list ) : if isinstance ( var , six . string_types ) : var = var . split ( ) else : var = list ( var ) return set ( var )
def _encode_list(name, value, check_keys, opts):
    """Encode a list/tuple as a BSON array element."""
    # BSON arrays are documents whose keys are "0", "1", ... as yielded
    # by gen_list_name().
    index_names = gen_list_name()
    encoded = b"".join(
        _name_value_to_bson(next(index_names), item, check_keys, opts)
        for item in value)
    # 0x04 = array type marker; the int32 length covers itself plus the
    # trailing NUL (hence the +5).
    return b"\x04" + name + _PACK_INT(len(encoded) + 5) + encoded + b"\x00"
def unload(self, core):
    """Unload the given core.

    http://wiki.apache.org/solr/CoreAdmin#head-f5055a885932e2c25096a8856de840b06764d143
    """
    return self._get_url(self.url, params={'action': 'UNLOAD', 'core': core})
def set_user_presence(self, userid, presence):
    '''Set the presence of a user.'''
    api_call = self.__pod__.Presence.post_v2_user_uid_presence(
        sessionToken=self.__session__,
        uid=userid,
        presence=presence)
    response, status_code = api_call.result()
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
def send(self, datum):
    """Send *datum* (e.g. a sentence or MRS) to ACE.

    Warning:
        Sending data without reading (e.g., via :meth:`receive`) can fill
        the buffer and cause data to be lost. Use the :meth:`interact`
        method for most data-processing tasks with ACE.
    """
    line = datum.rstrip() + '\n'
    try:
        self._p.stdin.write(line)
        self._p.stdin.flush()
    except (IOError, OSError):  # ValueError if file was closed manually
        logging.info(
            'Attempted to write to a closed process; attempting to reopen')
        # Restart the subprocess and retry the write once.
        self._open()
        self._p.stdin.write(line)
        self._p.stdin.flush()
def which(program):
    """Locate `program` in PATH.

    Arguments:
        program (str): Name of program, e.g. "python"
    """
    def _runnable(candidate):
        # A hit must be a regular file with the execute bit set.
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    # On Windows PATHEXT lists executable suffixes; elsewhere it is the
    # empty string, yielding a single empty extension.
    extensions = os.getenv("PATHEXT", "").split(os.pathsep)
    for directory in os.environ["PATH"].split(os.pathsep):
        for extension in extensions:
            candidate = os.path.join(
                directory.strip('"'), program + extension.lower())
            if _runnable(candidate):
                return candidate
    return None
def find_by_id(self, project_membership, params=None, **options):
    """Return the project membership record.

    Parameters
    ----------
    project_membership : str
        Globally unique identifier for the project membership.
    params : dict, optional
        Parameters for the request.
    """
    # None sentinel replaces the original mutable default `params={}`,
    # which was shared across every call.
    if params is None:
        params = {}
    path = "/project_memberships/%s" % (project_membership)
    return self.client.get(path, params, **options)
async def _wrap_gen ( self , ID : str ) : """异步迭代器包装 . Parameters : ID ( str ) : - 任务ID Yield : ( Any ) : - 从异步迭代器结果队列中获取的结果 Raise : ( StopAsyncIteration ) : - 异步迭代器终止时抛出该异常"""
while True : result = await self . _gens_queue [ ID ] . get ( ) if isinstance ( result , StopAsyncIteration ) : del self . _gens_queue [ ID ] break else : yield result
def file_delete ( context , id , file_id ) : """file _ delete ( context , id , path ) Delete a component file > > > dcictl component - file - delete [ OPTIONS ] : param string id : ID of the component to delete file [ required ] : param string file _ id : ID for the file to delete [ required ]"""
component . file_delete ( context , id = id , file_id = file_id )
def get_rrsets_by_type_owner(self, zone_name, rtype, owner_name, q=None, **kwargs):
    """Return the list of RRSets in the specified zone of the specified type.

    Arguments:
        zone_name -- The name of the zone.
        rtype -- The type of the RRSets; numeric (1) or, where a well-known
            name is defined for the type (A), that name.
        owner_name -- The owner name for the RRSet. Without a trailing dot
            it is assumed relative (foo); with a trailing dot, absolute
            (foo.zonename.com.).

    Keyword Arguments:
        q -- The search parameters, in a dict. Valid keys are:
             ttl -- must match the TTL for the rrset
             value -- substring match of the first BIND field value
        sort -- The sort column (TTL or TYPE).
        reverse -- Whether the list is ascending (False) or descending (True).
        offset -- 0-based position of the first returned element.
        limit -- The maximum number of rows to be returned.
    """
    uri = "/v1/zones/{0}/rrsets/{1}/{2}".format(zone_name, rtype, owner_name)
    return self.rest_api_connection.get(uri, build_params(q, kwargs))
def smt_dataset(directory='data/',
                train=False,
                dev=False,
                test=False,
                train_filename='train.txt',
                dev_filename='dev.txt',
                test_filename='test.txt',
                extracted_name='trees',
                check_files=None,
                url='http://nlp.stanford.edu/sentiment/trainDevTestTrees_PTB.zip',
                fine_grained=False,
                subtrees=False):
    """Load the Stanford Sentiment Treebank dataset.

    The treebank includes fine-grained sentiment labels for 215,154 phrases
    in the parse trees of 11,855 sentences.

    **Reference**: https://nlp.stanford.edu/sentiment/index.html
    **Citation**: Socher et al., "Recursive Deep Models for Semantic
    Compositionality Over a Sentiment Treebank".

    Args:
        directory (str, optional): Directory to cache the dataset.
        train (bool, optional): If to load the training split.
        dev (bool, optional): If to load the development split.
        test (bool, optional): If to load the test split.
        train_filename (str, optional): Filename of the training split.
        dev_filename (str, optional): Filename of the development split.
        test_filename (str, optional): Filename of the test split.
        extracted_name (str, optional): Name of the extracted dataset directory.
        check_files (list, optional): Download is considered successful when
            these files exist; None means ``['trees/train.txt']``.
        url (str, optional): URL of the dataset archive.
        fine_grained (bool, optional): Use 5-class instead of 3-class labeling.
        subtrees (bool, optional): Also include sentiment-tagged subphrases.

    Returns:
        :class:`tuple` of :class:`torchnlp.datasets.Dataset`: the requested
        splits in train/dev/test order; a single split is returned unwrapped.

    Example:
        >>> from torchnlp.datasets import smt_dataset  # doctest: +SKIP
        >>> train = smt_dataset(train=True)  # doctest: +SKIP
        >>> train[5]  # doctest: +SKIP
        {'text': "Whether or not you're enlightened by any of Derrida's ...",
         'label': 'positive'}
    """
    # None sentinel replaces the original mutable default list.
    if check_files is None:
        check_files = ['trees/train.txt']
    download_file_maybe_extract(url=url, directory=directory,
                                check_files=check_files)
    ret = []
    splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)]
    splits = [fname for (requested, fname) in splits if requested]
    for filename in splits:
        full_path = os.path.join(directory, extracted_name, filename)
        examples = []
        with io.open(full_path, encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                parsed = parse_tree(line, subtrees=subtrees,
                                    fine_grained=fine_grained)
                if subtrees:
                    examples.extend(parsed)
                else:
                    examples.append(parsed)
        ret.append(Dataset(examples))
    if len(ret) == 1:
        return ret[0]
    return tuple(ret)
def extract_client_auth(request):
    """Get client credentials using HTTP Basic Authentication, or fall back
    to POST parameters.

    See: http://tools.ietf.org/html/rfc6750#section-2.1

    :returns: tuple ``(client_id, client_secret)``; empty strings when the
        credentials are missing or malformed.
    """
    auth_header = request.META.get('HTTP_AUTHORIZATION', '')
    # Raw string fixes the invalid '\s' escape in the original non-raw
    # pattern; '\s{1}' is equivalent to a single '\s'.
    if re.match(r'^Basic\s.+$', auth_header):
        b64_user_pass = auth_header.split()[1]
        try:
            user_pass = b64decode(b64_user_pass).decode('utf-8').split(':')
            client_id, client_secret = tuple(user_pass)
        except Exception:
            # Malformed base64 / encoding / wrong field count -> empty creds.
            client_id = client_secret = ''
    else:
        client_id = request.POST.get('client_id', '')
        client_secret = request.POST.get('client_secret', '')
    return (client_id, client_secret)
def validate_and_store_body_data(self, data):
    """Append incoming body data and validate it against Content-Length.

    Parameters:
        data (bytes): Incoming client data to be added to the body.

    Raises:
        HTTPErrorBadRequest: If the accumulated body exceeds the declared
            content length.
    """
    # Bug fix: the old slice assignment `self.body_buffer[-1:] = data`
    # REPLACED the buffer's last element on every call after the first,
    # silently corrupting the body. extend() appends instead.
    self.body_buffer.extend(data)
    if len(self.body_buffer) > self.content_length:
        problem = "Content length exceeds expected value (%d > %d)" % (
            len(self.body_buffer), self.content_length)
        raise HTTPErrorBadRequest(phrase=problem)
def emit(self, data_frame):
    """Record *data_frame* into the store.

    Columns are renamed to ``<prefix>__<column>`` before storing.

    :param data_frame: DataFrame to be recorded.
    :raises MultipleEmitsError: if a frame was already emitted.
    """
    if self.result is not None:
        raise MultipleEmitsError()
    prefixed = ['{0}__{1}'.format(self.prefix, column)
                for column in data_frame.columns]
    data_frame.columns = prefixed
    self.result = data_frame
def handle_headers(self, message_header, block_headers_message):
    """Handle a 'headers' message.

    Headers are requested in order, so they must also arrive in order;
    verify continuity against the locally stored chain, append the new
    headers to the on-disk header file, and request the next batch (or
    exit the loop when synchronized).

    Bug fix: the discontinuity `raise` below formatted its message with
    ``"... %s ... %s" % prev_block_hash, other`` -- the format arguments
    were not a tuple, so the raise itself crashed with a TypeError.
    Also uses ``range`` instead of the Python-2-only ``xrange``.
    """
    log.debug("handle headers (%s)" % len(block_headers_message.headers))
    block_headers = block_headers_message.headers
    serializer = BlockHeaderSerializer()

    # verify that the local header chain connects to this sequence
    current_height = SPVClient.height(self.path)
    if current_height is None:
        assert USE_TESTNET
        current_height = -1
    assert (current_height >= 0 and USE_MAINNET) or USE_TESTNET, "Invalid height %s" % current_height

    last_header = None
    if current_height >= 0:
        last_header = SPVClient.read_header(self.path, current_height)
        log.debug("Receive %s headers (%s to %s)" % (len(block_headers), current_height, current_height + len(block_headers)))
    else:
        # first testnet header: synthesize the "previous" header record
        log.debug("Receive %s testnet headers (%s to %s)" % (len(block_headers), current_height + 1, current_height + len(block_headers)))
        last_header = {
            "version": block_headers[0].version,
            "prev_block_hash": "%064x" % block_headers[0].prev_block,
            "merkle_root": "%064x" % block_headers[0].merkle_root,
            "timestamp": block_headers[0].timestamp,
            "bits": block_headers[0].bits,
            "nonce": block_headers[0].nonce,
            "hash": block_headers[0].calculate_hash(),
        }

    if (USE_MAINNET or (USE_TESTNET and current_height >= 0)) and last_header['hash'] != self.hash_to_string(block_headers[0].prev_block):
        raise Exception("Received discontinuous block header at height %s: hash '%s' (expected '%s')" % (
            current_height, self.hash_to_string(block_headers[0].prev_block), last_header['hash']))

    header_start = 1
    if USE_TESTNET and current_height < 0:
        # save initial header
        header_start = 0

    # verify that this sequence of headers constitutes a hash chain
    for i in range(header_start, len(block_headers)):
        prev_block_hash = self.hash_to_string(block_headers[i].prev_block)
        if i > 0 and prev_block_hash != block_headers[i - 1].calculate_hash():
            # fixed: format args wrapped in a tuple
            raise Exception("Block '%s' is not continuous with block '%s'" % (
                prev_block_hash, block_headers[i - 1].calculate_hash()))

    if current_height < 0:
        # save the first header
        if not os.path.exists(self.path):
            with open(self.path, "wb") as f:
                block_header_serializer = BlockHeaderSerializer()
                bin_data = block_header_serializer.serialize(block_headers[0])
                f.write(bin_data)
        # got all headers, including the first
        current_height = 0

    # insert into the local headers database
    next_block_id = current_height + 1
    for block_header in block_headers:
        with open(self.path, "rb+") as f:
            # omit tx count
            block_header.txns_count = 0
            bin_data = serializer.serialize(block_header)
            if len(bin_data) != BLOCK_HEADER_SIZE:
                raise Exception("Block %s (%s) has %s-byte header" % (
                    next_block_id, block_header.calculate_hash(), len(bin_data)))
            # NOTE: the fact that we use seek + write ensures that we can:
            # * restart synchronizing at any point
            # * allow multiple processes to work on the chain safely
            #   (even if they're duplicating effort)
            f.seek(BLOCK_HEADER_SIZE * next_block_id, os.SEEK_SET)
            f.write(bin_data)
            if SPVClient.height(self.path) >= self.last_block_id - 1:
                break
        next_block_id += 1

    current_block_id = SPVClient.height(self.path)
    if current_block_id >= self.last_block_id - 1:
        # got all headers
        self.loop_exit()
        return

    prev_block_header = SPVClient.read_header(self.path, current_block_id)
    prev_block_hash = prev_block_header['hash']
    self.send_getheaders(prev_block_hash)
def val_accuracy(show_swap):
    """Return (memory_accuracy, swap_accuracy) codes for this kernel.

    See http://wiki.apache.org/spamassassin/TopSharedMemoryBug

    Accuracy codes: 0 = inaccurate, 1 = reasonable, 2 = exact (smaps with
    Pss/SwapPss available), -1 = known-broken kernel range. ``show_swap``
    controls whether swap accuracy is probed at all.
    """
    kv = kernel_ver()
    pid = os.getpid()
    swap_accuracy = -1
    if kv[:2] == (2, 4):
        # 2.4 kernels: only reasonable if meminfo lacks the Inact_ split
        if proc.open('meminfo').read().find("Inact_") == -1:
            return 1, swap_accuracy
        return 0, swap_accuracy
    elif kv[:2] == (2, 6):
        # 2.6 kernels: exact when smaps exposes Pss, reasonable otherwise
        if os.path.exists(proc.path(pid, 'smaps')):
            swap_accuracy = 1
            if proc.open(pid, 'smaps').read().find("Pss:") != -1:
                return 2, swap_accuracy
            else:
                return 1, swap_accuracy
        if (2, 6, 1) <= kv <= (2, 6, 9):
            # kernels in this range are known to misreport memory
            return -1, swap_accuracy
        return 0, swap_accuracy
    elif kv[0] > 2 and os.path.exists(proc.path(pid, 'smaps')):
        # modern kernels with smaps are exact; SwapPss upgrades swap accuracy
        swap_accuracy = 1
        if show_swap and proc.open(pid, 'smaps').read().find("SwapPss:") != -1:
            swap_accuracy = 2
        return 2, swap_accuracy
    else:
        return 1, swap_accuracy
def overlap_add ( blk_sig , size = None , hop = None , wnd = None , normalize = True ) : """Overlap - add algorithm using Numpy arrays . Parameters blk _ sig : An iterable of blocks ( sequences ) , such as the ` ` Stream . blocks ` ` result . size : Block size for each ` ` blk _ sig ` ` element , in samples . hop : Number of samples for two adjacent blocks ( defaults to the size ) . wnd : Windowing function to be applied to each block or any iterable with exactly ` ` size ` ` elements . If ` ` None ` ` ( default ) , applies a rectangular window . normalize : Flag whether the window should be normalized so that the process could happen in the [ - 1 ; 1 ] range , dividing the window by its hop gain . Default is ` ` True ` ` . Returns A Stream instance with the blocks overlapped and added . See Also Stream . blocks : Splits the Stream instance into blocks with given size and hop . blocks : Same to Stream . blocks but for without using the Stream class . chain : Lazily joins all iterables given as parameters . chain . from _ iterable : Same to ` ` chain ( * data ) ` ` , but the ` ` data ` ` evaluation is lazy . window : Window / apodization / tapering functions for a given size as a StrategyDict . Note Each block has the window function applied to it and the result is the sum of the blocks without any edge - case special treatment for the first and last few blocks ."""
import numpy as np # Finds the size from data , if needed if size is None : blk_sig = Stream ( blk_sig ) size = len ( blk_sig . peek ( ) ) if hop is None : hop = size # Find the right windowing function to be applied if wnd is None : wnd = np . ones ( size ) elif callable ( wnd ) and not isinstance ( wnd , Stream ) : wnd = wnd ( size ) if isinstance ( wnd , Sequence ) : wnd = np . array ( wnd ) elif isinstance ( wnd , Iterable ) : wnd = np . hstack ( wnd ) else : raise TypeError ( "Window should be an iterable or a callable" ) # Normalization to the [ - 1 ; 1 ] range if normalize : steps = Stream ( wnd ) . blocks ( hop ) . map ( np . array ) gain = np . sum ( np . abs ( np . vstack ( steps ) ) , 0 ) . max ( ) if gain : # If gain is zero , normalization couldn ' t have any effect wnd = wnd / gain # Can ' t use " / = " nor " * = " as Numpy would keep datatype # Overlap - add algorithm old = np . zeros ( size ) for blk in ( wnd * blk for blk in blk_sig ) : blk [ : - hop ] += old [ hop : ] for el in blk [ : hop ] : yield el old = blk for el in old [ hop : ] : # No more blocks , finish yielding the last one yield el
def uri_query(self, value):
    """Adds a query.

    :param value: the query string ("a=1&b=2" style); replaces any
        previously stored query options.
    """
    # Clear previously stored query options via the property deleter.
    del self.uri_query
    for part in value.split("&"):
        opt = Option()
        opt.number = defines.OptionRegistry.URI_QUERY.number
        opt.value = str(part)
        self.add_option(opt)
def short_url(self, long_url):
    """Convert a long URL into a short URL.

    :param long_url: the long URL to shorten
    :return: decoded response data
    """
    payload = {
        'appid': self.appid,
        'long_url': long_url,
    }
    return self._post('tools/shorturl', data=payload)
async def dump_blob(writer, elem, elem_type, params=None):
    """Dump a blob to a binary stream (varint length prefix + raw bytes).

    :param writer: async writer with an ``awrite`` coroutine
    :param elem: a BlobType instance or a bytes-like object
    :param elem_type: unused, kept for serializer-protocol compatibility
    :param params: unused, kept for serializer-protocol compatibility
    :return: None
    """
    elem_is_blob = isinstance(elem, x.BlobType)
    data = bytes(getattr(elem, x.BlobType.DATA_ATTR) if elem_is_blob else elem)
    # Bug fix: the length prefix must describe the serialized payload
    # (len(data)); len(elem) is wrong (or undefined) for blob wrappers
    # whose own length differs from their DATA_ATTR payload.
    await dump_varint(writer, len(data))
    await writer.awrite(data)
def deleteRows(self, login, tableName, startRow, endRow):
    """Delete the rows in [startRow, endRow] from *tableName*.

    Sends the RPC request, then waits for and consumes the reply.

    Parameters:
     - login
     - tableName
     - startRow
     - endRow
    """
    self.send_deleteRows(login, tableName, startRow, endRow)
    self.recv_deleteRows()
def create(self, sid, seedList):
    """Create a new named (sid) Seed from a list of seed URLs.

    :param sid: the name to assign to the new seed list
    :param seedList: a single seed URL, or a list/tuple of seed URLs
    :return: the created Seed object
    """
    def seed_url(uid, url):
        return {"id": uid, "url": url}

    # Bug fix: only `tuple` was previously recognized as a collection, so a
    # plain *list* of URLs got wrapped into a one-element tuple and each
    # "url" entry became the whole list. Accept lists and tuples alike.
    if not isinstance(seedList, (list, tuple)):
        seedList = (seedList,)
    seedListData = {
        "id": "12345",
        "name": sid,
        "seedUrls": [seed_url(uid, url) for uid, url in enumerate(seedList)],
    }
    # As per resolution of https://issues.apache.org/jira/browse/NUTCH-2123
    seedPath = self.server.call('post', "/seed/create", seedListData, TextAcceptHeader)
    new_seed = Seed(sid, seedPath, self.server)
    return new_seed
async def reset(self):
    """Synchronously reset a tile.

    Must be called from the emulation loop: shuts down all of this tile's
    background tasks, clears it back to reset state, and then restarts the
    initialization background task.
    """
    addr = self.address
    await self._device.emulator.stop_tasks(addr)
    self._handle_reset()
    self._logger.info("Tile at address %d has reset itself.", addr)
    self._logger.info("Starting main task for tile at address %d", addr)
    self._device.emulator.add_task(addr, self._reset_vector())
def subnets_list(virtual_network, resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    List all subnets within a virtual network.

    :param virtual_network: The virtual network name to list subnets within.

    :param resource_group: The resource group name assigned to the
        virtual network.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.subnets_list testnet testgroup
    '''
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        paged = netconn.subnets.list(
            resource_group_name=resource_group,
            virtual_network_name=virtual_network)
        subnets = __utils__['azurearm.paged_object_to_list'](paged)
        result = {subnet['name']: subnet for subnet in subnets}
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result
def toString(self):
    """Connection information as a string.

    At verbosity > 4 this dumps the wed (weight error derivative) and
    dweight (last weight change) matrices; at verbosity > 2 it dumps the
    current weight matrix. Each matrix is printed with destination units
    as columns and source units as rows.
    """
    string = ""
    if self.toLayer._verbosity > 4:
        # wed matrix: accumulated weight error derivatives
        string += "wed: from '" + self.fromLayer.name + "' to '" + self.toLayer.name + "'\n"
        string += " "
        for j in range(self.toLayer.size):
            string += " " + self.toLayer.name + "[" + str(j) + "]"
        string += '\n'
        for i in range(self.fromLayer.size):
            string += self.fromLayer.name + "[" + str(i) + "]" + ": "
            for j in range(self.toLayer.size):
                string += " " + str(self.wed[i][j])
            string += '\n'
        string += '\n'
        # dweight matrix: most recent weight changes
        string += "dweight: from '" + self.fromLayer.name + "' to '" + self.toLayer.name + "'\n"
        string += " "
        for j in range(self.toLayer.size):
            string += " " + self.toLayer.name + "[" + str(j) + "]"
        string += '\n'
        for i in range(self.fromLayer.size):
            string += self.fromLayer.name + "[" + str(i) + "]" + ": "
            for j in range(self.toLayer.size):
                string += " " + str(self.dweight[i][j])
            string += '\n'
        string += '\n'
    if self.toLayer._verbosity > 2:
        # current weight matrix
        string += "Weights: from '" + self.fromLayer.name + "' to '" + self.toLayer.name + "'\n"
        string += " "
        for j in range(self.toLayer.size):
            string += " " + self.toLayer.name + "[" + str(j) + "]"
        string += '\n'
        for i in range(self.fromLayer.size):
            string += self.fromLayer.name + "[" + str(i) + "]" + ": "
            for j in range(self.toLayer.size):
                string += " " + str(self.weight[i][j])
            string += '\n'
        string += '\n'
    return string
def _populate_worksheet(self, workbook, worksheet):
    """Write chart data contents to *worksheet* in the bubble chart layout.

    Write the data for each series to a separate three-column table with
    X values in column A, Y values in column B, and bubble sizes in
    column C. Place the series label in the first (heading) cell of the
    values column.
    """
    # Chart-wide number format, applied to X values and bubble sizes.
    chart_num_format = workbook.add_format(
        {'num_format': self._chart_data.number_format})
    for series in self._chart_data:
        # Per-series number format, applied to the Y values only.
        series_num_format = (
            workbook.add_format({'num_format': series.number_format}))
        offset = self.series_table_row_offset(series)
        # write X values
        worksheet.write_column(offset + 1, 0, series.x_values, chart_num_format)
        # write Y values (series name goes in the heading cell)
        worksheet.write(offset, 1, series.name)
        worksheet.write_column(offset + 1, 1, series.y_values, series_num_format)
        # write bubble sizes
        worksheet.write(offset, 2, 'Size')
        worksheet.write_column(offset + 1, 2, series.bubble_sizes, chart_num_format)
def scan_forever(queue, *args, **kwargs):
    """Return an infinite iterator over an fsq queue that blocks waiting for
    the queue trigger.

    Wraps fsq.scan() and blocks for more work. Takes all the same
    parameters as scan(), plus ``process_once_now``, a boolean determining
    whether an initial scan() runs before listening to the trigger
    (default True).

    Bug fix: ``process_once_now`` must be *popped* from kwargs -- with
    ``kwargs.get`` it remained in kwargs and was forwarded to scan(),
    which does not accept it.
    """
    process_once_now = kwargs.pop('process_once_now', True)
    if process_once_now:
        for work in scan(queue, *args, **kwargs):
            yield work
    while True:
        # Block until the queue's trigger fifo signals that work arrived.
        with open(fsq_path.trigger(queue), 'rb') as t:
            t.read(1)
        for work in scan(queue, *args, **kwargs):
            yield work
def set_textcolor(self, color):
    """Set the color used for labels and axis text, then redraw labels and
    notify the theme callback (if one is registered)."""
    self.textcolor = color
    self.relabel()
    callback = self.theme_color_callback
    if callable(callback):
        callback(color, 'text')
def shift(self, *args, **kwargs):
    """shift(hours, minutes, seconds, milliseconds, ratio)

    Shift the `start` and `end` attributes of every item in the file,
    either by adding a time offset or by applying a `ratio` (int or
    float). All time arguments default to 0.

    Example, delay all subs by 2.5 seconds:

        >>> subs.shift(seconds=2, milliseconds=500)

    Example, convert subtitles from 23.9 fps to 25 fps:

        >>> subs.shift(ratio=25/23.9)
    """
    for subtitle in self:
        subtitle.shift(*args, **kwargs)
def generate_observation_from_state(self, state_index):
    """Generate a single synthetic observation from a given state.

    Parameters
    ----------
    state_index : int
        Index of the state from which observations are to be generated.

    Returns
    -------
    observation : int
        A single observation symbol drawn from the state's output
        distribution.

    Bug fix: the sampled value was previously discarded (no return
    statement), so the function always returned None.
    """
    # NOTE: building a new rv_discrete generator per call is inefficient;
    # kept for parity with the surrounding API.
    import scipy.stats
    probabilities = self._output_probabilities[state_index]
    gen = scipy.stats.rv_discrete(
        values=(range(len(probabilities)), probabilities))
    return gen.rvs(size=1)[0]
def transformToNative(obj):
    """Turn obj.value into a list of fields; idempotent for already-native
    objects."""
    if not obj.isNative:
        obj.isNative = True
        obj.value = splitFields(obj.value)
    return obj
def performAction(self, action):
    """Perform an action on the world that changes its internal state.

    ``action`` is a sequence of generator set-points, one per online
    non-reference generator; the reference (slack) generator's output is
    determined by the subsequent power-flow solve.
    """
    # All online generators except the one at the reference (slack) bus.
    gs = [g for g in self.case.online_generators if g.bus.type != REFERENCE]
    assert len(action) == len(gs)
    logger.info("Action: %s" % list(action))
    # Set the output of each (non-reference) generator.
    for i, g in enumerate(gs):
        g.p = action[i]
    # Compute power flows and slack generator set-point.
    NewtonPF(self.case, verbose=False).solve()
    # FastDecoupledPF(self.case, verbose=False).solve()
    # Store all generator set-points (only used for plotting).
    self._Pg[:, self._step] = [g.p for g in self.case.online_generators]
    # Apply the next load profile value to the original demand at each bus.
    if self._step != len(self.profile) - 1:
        pq_buses = [b for b in self.case.buses if b.type == PQ]
        for i, b in enumerate(pq_buses):
            b.p_demand = self._Pd0[i] * self.profile[self._step + 1]
    self._step += 1
    logger.info("Entering step %d." % self._step)
def generate_image_commands():
    '''Build and return the Client.image command group.

    Holds the Singularity image command group: mainly deprecated commands
    (image.import) plus helpers not provided by Singularity itself.
    Verbosity (debug/quiet) is passed from the main client via the
    MESSAGELEVEL environment variable.

    Exposed under Client.image in main/__init__.py as subcommands:
    Client.image.export, Client.image.imprt, Client.image.decompress,
    Client.image.create
    '''
    class ImageClient(object):
        # Subcommand group name exposed on the main client.
        group = "image"

    from spython.main.base.logger import println
    from spython.main.base.command import (init_command, run_command)
    from .utils import (compress, decompress)
    from .create import create
    from .importcmd import importcmd
    from .export import export

    # Attach the imported helpers as methods of the command-group class.
    ImageClient.create = create
    ImageClient.imprt = importcmd
    ImageClient.export = export
    ImageClient.decompress = decompress
    ImageClient.compress = compress
    ImageClient.println = println
    ImageClient.init_command = init_command
    ImageClient.run_command = run_command

    cli = ImageClient()
    return cli
def create_bwa_index_from_fasta_file(fasta_in, params=None):
    """Create a BWA index from an input fasta file.

    fasta_in: the input fasta file from which to create the index
    params: dict of bwa index specific parameters

    Returns a dictionary keyed by output suffix (.amb, .ann, .bwt, .pac,
    .sa) whose values are open file objects. The index prefix will be the
    same as fasta_in unless the -p parameter is passed in params.
    """
    params = {} if params is None else params
    # Instantiate the app controller and run it on the fasta file.
    index = BWA_index(params)
    return index({'fasta_in': fasta_in})
def modify(self, api_action, sgid, other, proto_spec):
    """Make a change to a security group.

    api_action is an EC2 API name. ``other`` is one of:
      - a group (sg-nnnnn)
      - a group with account (<user id>/sg-nnnnn)
      - a CIDR block (n.n.n.n/n)
    ``proto_spec`` is a triplet (<proto>, low_port, high_port).
    """
    proto, from_port, to_port = proto_spec
    perm = {
        'IpProtocol': proto,
        'FromPort': from_port or 0,
        'ToPort': to_port or from_port or 65535,
    }
    if other.startswith("sg-"):
        perm['UserIdGroupPairs'] = [{'GroupId': other}]
    elif "/sg-" in other:
        account, group_id = other.split("/", 1)
        perm['UserIdGroupPairs'] = [{
            'UserId': account,
            'GroupId': group_id,
        }]
    else:
        perm['IpRanges'] = [{'CidrIp': other}]
    params = {'group_id': sgid, 'ip_permissions': [perm]}
    return self.call(api_action, **params)
def rewrite_tg(env, tg_name, code):
    """Re-write a transform-generating pipe specification by extracting the
    transform-generating call and replacing it with the generated transform.

    So ``tgen(a,b,c).foo.bar`` becomes::

        tg = tgen(a,b,c)
        tg.foo.bar
    """
    visitor = ReplaceTG(env, tg_name)
    assert visitor.tg_name
    tree = visitor.visit(ast.parse(code))
    loc = ' #' + visitor.loc if visitor.loc else file_loc()
    # If the visitor never matched a call node there is no generator source.
    if visitor.trans_gen:
        tg = meta.dump_python_source(visitor.trans_gen).strip()
    else:
        tg = None
    return meta.dump_python_source(tree).strip(), tg, loc
def _send_update_port_statuses ( self , port_ids , status ) : """Sends update notifications to set the operational status of the list of router ports provided . To make each notification doesn ' t exceed the RPC length , each message contains a maximum of MAX _ PORTS _ IN _ BATCH port ids . : param port _ ids : List of ports to update the status : param status : operational status to update ( ex : bc . constants . PORT _ STATUS _ ACTIVE )"""
if not port_ids : return MAX_PORTS_IN_BATCH = 50 list_chunks_ports = [ port_ids [ i : i + MAX_PORTS_IN_BATCH ] for i in six . moves . range ( 0 , len ( port_ids ) , MAX_PORTS_IN_BATCH ) ] for chunk_ports in list_chunks_ports : self . plugin_rpc . send_update_port_statuses ( self . context , chunk_ports , status )
def atlas_zonefile_path(zonefile_dir, zonefile_hash):
    """Calculate the on-disk path for storing a zonefile, given its hash.

    If the zonefile hash is abcdef1234567890, the path will be
    $zonefile_dir/ab/cd/abcdef1234567890.txt

    Returns the path.

    Bug fix: `xrange` is Python-2-only; `range` behaves identically here
    on both Python 2 and 3.
    """
    # Split the first few hash characters into directories, but not too many:
    # at most the first 4 chars -> at most two directory levels.
    zonefile_dir_parts = []
    interval = 2
    for i in range(0, min(len(zonefile_hash), 4), interval):
        zonefile_dir_parts.append(zonefile_hash[i:i + interval])
    zonefile_path = os.path.join(
        zonefile_dir, '/'.join(zonefile_dir_parts),
        '{}.txt'.format(zonefile_hash))
    return zonefile_path
def send_email_template(slug, base_url=None, context=None, user=None,
                        to=None, cc=None, bcc=None, attachments=None,
                        headers=None, connection=None, fail_silently=False):
    """Shortcut: build an email from the template identified by *slug* and
    send it, returning the number of messages sent."""
    template = EmailTemplate.objects.get_for_slug(slug)
    message = template.get_email_message(
        base_url, context, user,
        to=to, cc=cc, bcc=bcc,
        attachments=attachments, headers=headers, connection=connection)
    return message.send(fail_silently=fail_silently)
def save_schedule(self):
    '''Save the current schedule to disk, then fire a completion event so
    listeners know the schedule was written.'''
    self.persist()
    event_bus = salt.utils.event.get_event('minion', opts=self.opts, listen=False)
    event_bus.fire_event({'complete': True},
                         tag='/salt/minion/minion_schedule_saved')
def _get_websocket ( self , reuse = True ) : """Reuse existing connection or create a new connection ."""
# Check if still connected if self . ws and reuse : if self . ws . connected : return self . ws logging . debug ( "Stale connection, reconnecting." ) self . ws = self . _create_connection ( ) return self . ws
def get_provides_by_kind(self, kind):
    """Return the list of provides whose kind equals *kind*."""
    return [prov for prov in self.provides if prov.kind == kind]
def count(self, data):
    """Compute histogram of data.

    Counts the number of elements from array ``data`` in each bin of the
    histogram. Returns an array ``h`` of length ``nbin + 2`` where
    ``h[0]`` counts elements below the histogram range, ``h[-1]`` counts
    elements above it, and ``h[i]`` is the count in the ``i``-th bin for
    ``i = 1 ... nbin``. A float ``data`` is treated as ``[data]``.

    Bin intervals are closed on the left and open on the right, except
    for the last interval which is closed on both ends.
    """
    if isinstance(data, float) or isinstance(data, int):
        # Scalar case: produce a one-hot histogram.
        hist = numpy.zeros(self.nbin + 2, float)
        if data > self.bins[-1]:
            hist[-1] = 1.
        elif data < self.bins[0]:
            hist[0] = 1.
        elif data == self.bins[-1]:
            # Last interval is closed on both ends, so the right edge
            # belongs to the last bin (hist[-2]).
            if self.nbin > 1:
                hist[-2] = 1.
        else:
            # side='right' maps a value equal to a left bin edge into
            # that bin (left-closed intervals).
            hist[numpy.searchsorted(self.bins, data, side='right')] = 1.
        return hist
    if numpy.ndim(data) != 1:
        data = numpy.reshape(data, -1)
    else:
        data = numpy.asarray(data)
    # numpy.histogram handles the in-range bins (with the same edge
    # conventions); below/above counts are computed explicitly.
    middle = numpy.histogram(data, self.bins)[0]
    below = numpy.sum(data < self.bins[0])
    above = numpy.sum(data > self.bins[-1])
    return numpy.array([below] + middle.tolist() + [above], float)
def find_place(client, input, input_type, fields=None, location_bias=None,
               language=None):
    """Look up a single place from free-form text via a Find Place request.

    The text input can be any kind of Places data: a name, an address, or
    a phone number.

    :param input: The text input specifying which place to search for.
    :type input: string

    :param input_type: Either 'textquery' or 'phonenumber'.
    :type input_type: string

    :param fields: Place-data field names to return. For full details see:
        https://developers.google.com/places/web-service/search#FindPlaceRequests
    :type fields: list

    :param location_bias: Prefer results in a specified area ('ipbias',
        'point', 'circle' or 'rectangle' spec). Same URL as above.
    :type location_bias: string

    :param language: The language in which to return results.
    :type language: string

    :rtype: result dict with keys ``status`` and ``candidates``
    """
    if input_type not in ("textquery", "phonenumber"):
        raise ValueError("Valid values for the `input_type` param for "
                         "`find_place` are 'textquery' or 'phonenumber', "
                         "the given value is invalid: '%s'" % input_type)

    params = {"input": input, "inputtype": input_type}

    if fields:
        invalid_fields = set(fields) - PLACES_FIND_FIELDS
        if invalid_fields:
            raise ValueError("Valid values for the `fields` param for "
                             "`find_place` are '%s', these given field(s) "
                             "are invalid: '%s'"
                             % ("', '".join(PLACES_FIND_FIELDS),
                                "', '".join(invalid_fields)))
        params["fields"] = convert.join_list(",", fields)

    if location_bias:
        valid = ["ipbias", "point", "circle", "rectangle"]
        if location_bias.split(":")[0] not in valid:
            raise ValueError("location_bias should be prefixed with one of: %s"
                             % valid)
        params["locationbias"] = location_bias

    if language:
        params["language"] = language

    return client._request("/maps/api/place/findplacefromtext/json", params)
def transitions(self):
    """Dense [k-1]x4 transition frequency matrix.

    Rows of ``self.array`` are normalized to sum to 1; the result is
    cached in ``self._transitions``.

    Bug fix: the ``np.float`` alias was removed in NumPy 1.24; the
    builtin ``float`` is the identical dtype.
    """
    if self._transitions is not None:
        return self._transitions
    transitions = self.array.astype(float)
    transitions /= transitions.sum(1)[:, np.newaxis]
    self._transitions = transitions
    return transitions
def initialized(self, value):
    """Setter for the **self.__initialized** attribute; emits the matching
    component (un)initialized signal.

    :param value: Attribute value.
    :type value: bool
    """
    if value is not None:
        assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("initialized", value)
        if value:
            self.component_initialized.emit()
        else:
            self.component_uninitialized.emit()
    self.__initialized = value
def build_valid_keywords_grammar(keywords=None):
    """Update parser grammar to add a list of allowed keywords.

    Bug fix: ``re.I`` was being passed as a second argument to
    ``str.format`` (where it was silently ignored) instead of as the
    ``flags`` argument of ``re.compile``, so keyword matching was never
    case-insensitive. It is now passed to ``re.compile``.
    """
    from invenio_query_parser.parser import KeywordQuery, KeywordRule, NotKeywordValue, SimpleQuery, ValueQuery
    if keywords:
        KeywordRule.grammar = attr(
            'value',
            re.compile(r"(\d\d\d\w{{0,3}}|{0})\b".format("|".join(keywords)),
                       re.I))
        NotKeywordValue.grammar = attr(
            'value',
            re.compile(r'\b(?!\d\d\d\w{{0,3}}|{0}:)\S+\b:'.format(
                ":|".join(keywords))))
        SimpleQuery.grammar = attr('op', [NotKeywordValue, KeywordQuery, ValueQuery])
    else:
        KeywordRule.grammar = attr('value', re.compile(r"[\w\d]+(\.[\w\d]+)*"))
        SimpleQuery.grammar = attr('op', [KeywordQuery, ValueQuery])
def delete_table(self, tablename):
    """Delete the table named *tablename* from the database.

    Bug fix: on Python 3, ``filter(...)`` returns a lazy iterator, so
    ``self.tables`` stopped behaving like a list (single-pass iteration,
    no ``len()``). Materialize the result as a list instead.
    """
    self.tables = [table for table in self.tables if table.name != tablename]
def screen_to_latlon(self, x, y):
    """Return the latitude and longitude at the screen point (x, y).

    :param x: screen x
    :param y: screen y
    :return: latitude and longitude at x, y
    """
    tile_x = self.xtile + float(x) / TILE_SIZE
    tile_y = self.ytile + float(y) / TILE_SIZE
    return self.num2deg(tile_x, tile_y, self.zoom)
def _bfd_rx(self, **kwargs):
    """Return the BFD minimum receive interval XML.

    You should not use this method. You probably want `BGP.bfd`.

    Args:
        min_rx (str): BFD receive interval in milliseconds (300, 500, etc)
        delete (bool): Remove the configuration if ``True``.

    Returns:
        XML to be passed to the switch.

    Raises:
        None

    Bug fix: ``delete`` is documented as optional but was read with
    ``kwargs['delete']``, which raised KeyError when absent; ``.get()``
    makes it genuinely optional. A dead ``pass`` was also removed.
    """
    method_name = ('rbridge_id_router_router_bgp_router_bgp_attributes_'
                   'bfd_interval_min_rx')
    bfd_rx = getattr(self._rbridge, method_name)
    config = bfd_rx(**kwargs)
    if kwargs.get('delete'):
        tag = 'min-rx'
        config.find('.//*%s' % tag).set('operation', 'delete')
    return config
def output_domain(gandi, domain, output_keys, justify=12):
    """Helper to output a domain's information."""
    # Flatten list-valued fields into display strings first.
    for key in ('nameservers', 'services', 'tags'):
        if key in domain:
            domain[key] = format_list(domain[key])
    output_generic(gandi, domain, output_keys, justify)
    if 'created' in output_keys:
        output_line(gandi, 'created', domain['date_created'], justify)
    if 'expires' in output_keys:
        date_end = domain.get('date_registry_end')
        if date_end:
            days_left = (date_end - datetime.now()).days
            output_line(gandi, 'expires',
                        '%s (in %d days)' % (date_end, days_left), justify)
    if 'updated' in output_keys:
        output_line(gandi, 'updated', domain['date_updated'], justify)
def set_in_bounds(self, obj, val):
    """Set to the given value, cropped to the legal bounds when it is not
    callable. All objects are accepted; no exceptions are raised. See
    crop_to_bounds for how cropping is done."""
    value = val if callable(val) else self.crop_to_bounds(val)
    super(Number, self).__set__(obj, value)
def _to_dict ( self ) : """Return a json dictionary representing this model ."""
_dict = { } if hasattr ( self , 'class_name' ) and self . class_name is not None : _dict [ 'class' ] = self . class_name return _dict
def _ensure_click(self):
    """Click this element, retrying until the browser accepts the click.

    Chromedriver needs time to scroll an element into view before it
    becomes clickable; waiting for the element to be 'clickable' is not
    reliable. We scroll the element to the middle of the viewport (which
    also moves it out from under banners/toolbars) and then retry the
    click a bounded number of times.
    """
    # Center the element vertically in the viewport. This handles both
    # failure modes: element outside the viewport, or under a fixed banner.
    script = (
        "var viewPortHeight = Math.max("
        "document.documentElement.clientHeight, window.innerHeight || 0);"
        "var elementTop = arguments[0].getBoundingClientRect().top;"
        "window.scrollBy(0, elementTop-(viewPortHeight/2));"
    )
    self.parent.execute_script(script, self)  # parent = the webdriver
    attempts = 10
    while attempts > 0:
        try:
            self.click()
            return
        except WebDriverException as e:
            exception_message = str(e)
            time.sleep(0.2)  # give chrome time to finish scrolling
            attempts -= 1
    raise WebDriverException(
        "Couldn't click item after trying 10 times, "
        "got error message: \n{}".format(exception_message))
def load_data(self, filename, transpose=True, save_sparse_file='h5ad',
              sep=',', **kwargs):
    """Load the specified expression data file into ``self.adata``.

    The file can be a table of read counts (i.e. '.csv' or '.txt'), with
    genes as rows and cells as columns by default. The file can also be a
    pickle file (output from 'save_sparse_data') or an h5ad file (output
    from 'save_anndata').

    Parameters
    ----------
    filename - string
        The path to the tabular raw expression counts file.

    sep - string, optional, default ','
        The delimiter used to read the input data table. By default
        assumes the input table is delimited by commas.

    save_sparse_file - str, optional, default 'h5ad'
        If 'h5ad', writes the 'adata_raw' object to an h5ad file (the
        native AnnData file format) in the same folder as the original
        data for faster loading in the future. If 'p', pickles the sparse
        data structure, cell names, and gene names instead.

    transpose - bool, optional, default True
        By default, assumes file is (genes x cells). Set this to False if
        the file has dimensions (cells x genes).
    """
    # Dispatch on file extension: 'p' (pickle), 'h5ad', or any tabular text.
    if filename.split('.')[-1] == 'p':
        # Pickle produced by save_sparse_data: (matrix, cell names, gene names).
        raw_data, all_cell_names, all_gene_names = (
            pickle.load(open(filename, 'rb')))
        if (transpose):
            raw_data = raw_data.T
        if raw_data.getformat() == 'csc':
            # Downstream code expects row-oriented (CSR) storage.
            print("Converting sparse matrix to csr format...")
            raw_data = raw_data.tocsr()
        # Already loaded from a fast format; don't re-save below.
        save_sparse_file = None
    elif filename.split('.')[-1] != 'h5ad':
        # Tabular text file; first column holds the index labels.
        df = pd.read_csv(filename, sep=sep, index_col=0)
        if (transpose):
            dataset = df.T
        else:
            dataset = df
        raw_data = sp.csr_matrix(dataset.values)
        all_cell_names = np.array(list(dataset.index.values))
        all_gene_names = np.array(list(dataset.columns.values))
    if filename.split('.')[-1] != 'h5ad':
        # Wrap the loaded matrix in an AnnData object (cells x genes).
        self.adata_raw = AnnData(X=raw_data,
                                 obs={'obs_names': all_cell_names},
                                 var={'var_names': all_gene_names})
        # AnnData requires unique obs/var names; deduplicate if needed.
        if (np.unique(all_gene_names).size != all_gene_names.size):
            self.adata_raw.var_names_make_unique()
        if (np.unique(all_cell_names).size != all_cell_names.size):
            self.adata_raw.obs_names_make_unique()
        self.adata = self.adata_raw.copy()
        # 'X_disp' layer keeps the raw values for display purposes.
        self.adata.layers['X_disp'] = raw_data
    else:
        # Native AnnData format; kwargs are forwarded to read_h5ad.
        self.adata_raw = anndata.read_h5ad(filename, **kwargs)
        self.adata = self.adata_raw.copy()
        if 'X_disp' not in list(self.adata.layers.keys()):
            self.adata.layers['X_disp'] = self.adata.X
        save_sparse_file = None
    # Optionally cache a fast-loading copy next to the original file.
    if (save_sparse_file == 'p'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_sparse_data(path + new_sparse_file + '_sparse.p')
    elif (save_sparse_file == 'h5ad'):
        new_sparse_file = '.'.join(filename.split('/')[-1].split('.')[:-1])
        path = filename[:filename.find(filename.split('/')[-1])]
        self.save_anndata(path + new_sparse_file + '_SAM.h5ad')
def splitPolygon ( self , polygon , coplanarFront , coplanarBack , front , back ) : """Split ` polygon ` by this plane if needed , then put the polygon or polygon fragments in the appropriate lists . Coplanar polygons go into either ` coplanarFront ` or ` coplanarBack ` depending on their orientation with respect to this plane . Polygons in front or in back of this plane go into either ` front ` or ` back `"""
COPLANAR = 0 # all the vertices are within EPSILON distance from plane FRONT = 1 # all the vertices are in front of the plane BACK = 2 # all the vertices are at the back of the plane SPANNING = 3 # some vertices are in front , some in the back # Classify each point as well as the entire polygon into one of the above # four classes . polygonType = 0 vertexLocs = [ ] numVertices = len ( polygon . vertices ) for i in range ( numVertices ) : t = self . normal . dot ( polygon . vertices [ i ] . pos ) - self . w loc = - 1 if t < - Plane . EPSILON : loc = BACK elif t > Plane . EPSILON : loc = FRONT else : loc = COPLANAR polygonType |= loc vertexLocs . append ( loc ) # Put the polygon in the correct list , splitting it when necessary . if polygonType == COPLANAR : normalDotPlaneNormal = self . normal . dot ( polygon . plane . normal ) if normalDotPlaneNormal > 0 : coplanarFront . append ( polygon ) else : coplanarBack . append ( polygon ) elif polygonType == FRONT : front . append ( polygon ) elif polygonType == BACK : back . append ( polygon ) elif polygonType == SPANNING : f = [ ] b = [ ] for i in range ( numVertices ) : j = ( i + 1 ) % numVertices ti = vertexLocs [ i ] tj = vertexLocs [ j ] vi = polygon . vertices [ i ] vj = polygon . vertices [ j ] if ti != BACK : f . append ( vi ) if ti != FRONT : if ti != BACK : b . append ( vi . clone ( ) ) else : b . append ( vi ) if ( ti | tj ) == SPANNING : # interpolation weight at the intersection point t = ( self . w - self . normal . dot ( vi . pos ) ) / self . normal . dot ( vj . pos . minus ( vi . pos ) ) # intersection point on the plane v = vi . interpolate ( vj , t ) f . append ( v ) b . append ( v . clone ( ) ) if len ( f ) >= 3 : front . append ( Polygon ( f , polygon . shared ) ) if len ( b ) >= 3 : back . append ( Polygon ( b , polygon . shared ) )
def list_snapshots(config='root'):
    '''
    List available snapshots

    CLI example:

    .. code-block:: bash

        salt '*' snapper.list_snapshots config=myconfig
    '''
    # Local names are kept stable here because locals() is handed to the
    # dbus reason helper in the error path.
    try:
        snapshots = snapper.ListSnapshots(config)
        return list(map(_snapshot_to_data, snapshots))
    except dbus.DBusException as exc:
        raise CommandExecutionError(
            'Error encountered while listing snapshots: {0}'.format(
                _dbus_exception_to_reason(exc, locals())))
def get_field_type(info):
    """A field python type"""
    # Resolve the field class for this type, instantiate it, and let it
    # configure itself before reading the resulting python type.
    field_type = info.get_type()
    field_cls = get_field_class(field_type)
    field_obj = field_cls(info, field_type, None)
    field_obj.setup()
    return field_obj.py_type
def run(self):
    """The main entry point, performs the appropriate action for the
    given arguments.
    """
    self._unlock_keychain()
    item = self.keychain.item(
        self.arguments.item,
        fuzzy_threshold=self._fuzzy_threshold(),
    )
    # Guard clause: bail out with a data error when no item matches.
    if item is None:
        self.stderr.write(
            "1pass: Could not find an item named '%s'\n" % (
                self.arguments.item,
            ))
        sys.exit(os.EX_DATAERR)
    self.stdout.write("%s\n" % item.password)
def _check_parameters ( self ) : """Internal function to verify the basic parameters of the SOM ."""
if self . _map_type != "planar" and self . _map_type != "toroid" : raise Exception ( "Invalid parameter for _map_type: " + self . _map_type ) if self . _grid_type != "rectangular" and self . _grid_type != "hexagonal" : raise Exception ( "Invalid parameter for _grid_type: " + self . _grid_type ) if self . _neighborhood != "gaussian" and self . _neighborhood != "bubble" : raise Exception ( "Invalid parameter for neighborhood: " + self . _neighborhood ) if self . _kernel_type != 0 and self . _kernel_type != 1 : raise Exception ( "Invalid parameter for kernelTye: " + self . _kernel_type ) if self . _verbose < 0 and self . _verbose > 2 : raise Exception ( "Invalid parameter for verbose: " + self . _kernel_type )
def estimate_descendents(self):
    """Estimate from the descendent (child) tasks.

    :returns: deferred that when fired returns a datetime object for the
              estimated, or actual datetime, or None if there is no
              support for this task method. Currently the only supported
              methods here are "build" and "image".
    :raises NoDescendentsError:
              If we expected to find descendents for this task, but there
              are none open. Possible explanations:

              * Koji has not assigned this task to a worker, because it's
                over capacity, or because it takes a few seconds to
                assign. You may see descendant tasks in FREE state here,
                instead of OPEN state.
              * The makeSRPMFromSCM descendent task for this build task
                is not yet complete.
              * The tagBuild descendent task for this build task is not
                yet complete.

              If you hit this NoDescendentsError, you may want to try
              again in a few minutes.
    """
    # Map the parent task method to the child method that carries the
    # actual work (and hence the meaningful duration).
    child_method = None
    if self.method == 'build':
        child_method = 'buildArch'
    if self.method == 'image':
        child_method = 'createImage'
    if child_method is None:
        # Unsupported task method; this is an inlineCallbacks-style
        # generator, so returnValue() raises to terminate it.
        defer.returnValue(None)
    # Find the open child task and estimate that.
    subtasks = yield self.descendents(method=child_method,
                                      state=task_states.OPEN)
    if not subtasks:
        raise NoDescendentsError('no running %s for task %d'
                                 % (child_method, self.id))
    # Find subtask with the most recent start time:
    build_task = subtasks[0]
    for subtask in subtasks:
        if subtask.start_ts > build_task.start_ts:
            build_task = subtask
    subtask_completion = yield build_task.estimate_completion()
    defer.returnValue(subtask_completion)
def load_secret(self, secret):
    """Ask YubiHSM to load a pre-existing YubiKey secret.

    The data is stored internally in the YubiHSM in temporary memory -
    this operation would typically be followed by one or more
    L{generate_aead} commands to actually retrieve the generated secret
    (in encrypted form).

    @param secret: YubiKey secret to load
    @type secret: L{pyhsm.aead_cmd.YHSM_YubiKeySecret} or string

    @returns: Number of bytes in YubiHSM internal buffer after load
    @rtype: integer

    @see: L{pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load}
    """
    # Serialize a secret object into its wire format; raw strings pass
    # straight through.
    if isinstance(secret, pyhsm.aead_cmd.YHSM_YubiKeySecret):
        secret = secret.pack()
    cmd = pyhsm.buffer_cmd.YHSM_Cmd_Buffer_Load(self.stick, secret)
    return cmd.execute()
def erase_line(self):  # <ESC>[2K
    '''Erases the entire current line.'''
    # Fill the whole width of the current row (columns 1..cols).
    row = self.cur_r
    self.fill_region(row, 1, row, self.cols)
def parse(self, tokens):
    """Sequence(a) -> b

    Applies the parser to a sequence of tokens producing a parsing
    result. It provides a way to invoke a parser hiding details related
    to the parser state. Also it makes error messages more readable by
    specifying the position of the rightmost token that has been reached.
    """
    try:
        (tree, _) = self.run(tokens, State())
        return tree
    # BUG FIX: 'except NoParseError, e:' is Python-2-only syntax (a
    # SyntaxError on Python 3); 'as' works on Python 2.6+ and 3.
    except NoParseError as e:
        # Renamed from 'max' to avoid shadowing the builtin.
        max_pos = e.state.max
        if len(tokens) > max_pos:
            tok = tokens[max_pos]
        else:
            # The rightmost reached position is past the input's end.
            tok = u'<EOF>'
        raise NoParseError(u'%s: %s' % (e.msg, tok), e.state)
def build_fasttext_cc_embedding_obj(embedding_type):
    """FastText pre-trained word vectors for 157 languages, with 300
    dimensions, trained on Common Crawl and Wikipedia. Released in 2018,
    it succeeded the 2017 FastText Wikipedia embeddings. It's recommended
    to use the same tokenizer for your data that was used to construct
    the embeddings. This information and more can be found on their
    website: https://fasttext.cc/docs/en/crawl-vectors.html.

    Args:
        embedding_type: A string in the format `fasttext.cc.$LANG_CODE`.
            e.g. `fasttext.cc.de` or `fasttext.cc.es`

    Returns:
        Object with the URL and filename used later on for downloading
        the file.
    """
    # The language code is the third dot-separated component.
    parts = embedding_type.split('.')
    lang = parts[2]
    return {
        'file': 'cc.{}.300.vec.gz'.format(lang),
        'url': 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{}.300.vec.gz'.format(lang),
        'extract': False,
    }
def hide_routemap_holder_route_map_content_set_tag_tag_rms(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    holder = ET.SubElement(config, "hide-routemap-holder",
                           xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(holder, "route-map")
    # Key leaves identifying the target route-map instance.
    for element_tag, kwarg_name in (("name", "name"),
                                    ("action-rm", "action_rm"),
                                    ("instance", "instance")):
        ET.SubElement(route_map, element_tag).text = kwargs.pop(kwarg_name)
    content = ET.SubElement(route_map, "content")
    set_el = ET.SubElement(content, "set")  # 'set_el': avoid shadowing builtin
    tag_el = ET.SubElement(set_el, "tag")
    tag_rms = ET.SubElement(tag_el, "tag-rms")
    tag_rms.text = kwargs.pop('tag_rms')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _live_receivers(self, sender):
    """Filter sequence of receivers to get resolved, live receivers.

    This checks for weak references and resolves them, then returning
    only live receivers.
    """
    receivers = None
    # Fast path: serve from the per-sender cache, but only when no dead
    # weakrefs are pending cleanup (otherwise the cache may be stale).
    if self.use_caching and not self._dead_receivers:
        receivers = self.sender_receivers_cache.get(sender)
        # We could end up here with NO_RECEIVERS even if we do check this case in
        # .send() prior to calling _live_receivers() due to concurrent .send() call.
        if receivers is NO_RECEIVERS:
            return []
    if receivers is None:
        # Slow path: rebuild the receiver list under the lock so cleanup
        # and cache population are consistent with concurrent callers.
        with self.lock:
            self._clear_dead_receivers()
            senderkey = _make_id(sender)
            receivers = []
            # Keep receivers registered for this sender or for any sender
            # (NONE_ID).
            for (receiverkey, r_senderkey), receiver in self.receivers:
                if r_senderkey == NONE_ID or r_senderkey == senderkey:
                    receivers.append(receiver)
            if self.use_caching:
                if not receivers:
                    # Sentinel marks "no receivers" to avoid rebuilding.
                    self.sender_receivers_cache[sender] = NO_RECEIVERS
                else:
                    # Note, we must cache the weakref versions.
                    self.sender_receivers_cache[sender] = receivers
    # Resolve weak references now, dropping any that have died.
    non_weak_receivers = []
    for receiver in receivers:
        if isinstance(receiver, weakref.ReferenceType):
            # Dereference the weak reference.
            receiver = receiver()
            if receiver is not None:
                non_weak_receivers.append(receiver)
        else:
            non_weak_receivers.append(receiver)
    return non_weak_receivers
def validate_functions(ast: BELAst, bo):
    """Recursively validate function signatures

    Determine if function matches one of the available signatures. Also,

    1. Add entity types to AST NSArg, e.g. Abundance, ...
    2. Add optional to AST Arg (optional means it is not a fixed, required
       argument and needs to be sorted for canonicalization, e.g.
       reactants(A, B, C))

    Args:
        bo: bel object

    Returns:
        bel object
    """
    if isinstance(ast, Function):
        log.debug(f"Validating: {ast.name}, {ast.function_type}, {ast.args}")
        function_name = ast.name
        signatures = bo.spec["functions"]["signatures"][function_name]["signatures"]
        valid_function, messages = check_function_args(
            ast.args, signatures, function_name)
        if not valid_function:
            # Collapse all signature complaints into one validation message.
            message = ", ".join(messages)
            bo.validation_messages.append(
                (
                    "ERROR",
                    "Invalid BEL Statement function {} - problem with function signatures: {}".format(
                        ast.to_string(), message
                    ),
                )
            )
            bo.parse_valid = False
    # Recursively process every NSArg by processing BELAst and Functions
    if hasattr(ast, "args"):
        for child in ast.args:
            validate_functions(child, bo)
    return bo