signature
stringlengths
29
44.1k
implementation
stringlengths
0
85.2k
def set_min_string_length(self, length=None):
    """Set the minimum string length for this form field.

    :param length: new minimum length, validated against the field metadata
    :raises NoAccess: if the metadata is read-only
    :raises InvalidArgument: if ``length`` is not a valid cardinal for the
        metadata, or is not strictly less than the current maximum length
    """
    if self.get_min_string_length_metadata().is_read_only():
        raise NoAccess()
    if not self.my_osid_object_form._is_valid_cardinal(
            length, self.get_min_string_length_metadata()):
        raise InvalidArgument()
    maximum = self.my_osid_object_form.max_string_length
    # minimum must stay strictly below the configured maximum
    if maximum is not None and length > maximum - 1:
        raise InvalidArgument()
    self.my_osid_object_form._my_map['minStringLength'] = length
    self._min_string_length = length
def get_keys_for(self, value):
    """Return the keys indexed under ``value``.

    A callable ``value`` acts as a factory: it is invoked with this
    instance and its return value is returned directly.

    :param value: the value (or callable) to look up
    :return: a copy of the list of keys for the given value
    :rtype: list(str)
    """
    if callable(value):
        return value(self)
    bucket = self._index[self.get_hash_for(value)]
    # return a copy so callers cannot mutate the index
    return list(bucket)
def _draw_ap_score(self, score, label=None):
    """Draw the average-precision score annotation on the axes.

    :param score: AP score to annotate
    :param label: legend label; defaults to "Avg Precision=<score>"
    """
    if not label:
        label = "Avg Precision={:0.2f}".format(score)
    if self.ap_score:
        # horizontal dashed red line at the score height
        self.ax.axhline(y=score, color="r", ls="--", label=label)
async def connect(self):
    """Create the aiomysql connection pool asynchronously."""
    # any extra driver options ride along via connect_params
    self.pool = await aiomysql.create_pool(
        db=self.database,
        loop=self.loop,
        connect_timeout=self.timeout,
        **self.connect_params,
    )
def get(self, name, strict=True):
    """Read-only access to a named attribute of the holder.

    :param name: attribute name; must be a public (non-underscore) string
    :param strict: when True, ``name`` must be a declared attribute
    :raises AttributeError: for non-string/private or undeclared names
    :raises exceptions.AttributeNotProvided: for declared but unset names
    """
    if not isinstance(name, str) or name.startswith('_'):
        raise AttributeError(self.__class__.__name__, name)
    if strict and name not in self._possible_attributes:
        raise AttributeError('%s is not a valid attribute of %r.' % (name, self))
    if name in self._attributes:
        return self._attributes[name]
    raise exceptions.AttributeNotProvided(name)
def register(self, source_point_cloud, target_point_cloud, source_normal_cloud,
             target_normal_cloud, matcher, num_iterations=1,
             compute_total_cost=True, match_centroids=False, vis=False):
    """Iteratively register a source object to a target object.

    Parameters
    ----------
    source_point_cloud : autolab_core.PointCloud
        source object points
    target_point_cloud : autolab_core.PointCloud
        target object points
    source_normal_cloud : autolab_core.NormalCloud
        source object outward-pointing normals
    target_normal_cloud : autolab_core.NormalCloud
        target object outward-pointing normals
    matcher : PointToPlaneFeatureMatcher
        object used to match the point sets
    num_iterations : int
        number of iterations to run
    compute_total_cost : bool
        whether to compute the total cost upon termination
    match_centroids : bool
        whether to match the centroids of the point clouds

    Returns
    -------
    RegistrationResult
        source-to-target transformation and cost

    Notes
    -----
    Stub: not implemented in this build; returns None.
    """
    pass
def __getURL(self, page=1, start_date=None, final_date=None, order="asc"):
    """Build the GitHub search-users API URL.

    :param page: page number to request.
    :param start_date: start of the creation-date range (Y-m-d).
    :param final_date: end of the creation-date range (Y-m-d).
    :param order: sort order for the date-range query, 'asc' or 'desc'.
    :return: formatted URL.
    :rtype: str
    """
    base = (self.__server + "search/users?client_id=" + self.__githubID
            + "&client_secret=" + self.__githubSecret
            + "&order=desc&q=sort:joined+type:user"
            + self.__urlLocations + self.__urlFilters)
    if not start_date or not final_date:
        # no date range: always ascending by join date
        return base + "&sort=joined&order=asc&per_page=100&page=" + str(page)
    return (base + "+created:" + start_date + ".." + final_date
            + "&sort=joined&order=" + order + "&per_page=100&page=" + str(page))
def _response_item_to_object(self, resp_item):
    """Build a resource object from one decoded JSON response item.

    :param resp_item: JSON dict keyed by this manager's resource type
    :return: an instantiated resource model
    """
    model_cls = resources.get_model_class(self.resource_type)
    raw_properties = resp_item[self.resource_type]
    # drop None-valued properties; raises if something goes wrong
    cleaned = helpers.remove_properties_containing_None(raw_properties)
    return model_cls(cleaned)
def insert(self, index, child, rel=None, type=None, media=None, condition=None, **kwargs):
    '''Insert a link element into this container.

    :param child: location of the linked document; falsy values are ignored
    :param rel: relationship between document and linked document; defaults
        to ``stylesheet`` for ``.css`` paths
    :param type: content type of the linked document; defaults to
        ``text/css`` for ``.css`` paths. An empty string omits the attribute.
    :param media: target device; omitted when ``None`` or ``all``
    :param condition: IE conditional-comment expression wrapping the link
    :param kwargs: additional attributes for the ``link`` tag
    '''
    if not child:
        return
    default_rel = 'stylesheet'
    default_type = 'text/css'
    minify = rel in (None, default_rel) and type in (None, default_type)
    path = self.absolute_path(child, minify=minify)
    if path.endswith('.css'):
        rel = rel or default_rel
        type = type or default_type
    link = Html('link', href=path, rel=rel, **kwargs)
    if type:
        link.attr('type', type)
    if media not in (None, 'all'):
        link.attr('media', media)
    if condition:
        link = Html(None, '<!--[if %s]>\n' % condition, link, '<![endif]-->\n')
    rendered = link.to_string()
    # avoid inserting duplicate links
    if rendered not in self.children:
        if index is None:
            self.children.append(rendered)
        else:
            self.children.insert(index, rendered)
def _toState(self, state, *args, **kwargs):
    """Transition the state machine to the next state.

    Looks up and invokes the handler method ``_state_<state>``.

    @param state: Name of the next state.
    @raise ValueError: if no handler exists for ``state``.
    """
    handler_name = '_state_%s' % state
    try:
        handler = getattr(self, handler_name)
    except AttributeError:
        raise ValueError("No such state %r" % state)
    log.msg("%s: to state %r" % (self.__class__.__name__, state))
    self._state = state
    handler(*args, **kwargs)
def find_discordant_snps(self, individual1, individual2, individual3=None, save_output=False):
    """Find discordant SNPs between two or three individuals.

    Parameters
    ----------
    individual1 : Individual
        reference individual (child if `individual2` and `individual3` are parents)
    individual2 : Individual
        comparison individual
    individual3 : Individual
        other parent if `individual1` is child and `individual2` is a parent
    save_output : bool
        specifies whether to save output to a CSV file in the output directory

    Returns
    -------
    pandas.DataFrame
        discordant SNPs and associated genetic data

    References
    ----------
    .. [1] David Pike, "Search for Discordant SNPs in Parent-Child Raw Data
       Files," David Pike's Utilities,
       http://www.math.mun.ca/~dapike/FF23utils/pair-discord.php
    .. [2] David Pike, "Search for Discordant SNPs when given data for child
       and both parents," David Pike's Utilities,
       http://www.math.mun.ca/~dapike/FF23utils/trio-discord.php
    """
    self._remap_snps_to_GRCh37([individual1, individual2, individual3])
    df = individual1.snps
    # remove nulls for reference individual
    df = df.loc[df["genotype"].notnull()]
    # add SNPs shared with `individual2`
    df = df.join(individual2.snps["genotype"], rsuffix="2")
    genotype1 = "genotype_" + individual1.get_var_name()
    genotype2 = "genotype_" + individual2.get_var_name()
    if individual3 is None:
        df = df.rename(columns={"genotype": genotype1, "genotype2": genotype2})
        # find discordant SNPs between reference and comparison individuals:
        # either both single-allele and unequal, or both two-allele with no
        # allele shared in any position
        df = df.loc[
            df[genotype2].notnull()
            & (
                (df[genotype1].str.len() == 1)
                & (df[genotype2].str.len() == 1)
                & (df[genotype1] != df[genotype2])
            )
            | (
                (df[genotype1].str.len() == 2)
                & (df[genotype2].str.len() == 2)
                & (df[genotype1].str[0] != df[genotype2].str[0])
                & (df[genotype1].str[0] != df[genotype2].str[1])
                & (df[genotype1].str[1] != df[genotype2].str[0])
                & (df[genotype1].str[1] != df[genotype2].str[1])
            )
        ]
        if save_output:
            save_df_as_csv(
                df,
                self._output_dir,
                "discordant_snps_"
                + individual1.get_var_name()
                + "_"
                + individual2.get_var_name()
                + "_GRCh37.csv",
            )
    else:
        # add SNPs shared with `individual3`
        df = df.join(individual3.snps["genotype"], rsuffix="3")
        genotype3 = "genotype_" + individual3.get_var_name()
        df = df.rename(
            columns={
                "genotype": genotype1,
                "genotype2": genotype2,
                "genotype3": genotype3,
            }
        )
        # find discordant SNPs between child and two parents: discordant with
        # either parent pairwise, or parent2 homozygous and equal to parent3
        # while the child differs
        df = df.loc[
            (
                df[genotype2].notnull()
                & (
                    (df[genotype1].str.len() == 1)
                    & (df[genotype2].str.len() == 1)
                    & (df[genotype1] != df[genotype2])
                )
                | (
                    (df[genotype1].str.len() == 2)
                    & (df[genotype2].str.len() == 2)
                    & (df[genotype1].str[0] != df[genotype2].str[0])
                    & (df[genotype1].str[0] != df[genotype2].str[1])
                    & (df[genotype1].str[1] != df[genotype2].str[0])
                    & (df[genotype1].str[1] != df[genotype2].str[1])
                )
            )
            | (
                df[genotype3].notnull()
                & (
                    (df[genotype1].str.len() == 1)
                    & (df[genotype3].str.len() == 1)
                    & (df[genotype1] != df[genotype3])
                )
                | (
                    (df[genotype1].str.len() == 2)
                    & (df[genotype3].str.len() == 2)
                    & (df[genotype1].str[0] != df[genotype3].str[0])
                    & (df[genotype1].str[0] != df[genotype3].str[1])
                    & (df[genotype1].str[1] != df[genotype3].str[0])
                    & (df[genotype1].str[1] != df[genotype3].str[1])
                )
            )
            | (
                df[genotype2].notnull()
                & df[genotype3].notnull()
                & (df[genotype2].str.len() == 2)
                & (df[genotype2].str[0] == df[genotype2].str[1])
                & (df[genotype2] == df[genotype3])
                & (df[genotype1] != df[genotype2])
            )
        ]
        if save_output:
            save_df_as_csv(
                df,
                self._output_dir,
                "discordant_snps_"
                + individual1.get_var_name()
                + "_"
                + individual2.get_var_name()
                + "_"
                + individual3.get_var_name()
                + "_GRCh37.csv",
            )
    return df
def validate(data=None, schema_id=None, filepath=None, root=None, definition=None,
             specs=None, validation_function=None, validation_error_handler=None):
    """Validate ``data`` against a jsonschema from a YAML swagger file or specs.

    Examples:
        validate({"item": 1}, 'item_schema', 'defs.yml', root=__file__)
        validate(request.json, 'User', specs={'definitions': {'User': ...}})

    :param data: data to validate; by default ``request.json``
    :param schema_id: the definition id to use to validate (from specs)
    :param filepath: definition filepath from which to load specs
    :param root: root folder (inferred if not provided); unused if
        ``filepath`` starts with ``/``
    :param definition: alias of ``schema_id`` (kept for backwards compatibility)
    :param specs: load definitions from this dict or object instead of a file
    :param validation_function: custom validation function taking
        (data, schema) positionally
    :param validation_error_handler: custom handler taking
        (exception, data, schema) when validation raises
    """
    schema_id = schema_id or definition
    # for backwards compatibility with function signature
    if filepath is None and specs is None:
        abort(Response('Filepath or specs is needed to validate', status=500))
    if data is None:
        data = request.json  # defaults
    elif callable(data):
        # data=lambda: request.json
        data = data()
    if not data:
        abort(Response('No data to validate', status=500))
    # not used anymore but kept to reuse with marshmallow
    endpoint = request.endpoint.lower().replace('.', '_')
    verb = request.method.lower()
    if filepath is not None:
        if not root:
            # infer the caller's directory from the stack
            try:
                frame_info = inspect.stack()[1]
                root = os.path.dirname(os.path.abspath(frame_info[1]))
            except Exception:
                root = None
        else:
            root = os.path.dirname(root)
        if not filepath.startswith('/'):
            final_filepath = os.path.join(root, filepath)
        else:
            final_filepath = filepath
        full_doc = load_from_file(final_filepath)
        # skip any prologue before the YAML document marker
        yaml_start = full_doc.find('---')
        swag = yaml.load(full_doc[yaml_start if yaml_start >= 0 else 0:])
    else:
        swag = copy.deepcopy(specs)
    params = [item for item in swag.get('parameters', []) if item.get('schema')]
    definitions = {}
    main_def = {}
    raw_definitions = extract_definitions(params, endpoint=endpoint, verb=verb)
    if schema_id is None:
        for param in params:
            if param.get('in') == 'body':
                schema_id = param.get('schema', {}).get('$ref')
                if schema_id:
                    schema_id = schema_id.split('/')[-1]
                break  # consider only the first
    if schema_id is None:
        # if it is still none use first raw_definition extracted
        if raw_definitions:
            schema_id = raw_definitions[0].get('id')
    for defi in raw_definitions:
        if defi['id'].lower() == schema_id.lower():
            main_def = defi.copy()
        else:
            definitions[defi['id']] = defi
    # support definitions informed in dict
    if schema_id in swag.get('definitions', {}):
        main_def = swag.get('definitions', {}).get(schema_id)
    main_def['definitions'] = definitions
    for key, value in definitions.items():
        if 'id' in value:
            del value['id']
    if validation_function is None:
        validation_function = jsonschema.validate
    try:
        validation_function(data, main_def)
    except Exception as err:
        if validation_error_handler is not None:
            validation_error_handler(err, data, main_def)
        else:
            abort(Response(str(err), status=400))
def get(self, sid):
    """Construct an AssignedAddOnExtensionContext for the given SID.

    :param sid: The unique string that identifies the resource
    :returns: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionContext
    :rtype: twilio.rest.api.v2010.account.incoming_phone_number.assigned_add_on.assigned_add_on_extension.AssignedAddOnExtensionContext
    """
    solution = self._solution
    return AssignedAddOnExtensionContext(
        self._version,
        account_sid=solution['account_sid'],
        resource_sid=solution['resource_sid'],
        assigned_add_on_sid=solution['assigned_add_on_sid'],
        sid=sid,
    )
def put(self, key, data, ttl_secs=None):
    """Store ``data`` under ``key`` with an optional time-to-live.

    Like :meth:`~simplekv.KeyValueStore.put`, but with an additional
    parameter.

    :param ttl_secs: number of seconds until the key expires
    :raises ValueError: if ``ttl_secs`` is invalid
    :raises IOError: if ``data`` is not bytes, or storing failed
    """
    self._check_valid_key(key)
    if not isinstance(data, bytes):
        raise IOError("Provided data is not of type bytes")
    ttl = self._valid_ttl(ttl_secs)
    return self._put(key, data, ttl)
def mutateString(original, n, replacements='acgt'):
    """Mutate ``original`` in ``n`` places with characters from ``replacements``.

    @param original: The original C{str} to mutate.
    @param n: The C{int} number of locations to mutate.
    @param replacements: The C{str} of replacement letters.
    @return: A new C{str} with C{n} places of C{original} mutated.
    @raises ValueError: if C{n} is too high, C{replacements} contains
        duplicates, C{original} is empty, or a single-letter replacement
        alphabet already occurs in C{original} (making replacement impossible).
    """
    if not original:
        raise ValueError('Empty original string passed.')
    length = len(original)
    if n > length:
        raise ValueError('Cannot make %d mutations in a string of length %d' % (n, length))
    if len(set(replacements)) != len(replacements):
        raise ValueError('Replacement string contains duplicates')
    if len(replacements) == 1 and replacements in original:
        raise ValueError('Impossible replacement')
    chars = list(original)
    for offset, current in enumerate(chars):
        # probability chosen so that exactly n of the remaining loci mutate
        if uniform(0.0, 1.0) < float(n) / (length - offset):
            replacement = choice(replacements)
            # redraw until the replacement actually differs
            while replacement == current:
                replacement = choice(replacements)
            chars[offset] = replacement
            n -= 1
            if n == 0:
                break
    return ''.join(chars)
async def send_message(self, request: str, response_expected: bool, **kwargs: Any) -> Response:
    """Transport the message to the server and return the response.

    Args:
        request: The JSON-RPC request string.
        response_expected: Whether the request expects a response.

    Returns:
        A Response object wrapping the response text and raw aiohttp response.
    """
    with async_timeout.timeout(self.timeout):
        async with self.session.post(self.endpoint, data=request, ssl=self.ssl) as http_response:
            body = await http_response.text()
            return Response(body, raw=http_response)
def collect(self):
    """Collect all metrics attached to this metricset.

    Returns a dict of the form
    ``{"samples": {"metric.name": {"value": some_float}, ...},
    "timestamp": unix epoch in microsecond precision}``,
    or None when there are no samples.
    """
    samples = {}
    if self._counters:
        samples.update({
            name: {"value": counter.val}
            for name, counter in compat.iteritems(self._counters)
            if counter is not noop_metric
        })
    if self._gauges:
        samples.update({
            name: {"value": gauge.val}
            for name, gauge in compat.iteritems(self._gauges)
            if gauge is not noop_metric
        })
    if samples:
        return {"samples": samples, "timestamp": int(time.time() * 1000000)}
def _set_id_field(new_class):
    """Look up the identifier field declared on ``new_class`` and assign it.

    Creates an id field when none of the declared fields is an identifier.
    """
    # FIXME What does it mean when there are no declared fields?
    # Does it translate to an abstract entity?
    declared = new_class.meta_.declared_fields
    if not declared:
        return
    identifier_fields = (field for _, field in declared.items() if field.identifier)
    try:
        new_class.meta_.id_field = next(identifier_fields)
    except StopIteration:
        # no id field is declared, so create one
        new_class._create_id_field()
def delete(self, commit=True):
    """Delete this model instance from the database session.

    :param commit: when truthy, commit the session immediately.
    :return: the commit result when committing, otherwise ``commit`` itself.
    """
    db.session.delete(self)
    # short-circuits: only commits when requested
    return commit and db.session.commit()
def get_upload_form(self):
    """Construct the file-upload form bound to this request's POST data and files."""
    current_request = self.request
    return self.form_class(current_request.POST, current_request.FILES)
def old_encode_aes(key, plaintext):
    """Encrypt ``plaintext`` with ``key`` using the legacy AES-256-CBC scheme.

    Not a general-purpose encryption method — it has specific semantics:
    the key is padded to 32 bytes, the plaintext is padded to a 32-byte
    block size, encryption uses a random IV, and the result is the
    hex-encoded IV followed by the hex-encoded ciphertext.

    :param key: string, <= 32 bytes long
    :param plaintext: string, any amount of data
    """
    # 16 cryptographically secure random bytes for the IV (initial value)
    iv = os.urandom(16)
    cipher = AES.new(ensure_bytes(old_pad(key)), mode=AES.MODE_CBC, IV=iv)
    # encrypt the plaintext after padding it
    ciphertext = cipher.encrypt(ensure_bytes(old_pad(plaintext)))
    # hex-encoded IV followed by the hex-encoded ciphertext
    return binascii.hexlify(iv) + binascii.hexlify(ciphertext)
def author_to_dict(obj):
    """Convert a GitAuthor or InputGitAuthor into a plain dict.

    :raises RuntimeError: for any other type.
    """
    kind = type(obj).__name__
    if kind == 'GitAuthor':
        # GitAuthor exposes name, email, date properties
        return {'name': obj.name, 'email': obj.email}
    if kind == 'InputGitAuthor':
        # InputGitAuthor only has _identity, which returns a dict
        # XXX consider trying to rationalize this upstream...
        return obj._identity
    raise RuntimeError("unsupported type {t}".format(t=kind))
def deltas(last, now):
    """Return the per-position change in counter values for each coordinate,
    accounting for 32-bit wrap-around."""
    changes = {}
    for xy in last:
        diffs = ((new - old) & 0xFFFFFFFF for old, new in zip(last[xy], now[xy]))
        changes[xy] = RouterDiagnostics(*diffs)
    return changes
def remove_parameter(self, name):
    """Remove the named parameter from this query, if present.

    :param name: name of a parameter to remove
    :return: None
    """
    # pop with a default so a missing name is a silent no-op
    self.__query.pop(name, None)
def CompleteTransaction(self, dumpXml=None):
    """Complete the pending transaction.

    Sends the accumulated configuration (the modification queries stored in
    the config map) to UCS and returns the resulting managed objects.

    :raises UcsException: if UCS reports a non-zero error code.
    """
    from Ucs import ConfigMap, Pair
    from UcsBase import ManagedObject, WriteUcsWarning, WriteObject, UcsException
    self._transactionInProgress = False
    ccm = self.ConfigConfMos(self._configMap, YesOrNo.FALSE, dumpXml)
    self._configMap = ConfigMap()
    if ccm.errorCode != 0:
        raise UcsException(ccm.errorCode, ccm.errorDescr)
    mo_list = []
    for child in ccm.OutConfigs.GetChild():
        if isinstance(child, Pair):
            # a Pair wraps one or more managed objects
            for mo in child.GetChild():
                mo_list.append(mo)
        elif isinstance(child, ManagedObject):
            mo_list.append(child)
    return mo_list
def set_block_entity_data(self, pos_or_x, y=None, z=None, data=None):
    """Store block entity data for a block position.

    Accepts either an (x, y, z) tuple as ``pos_or_x`` or three separate
    coordinates; each coordinate is floored to an integer.

    Returns:
        Old data if block entity data was already stored for that
        location, None otherwise.
    """
    if None not in (y, z):
        # x, y, z supplied separately
        pos_or_x = (pos_or_x, y, z)
    key = tuple(int(floor(coord)) for coord in pos_or_x)
    previous = self.block_entities.get(key)
    self.block_entities[key] = data
    return previous
def source(uri, consts):
    '''Read GL shader code from ``uri``, substituting ``%%name%%`` placeholders.

    :param uri: path to the shader source file
    :param consts: mapping of placeholder name to constant value
    :return: the source text with every placeholder replaced
    '''
    with open(uri, 'r') as handle:
        text = handle.read()
    # feed constant values into the %%key%% placeholders
    for name, value in consts.items():
        text = text.replace(f"%%{name}%%", str(value))
    return text
def find_window(className=None, windowName=None):
    """Find the first top-level window in the current desktop matching the
    given class name and/or window caption.

    @type className: str
    @param className: (Optional) class name to match; C{None} matches any.
    @type windowName: str
    @param windowName: (Optional) caption text to match; C{None} matches any.
    @rtype: L{Window} or None
    @return: one matching window (there may be more), or C{None} when
        nothing matches.
    @raise WindowsError: if the underlying lookup fails.
    """
    # parameter order mirrors the Win32 FindWindow API, even though
    # reversing it might read better
    handle = win32.FindWindow(className, windowName)
    return Window(handle) if handle else None
def get_language_tabs(self, request, obj, available_languages, css_class=None):
    """Determine the language tabs to show."""
    active_language = self.get_form_language(request, obj)
    # delegates to the module-level get_language_tabs helper
    return get_language_tabs(request, active_language, available_languages, css_class=css_class)
def posterior_samples_f(self, X, size=10, **predict_kwargs):
    """Sample the posterior GP at the points X.

    :param X: the points at which to take the samples
    :type X: np.ndarray (Nnew x self.input_dim)
    :param size: the number of a posteriori samples
    :type size: int
    :returns: set of simulations
    :rtype: np.ndarray (Nnew x D x samples)
    """
    # posterior samples always need the full covariance
    predict_kwargs["full_cov"] = True
    mean, cov = self._raw_predict(X, **predict_kwargs)
    if self.normalizer is not None:
        mean = self.normalizer.inverse_mean(mean)
        cov = self.normalizer.inverse_variance(cov)

    def _sample_dim(m, v):
        # one batch of multivariate-normal draws, points along axis 0
        return np.random.multivariate_normal(m, v, size).T

    if self.output_dim == 1:
        return _sample_dim(mean.flatten(), cov)[:, np.newaxis, :]
    draws = np.empty((X.shape[0], self.output_dim, size))
    for d in range(self.output_dim):
        if cov.ndim == 3:
            draws[:, d, :] = _sample_dim(mean[:, d], cov[:, :, d])
        else:
            draws[:, d, :] = _sample_dim(mean[:, d], cov)
    return draws
def police_priority_map_exceed_map_pri6_exceed ( self , ** kwargs ) : """Auto Generated Code"""
config = ET . Element ( "config" ) police_priority_map = ET . SubElement ( config , "police-priority-map" , xmlns = "urn:brocade.com:mgmt:brocade-policer" ) name_key = ET . SubElement ( police_priority_map , "name" ) name_key . text = kwargs . pop ( 'name' ) exceed = ET . SubElement ( police_priority_map , "exceed" ) map_pri6_exceed = ET . SubElement ( exceed , "map-pri6-exceed" ) map_pri6_exceed . text = kwargs . pop ( 'map_pri6_exceed' ) callback = kwargs . pop ( 'callback' , self . _callback ) return callback ( config )
def display_waypoints(wploader, map):
    '''Display the mission waypoints and polygons on the map.'''
    mission_list = wploader.view_list()
    polygons = wploader.polygon_list()
    map.add_object(mp_slipmap.SlipClearLayer('Mission'))
    for idx, poly in enumerate(polygons):
        if len(poly) > 1:
            map.add_object(mp_slipmap.SlipPolygon('mission %u' % idx, poly, layer='Mission', linewidth=2, colour=(255, 255, 255)))
    labeled_wps = {}
    for i, commands in enumerate(mission_list):
        for j, command in enumerate(commands):
            # only label each waypoint command once
            if command not in labeled_wps:
                map.add_object(mp_slipmap.SlipLabel('miss_cmd %u/%u' % (i, j), polygons[i][j], str(command), 'Mission', colour=(0, 255, 255)))
                labeled_wps[command] = (i, j)
def overlap_tokens(doc, other_doc):
    """Return the tokens from the original Doc whose text also appears in the
    comparison Doc."""
    other_texts = {token.text for token in other_doc}
    return [token for token in doc if token.text in other_texts]
def create(self, request, project):
    """Add a new relation between a job and a bug."""
    job_id = int(request.data['job_id'])
    bug_id = int(request.data['bug_id'])
    try:
        BugJobMap.create(job_id=job_id, bug_id=bug_id, user=request.user)
        message = "Bug job map saved"
    except IntegrityError:
        # the mapping already exists; report it as skipped
        message = "Bug job map skipped: mapping already exists"
    return Response({"message": message})
def envelope(self):
    """Return the envelope sender address (SMTP from).

    Falls back from the explicit sender to the first author.

    :raises ValueError: when neither sender nor author is set.
    """
    if self.sender:
        return self.sender
    if self.author:
        return self.author[0]
    raise ValueError("Unable to determine message sender; no author or sender defined.")
def sub_dirs(path, invisible=False):
    """List child directory names of ``path`` (non-recursive).

    :param invisible: when True, include dot-prefixed (hidden) directories.
    """
    names = []
    for entry in os.listdir(path):
        if not os.path.isdir(os.path.join(path, entry)):
            continue
        if not invisible and entry.startswith('.'):
            continue
        names.append(entry)
    return names
def _load_github_hooks(github_url='https://api.github.com'):
    """Request GitHub's hook IP blocks from their /meta API.

    Returns the hook IP networks. On a rate-limit response raises
    ServiceUnavailable stating when the limit resets; on any other failure
    raises a generic ServiceUnavailable (HTTP 503).
    """
    try:
        resp = requests.get(github_url + '/meta')
        if resp.status_code == 200:
            return resp.json()['hooks']
        if resp.headers.get('X-RateLimit-Remaining') == '0':
            reset_ts = int(resp.headers['X-RateLimit-Reset'])
            reset_string = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(reset_ts))
            raise ServiceUnavailable('Rate limited from GitHub until ' + reset_string)
        raise ServiceUnavailable('Error reaching GitHub')
    except (KeyError, ValueError, requests.exceptions.ConnectionError):
        # malformed payload/headers or network failure
        raise ServiceUnavailable('Error reaching GitHub')
def get_access_token(self, code):
    """Exchange an authorization code for an access token.

    :param code: the authorization response code
    :return: the token info dict, augmented with custom values and saved
    :raises MercedesMeAuthError: when the token endpoint rejects the request
    """
    payload = {
        'redirect_uri': self.redirect_uri,
        'code': code,
        'grant_type': 'authorization_code',
    }
    headers = self._make_authorization_headers()
    response = requests.post(self.OAUTH_TOKEN_URL, data=payload, headers=headers, verify=LOGIN_VERIFY_SSL_CERT)
    # BUG FIX: `is not 200` compared identity, not value — int literal
    # identity is interpreter-dependent; compare with != instead.
    if response.status_code != 200:
        raise MercedesMeAuthError(response.reason)
    token_info = response.json()
    token_info = self._add_custom_values_to_token_info(token_info)
    self._save_token_info(token_info)
    return token_info
def export_true_table():
    """Export a truth table of (value, checker-function) outputs.

    Helps organize thought about how introspection predicates classify
    class-level vs instance-level attributes.

    Note: ``klass.__dict__`` contains only attributes defined in the class
    body itself; attributes inherited from parent classes are not included.
    """
    tester_list = [
        ("inspect.isroutine", lambda v: inspect.isroutine(v)),
        ("inspect.isfunction", lambda v: inspect.isfunction(v)),
        ("inspect.ismethod", lambda v: inspect.ismethod(v)),
        ("isinstance.property", lambda v: isinstance(v, property)),
        ("isinstance.staticmethod", lambda v: isinstance(v, staticmethod)),
        ("isinstance.classmethod", lambda v: isinstance(v, classmethod)),
    ]
    class_attr_value_paris = [
        ("attribute", MyClass.attribute),
        ("property_method", MyClass.property_method),
        ("regular_method", MyClass.regular_method),
        ("static_method", MyClass.static_method),
        ("class_method", MyClass.class_method),
        ("__dict__['static_method']", Base.__dict__["static_method"]),
        ("__dict__['class_method']", Base.__dict__["class_method"]),
    ]
    myclass = MyClass()
    instance_attr_value_paris = [
        ("attribute", myclass.attribute),
        ("property_method", myclass.property_method),
        ("regular_method", myclass.regular_method),
        ("static_method", myclass.static_method),
        ("class_method", MyClass.class_method),
        # ("__dict__['static_method']", myclass.__dict__["static_method"]),
        # ("__dict__['class_method']", myclass.__dict__["class_method"]),
    ]
    print(inspect.getargspec(MyClass.regular_method))
    print(inspect.getargspec(MyClass.static_method))
    print(inspect.getargspec(MyClass.class_method))
    print(inspect.getargspec(myclass.regular_method))
    print(inspect.getargspec(myclass.static_method))
    print(inspect.getargspec(myclass.class_method))

    # rows (index) are checker names, columns are attribute names
    def create_true_table_dataframe(index_tester, column_attr):
        df = pd.DataFrame()
        for attr, value in column_attr:
            col = list()
            for name, tester in index_tester:
                try:
                    if tester(value):
                        flag = 1
                    else:
                        flag = 0
                except:
                    # checker itself blew up on this value
                    flag = -99
                col.append(flag)
            df[attr] = col
        df.index = [name for name, _ in index_tester]
        return df

    version = "%s.%s" % (sys.version_info.major, sys.version_info.minor)
    df = create_true_table_dataframe(tester_list, class_attr_value_paris)
    df.to_csv("%s_class.csv" % version, index=True)
    df = create_true_table_dataframe(tester_list, instance_attr_value_paris)
    df.to_csv("%s_instance.csv" % version, index=True)
def run(self, sqlTail='', raw=False):
    """Compile filters and run the query and returns the entire result.

    You can use sqlTail to add things such as order by.
    If raw, returns the raw tuple data (not wrapped into a raba object).

    :param sqlTail: SQL fragment appended verbatim after the compiled query
        (e.g. ``'ORDER BY name'``).
    :param raw: if True, return raw row tuples instead of RabaPupa proxies.
    """
    sql, sqlValues = self.getSQLQuery()
    cur = self.con.execute('%s %s' % (sql, sqlTail), sqlValues)
    res = []
    for v in cur:
        if not raw:
            # v[0] is presumably the raba unique id -- wrapped in a lazy proxy.
            res.append(RabaPupa(self.rabaClass, v[0]))
        else:
            # NOTE(review): this returns only the FIRST raw row, not the whole
            # result set as the docstring suggests -- confirm intended behavior.
            return v
    return res
def get_session():
    """Return the request-bound database session, creating it on first use.

    :returns: a session
    """
    # Reuse the session already attached to the application context, if any.
    if hasattr(g, 'session'):
        return g.session
    engine = current_app.config['DATABASE_ENGINE']
    new_session = create_session(bind=engine)
    try:
        g.session = new_session
    except RuntimeError:
        # No application context available; just hand back the session
        # without caching it.
        pass
    return new_session
def get_season_stats(self, season_key):
    """Call the Season Stats API.

    Arg:
        season_key: key of the season

    Return:
        json data
    """
    url = "{0}season/{1}/stats/".format(self.api_path, season_key)
    return self.get_response(url)
def list_endpoint_groups(self, retrieve_all=True, **_params):
    """Fetch a list of all VPN endpoint groups for a project."""
    path = self.endpoint_groups_path
    return self.list('endpoint_groups', path, retrieve_all, **_params)
def update_authoring_nodes(self, editor):
    """Update the Model authoring nodes associated with the given editor.

    :param editor: Editor.
    :type editor: Editor
    :return: Method success.
    :rtype: bool
    """
    editor_node = foundations.common.get_first_item(self.get_editor_nodes(editor))
    file_node = editor_node.parent
    path = editor.file
    base_name = os.path.basename(path)
    # Keep both nodes in sync with the editor's current file.
    for node in (editor_node, file_node):
        node.name = base_name
        node.path = path
    self.node_changed(file_node)
    return True
def get_quality(self, qual_idx=None):
    """Get the signal qualifier, using shortcuts or combobox.

    :param qual_idx: index into QUALIFIERS for the quality value to assign
        to the epoch at the current window start.
    """
    if self.annot is None:  # remove if buttons are disabled
        # No annotation/score file loaded: warn the user and abort.
        msg = 'No score file loaded'
        error_dialog = QErrorMessage()
        error_dialog.setWindowTitle('Error getting quality')
        error_dialog.showMessage(msg)
        error_dialog.exec()
        lg.debug(msg)
        return
    window_start = self.parent.value('window_start')
    window_length = self.parent.value('window_length')
    try:
        self.annot.set_stage_for_epoch(window_start, QUALIFIERS[qual_idx],
                                       attr='quality')
    except KeyError:
        # The window start does not line up with any scored epoch.
        msg = ('The start of the window does not correspond to any epoch ' +
               'in sleep scoring file')
        error_dialog = QErrorMessage()
        error_dialog.setWindowTitle('Error getting quality')
        error_dialog.showMessage(msg)
        error_dialog.exec()
        lg.debug(msg)
    else:
        lg.debug('User staged ' + str(window_start) + ' as ' + QUALIFIERS[qual_idx])
        # Refresh the UI to reflect the newly assigned quality, then advance.
        self.set_quality_index()
        self.parent.overview.mark_quality(window_start, window_length,
                                          QUALIFIERS[qual_idx])
        self.display_stats()
        self.parent.traces.page_next()
def _extend_module_definitions(self, graph):
    """Using collected module definitions extend linkages.

    Walks every known ModuleDefinition in ``graph`` and materializes its
    sub-module (SBOL Module) entries, including their mapsTo links, wiring
    each sub-module back to previously collected definitions and functional
    components.
    """
    for mod_id in self._modules:
        mod_identity = self._get_triplet_value(graph, URIRef(mod_id), SBOL.module)
        modules = []
        for mod in graph.triples((mod_identity, SBOL.module, None)):
            # mod[2] is the object of the (subject, predicate, object) triple.
            md = self._get_rdf_identified(graph, mod[2])
            definition_id = self._get_triplet_value(graph, mod[2], SBOL.definition)
            # Link back to the already-collected ModuleDefinition.
            md['definition'] = self._modules[definition_id]
            maps_to = []
            for m in graph.triples((mod[2], SBOL.mapsTo, None)):
                mt = self._get_rdf_identified(graph, m[2])
                mt['refinement'] = self._get_triplet_value(graph, m[2], SBOL.refinement)
                local_id = self._get_triplet_value(graph, m[2], SBOL.local)
                remote_id = self._get_triplet_value(graph, m[2], SBOL.remote)
                # Resolve local/remote references through the functional
                # component store built earlier in the parse.
                mt['local'] = self._functional_component_store[local_id]
                mt['remote'] = self._functional_component_store[remote_id]
                maps_to.append(MapsTo(**mt))
            modules.append(Module(maps_to=maps_to, **md))
        self._modules[mod_id].modules = modules
def check_for_uncombined_files(self):
    """Check the working directory for uncombined files.

    (I.e., location1_specimens.txt and location2_specimens.txt but no
    specimens.txt.)  Show a warning if uncombined files are found.
    Return True if no uncombined files are found OR the user elects to
    continue anyway.
    """
    wd_files = os.listdir(self.WD)
    if self.data_model_num == 2:
        ftypes = ['er_specimens.txt', 'er_samples.txt', 'er_sites.txt',
                  'er_locations.txt', 'pmag_specimens.txt', 'pmag_samples.txt',
                  'pmag_sites.txt', 'rmag_specimens.txt', 'rmag_results.txt',
                  'rmag_anisotropy.txt']
    else:
        ftypes = ['specimens.txt', 'samples.txt', 'sites.txt', 'locations.txt']
    # A type is "uncombined" when per-location pieces (*_<ftype>) exist but
    # the combined <ftype> file itself does not.
    uncombined = set()
    for ftype in ftypes:
        if ftype in wd_files:
            continue
        if any(fname.endswith('_' + ftype) for fname in wd_files):
            uncombined.add(ftype)
    if not uncombined:
        return True
    msg = 'It looks like you may have uncombined files of type(s) {} in your working directory.\nYou may want to go back to Step 1 and finish combining all files.\nIf you continue, the program will try to extract as much information as possible from your measurement file.'.format(", ".join(list(uncombined)))
    dlg = pw.ChooseOne(self, 'Continue anyway', 'Go back', msg, title="Warning!")
    res = dlg.ShowModal()
    if res == wx.ID_NO:
        return
    return True
def _gen_delta_depend(self, path, derivative, multiplier, prettyname, device):
    """Publish a ratio metric built from two related deltas.

    For some metrics we need to divide the delta for one metric with the
    delta of another (its divider mate).  Publishes a metric only when both
    deltas are available and positive.
    """
    prefix, _, leaf = path.rpartition(".")
    # Only metrics registered in DIVIDERS have a mate to divide by.
    if leaf not in self.DIVIDERS:
        return
    mate_key = ".".join([prefix, self.DIVIDERS[leaf]])
    if mate_key not in derivative:
        return
    numerator = derivative[path]
    denominator = derivative[mate_key]
    # If we find a corresponding secondary delta, publish a metric.
    if numerator > 0 and denominator > 0:
        ratio = (float(numerator) / denominator) * multiplier
        self._replace_and_publish(path, prettyname, ratio, device)
def cashdraw(self, pin):
    """Send a kick pulse to the cash drawer connected on ``pin``.

    :raises CashDrawerError: if ``pin`` is neither 2 nor 5.
    """
    if pin == 2:
        command = CD_KICK_2
    elif pin == 5:
        command = CD_KICK_5
    else:
        raise CashDrawerError()
    self._raw(command)
def to_yaml(cls, config, compact=False, indent=2, level=0):
    """Convert HOCON input into a YAML output.

    :param config: parsed value (ConfigTree, list, string, bool, None or scalar)
    :param compact: passed down recursively; not used directly at this level
    :param indent: spaces per nesting level
    :param level: current nesting depth
    :return: YAML string representation
    :type return: basestring
    """
    lines = ""
    if isinstance(config, ConfigTree):
        if len(config) > 0:
            if level > 0:
                lines += '\n'
            bet_lines = []
            for key, item in config.items():
                bet_lines.append('{indent}{key}: {value}'.format(
                    indent=''.rjust(level * indent, ' '),
                    key=key.strip('"'),  # for dotted keys enclosed with "" to not be interpreted as nested key,
                    value=cls.to_yaml(item, compact, indent, level + 1)))
            lines += '\n'.join(bet_lines)
    elif isinstance(config, list):
        # None entries are dropped before rendering the sequence.
        config_list = [line for line in config if line is not None]
        if len(config_list) == 0:
            lines += '[]'
        else:
            lines += '\n'
            bet_lines = []
            for item in config_list:
                bet_lines.append('{indent}- {value}'.format(
                    indent=''.rjust(level * indent, ' '),
                    value=cls.to_yaml(item, compact, indent, level + 1)))
            lines += '\n'.join(bet_lines)
    elif isinstance(config, basestring):
        # if it contains a \n then it's multiline
        lines = config.split('\n')
        if len(lines) == 1:
            lines = config
        else:
            # NOTE(review): rjust pads each line to a total *width* of
            # level*indent rather than prefixing a fixed indent -- lines
            # longer than that width are left unindented. Confirm intended.
            lines = '|\n' + '\n'.join([line.rjust(level * indent, ' ') for line in lines])
    elif config is None or isinstance(config, NoneValue):
        lines = 'null'
    elif config is True:
        lines = 'true'
    elif config is False:
        lines = 'false'
    else:
        lines = str(config)
    return lines
def deserialize(self, obj=None, ignore_non_existing=False):
    """Populate this view's attributes from a plain dict.

    Class-level, non-underscore attributes of the view class act as the
    schema: their values define expected types/defaults per key.

    :type obj: dict | None
    :type ignore_non_existing: bool
    :raises TypeError: if ``obj`` is not a dict (unless ignored), or if it
        contains keys not defined on the view (unless ignored).
    :returns: self
    """
    if not isinstance(obj, dict):
        if ignore_non_existing:
            return
        raise TypeError("Wrong data '{}' passed for '{}' deserialization".format(obj, self.__class__.__name__))
    # Schema = class attributes whose names don't start with an underscore.
    definitions = {k: v for k, v in self.__class__.__dict__.items() if k[:1] != "_"}
    def_property_keys = set(definitions.keys())
    property_keys = set(obj.keys())
    existing_keys = def_property_keys & property_keys
    non_defined_keys = property_keys - def_property_keys
    non_existing_keys = def_property_keys - property_keys
    if not ignore_non_existing and non_defined_keys:
        raise TypeError(self.__class__.__name__ + " doesn't contain properties: {}".format(", ".join(non_defined_keys)))
    for k in existing_keys:
        v = obj[k]
        attr_type = definitions[k]
        try:
            if isinstance(attr_type, list) and self._isclass(attr_type[0], BaseView):
                # Definition like [SomeView]: deserialize each element,
                # wrapping a bare dict into a one-element list.
                if isinstance(v, list):
                    obj_list = [attr_type[0](serialized_obj=v_item, ignore_non_existing=ignore_non_existing) for v_item in v]
                else:
                    obj_list = [attr_type[0](serialized_obj=v, ignore_non_existing=ignore_non_existing)]
                self.__setattr__(k, obj_list)
            elif self._isclass(attr_type, BaseView):
                self.__setattr__(k, attr_type(v))
            else:
                self.__setattr__(k, v)
        except IndexError:
            # Empty list definition: store the raw value.
            self.__setattr__(k, v)  # check test_empty_view_deserialization test suite for test-case
    # Keys defined in the schema but absent from the payload get defaults
    # derived from the definition value's type.
    for k in non_existing_keys:
        attr_type = definitions[k]
        if attr_type is None:
            self.__setattr__(k, None)
        elif isinstance(attr_type, (list, set, tuple, dict)) and len(attr_type) == 0:
            self.__setattr__(k, attr_type.__class__())
        elif isinstance(attr_type, (list, set, tuple)) and self._isclass(attr_type[0], BaseView):
            self.__setattr__(k, attr_type.__class__())
        else:
            self.__setattr__(k, attr_type.__class__(attr_type))
    return self
def gen_checkbox_view(sig_dic):
    '''Generate the read-only HTML/template snippet for a checkbox field.

    :param sig_dic: field descriptor with keys 'zh' (display label),
        'en' (field slug used in extinfo) and 'dic' (option key -> label).
    :return: Tornado-template HTML string.
    '''
    # Label cell plus the opening of the value cell.
    view_zuoxiang = ''' <div class="col-sm-4"><span class="des">{0}</span></div> <div class="col-sm-8"> '''.format(sig_dic['zh'])
    dic_tmp = sig_dic['dic']
    for key in dic_tmp.keys():
        # Each option renders only when its key is present in the post's extinfo.
        tmp_str = ''' <span> {{% if "{0}" in postinfo.extinfo["{1}"] %}} {2} {{% end %}} </span> '''.format(key, sig_dic['en'], dic_tmp[key])
        view_zuoxiang += tmp_str
    view_zuoxiang += '''</div>'''
    return view_zuoxiang
def stop(self):
    """Stop playback when something is currently playing."""
    if self.isPlaying is not True:
        return
    self._execute("stop")
    self._changePlayingState(False)
def to_sysbase(self):
    """Convert model parameters to system base.

    Calls ``data_to_sys_base()`` on every loaded device model; does nothing
    when no system base is configured.

    Returns None
    """
    if not self.config.base:
        return
    for device_name in self.devman.devices:
        self.__dict__[device_name].data_to_sys_base()
def get_loss(self, y_pred, y_true, X=None, training=False):
    """Return the criterion loss for this batch.

    ``y_true`` is moved to the module's device before the criterion is
    applied.  ``X`` and ``training`` are accepted for interface
    compatibility and are not used here.

    Parameters
    ----------
    y_pred : torch tensor
        Predicted target values.
    y_true : torch tensor
        True target values.
    """
    target = to_tensor(y_true, device=self.device)
    return self.criterion_(y_pred, target)
def get_attributes(self, sids, flds, **overrides):
    """Check cache first, then defer to the data manager.

    :param sids: security identifiers
    :param flds: fields to retrieve
    :param overrides: key-value pairs to pass to the mgr get_attributes method
    :return: DataFrame with flds as columns and sids as the row indices
    """
    # Unfortunately must be inefficient with request
    flds = _force_array(flds)
    sids = _force_array(sids)
    cached = self._cache_get_attribute(sids, flds, **overrides)
    if not cached:
        # Nothing cached: fetch everything, then prime the cache per sid.
        df = self.dm.get_attributes(sids, flds, **overrides)
        for sid in sids:
            # .loc replaces the deprecated-and-removed DataFrame.ix;
            # label slicing keeps the row as a one-row frame.
            self._cache_update_attribute(sid, df.loc[sid:sid], **overrides)
        return df
    else:
        # Retrieve all missing fields and merge with the existing cache.
        for sid in sids:
            missed = flds if sid not in cached else set(flds) - set(cached[sid].columns)
            if missed:
                df = self.dm.get_attributes(sid, missed, **overrides)
                self._cache_update_attribute(sid, df, **overrides)
        # Now everything is cached; retrieve and combine.
        data = self._cache_get_attribute(sids, flds, **overrides)
        frame = pd.concat(data.values())
        return frame
def to_xml(self, xmllint=False):
    """Serialize all properties as pretty-printed XML.

    Args:
        xmllint (boolean): Format with ``xmllint`` in addition to
            pretty-printing.
    """
    tree = ET.ElementTree(self._tree.getroot())
    serialized = ET.tostring(tree, pretty_print=True)
    if xmllint:
        serialized = xmllint_format(serialized)
    return serialized
def repeats(seq, size):
    '''Count times that a sequence of a certain size is repeated.

    :param seq: Input sequence.
    :type seq: coral.DNA or coral.RNA
    :param size: Size of the repeat to count.
    :type size: int
    :returns: Occurrences of repeats and how many
    :rtype: list of (matched sequence, count) tuples
    '''
    seq = str(seq)
    # Slide a window of `size` across the sequence to enumerate all n-mers.
    n_mers = [seq[i:i + size] for i in range(len(seq) - size + 1)]
    counted = Counter(n_mers)
    # No one cares about patterns that appear once, so exclude them.
    # (Counter.iteritems() was Python-2-only; items() works on 2 and 3.)
    found_repeats = [(key, value) for key, value in counted.items() if value > 1]
    return found_repeats
async def _start_payloads(self):
    """Start all queued payloads as tasks on the event loop."""
    with self._lock:
        # Snapshot and drain the queue while holding the lock.
        pending = list(self._payloads)
        self._payloads.clear()
        for payload in pending:
            self._tasks.add(self.event_loop.create_task(payload()))
    # Yield control so freshly created tasks get a chance to start.
    await asyncio.sleep(0)
def undo(self, i=-1):
    """Undo an item in the history logs.

    :parameter int i: integer for indexing (can be positive or negative).
        Defaults to -1 if not provided (the latest recorded history item)
    :raises ValueError: if no history items have been recorded
    """
    _history_enabled = self.history_enabled
    param = self.get_history(i)
    # Temporarily disable history so the undo itself is not recorded.
    self.disable_history()
    param.undo()
    # TODO: do we really want to remove this? then what's the point of redo?
    self.remove_parameter(uniqueid=param.uniqueid)
    # Restore the previous history-recording state.
    if _history_enabled:
        self.enable_history()
def run(self):
    """Main thread function to maintain connection and receive remote status."""
    _LOGGER.info("Started")
    while True:
        self._maybe_reconnect()
        line = ''
        try:
            # If someone is sending a command, we can lose our connection so grab a
            # copy beforehand. We don't need the lock because if the connection is
            # open, we are the only ones that will read from telnet (the reconnect
            # code runs synchronously in this loop).
            t = self._telnet
            if t is not None:
                line = t.read_until(b"\n")
        except EOFError:
            # Connection dropped: tear it down under the lock and retry.
            try:
                self._lock.acquire()
                self._disconnect_locked()
                continue
            finally:
                self._lock.release()
        # NOTE(review): when _telnet is None, `line` stays a str ('') and
        # str has no .decode() on Python 3 -- confirm this branch is
        # unreachable after _maybe_reconnect().
        self._recv_cb(line.decode('ascii').rstrip())
def create_instance(credentials, project, zone, name, startup_script=None,
                    startup_script_url=None, metadata=None,
                    machine_type='f1-micro', tags=None, disk_size_gb=10,
                    wait_until_done=False):
    """Create a Compute Engine instance, optionally with a startup script.

    Args:
        credentials: OAuth2 credentials object providing ``get_access_token()``.
        project: GCP project id.
        zone: zone name (e.g. ``us-central1-a``).
        name: instance name (also used as the boot-disk device name).
        startup_script: inline startup script contents (mutually exclusive
            with ``startup_script_url``).
        startup_script_url: URL of a startup script.
        metadata: dict of extra metadata key/value pairs.
        machine_type: GCE machine type.
        tags: list of network tags.
        disk_size_gb: boot disk size in GB.
        wait_until_done: if True, block until the insert operation finishes.

    Returns:
        The name of the zone operation created by the insert request.

    Raises:
        ValueError: if both startup_script and startup_script_url are given.
        requests.HTTPError: if the API call fails.
    """
    if startup_script is not None and startup_script_url is not None:
        raise ValueError('Cannot specify a startup script string and URL '
                         'at the same time!')
    access_token = credentials.get_access_token()
    if metadata is None:
        metadata = {}
    meta_items = [{'key': k, 'value': v} for k, v in metadata.items()]
    if tags is None:
        tags = []
    # The startup script (inline or URL) goes first in the metadata items.
    if startup_script is not None:
        meta_items.insert(0, {'key': 'startup-script', 'value': startup_script})
    elif startup_script_url is not None:
        meta_items.insert(0, {'key': 'startup-script-url', 'value': startup_script_url})
    payload = {
        "name": name,
        "zone": "projects/%s/zones/%s" % (project, zone),
        "machineType": "projects/%s/zones/%s/machineTypes/%s" % (project, zone, machine_type),
        "metadata": {"items": meta_items},
        "tags": {"items": tags},
        "disks": [{
            "type": "PERSISTENT",
            "boot": True,
            "mode": "READ_WRITE",
            "autoDelete": True,
            "deviceName": name,
            "initializeParams": {
                "sourceImage": "projects/ubuntu-os-cloud/global/images/ubuntu-1604-xenial-v20170815a",
                "diskType": "projects/%s/zones/%s/diskTypes/pd-standard" % (project, zone),
                "diskSizeGb": str(disk_size_gb)
            }
        }],
        "canIpForward": False,
        "networkInterfaces": [{
            "network": "projects/%s/global/networks/default" % project,
            # zone[:-2] strips the trailing zone letter suffix (e.g. '-a')
            # to obtain the region -- TODO confirm for multi-char suffixes.
            "subnetwork": "projects/%s/regions/%s/subnetworks/default" % (project, zone[:-2]),
            "accessConfigs": [{"name": "External NAT", "type": "ONE_TO_ONE_NAT"}]
        }],
        "description": "",
        "scheduling": {
            "preemptible": False,
            "onHostMaintenance": "MIGRATE",
            "automaticRestart": True
        },
        "serviceAccounts": [{
            "email": "default",
            "scopes": ['https://www.googleapis.com/auth/compute',
                       "https://www.googleapis.com/auth/devstorage.read_write",
                       "https://www.googleapis.com/auth/logging.write",
                       "https://www.googleapis.com/auth/monitoring.write",
                       "https://www.googleapis.com/auth/servicecontrol",
                       "https://www.googleapis.com/auth/service.management.readonly",
                       "https://www.googleapis.com/auth/trace.append"]
        }]
    }
    # header = 'Authorization: Bearer 1/fFBGRNJru1FQd44AzqT3Zg'
    headers = {'Authorization': 'Bearer %s' % access_token.access_token}
    # print('Test: ', json.dumps(payload, indent=4, sort_keys=True))
    _LOGGER.debug('Access token: %s' % access_token.access_token)
    _LOGGER.debug('Payload: %s', json.dumps(payload, sort_keys=True, indent=4))
    r = requests.post('https://www.googleapis.com/compute/v1/'
                      'projects/%s/zones/%s/instances' % (project, zone),
                      headers=headers, json=payload)
    r.raise_for_status()
    op_name = r.json()['name']
    _LOGGER.info('Submitted request to create intsance '
                 '(HTTP code: %d).', r.status_code)
    if wait_until_done:
        _LOGGER.info('Waiting until operation is done...')
        wait_for_zone_op(access_token, project, zone, op_name)
    return op_name
def _pars_total_indexes(names, dims, fnames, pars):
    """Obtain all indexes for parameters ``pars`` in the sequence of names.

    ``names`` references variables stored in column-major order.  For each
    parameter the result contains its column-major indexes under the
    parameter name and its row-major indexes under ``<par>_rowmajor``.

    Parameters
    ----------
    names : sequence of str
        All the parameter names.
    dims : sequence of list of int
        Dimensions, in the same order as `names`.
    fnames : sequence of str
        All the scalar (flat) parameter names.
    pars : sequence of str
        The parameters of interest; assumed to all appear in `names`.

    Returns
    -------
    indexes : OrderedDict of tuple of int
    """
    starts = _calc_starts(dims)

    def total_indexes_for(par):
        # Scalars appear directly in `fnames`.
        if par in fnames:
            idx = (fnames.index(par),)
            return OrderedDict([(par, idx), (par + '_rowmajor', idx)])
        pos = names.index(par)
        col_idx = starts[pos] + np.arange(np.prod(dims[pos]))
        row_idx = starts[pos] + _idx_col2rowm(dims[pos])
        return OrderedDict([(par, tuple(col_idx)),
                            (par + '_rowmajor', tuple(row_idx))])

    indexes = OrderedDict()
    for par in pars:
        indexes.update(total_indexes_for(par))
    return indexes
def get_small_image(self, page=1):
    """Download and return the small-sized image of a single page.

    The ``page`` kwarg selects which page to return; one is the default.
    """
    return self._get_url(self.get_small_image_url(page=page))
def _install_interrupt_handler ( ) : """Suppress KeyboardInterrupt traceback display in specific situations If not running in dev mode , and if executed from the command line , then we raise SystemExit instead of KeyboardInterrupt . This provides a clean exit . : returns : None if no action is taken , original interrupt handler otherwise"""
# These would clutter the quilt . x namespace , so they ' re imported here instead . import os import sys import signal import pkg_resources from . tools import const # Check to see what entry points / scripts are configred to run quilt from the CLI # By doing this , we have these benefits : # * Avoid closing someone ' s Jupyter / iPython / bPython session when they hit ctrl - c # * Avoid calling exit ( ) when being used as an external lib # * Provide exceptions when running in Jupyter / iPython / bPython # * Provide exceptions when running in unexpected circumstances quilt = pkg_resources . get_distribution ( 'quilt' ) executable = os . path . basename ( sys . argv [ 0 ] ) entry_points = quilt . get_entry_map ( ) . get ( 'console_scripts' , [ ] ) # When python is run with ' - c ' , this was executed via ' python - c " < some python code > " ' if executable == '-c' : # This is awkward and somewhat hackish , but we have to ensure that this is * us * # executing via ' python - c ' if len ( sys . argv ) > 1 and sys . argv [ 1 ] == 'quilt testing' : # it ' s us . Let ' s pretend ' - c ' is an entry point . entry_points [ '-c' ] = 'blah' sys . argv . pop ( 1 ) if executable not in entry_points : return # We ' re running as a console script . # If not in dev mode , use SystemExit instead of raising KeyboardInterrupt def handle_interrupt ( signum , stack ) : # Check for dev mode if _DEV_MODE is None : # Args and environment have not been parsed , and no _ DEV _ MODE state has been set . dev_mode = True if len ( sys . argv ) > 1 and sys . argv [ 1 ] == '--dev' else False dev_mode = True if os . environ . get ( 'QUILT_DEV_MODE' , '' ) . strip ( ) . lower ( ) == 'true' else dev_mode else : # Use forced dev - mode if _ DEV _ MODE is set dev_mode = _DEV_MODE # In order to display the full traceback , we lose control of the exit code here . 
# Dev mode ctrl - c exit just produces the generic exit error code 1 if dev_mode : raise KeyboardInterrupt ( ) # Normal exit # avoid annoying prompt displacement when hitting ctrl - c print ( ) exit ( const . EXIT_KB_INTERRUPT ) return signal . signal ( signal . SIGINT , handle_interrupt )
def shift(x, offset, dim, wrap, name=None):
    """Shift operation: shift ``x`` right by +offset along dimension ``dim``.

    Args:
        x: a Tensor
        offset: an integer; negative values shift left instead of right.
        dim: a Dimension of x
        wrap: a boolean - whether to wrap (True) or pad with zeros (False).
        name: an optional string

    Returns:
        a Tensor with the same shape and dtype as x
    """
    op = ShiftOperation(x, offset, dim, wrap, name=name)
    return op.outputs[0]
def init_match_settings(self):
    """Populate the match-settings comboboxes with all available items.

    :return:
    """
    for combobox, items in ((self.mode_type_combobox, game_mode_types),
                            (self.map_type_combobox, map_types)):
        combobox.addItems(items)
def lemma(lemma_key):
    """Return the Lemma object with the given key, caching the result.

    Parameters
    ----------
    lemma_key : str
        Key of the returned lemma, of the form
        ``<synset_word>.<pos>.<nn>.<literal>``.

    Returns
    -------
    Lemma
        Lemma matching the `lemma_key`.
    """
    if lemma_key in LEMMAS_DICT:
        return LEMMAS_DICT[lemma_key]
    # Split off the first three dot-separated fields (the synset key);
    # maxsplit keeps any dots inside the lemma literal itself intact.
    split_lemma_key = lemma_key.split('.', 3)
    synset_key = '.'.join(split_lemma_key[:3])
    lemma_literal = split_lemma_key[3]
    lemma_obj = Lemma(synset_key, lemma_literal)
    LEMMAS_DICT[lemma_key] = lemma_obj
    return lemma_obj
def _dispatch_call_args(cls=None, bound_call=None, unbound_call=None, attr='_call'):
    """Check the arguments of ``_call()`` or similar for conformity.

    Allowed signatures (instance argument must be named 'self', the output
    argument must be named 'out'; extra ``**kwargs`` and, on Python 3,
    keyword-only arguments are permitted):

        - ``_call(self, x)``
        - ``_call(self, x, out)``
        - ``_call(self, x, out=None)``
        - ``_call(self, x, *, out=None)``  (Python 3 only)

    Static methods, class methods, variable positional arguments, more than
    two "true" positional arguments, ``out`` in first position, and ``out``
    defaults other than ``None`` are all rejected.

    Parameters
    ----------
    cls : class, optional
        The ``_call()`` method of this class is checked.
    bound_call : callable, optional
        Check this bound method instead of ``cls``.
    unbound_call : callable, optional
        Check this unbound function instead of ``cls``.
    attr : string, optional
        Check this attribute instead of ``_call``, e.g. ``__call__``.

    Exactly one of ``cls``, ``bound_call``, ``unbound_call`` must be given.

    Returns
    -------
    has_out : bool
        Whether the call has an ``out`` argument.
    out_is_optional : bool
        Whether the ``out`` argument is optional.
    spec : `inspect.ArgSpec` or `inspect.FullArgSpec`
        Argument specification of the checked call function.

    Raises
    ------
    ValueError
        If the signature of the function is malformed.
    """
    py3 = (sys.version_info.major > 2)
    specs = ['_call(self, x[, **kwargs])',
             '_call(self, x, out[, **kwargs])',
             '_call(self, x, out=None[, **kwargs])']
    if py3:
        specs += ['_call(self, x, *, out=None[, **kwargs])']
    # Human-readable help text appended to every raised error.
    spec_msg = "\nPossible signatures are ('[, **kwargs]' means optional):\n\n"
    spec_msg += '\n'.join(specs)
    spec_msg += '\n\nStatic or class methods are not allowed.'
    if sum(arg is not None for arg in (cls, bound_call, unbound_call)) != 1:
        raise ValueError('exactly one object to check must be given')
    if cls is not None:
        # Get the actual implementation, including ancestors
        for parent in cls.mro():
            call = parent.__dict__.get(attr, None)
            if call is not None:
                break
        # Static and class methods are not allowed
        if isinstance(call, staticmethod):
            raise TypeError("'{}.{}' is a static method. "
                            "".format(cls.__name__, attr) + spec_msg)
        elif isinstance(call, classmethod):
            raise TypeError("'{}.{}' is a class method. "
                            "".format(cls.__name__, attr) + spec_msg)
    elif bound_call is not None:
        call = bound_call
        if not inspect.ismethod(call):
            raise TypeError('{} is not a bound method'.format(call))
    else:
        call = unbound_call
    if py3:
        # support kw-only args and annotations
        spec = inspect.getfullargspec(call)
        kw_only = spec.kwonlyargs
        kw_only_defaults = spec.kwonlydefaults
    else:
        spec = inspect.getargspec(call)
        kw_only = ()
        kw_only_defaults = {}
    signature = _function_signature(call)
    pos_args = spec.args
    if unbound_call is not None:
        # Add 'self' to positional arg list to satisfy the checker
        pos_args.insert(0, 'self')
    pos_defaults = spec.defaults
    varargs = spec.varargs
    # Variable args are not allowed
    if varargs is not None:
        raise ValueError("bad signature '{}': variable arguments not allowed"
                         "".format(signature) + spec_msg)
    if len(pos_args) not in (2, 3):
        raise ValueError("bad signature '{}'".format(signature) + spec_msg)
    # Positional arguments excluding the instance ('self').
    true_pos_args = pos_args[1:]
    if len(true_pos_args) == 1:  # 'out' kw-only
        if 'out' in true_pos_args:  # 'out' positional and 'x' kw-only -> no
            raise ValueError("bad signature '{}': `out` cannot be the only "
                             "positional argument"
                             "".format(signature) + spec_msg)
        else:
            if 'out' not in kw_only:
                has_out = out_optional = False
            elif kw_only_defaults['out'] is not None:
                raise ValueError("bad signature '{}': `out` can only default to "
                                 "`None`, got '{}'"
                                 "".format(signature, kw_only_defaults['out']) + spec_msg)
            else:
                has_out = True
                out_optional = True
    elif len(true_pos_args) == 2:  # Both args positional
        if true_pos_args[0] == 'out':  # out must come second
            py3_txt = ' or keyword-only. ' if py3 else '. '
            raise ValueError("bad signature '{}': `out` can only be the "
                             "second positional argument".format(signature) + py3_txt + spec_msg)
        elif true_pos_args[1] != 'out':  # 'out' must be 'out'
            raise ValueError("bad signature '{}': output parameter must "
                             "be called 'out', got '{}'"
                             "".format(signature, true_pos_args[1]) + spec_msg)
        else:
            has_out = True
            # Optional iff a default was supplied for 'out'.
            out_optional = bool(pos_defaults)
            if pos_defaults and pos_defaults[-1] is not None:
                raise ValueError("bad signature '{}': `out` can only "
                                 "default to `None`, got '{}'"
                                 "".format(signature, pos_defaults[-1]) + spec_msg)
    else:  # Too many positional args
        raise ValueError("bad signature '{}': too many positional arguments"
                         " ".format(signature) + spec_msg)
    return has_out, out_optional, spec
def ensure_dir_exists(cls, filepath):
    """Ensure that the parent directory of ``filepath`` exists.

    If it doesn't exist, try to create it, guarding against the race where
    another process creates it between the existence check and makedirs.
    """
    directory = os.path.dirname(filepath)
    if not directory or os.path.exists(directory):
        return
    try:
        os.makedirs(directory)
    except OSError as exc:
        # Another process may have created it concurrently; that's fine.
        if exc.errno != errno.EEXIST:
            raise
def resource_name(cls):
    """Return the REST resource name declared on the class.

    :raises NotImplementedError: when the class declares no resource name.
    """
    # The abstract base classes have no resource of their own.
    if cls.__name__ in ("NURESTRootObject", "NURESTObject"):
        return "Not Implemented"
    name = cls.__resource_name__
    if name is None:
        raise NotImplementedError('%s has no defined resource name. Implement resource_name property first.' % cls)
    return name
def get_push_command(self, remote=None, revision=None):
    """Get the command to push changes from the local repository to a remote repository."""
    if revision:
        # Bazaar pushes whole branches; a per-revision push is unsupported.
        raise NotImplementedError(compact(""" Bazaar repository support doesn't include the ability to push specific revisions! """))
    command = ['bzr', 'push']
    if remote:
        command = command + [remote]
    return command
def history(ctx, archive_name):
    '''Get archive history'''
    _generate_api(ctx)
    archive = ctx.obj.api.get_archive(archive_name)
    # Pretty-print the archive's history to stdout.
    click.echo(pprint.pformat(archive.get_history()))
def exists_uda(self, name, database=None):
    """Check whether a UDAF with the given name exists.

    Parameters
    ----------
    name : string
        UDAF name
    database : string, optional
        database name

    Returns
    -------
    boolean
        True if at least one matching UDAF is found.
    """
    matches = self.list_udas(database=database, like=name)
    return bool(matches)
def list_to_csv(my_list, csv_file):
    """Save a matrix (list of lists) to a file as a CSV.

    .. code:: python

        my_list = [["Name", "Location"],
                   ["Chris", "South Pole"],
                   ["Harry", "Depth of Winter"],
                   ["Bob", "Skull"]]
        reusables.list_to_csv(my_list, "example.csv")

    Every field is quoted (``QUOTE_ALL``).

    :param my_list: list of lists to save to CSV
    :param csv_file: File to save data to
    """
    # Python 3 needs text mode with newline='' so the csv module controls
    # line endings itself; Python 2 wants binary mode.
    open_kwargs = {'mode': 'w', 'newline': ''} if PY3 else {'mode': 'wb'}
    with open(csv_file, **open_kwargs) as handle:
        csv.writer(handle, delimiter=',',
                   quoting=csv.QUOTE_ALL).writerows(my_list)
def analyze_log(fp, configs, url_rules):
    """Run the line-by-line log analyzer over an open file.

    :param fp: iterable of log lines (typically an open file object).
    :param configs: configuration object providing ``min_msecs``.
    :param url_rules: rules used to classify URLs.
    :return: the analyzer's aggregated data.
    """
    analyzer = LogAnalyzer(
        url_classifier=URLClassifier(url_rules),
        min_msecs=configs.min_msecs,
    )
    for raw_line in fp:
        analyzer.analyze_line(raw_line)
    return analyzer.get_data()
def find_octagonal_number(n):
    """Return the nth octagonal number.

    An octagonal number is a figurate number representing an octagon;
    the nth one is ``3*n**2 - 2*n``, i.e. ``n * (3n - 2)``.

    Examples:
        find_octagonal_number(5)  -> 65
        find_octagonal_number(10) -> 280
        find_octagonal_number(15) -> 645

    :param n: the position (n) of the octagonal number to find.
    :return: the nth octagonal number.
    """
    return n * (3 * n - 2)
def output_summary(self, output_stream=sys.stdout):
    """Print a usage tip and the list of acceptable options.

    This is useful as the output of the 'help' option.

    parameters:
        output_stream - an open file-like object suitable for use as the
                        target of a print function
    """
    # Header: application name/version and description, if configured.
    if self.app_name or self.app_description:
        print('Application: ', end='', file=output_stream)
    if self.app_name:
        print(self.app_name, self.app_version, file=output_stream)
    if self.app_description:
        print(self.app_description, file=output_stream)
    if self.app_name or self.app_description:
        print('', file=output_stream)

    names_list = self.get_option_names()
    print("usage:\n%s [OPTIONS]... " % self.app_invocation_name, end='',
          file=output_stream)
    bracket_count = 0
    # this section prints the non-switch command line arguments
    for key in names_list:
        an_option = self.option_definitions[key]
        if an_option.is_argument:
            if an_option.default is None:
                # there's no option, assume the user must set this
                print(an_option.name, end='', file=output_stream)
            elif (inspect.isclass(an_option.value)
                  or inspect.ismodule(an_option.value)):
                # this is already set and it could have expanded, most
                # likely this is a case where a sub-command has been
                # loaded and we're looking to show the help for it.
                # display show it as a constant already provided rather
                # than as an option the user must provide
                print(an_option.default, end='', file=output_stream)
            else:
                # this is an argument that the user may alternatively
                # provide; optional arguments are opened with '[' and all
                # brackets are closed together after the loop.
                print("[ %s" % an_option.name, end='', file=output_stream)
                bracket_count += 1
    print(']' * bracket_count, '\n', file=output_stream)

    names_list.sort()
    if names_list:
        print('OPTIONS:', file=output_stream)

    pad = ' ' * 4  # indentation for doc/default lines under each option
    for name in names_list:
        if name in self.options_banned_from_help:
            continue
        option = self._get_option(name)

        line = ' ' * 2  # always start with 2 spaces
        if option.short_form:
            line += '-%s, ' % option.short_form
        line += '--%s' % name
        line += '\n'

        doc = option.doc if option.doc is not None else ''
        if doc:
            line += '%s%s\n' % (pad, doc)
        try:
            # Render the default through the registered converter for its
            # type; fall back to the raw value when no converter exists.
            value = option.value
            type_of_value = type(value)
            converter_function = to_string_converters[type_of_value]
            default = converter_function(value)
        except KeyError:
            default = option.value
        if default is not None:
            if ((option.secret or 'password' in name.lower()) and
                    not self.option_definitions.admin.expose_secrets.default):
                # Mask secrets unless the admin explicitly exposes them.
                default = '*********'
            if name not in ('help',):
                # don't bother with certain dead obvious ones
                line += '%s(default: %s)\n' % (pad, default)
        print(line, file=output_stream)
def validate_one_touch_signature(self, signature, nonce, method, url, params):
    """Validate the ``X-Authy-Signature`` header of a OneTouch callback.

    :param string signature: X-Authy-Signature key of headers.
    :param string nonce: X-Authy-Signature-Nonce key of headers.
    :param string method: GET or POST - configured in app settings for OneTouch.
    :param string url: base callback url.
    :param dict params: params sent by Authy.
    :return bool: True if the calculated signature matches X-Authy-Signature.
    :raises AuthyFormatException: when any required input is missing/invalid.
    """
    if not signature or not isinstance(signature, str):
        raise AuthyFormatException(
            "Invalid signature - should not be empty. It is required")
    if not nonce:
        raise AuthyFormatException(
            "Invalid nonce - should not be empty. It is required")
    if not method or not ('get' == method.lower() or 'post' == method.lower()):
        raise AuthyFormatException(
            "Invalid method - should not be empty. It is required")
    if not params or not isinstance(params, dict):
        raise AuthyFormatException(
            "Invalid params - should not be empty. It is required")

    query_params = self.__make_http_query(params)

    # Canonicalize: percent-encode slashes, '+' for spaces, then sort the
    # key=value pairs byte-wise as the Authy server does.
    sorted_params = '&'.join(sorted(
        query_params.replace('/', '%2F').replace('%20', '+').split('&')))
    # Collapse indexed array keys (e.g. %5B0%5D) to plain %5B%5D and strip
    # literal 'None' values.  Raw strings avoid ambiguous escape warnings.
    sorted_params = re.sub(r"\%5B([0-9])*\%5D", "%5B%5D", sorted_params)
    sorted_params = re.sub(r"\=None", "=", sorted_params)

    data = nonce + "|" + method + "|" + url + "|" + sorted_params
    try:
        digest = hmac.new(self.api_key.encode(), data.encode(),
                          hashlib.sha256).digest()
    except (AttributeError, TypeError):
        # Narrowed from a bare `except:`: only the legacy byte-string
        # api_key case (no .encode attribute) falls through here.
        digest = hmac.new(self.api_key, data.encode(),
                          hashlib.sha256).digest()
    calculated_signature = base64.b64encode(digest).decode()
    # Constant-time comparison hardens the check against timing attacks.
    return hmac.compare_digest(calculated_signature, signature)
def _docstring_getblocks(self):
    """Collect contiguous docstring blocks from the buffered code lines.

    Scans the lines in ``self.ibuffer[0]:self.ibuffer[1]`` (plus contiguous
    docstring lines immediately before/after that window) and returns a list
    of ``(docblock, docline, doclength, key)`` tuples, where ``docblock`` is
    the list of docstring texts, ``docline`` the line number where the block
    starts, ``doclength`` the character length consumed, and ``key`` the
    identifier of the decorated element that follows the block.
    """
    # If there are no lines to look at, we have nothing to do here.
    if self.ibuffer[0] == self.ibuffer[1]:
        return []
    lines = self.context.bufferstr[self.ibuffer[0]:self.ibuffer[1]]
    docblock = []
    result = []
    self._doclines = []
    # We need to keep track of the line number for the start of the
    # documentation strings.
    docline = 0
    doclength = 0
    first = self.docparser.RE_DOCS.match(lines[0])
    if first is not None:
        docblock.append(first.group("docstring"))
        docline = self.ibuffer[0]
        self._doclines.append(docline)
        doclength += len(lines[0]) + 1  # + 1 for \n removed by split.
        # We need to search backwards in the main buffer string for
        # additional tags to add to the block.
        i = self.ibuffer[0] - 1
        while i > 0:
            current = self.context.bufferstr[i]
            docmatch = self.docparser.RE_DOCS.match(current)
            if docmatch is not None:
                docblock.append(docmatch.group("docstring"))
                docline = i
                doclength += len(current) + 1
            else:
                # Docstrings must be contiguous; stop at the first non-doc line.
                break
            i -= 1
    # Reverse the docblock list since we were going backwards and appending.
    if len(docblock) > 0:
        docblock.reverse()
    # Now handle the lines following the first line. Also handle the
    # possibility of multiple, separate blocks that are still valid XML.
    # We have to keep going until we have exceeded the operational changes
    # or found the decorating element.
    i = self.ibuffer[0] + 1
    while (i < len(self.context.bufferstr)
           and (i < self.ibuffer[1] or len(docblock) > 0)):
        line = self.context.bufferstr[i]
        docmatch = self.docparser.RE_DOCS.match(line)
        if docmatch is not None:
            docblock.append(docmatch.group("docstring"))
            # NOTE(review): unlike the passes above, this does not add +1
            # for the stripped newline — confirm whether that is intended.
            doclength += len(line)
            if docline == 0:
                docline = i
            # Only track actual documentation lines that are within the
            # operations list of lines.
            if i < self.ibuffer[1]:
                self._doclines.append(i)
        elif len(docblock) > 0:
            # A non-doc line closes the current block; the line itself names
            # the element the docstrings decorate.
            key = self._docstring_key(line)
            result.append((docblock, docline, doclength, key))
            docblock = []
            docline = 0
            doclength = 0
        # We need to exit the loop if we have exceeded the length of
        # the operational changes.
        if len(docblock) == 0 and i > self.ibuffer[1]:
            break
        i += 1
    return result
def _chimera_neighbors ( M , N , L , q ) : "Returns a list of neighbors of ( x , y , u , k ) in a perfect : math : ` C _ { M , N , L } `"
( x , y , u , k ) = q n = [ ( x , y , 1 - u , l ) for l in range ( L ) ] if u == 0 : if x : n . append ( ( x - 1 , y , u , k ) ) if x < M - 1 : n . append ( ( x + 1 , y , u , k ) ) else : if y : n . append ( ( x , y - 1 , u , k ) ) if y < N - 1 : n . append ( ( x , y + 1 , u , k ) ) return n
def project_texture(texture_xy, texture_z, angle=DEFAULT_ANGLE):
    """Create a texture by adding z-values to an existing texture and projecting.

    When working with surfaces you can either (1) project the surface and map
    a texture onto the projection, or (2) map a texture onto the surface and
    project the result.  Method (1) is preferred because occlusion removal is
    easier; this function implements (2) for cases where occlusion removal
    does not matter.

    Args:
        texture_xy (texture): the (x, y) texture to project
        texture_z (np.array): the Z-values to use in the projection
        angle (float): the angle to project at, in degrees
            (0 = overhead, 90 = side view)

    Returns:
        layer: A layer.
    """
    # Projection coefficients for a viewpoint tilted `angle` degrees
    # away from directly overhead.
    z_coef = np.sin(np.radians(angle))
    y_coef = np.cos(np.radians(angle))
    # Bug fix: the original unpacked the undefined name `texture` and used
    # undefined `surface_z`; the parameters are `texture_xy`/`texture_z`,
    # so every call raised NameError.
    surface_x, surface_y = texture_xy
    return (surface_x, -surface_y * y_coef + texture_z * z_coef)
def as_colr(self, label_args=None, type_args=None, value_args=None):
    """Like __str__, except it returns a colorized Colr instance."""
    label_args = label_args or {'fore': 'red'}
    type_args = type_args or {'fore': 'yellow'}
    value_args = value_args or {'fore': 'blue', 'style': 'bright'}
    # One comma-separated Colr of accepted values per style type.
    per_type = (
        Colr(', ').join(Colr(v, **type_args) for v in t[1])
        for t in _stylemap
    )
    label = Colr(':\n ').join(
        Colr('Expecting style value', **label_args),
        Colr(',\n ').join(per_type),
    )
    value = Colr(repr(self.value), **value_args)
    return Colr(self.default_format.format(label=label, value=value))
def writeMibObjects(self, *varBinds, **context):
    """Create, destroy or modify Managed Object Instances.

    Given one or more :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects,
    create, destroy or modify all or none of the referenced Managed Object
    Instances.  Writing a non-existing instance creates it with the value
    in `varBinds`; writing an existing one changes its value — except for
    SMI table `RowStatus` columns, where the outcome follows the
    `RowStatus` transition (row creation/destruction, on/offline), with a
    consistency check when a row is activated.

    Parameters
    ----------
    varBinds:
        :py:class:`tuple` of :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        objects representing Managed Objects Instances to modify.

    Other Parameters
    ----------------
    \*\*context:
        Query parameters:
        * `cbFun` (callable) - invoked with the new value of each Managed
          Object Instance or with an error.  Defaults to a handler that
          raises on error.
        * `acFun` (callable) - invoked to authorize access to the requested
          instance; no access control is performed when omitted.

    Notes
    -----
    Errors are delivered through the `errors` key of the callback's
    `context` dict.  Referencing a non-existing Managed Object is not an
    error; the returned varBinds carry :py:class:`NoSuchObject` or
    :py:class:`NoSuchInstance` instead.
    """
    # Install the default error handler (which raises) when the caller
    # did not supply a callback of their own.
    context.setdefault('cbFun', self._defaultErrorHandler)
    self.flipFlopFsm(self.FSM_WRITE_VAR, *varBinds, **context)
def editRecord(self, record, pos=None):
    """Prompt the user to edit *record* with the editor registered for
    its type (see ``setRecordEditors``).

    :param record: <orb.Table> — the record to edit.
    :param pos: optional position at which to show the popup editor.
    :return: <bool> | success — False when no editor is registered for
             the record's type, True otherwise.
    """
    typ = type(record)
    editor = self._recordEditors.get(typ)
    if not editor:
        # No editor registered for this record type.
        return False

    if self.popupEditing():
        popup = self.popupWidget()
        edit = popup.centralWidget()

        # create a new editor if required
        if type(edit) != editor:
            if edit:
                # Dispose of the previous editor widget.
                edit.close()
                edit.deleteLater()

            edit = editor(popup)
            edit.setAutoCommitOnSave(True)
            popup.setCentralWidget(edit)

            # Wire popup acceptance to saving, and propagate editor
            # save events to this widget's update/refresh hooks.
            popup.accepted.connect(edit.save)
            edit.aboutToSaveRecord.connect(self.recordUpdated)
            edit.saved.connect(self.refresh)

        edit.setRecord(record)
        popup.popup(pos)
    else:
        # Modal editing path: commit manually only after a successful edit.
        if editor.edit(record, autoCommit=False):
            self.recordUpdated.emit(record)
            record.commit()
            self.refresh()
    return True
def extract_pp_helices(in_pdb):
    """Use DSSP to find polyproline-II helices in a PDB file.

    Runs the external ``dssp`` binary, keeps residues that DSSP leaves
    unassigned (ss type ``' '``) and whose phi/psi angles fall inside the
    PPII window (phi = -75 +/- 29, psi = 145 +/- 29 degrees), groups
    consecutive matches into runs longer than one residue, and returns the
    CA coordinates of each run.

    Parameters
    ----------
    in_pdb : string
        Path to a PDB file.

    Returns
    -------
    list
        One list per helix containing ``(x, y, z)`` CA coordinate tuples.
    """
    # PPII dihedral window (degrees): centre value +/- tolerance.
    t_phi = -75.0
    t_phi_d = 29.0
    t_psi = 145.0
    t_psi_d = 29.0
    # Bug fix: on Python 3, check_output returns bytes, so the character
    # comparisons below (line[2] == '#', item[1] == ' ') never matched and
    # the function silently found nothing.  Decode the output to text.
    pph_dssp = subprocess.check_output(
        [global_settings['dssp']['path'], in_pdb]).decode()
    dssp_residues = []
    go = False
    for line in pph_dssp.splitlines():
        if go:
            # Fixed-column DSSP residue record.
            res_num = int(line[:5].strip())
            chain = line[11:13].strip()
            ss_type = line[16]
            phi = float(line[103:109].strip())
            psi = float(line[109:116].strip())
            dssp_residues.append((res_num, ss_type, chain, phi, psi))
        elif line[2] == '#':
            # The residue table begins after the header line marked '#'.
            go = True
    # Group consecutive unassigned residues that sit inside the PPII window.
    pp_chains = []
    chain = []
    ch_on = False
    for item in dssp_residues:
        in_window = (item[1] == ' '
                     and t_phi - t_phi_d < item[3] < t_phi + t_phi_d
                     and t_psi - t_psi_d < item[4] < t_psi + t_psi_d)
        if in_window:
            chain.append(item)
            ch_on = True
        elif ch_on:
            pp_chains.append(chain)
            chain = []
            ch_on = False
    # NOTE(review): a run that extends to the very last residue is never
    # flushed into pp_chains (preserved from the original) — confirm
    # whether trailing runs should be kept.
    # A single matching residue is not a helix.
    pp_chains = [x for x in pp_chains if len(x) > 1]
    pp_helices = []
    with open(in_pdb, 'r') as pdb:
        pdb_atoms = split_pdb_lines(pdb.read())
    for pp_helix in pp_chains:
        chain_id = pp_helix[0][2]
        res_range = [x[0] for x in pp_helix]
        # CA coordinates of the residues belonging to this run.
        helix = [tuple(atom[8:11]) for atom in pdb_atoms
                 if atom[2] == "CA" and atom[5] == chain_id
                 and atom[6] in res_range]
        pp_helices.append(helix)
    return pp_helices
def intersectingInterval(self, start, end):
    """Collect the intervals in this subtree that intersect [start, end].

    :param start: start of the intersecting interval
    :param end: end of the intersecting interval
    :return: the list of intersected intervals
    """
    # An interval x intersects the query unless it starts after the query
    # ends or ends before the query starts; with openEnded trees the
    # endpoints themselves do not count as overlap.
    if self.openEnded:
        hits = [x for x in self.data.starts
                if x.start < end and x.end > start]
    else:
        hits = [x for x in self.data.starts
                if x.start <= end and x.end >= start]
    # Descend left when the query reaches the split point or below it.
    if self.left is not None and start <= self.data.mid:
        hits += self.left.intersectingInterval(start, end)
    # Descend right when the query reaches the split point or above it.
    if self.right is not None and end >= self.data.mid:
        hits += self.right.intersectingInterval(start, end)
    return hits
def tag(self, *tags):
    """Tag the job with one or more unique identifiers.

    Tags must be hashable; duplicate tags are discarded by the
    underlying set.

    :param tags: A unique list of ``Hashable`` tags.
    :return: The invoked job instance
    :raises TypeError: if any tag is not hashable.
    """
    # `collections.Hashable` was deprecated in Python 3.3 and removed in
    # 3.10; the ABC lives in `collections.abc`.
    from collections.abc import Hashable
    if not all(isinstance(tag, Hashable) for tag in tags):
        raise TypeError('Tags must be hashable')
    self.tags.update(tags)
    return self
def parse_command_line(self, argv=None):
    """Parse the jhubctl command line arguments.

    This overwrites traitlets' default ``parse_command_line`` method and
    tailors it to jhubctl's needs: it handles help/version/config flags,
    enforces the ``jhubctl <action> <resource> [<name>]`` shape, records
    the parsed pieces on ``self`` and finally delegates remaining flags
    to the traitlets config loader.

    :param argv: argument list to parse; defaults to ``sys.argv[1:]``.
    """
    argv = sys.argv[1:] if argv is None else argv
    self.argv = [py3compat.cast_unicode(arg) for arg in argv]

    # Append Provider Class to the list of configurable items.
    ProviderClass = getattr(providers, self.provider_type)
    self.classes.append(ProviderClass)

    # Help / version short-circuit before any validation.
    if any(x in self.argv for x in ('-h', '--help-all', '--help')):
        self.print_help('--help-all' in self.argv)
        self.exit(0)

    if '--version' in self.argv or '-V' in self.argv:
        self.print_version()
        self.exit(0)

    # Generate a configuration file if flag is given.
    if '--generate-config' in self.argv:
        conf = self.generate_config_file()
        with open(self.config_file, 'w') as f:
            f.write(conf)
        self.exit(0)

    # If not config, parse commands.
    #
    # Run sanity checks.
    # Check that the minimum number of arguments have been called.
    if len(self.argv) < 2:
        raise JhubctlError(
            "Not enough arguments. \n\n"
            "Expected: jhubctl <action> <resource> <name>")

    # Check action
    self.resource_action = self.argv[0]
    if self.resource_action not in self.subcommands:
        raise JhubctlError(
            f"Subcommand is not recognized; must be one of these: "
            f"{self.subcommands}")

    # Check resource
    self.resource_type = self.argv[1]
    if self.resource_type not in self.resources:
        # NOTE(review): these adjacent f-strings concatenate without a
        # space ("...must one of theseresources:") and drop a word
        # ("must [be] one of") — the message text is left untouched here;
        # fix the literal in a follow-up.
        raise JhubctlError(
            f"First argument after a subcommand must one of these"
            f"resources: {self.resources}")

    # Get name of resource.
    try:
        self.resource_name = self.argv[2]
    except IndexError:
        # Only the "get" action may omit the resource name (list mode).
        if self.resource_action != "get":
            raise JhubctlError(
                "Not enough arguments. \n\n"
                "Expected: jhubctl <action> <resource> <name>")
        else:
            self.resource_name = None

    # flatten flags & aliases, so cl-args get appropriate priority:
    flags, aliases = self.flatten_flags()
    loader = KVArgParseConfigLoader(
        argv=argv, aliases=aliases, flags=flags, log=self.log)
    config = loader.load_config()
    self.update_config(config)
    # store unparsed args in extra_args
    self.extra_args = loader.extra_args
def _gatk4_cmd(jvm_opts, params, data):
    """Build the unified shell command for GATK4 via the 'gatk' wrapper.

    GATK3 is invoked as 'gatk3' elsewhere.  ``data`` is accepted for
    interface parity with sibling command builders but is unused here.
    """
    # Locate the 'gatk' wrapper installed next to the current interpreter.
    bin_dir = os.path.dirname(os.path.realpath(sys.executable))
    gatk_cmd = utils.which(os.path.join(bin_dir, "gatk"))
    java_env = utils.clear_java_home()
    java_path = utils.get_java_binpath(gatk_cmd)
    opts = " ".join(jvm_opts)
    args = " ".join([str(x) for x in params])
    return "%s && export PATH=%s:\"$PATH\" && gatk --java-options '%s' %s" % (
        java_env, java_path, opts, args)
def remove(self, **kwargs):
    """Remove this container. Similar to the ``docker rm`` command.

    Args:
        v (bool): Remove the volumes associated with the container
        link (bool): Remove the specified link and not the underlying
            container
        force (bool): Force the removal of a running container
            (uses ``SIGKILL``)

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    # Thin delegation to the low-level API client.
    api = self.client.api
    return api.remove_container(self.id, **kwargs)
def sleep(self, time):
    """Perform an asyncio sleep for the time specified in seconds.

    This method should be used in place of time.sleep() so the event
    loop keeps servicing other tasks.

    :param time: time in seconds
    :returns: No return value
    """
    try:
        task = asyncio.ensure_future(self.core.sleep(time))
        self.loop.run_until_complete(task)
    except (asyncio.CancelledError, RuntimeError):
        # A cancelled sleep or an unusable/closed loop is silently
        # ignored, matching the best-effort contract of this helper.
        pass
def _M2_const ( Xvar , mask_X , xvarsum , xconst , Yvar , mask_Y , yvarsum , yconst , weights = None ) : r"""Computes the unnormalized covariance matrix between X and Y , exploiting constant input columns Computes the unnormalized covariance matrix : math : ` C = X ^ \ top Y ` ( for symmetric = False ) or : math : ` C = \ frac { 1 } { 2 } ( X ^ \ top Y + Y ^ \ top X ) ` ( for symmetric = True ) . Suppose the data matrices can be column - permuted to have the form . . math : X & = & ( X _ { \ mathrm { var } } , X _ { \ mathrm { const } } ) Y & = & ( Y _ { \ mathrm { var } } , Y _ { \ mathrm { const } } ) with rows : . . math : x _ t & = & ( x _ { \ mathrm { var } , t } , x _ { \ mathrm { const } } ) y _ t & = & ( y _ { \ mathrm { var } , t } , y _ { \ mathrm { const } } ) where : math : ` x _ { \ mathrm { const } } , \ : y _ { \ mathrm { const } } ` are constant vectors . The resulting matrix has the general form : . . math : C & = & [ X _ { \ mathrm { var } } ^ \ top Y _ { \ mathrm { var } } x _ { sum } y _ { \ mathrm { const } } ^ \ top ] & & [ x _ { \ mathrm { const } } ^ \ top y _ { sum } ^ \ top x _ { sum } x _ { sum } ^ \ top ] where : math : ` x _ { sum } = \ sum _ t x _ { \ mathrm { var } , t } ` and : math : ` y _ { sum } = \ sum _ t y _ { \ mathrm { var } , t } ` . Parameters Xvar : ndarray ( T , m ) Part of the data matrix X with : math : ` m \ le M ` variable columns . mask _ X : ndarray ( M ) Boolean array of size M of the full columns . False for constant column , True for variable column in X . xvarsum : ndarray ( m ) Column sum of variable part of data matrix X xconst : ndarray ( M - m ) Values of the constant part of data matrix X Yvar : ndarray ( T , n ) Part of the data matrix Y with : math : ` n \ le N ` variable columns . mask _ Y : ndarray ( N ) Boolean array of size N of the full columns . False for constant column , True for variable column in Y . 
yvarsum : ndarray ( n ) Column sum of variable part of data matrix Y yconst : ndarray ( N - n ) Values of the constant part of data matrix Y weights : None or ndarray ( N ) weights for all time steps . Returns C : ndarray ( M , N ) Unnormalized covariance matrix ."""
C = np . zeros ( ( len ( mask_X ) , len ( mask_Y ) ) ) # Block 11 C [ np . ix_ ( mask_X , mask_Y ) ] = _M2_dense ( Xvar , Yvar , weights = weights ) # other blocks xsum_is_0 = _is_zero ( xvarsum ) ysum_is_0 = _is_zero ( yvarsum ) xconst_is_0 = _is_zero ( xconst ) yconst_is_0 = _is_zero ( yconst ) # TODO : maybe we don ' t need the checking here , if we do the decision in the higher - level function M2 # TODO : if not zero , we could still exploit the zeros in const and compute ( and write ! ) this outer product # TODO : only to a sub - matrix # Block 12 and 21 if weights is not None : wsum = np . sum ( weights ) xvarsum = np . sum ( weights [ : , None ] * Xvar , axis = 0 ) yvarsum = np . sum ( weights [ : , None ] * Yvar , axis = 0 ) else : wsum = Xvar . shape [ 0 ] if not ( xsum_is_0 or yconst_is_0 ) or not ( ysum_is_0 or xconst_is_0 ) : C [ np . ix_ ( mask_X , ~ mask_Y ) ] = np . outer ( xvarsum , yconst ) C [ np . ix_ ( ~ mask_X , mask_Y ) ] = np . outer ( xconst , yvarsum ) # Block 22 if not ( xconst_is_0 or yconst_is_0 ) : C [ np . ix_ ( ~ mask_X , ~ mask_Y ) ] = np . outer ( wsum * xconst , yconst ) return C
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
    """Flag a violation if the algo trades outside the asset's lifetime.

    Fails (via ``handle_violation``) when the algo datetime falls before
    the asset's start_date or after its end_date.  Zero-share orders pass
    silently.
    """
    # If the order is for 0 shares, then silently pass through.
    if amount == 0:
        return

    algo_day = pd.Timestamp(algo_datetime).normalize()

    # Fail if the algo is before this Asset's start_date.
    if asset.start_date:
        first_day = pd.Timestamp(asset.start_date).normalize()
        if algo_day < first_day:
            self.handle_violation(
                asset, amount, algo_datetime,
                metadata={'asset_start_date': first_day})

    # Fail if the algo has passed this Asset's end_date.
    if asset.end_date:
        last_day = pd.Timestamp(asset.end_date).normalize()
        if algo_day > last_day:
            self.handle_violation(
                asset, amount, algo_datetime,
                metadata={'asset_end_date': last_day})
def sync(self):
    """Sync the parent app's changes into the current app instance.

    :return: Synced App object.
    """
    sync_url = self._URL['sync'].format(id=self.id)
    payload = self._api.post(url=sync_url).json()
    return App(api=self._api, **payload)
def rdf_graph_from_yaml(yaml_root):
    """Convert the YAML object into an RDF Graph object.

    Each top-level entry must be a single-key mapping; the key becomes a
    root node whose relations are added recursively.
    """
    graph = Graph()
    for entry in yaml_root:
        # Each entry is expected to hold exactly one root node.
        assert len(entry) == 1
        root_node = next(iter(entry))
        build_relations(graph, root_node, entry[root_node], None)
    return graph
def get_request_feature(self, name):
    """Parse a feature out of the request's query parameters.

    Arguments:
        name: A feature name.

    Returns:
        The parsed value when the feature is supported; otherwise None
        for array/scalar features and {} for object features.
    """
    supported = name in self.features
    if '[]' in name:
        # Array-type feature: collected from repeated query parameters.
        return self.request.query_params.getlist(name) if supported else None
    if '{}' in name:
        # Object-type feature: keys are not consistent between requests.
        return self._extract_object_params(name) if supported else {}
    # Scalar feature.
    return self.request.query_params.get(name) if supported else None
def vlm_set_input(self, psz_name, psz_input):
    '''Set a media's input MRL. This will delete all existing inputs and
    add the specified one.
    @param psz_name: the media to work on.
    @param psz_input: the input MRL.
    @return: 0 on success, -1 on error.
    '''
    # The C API wants byte strings for both the media name and the MRL.
    name_bytes = str_to_bytes(psz_name)
    input_bytes = str_to_bytes(psz_input)
    return libvlc_vlm_set_input(self, name_bytes, input_bytes)
def average(self, dt=None):
    """Return this instance shifted to the midpoint between itself and
    a given instance (defaults to now).

    :type dt: Date or date
    :rtype: Date
    """
    other = Date.today() if dt is None else dt
    # Half the (signed) day difference, truncated toward zero.
    half_span = int(self.diff(other, False).in_days() / 2)
    return self.add(days=half_span)
def add_coord(self, x, y):
    """Append a scaled coordinate to the polyline and draw a marker circle.

    Coordinates are scaled by the widget's x/y factors; once more than
    ``maxlen`` circles exist, the oldest one is removed.
    """
    scaled_x = x * self.x_factor
    scaled_y = y * self.y_factor
    self.plotData.add_coord(scaled_x, scaled_y)
    marker = gui.SvgCircle(scaled_x, scaled_y, self.circle_radius)
    self.circles_list.append(marker)
    self.append(marker)
    # Trim to the capacity cap: detach the oldest marker, then forget it.
    if len(self.circles_list) > self.maxlen:
        oldest = self.circles_list[0]
        self.remove_child(oldest)
        del self.circles_list[0]