idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
42,300
def chunks(seq, chunk_size):
    """Split *seq* into chunk_size-sized chunks (the last may be shorter)."""
    return (seq[start:start + chunk_size]
            for start in range(0, len(seq), chunk_size))
Split seq into chunk_size - sized chunks .
42,301
def load_csv_data(resource_name):
    """Load the first column of the named CSV resource from clkhash package data.

    The header row is skipped. Raises ValueError when the resource is missing.
    """
    data_bytes = pkgutil.get_data('clkhash', 'data/{}'.format(resource_name))
    if data_bytes is None:
        raise ValueError("No data resource found with name {}".format(resource_name))
    reader = csv.reader(data_bytes.decode('utf8').splitlines())
    next(reader, None)  # drop the header row
    return [row[0] for row in reader]
Loads first column of specified CSV file from package data .
42,302
def save_csv(data, headers, file):
    """Write *data* rows to *file* as CSV, preceded by a comma-joined header line."""
    file.write(','.join(headers) + '\n')
    csv.writer(file).writerows(data)
Output generated data to file as CSV with header .
42,303
def random_date(start, end):
    """Generate a random datetime between the datetimes *start* and *end*.

    The result has whole-second resolution.

    :raises ValueError: when *end* precedes *start*.

    Fix over the original: when start == end the original crashed on
    ``random.randrange(0)``; now *start* is returned for that edge case.
    """
    delta = end - start
    int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
    if int_delta < 0:
        raise ValueError('end must not precede start')
    if int_delta == 0:
        return start
    random_second = random.randrange(int_delta)
    return start + timedelta(seconds=random_second)
Generate a random datetime between two datetime objects .
42,304
def generate_random_person(self, n):
    """Yield *n* tuples of (index, full name, dob 'YYYY/MM/DD', sex).

    Requires the first/last name lists to have been loaded beforehand.
    """
    assert self.all_male_first_names is not None
    assert self.all_female_first_names is not None
    assert self.all_last_names is not None
    for person_id in range(n):
        sex = 'M' if random.random() > 0.5 else 'F'
        dob = random_date(self.earliest_birthday, self.latest_birthday).strftime("%Y/%m/%d")
        first_pool = self.all_male_first_names if sex == 'M' else self.all_female_first_names
        first_name = random.choice(first_pool)
        last_name = random.choice(self.all_last_names)
        yield (str(person_id), first_name + ' ' + last_name, dob, sex)
Generator that yields details on a person with a plausible name, sex and age.
42,305
def load_names(self):
    """Load the name databases from bundled package data.

    Populates all_male_first_names, all_female_first_names and
    all_last_names from the CSV resources via load_csv_data.
    """
    self.all_male_first_names = load_csv_data('male-first-names.csv')
    self.all_female_first_names = load_csv_data('female-first-names.csv')
    self.all_last_names = load_csv_data('CSV_Database_of_Last_Names.csv')
Loads a name database from package data
42,306
def generate_subsets(self, sz, overlap=0.8, subsets=2):
    """Return *subsets* random subsets of self.names, each of size *sz*.

    All subsets share a common block of floor(overlap * sz) names; the
    remaining entries are unique to each subset.

    :raises ValueError: when self.names is too small for the request.
    """
    overlap_sz = int(math.floor(overlap * sz))
    unique_sz = sz - overlap_sz
    total_unique_sz = unique_sz * subsets
    total_sz = overlap_sz + total_unique_sz
    if total_sz > len(self.names):
        raise ValueError('insufficient names for requested size and overlap')
    sample = random.sample(self.names, total_sz)
    shared, leftovers = sample[:overlap_sz], sample[overlap_sz:]
    assert len(leftovers) == subsets * unique_sz
    return tuple(
        shared + leftovers[k * unique_sz:(k + 1) * unique_sz]
        for k in range(subsets)
    )
Return random subsets with nonempty intersection .
42,307
def _unpack_data(data):
    """Separate a sequence of examples into parallel xs, ys and metadata lists.

    Each example may be a list/tuple (via _unpack_list) or a dict (via
    _unpack_dict); any other type leaves None placeholders in all three lists.
    """
    size = len(data)
    xs = [None] * size
    ys = [None] * size
    metadata = [None] * size
    for position, example in enumerate(data):
        if isinstance(example, (list, tuple)):
            xs[position], ys[position], metadata[position] = _unpack_list(example)
        if isinstance(example, dict):
            xs[position], ys[position], metadata[position] = _unpack_dict(example)
    return xs, ys, metadata
Break Xs Ys and metadata out into separate lists for data preprocessing . Run basic data validation .
42,308
def collections(cloud=None, api_key=None, version=None, **kwargs):
    """Status-report endpoint: fetch the status of every trained custom
    collection plus basic accuracy statistics.

    NOTE: this function shadows the stdlib ``collections`` module name.
    """
    url_params = {"batch": False, "api_key": api_key, "version": version, "method": "collections"}
    return api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This is a status report endpoint . It is used to get the status on all of the collections currently trained as well as some basic statistics on their accuracies .
42,309
def vectorize(data, cloud=None, api_key=None, version=None, **kwargs):
    """Return raw feature vectors for *data* from the custom collections API.

    Batch mode is auto-detected from the input shape.
    """
    batch = detect_batch(data)
    data = data_preprocess(data, batch=batch)
    url_params = {"batch": batch, "api_key": api_key, "version": version, "method": "vectorize"}
    return api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs)
Support for raw features from the custom collections API
42,310
def _api_handler(self, *args, **kwargs):
    """Thin wrapper around the module-level api_handler that merges this
    instance's stored keywords into the call (explicit kwargs win on clash).
    """
    keyword_arguments = {}
    keyword_arguments.update(self.keywords)
    keyword_arguments.update(kwargs)
    return api_handler(*args, **keyword_arguments)
Thin wrapper around api_handler from indicoio . utils . api to add in stored keyword argument to the JSON body
42,311
def add_data(self, data, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """Training endpoint: add labelled examples (text plus a categorical or
    numeric target) to this collection.

    :raises IndicoError: when *data* is empty.

    NOTE(review): the ``batch`` parameter is immediately overwritten by
    auto-detection on the first element, so callers cannot force it.
    """
    if not len(data):
        raise IndicoError("No input data provided.")
    # batch iff the first element is itself an (x, y[, metadata]) container
    batch = isinstance(data[0], (list, tuple, dict))
    if not batch:
        data = [data]
    X, Y, metadata = _unpack_data(data)
    X = data_preprocess(X, batch=True)
    data = _pack_data(X, Y, metadata)
    if not batch:
        data = data[0]
    url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "add_data"}
    return self._api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This is the basic training endpoint . Given a piece of text and a score either categorical or numeric this endpoint will train a new model given the additional piece of information .
42,312
def train(self, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """Kick off model training on this collection's existing dataset."""
    url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': "train"}
    return self._api_handler(self.keywords['collection'], cloud=cloud, api="custom", url_params=url_params, **kwargs)
This is the basic training endpoint . Given an existing dataset this endpoint will train a model .
42,313
def info(self, cloud=None, api_key=None, version=None, **kwargs):
    """Return the current state of the model backing this collection."""
    url_params = {"batch": False, "api_key": api_key, "version": version, "method": "info"}
    return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
Return the current state of the model associated with a given collection
42,314
def remove_example(self, data, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """Remove a single training example (or a batch of them) from the
    collection, e.g. after a piece of content has been re-tagged.

    NOTE(review): the ``batch`` parameter is overwritten by detect_batch,
    so callers cannot force it.
    """
    batch = detect_batch(data)
    data = data_preprocess(data, batch=batch)
    url_params = {"batch": batch, "api_key": api_key, "version": version, 'method': 'remove_example'}
    return self._api_handler(data, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This is an API made to remove a single instance of training data . This is useful in cases where a single instance of content has been modified but the remaining examples remain valid . For example if a piece of content has been retagged .
42,315
def wait(self, interval=1, **kwargs):
    """Block until this collection's model has finished training.

    Polls info() every *interval* seconds; raises IndicoError when the
    reported status is anything other than "training" or "ready".
    """
    while True:
        status = self.info(**kwargs).get('status')
        if status == "ready":
            return
        if status != "training":
            raise IndicoError("Collection status failed with: {0}".format(status))
        time.sleep(interval)
Block until the collection's model has completed training.
42,316
def register(self, make_public=False, cloud=None, api_key=None, version=None, **kwargs):
    """Register this collection so read or write access can be shared with
    other users; make_public opens it to everyone.
    """
    kwargs['make_public'] = make_public
    url_params = {"batch": False, "api_key": api_key, "version": version, "method": "register"}
    return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This API endpoint allows you to register your collection in order to share read or write access to the collection with another user.
42,317
def authorize(self, email, permission_type='read', cloud=None, api_key=None, version=None, **kwargs):
    """Grant *email* 'read' or 'write' access to this (already registered)
    collection.
    """
    kwargs['permission_type'] = permission_type
    kwargs['email'] = email
    url_params = {"batch": False, "api_key": api_key, "version": version, "method": "authorize"}
    return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This API endpoint allows you to authorize another user to access your model in a read or write capacity . Before calling authorize you must first make sure your model has been registered .
42,318
def deauthorize(self, email, cloud=None, api_key=None, version=None, **kwargs):
    """Revoke *email*'s access to this collection."""
    kwargs['email'] = email
    url_params = {"batch": False, "api_key": api_key, "version": version, "method": "deauthorize"}
    return self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
This API endpoint allows you to remove another user's access to your collection.
42,319
def rename(self, name, cloud=None, api_key=None, version=None, **kwargs):
    """Rename the collection, then update the locally stored collection
    keyword so subsequent calls target the new name.
    """
    kwargs['name'] = name
    url_params = {"batch": False, "api_key": api_key, "version": version, "method": "rename"}
    result = self._api_handler(None, cloud=cloud, api="custom", url_params=url_params, **kwargs)
    # only update local state after the remote rename succeeded
    self.keywords['collection'] = name
    return result
If you'd like to change the name you use to access a given collection, you can call the rename endpoint. This is especially useful if the name you use for your model is not available for registration.
42,320
def convert_to_py_error(error_message):
    """Map an API error message to the most specific exception type.

    Matching is case-insensitive against the (substring, type) pairs in
    ERR_MSGS; falls back to a generic IndicoError when nothing matches.
    """
    lowered = error_message.lower()
    for fragment, exc_type in ERR_MSGS:
        if fragment in lowered:
            return exc_type(error_message)
    return IndicoError(error_message)
Raise specific exceptions for ease of error handling
42,321
def facial_localization(image, cloud=None, batch=False, api_key=None, version=None, **kwargs):
    """Find faces in *image* (a numpy ndarray or a filename).

    Returns, per face, the upper-left and lower-right corner; when the
    API's crop option is set, the cropped face is included as well.
    """
    image = data_preprocess(image, batch=batch)
    url_params = {"batch": batch, "api_key": api_key, "version": version}
    return api_handler(image, cloud=cloud, api="faciallocalization", url_params=url_params, **kwargs)
Given an image returns a list of faces found within the image . For each face we return a dictionary containing the upper left corner and lower right corner . If crop is True the cropped face is included in the dictionary . Input should be in a numpy ndarray or a filename .
42,322
def hkdf(master_secret, num_keys, hash_algo='SHA256', salt=None, info=None, key_size=DEFAULT_KEY_SIZE):
    """Derive *num_keys* keys of *key_size* bytes from *master_secret*
    using the RFC 5869 HKDF construction.

    :raises ValueError: if *hash_algo* is not a key of _HASH_FUNCTIONS.
    :return: tuple of *num_keys* byte strings.
    """
    try:
        hash_function = _HASH_FUNCTIONS[hash_algo]
    except KeyError:
        msg = "unsupported hash function '{}'".format(hash_algo)
        raise_from(ValueError(msg), None)  # suppress the KeyError context
    # derive one long buffer, then slice it into equal-sized keys
    hkdf = HKDF(algorithm=hash_function(), length=num_keys * key_size, salt=salt, info=info, backend=default_backend())
    keybytes = hkdf.derive(master_secret)
    keys = tuple(keybytes[i * key_size:(i + 1) * key_size] for i in range(num_keys))
    return keys
Executes the HKDF key derivation function as described in rfc5869 to derive num_keys keys of size key_size from the master_secret .
42,323
def validate_row_lengths(fields, data):
    """Check every row of *data* has exactly len(fields) entries.

    :raises FormatError: naming the first offending row index.
    """
    expected = len(fields)
    for row_index, row in enumerate(data):
        if len(row) != expected:
            msg = 'Row {} has {} entries when {} are expected.'.format(row_index, len(row), expected)
            raise FormatError(msg)
Validate the data row lengths according to the specification in fields .
42,324
def validate_entries(fields, data):
    """Validate each entry of each row against the matching field's
    ``validate`` method.

    Any InvalidEntryError is wrapped in an EntryError that records the
    row index and column identifier, chaining the original exception.
    """
    validators = [f.validate for f in fields]
    for i, row in enumerate(data):
        for entry, v in zip(row, validators):
            try:
                v(entry)
            except InvalidEntryError as e:
                msg = ('Invalid entry in row {row_index}, column '
                       "'{column_name}'. {original_message}").format(
                    row_index=i,
                    column_name=cast(FieldSpec, e.field_spec).identifier,
                    original_message=e.args[0])
                e_invalid_entry = EntryError(msg)
                e_invalid_entry.field_spec = e.field_spec
                e_invalid_entry.row_index = i
                raise_from(e_invalid_entry, e)
Validate the data entries according to the specification in fields .
42,325
def validate_header(fields, column_names):
    """Check *column_names* matches the identifiers in *fields*, in order.

    :raises FormatError: on a length mismatch or a differing identifier.
    """
    if len(fields) != len(column_names):
        msg = 'Header has {} columns when {} are expected.'.format(len(column_names), len(fields))
        raise FormatError(msg)
    for field, column in zip(fields, column_names):
        if field.identifier != column:
            msg = "Column has identifier '{}' when '{}' is expected.".format(column, field.identifier)
            raise FormatError(msg)
Validate the column_names according to the specification in fields .
42,326
def infer_partial_network(regressor_type, regressor_kwargs, tf_matrix, tf_matrix_gene_names, target_gene_name, target_gene_expression, include_meta=False, early_stop_window_length=EARLY_STOP_WINDOW_LENGTH, seed=DEMON_SEED):
    """Train a regressor for one target gene and extract its regulatory
    links (and, optionally, training meta data).

    :return: a links DataFrame, or (links_df, meta_df) when include_meta
        is True; on failure retry() yields the fallback (None / (None, None)).
    """
    def fn():
        # drop the target gene itself from the TF matrix before fitting
        (clean_tf_matrix, clean_tf_matrix_gene_names) = clean(tf_matrix, tf_matrix_gene_names, target_gene_name)
        try:
            trained_regressor = fit_model(regressor_type, regressor_kwargs, clean_tf_matrix, target_gene_expression, early_stop_window_length, seed)
        except ValueError as e:
            raise ValueError("Regression for target gene {0} failed. Cause {1}.".format(target_gene_name, repr(e)))
        links_df = to_links_df(regressor_type, regressor_kwargs, trained_regressor, clean_tf_matrix_gene_names, target_gene_name)
        if include_meta:
            meta_df = to_meta_df(trained_regressor, target_gene_name)
            return links_df, meta_df
        else:
            return links_df
    fallback_result = (None, None) if include_meta else None
    return retry(fn, fallback_result=fallback_result, warning_msg='infer_data failed for target {0}'.format(target_gene_name))
Ties together regressor model training with regulatory links and meta data extraction .
42,327
def create_graph(expression_matrix, gene_names, tf_names, regressor_type, regressor_kwargs, client, target_genes='all', limit=None, include_meta=False, early_stop_window_length=EARLY_STOP_WINDOW_LENGTH, repartition_multiplier=1, seed=DEMON_SEED):
    """Main API function: build the Dask computation graph that infers a
    regulatory network, one partial network per target gene.

    :return: a Dask DataFrame of links (optionally limited to the *limit*
        largest by importance), plus a meta DataFrame when include_meta.
    """
    assert expression_matrix.shape[1] == len(gene_names)
    assert client, "client is required"
    tf_matrix, tf_matrix_gene_names = to_tf_matrix(expression_matrix, gene_names, tf_names)
    # broadcast the (large) TF matrix to every worker exactly once
    future_tf_matrix = client.scatter(tf_matrix, broadcast=True)
    [future_tf_matrix_gene_names] = client.scatter([tf_matrix_gene_names], broadcast=True)
    delayed_link_dfs = []
    delayed_meta_dfs = []
    for target_gene_index in target_gene_indices(gene_names, target_genes):
        target_gene_name = delayed(gene_names[target_gene_index], pure=True)
        target_gene_expression = delayed(expression_matrix[:, target_gene_index], pure=True)
        if include_meta:
            delayed_link_df, delayed_meta_df = delayed(infer_partial_network, pure=True, nout=2)(
                regressor_type, regressor_kwargs, future_tf_matrix, future_tf_matrix_gene_names,
                target_gene_name, target_gene_expression, include_meta, early_stop_window_length, seed)
            if delayed_link_df is not None:
                delayed_link_dfs.append(delayed_link_df)
                delayed_meta_dfs.append(delayed_meta_df)
        else:
            delayed_link_df = delayed(infer_partial_network, pure=True)(
                regressor_type, regressor_kwargs, future_tf_matrix, future_tf_matrix_gene_names,
                target_gene_name, target_gene_expression, include_meta, early_stop_window_length, seed)
            if delayed_link_df is not None:
                delayed_link_dfs.append(delayed_link_df)
    all_links_df = from_delayed(delayed_link_dfs, meta=_GRN_SCHEMA)
    # NOTE(review): built from an empty list when include_meta is False —
    # presumably fine for this dask version; confirm.
    all_meta_df = from_delayed(delayed_meta_dfs, meta=_META_SCHEMA)
    if limit:
        maybe_limited_links_df = all_links_df.nlargest(limit, columns=['importance'])
    else:
        maybe_limited_links_df = all_links_df
    # partition count scales with the total worker core count
    n_parts = len(client.ncores()) * repartition_multiplier
    if include_meta:
        return maybe_limited_links_df.repartition(npartitions=n_parts), all_meta_df.repartition(npartitions=n_parts)
    else:
        return maybe_limited_links_df.repartition(npartitions=n_parts)
Main API function . Create a Dask computation graph .
42,328
def generate(self, **options):
    """Generate an image URL for *options*.

    Uses the unencrypted form when options['unsafe'] is truthy; otherwise
    delegates to generate_new for an encrypted URL.
    """
    if options.get('unsafe', False):
        return unsafe_url(**options)
    return self.generate_new(options)
Generates an encrypted URL with the specified options
42,329
def to_utf8(x):
    """Recursively UTF-8 encode *x* where possible (Python 2 code).

    Unicode strings are encoded to byte strings, iterables become lists
    of encoded items, and anything else is returned unchanged.
    """
    if isinstance(x, basestring):
        return x.encode('utf-8') if isinstance(x, unicode) else x
    try:
        l = iter(x)
    except TypeError:
        return x
    return [to_utf8(i) for i in l]
Tries to utf - 8 encode x when possible
42,330
def signing_base(self, request, consumer, token):
    """Build the OAuth 1.0 signature base string and signing key.

    Defined here (rather than in the signature module) to avoid circular
    imports. Returns (key, raw) for HMAC signing; the token secret part
    of the key is omitted when no token is given.
    """
    sig = (
        escape(request.method),
        escape(OAuthHook.get_normalized_url(request.url)),
        escape(OAuthHook.get_normalized_parameters(request)),
    )
    key = '%s&' % escape(consumer.secret)
    if token is not None:
        key += escape(token.secret)
    raw = '&'.join(sig)
    return key, raw
This method generates the OAuth signature. It's defined here to avoid circular imports.
42,331
def _split_url_string(query_string):
    """Parse *query_string* into a dict of unquoted values (Python 2 code).

    Blank values are kept; only the first value of each parameter is used.
    """
    parameters = parse_qs(to_utf8(query_string), keep_blank_values=True)
    for k, v in parameters.iteritems():
        parameters[k] = urllib.unquote(v[0])
    return parameters
Turns a query_string into a Python dictionary with unquoted values
42,332
def get_normalized_parameters(request):
    """Return the sorted, URL-encoded parameter string to be signed
    (OAuth 1.0; Python 2 code). Called by CustomSignatureMethod_HMAC_SHA1.

    Collects form-body and query-string parameters, drops any existing
    oauth_signature, and percent-encodes with space as %20 and ~ unescaped.
    """
    if ('Content-Type' not in request.headers or request.headers.get('Content-Type').startswith('application/x-www-form-urlencoded')) and not isinstance(request.data, basestring):
        data_and_params = dict(request.data.items() + request.params.items())
        for key, value in data_and_params.items():
            request.data_and_params[to_utf8(key)] = to_utf8(value)
    if request.data_and_params.has_key('oauth_signature'):
        del request.data_and_params['oauth_signature']
    items = []
    for key, value in request.data_and_params.iteritems():
        if isinstance(value, basestring):
            items.append((key, value))
        else:
            # multi-valued parameters expand to one (key, item) pair each;
            # non-iterable scalars are kept as a single pair
            try:
                value = list(value)
            except TypeError, e:
                assert 'is not iterable' in str(e)
                items.append((key, value))
            else:
                items.extend((key, item) for item in value)
    query_string = urlparse(request.url)[4]
    items.extend([(to_utf8(k), to_utf8(v)) for k, v in OAuthHook._split_url_string(query_string).items()])
    items.sort()
    return urllib.urlencode(items).replace('+', '%20').replace('%7E', '~')
Returns a string that contains the parameters that must be signed . This function is called by SignatureMethod subclass CustomSignatureMethod_HMAC_SHA1
42,333
def get_normalized_url(url):
    """Return *url* normalized for OAuth signing: default ports stripped,
    params/query/fragment dropped.

    :raises ValueError: for schemes other than http/https.
    """
    scheme, netloc, path, params, query, fragment = urlparse(url)
    if scheme == 'http' and netloc.endswith(':80'):
        netloc = netloc[:-3]
    elif scheme == 'https' and netloc.endswith(':443'):
        netloc = netloc[:-4]
    if scheme not in ('http', 'https'):
        raise ValueError("Unsupported URL %s (%s)." % (url, scheme))
    return urlunparse((scheme, netloc, path, None, None, None))
Returns a normalized url without params
42,334
def authorization_header(oauth_params):
    """Serialize *oauth_params* into an OAuth Authorization header value
    (Python 2 code; values are percent-quoted).
    """
    authorization_headers = 'OAuth realm="",'
    authorization_headers += ','.join(['{0}="{1}"'.format(k, urllib.quote(str(v))) for k, v in oauth_params.items()])
    return authorization_headers
Return Authorization header
42,335
def _prepare_input ( expression_data , gene_names , tf_names ) : if isinstance ( expression_data , pd . DataFrame ) : expression_matrix = expression_data . as_matrix ( ) gene_names = list ( expression_data . columns ) else : expression_matrix = expression_data assert expression_matrix . shape [ 1 ] == len ( gene_names ) if tf_names is None : tf_names = gene_names elif tf_names == 'all' : tf_names = gene_names else : if len ( tf_names ) == 0 : raise ValueError ( 'Specified tf_names is empty' ) if not set ( gene_names ) . intersection ( set ( tf_names ) ) : raise ValueError ( 'Intersection of gene_names and tf_names is empty.' ) return expression_matrix , gene_names , tf_names
Wrangle the inputs into the correct formats .
42,336
def _http_call(self, url, method, **kwargs):
    """Issue an HTTP request and log request/response details plus timing.

    Requires 'headers' and 'data' keys in **kwargs (read for logging).
    NOTE(review): verify=False disables TLS certificate verification.
    """
    logging.debug("Request[{0}]: {1}".format(method, url))
    start_time = datetime.datetime.now()
    logging.debug("Header: {0}".format(kwargs['headers']))
    logging.debug("Params: {0}".format(kwargs['data']))
    response = requests.request(method, url, verify=False, **kwargs)
    duration = datetime.datetime.now() - start_time
    logging.debug("Response[{0:d}]: {1}, Duration: {2}.{3}s.".format(response.status_code, response.reason, duration.seconds, duration.microseconds))
    return response
Makes a http call . Logs response information .
42,337
def calculate_width_and_height(url_parts, options):
    """Append a '<width>x<height>' segment to *url_parts* when needed.

    flip / flop negate width / height respectively; when neither
    dimension is given, flip/flop still produce "-0" markers. Nothing is
    appended when both final values are falsy.
    """
    width = options.get('width', 0)
    height = options.get('height', 0)
    has_width = width
    has_height = height
    flip = options.get('flip', False)
    flop = options.get('flop', False)
    if flip:
        width *= -1
    if flop:
        height *= -1
    if not has_width and not has_height:
        if flip:
            width = "-0"
        if flop:
            height = "-0"
    if width or height:
        url_parts.append('%sx%s' % (width, height))
Appends width and height information to url
42,338
def url_for(**options):
    """Return the URL path for *options*: the generated URL parts plus an
    MD5 hash of the image URL, joined by '/'.
    """
    url_parts = get_url_parts(**options)
    url_parts.append(hashlib.md5(b(options['image_url'])).hexdigest())
    return "/".join(url_parts)
Returns the url for the specified options
42,339
def parse_gntp(data, password=None):
    """Parse raw *data* into the matching GNTP message object.

    Dispatches on the message type found in the GNTP info line.

    :raises errors.ParseError: for unparseable input or an unknown type.
    """
    data = gntp.shim.u(data)
    match = GNTP_INFO_LINE_SHORT.match(data)
    if not match:
        raise errors.ParseError('INVALID_GNTP_INFO')
    info = match.groupdict()
    if info['messagetype'] == 'REGISTER':
        return GNTPRegister(data, password=password)
    elif info['messagetype'] == 'NOTIFY':
        return GNTPNotice(data, password=password)
    elif info['messagetype'] == 'SUBSCRIBE':
        return GNTPSubscribe(data, password=password)
    elif info['messagetype'] == '-OK':
        return GNTPOK(data)
    elif info['messagetype'] == '-ERROR':
        return GNTPError(data)
    raise errors.ParseError('INVALID_GNTP_MESSAGE')
Attempt to parse a message as a GNTP message
42,340
def _parse_info(self, data):
    """Parse the first line of a GNTP message into its info fields.

    An encryption algorithm of 'NONE' is normalized to None.

    :raises errors.ParseError: when the line doesn't match GNTP_INFO_LINE.
    """
    match = GNTP_INFO_LINE.match(data)
    if not match:
        raise errors.ParseError('ERROR_PARSING_INFO_LINE')
    info = match.groupdict()
    if info['encryptionAlgorithmID'] == 'NONE':
        info['encryptionAlgorithmID'] = None
    return info
Parse the first line of a GNTP message to get security and other info values
42,341
def set_password(self, password, encryptAlgo='MD5'):
    """Set the password-hash fields for an outgoing GNTP message.

    A falsy password clears the security info. Otherwise a time-seeded
    salt is generated and keyHash = H(H(password + salt)) is stored in
    self.info, using the chosen hash algorithm.

    :raises errors.UnsupportedError: for an unknown hash algorithm.

    NOTE(review): the no-password branch clears 'keyHashAlgorithm' while
    the success branch sets 'keyHashAlgorithmID' — inconsistent keys;
    confirm which one readers of self.info expect.
    """
    if not password:
        self.info['encryptionAlgorithmID'] = None
        self.info['keyHashAlgorithm'] = None
        return
    self.password = gntp.shim.b(password)
    self.encryptAlgo = encryptAlgo.upper()
    if not self.encryptAlgo in self.hash_algo:
        raise errors.UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
    hashfunction = self.hash_algo.get(self.encryptAlgo)
    password = password.encode('utf8')
    # salt bytes and their hex form are both derived from the current time
    seed = time.ctime().encode('utf8')
    salt = hashfunction(seed).hexdigest()
    saltHash = hashfunction(seed).digest()
    keyBasis = password + saltHash
    key = hashfunction(keyBasis).digest()
    keyHash = hashfunction(key).hexdigest()
    self.info['keyHashAlgorithmID'] = self.encryptAlgo
    self.info['keyHash'] = keyHash.upper()
    self.info['salt'] = salt.upper()
Set a password for a GNTP Message
42,342
def _decode_hex ( self , value ) : result = '' for i in range ( 0 , len ( value ) , 2 ) : tmp = int ( value [ i : i + 2 ] , 16 ) result += chr ( tmp ) return result
Helper function to decode a hex string into the string of characters it encodes.
42,343
def _validate_password(self, password):
    """Validate this GNTP message against the stored password.

    Recomputes the salted key hash from *password* and the message's
    salt, compares it to the message's keyHash, and stores self.key.

    :raises errors.AuthError: when the password is missing, keyHash is
        absent, or the hashes differ.
    :return: True on success.
    """
    self.password = password
    if password is None:
        raise errors.AuthError('Missing password')
    keyHash = self.info.get('keyHash', None)
    # NOTE(review): password is None already raised above, so the two
    # self.password-is-None checks below are dead code
    if keyHash is None and self.password is None:
        return True
    if keyHash is None:
        raise errors.AuthError('Invalid keyHash')
    if self.password is None:
        raise errors.AuthError('Missing password')
    keyHashAlgorithmID = self.info.get('keyHashAlgorithmID', 'MD5')
    password = self.password.encode('utf8')
    saltHash = self._decode_hex(self.info['salt'])
    keyBasis = password + saltHash
    self.key = self.hash_algo[keyHashAlgorithmID](keyBasis).digest()
    keyHash = self.hash_algo[keyHashAlgorithmID](self.key).hexdigest()
    if not keyHash.upper() == self.info['keyHash'].upper():
        raise errors.AuthError('Invalid Hash')
    return True
Validate GNTP Message against stored password
42,344
def validate(self):
    """Raise errors.ParseError for any missing required notification header."""
    for required in self._requiredHeaders:
        if not self.headers.get(required, False):
            raise errors.ParseError('Missing Notification Header: ' + required)
Verify required headers
42,345
def _format_info ( self ) : info = 'GNTP/%s %s' % ( self . info . get ( 'version' ) , self . info . get ( 'messagetype' ) , ) if self . info . get ( 'encryptionAlgorithmID' , None ) : info += ' %s:%s' % ( self . info . get ( 'encryptionAlgorithmID' ) , self . info . get ( 'ivValue' ) , ) else : info += ' NONE' if self . info . get ( 'keyHashAlgorithmID' , None ) : info += ' %s:%s.%s' % ( self . info . get ( 'keyHashAlgorithmID' ) , self . info . get ( 'keyHash' ) , self . info . get ( 'salt' ) ) return info
Generate info line for GNTP Message
42,346
def _parse_dict(self, data):
    """Parse a block of 'Key: Value' GNTP header lines into a dict.

    Lines that don't match GNTP_HEADER are silently skipped; later
    duplicates overwrite earlier ones.
    """
    d = {}
    for line in data.split('\r\n'):
        match = GNTP_HEADER.match(line)
        if not match:
            continue
        key = match.group(1).strip()
        val = match.group(2).strip()
        d[key] = val
    return d
Helper function to parse blocks of GNTP headers into a dictionary
42,347
def add_resource(self, data):
    """Store binary *data* keyed by its MD5 hex digest and return the
    x-growl-resource:// URI used to reference it from headers.
    """
    data = gntp.shim.b(data)
    identifier = hashlib.md5(data).hexdigest()
    self.resources[identifier] = data
    return 'x-growl-resource://%s' % identifier
Add binary resource
42,348
def decode(self, data, password=None):
    """Decode a GNTP message: parse the info line and the first header block.

    Stores the raw text, info dict and headers dict on the instance.
    """
    self.password = password
    self.raw = gntp.shim.u(data)
    parts = self.raw.split('\r\n\r\n')
    self.info = self._parse_info(self.raw)
    self.headers = self._parse_dict(parts[0])
Decode GNTP Message
42,349
def validate(self):
    """Check required registration headers and, for every notification,
    its required notification headers.

    :raises errors.ParseError: naming the first missing header.
    """
    for required in self._requiredHeaders:
        if not self.headers.get(required, False):
            raise errors.ParseError('Missing Registration Header: ' + required)
    for notice in self.notifications:
        for required in self._requiredNotificationHeaders:
            if not notice.get(required, False):
                raise errors.ParseError('Missing Notification Header: ' + required)
Validate required headers and validate notification headers
42,350
def decode(self, data, password):
    """Decode an existing GNTP registration message.

    Parses the info line, validates the password, reads the main header
    block, then classifies each remaining section as a notification
    (has Notification-Name) or a binary resource (has Identifier).
    """
    self.raw = gntp.shim.u(data)
    parts = self.raw.split('\r\n\r\n')
    self.info = self._parse_info(self.raw)
    self._validate_password(password)
    self.headers = self._parse_dict(parts[0])
    for i, part in enumerate(parts):
        if i == 0:
            continue  # already parsed as the main header block
        if part.strip() == '':
            continue
        notice = self._parse_dict(part)
        if notice.get('Notification-Name', False):
            self.notifications.append(notice)
        elif notice.get('Identifier', False):
            notice['Data'] = self._decode_binary(part, notice)
            self.resources[notice.get('Identifier')] = notice
Decode existing GNTP Registration message
42,351
def add_notification(self, name, enabled=True):
    """Append a notification type to this registration message and
    refresh the Notifications-Count header accordingly.
    """
    self.notifications.append({
        'Notification-Name': name,
        'Notification-Enabled': enabled,
    })
    self.add_header('Notifications-Count', len(self.notifications))
Add new Notification to Registration message
42,352
def encode(self):
    """Serialize this GNTP registration message.

    Layout: info line, main headers, a header block per notification,
    then each binary resource as Identifier/Length headers plus raw data.
    """
    buff = _GNTPBuffer()
    buff.writeln(self._format_info())
    for k, v in self.headers.items():
        buff.writeheader(k, v)
    buff.writeln()
    if len(self.notifications) > 0:
        for notice in self.notifications:
            for k, v in notice.items():
                buff.writeheader(k, v)
            buff.writeln()
    for resource, data in self.resources.items():
        buff.writeheader('Identifier', resource)
        buff.writeheader('Length', len(data))
        buff.writeln()
        buff.write(data)
        buff.writeln()
        buff.writeln()
    return buff.getvalue()
Encode a GNTP Registration Message
42,353
def register(self):
    """Build and send a GNTP registration for this application.

    Registers every notification type (enabled iff listed in
    defaultNotifications), attaches the application icon either as a URL
    or as an inline resource, and applies password and origin info.
    """
    logger.info('Sending registration to %s:%s', self.hostname, self.port)
    register = gntp.core.GNTPRegister()
    register.add_header('Application-Name', self.applicationName)
    for notification in self.notifications:
        enabled = notification in self.defaultNotifications
        register.add_notification(notification, enabled)
    if self.applicationIcon:
        if self._checkIcon(self.applicationIcon):
            register.add_header('Application-Icon', self.applicationIcon)
        else:
            # not a URL: embed the icon bytes as a message resource
            resource = register.add_resource(self.applicationIcon)
            register.add_header('Application-Icon', resource)
    if self.password:
        register.set_password(self.password, self.passwordHash)
    self.add_origin_info(register)
    self.register_hook(register)
    return self._send('register', register)
Send GNTP Registration
42,354
def notify(self, noteType, title, description, icon=None, sticky=False, priority=None, callback=None, identifier=None, custom=None):
    """Build and send a GNTP notification.

    :param noteType: a notification name previously registered
    :param custom: optional mapping of extra headers

    Fix over the original: ``custom`` defaulted to a mutable ``{}``,
    a shared-state hazard; it now defaults to None (same behavior).
    """
    if custom is None:
        custom = {}
    logger.info('Sending notification [%s] to %s:%s', noteType, self.hostname, self.port)
    assert noteType in self.notifications
    notice = gntp.core.GNTPNotice()
    notice.add_header('Application-Name', self.applicationName)
    notice.add_header('Notification-Name', noteType)
    notice.add_header('Notification-Title', title)
    if self.password:
        notice.set_password(self.password, self.passwordHash)
    if sticky:
        notice.add_header('Notification-Sticky', sticky)
    if priority:
        notice.add_header('Notification-Priority', priority)
    if icon:
        if self._checkIcon(icon):
            notice.add_header('Notification-Icon', icon)
        else:
            # not a URL: embed the icon bytes as a message resource
            resource = notice.add_resource(icon)
            notice.add_header('Notification-Icon', resource)
    if description:
        notice.add_header('Notification-Text', description)
    if callback:
        notice.add_header('Notification-Callback-Target', callback)
    if identifier:
        notice.add_header('Notification-Coalescing-ID', identifier)
    for key in custom:
        notice.add_header(key, custom[key])
    self.add_origin_info(notice)
    self.notify_hook(notice)
    return self._send('notify', notice)
Send a GNTP notification.
42,355
def subscribe(self, id, name, port):
    """Send a GNTP SUBSCRIBE request to a remote machine.

    NOTE: parameter ``id`` shadows the builtin of the same name.
    """
    sub = gntp.core.GNTPSubscribe()
    sub.add_header('Subscriber-ID', id)
    sub.add_header('Subscriber-Name', name)
    sub.add_header('Subscriber-Port', port)
    if self.password:
        sub.set_password(self.password, self.passwordHash)
    self.add_origin_info(sub)
    self.subscribe_hook(sub)
    return self._send('subscribe', sub)
Send a Subscribe request to a remote machine
42,356
def add_origin_info(self, packet):
    """Attach the optional Origin-* headers (machine, software, platform)."""
    packet.add_header('Origin-Machine-Name', platform.node())
    packet.add_header('Origin-Software-Name', 'gntp.py')
    packet.add_header('Origin-Software-Version', __version__)
    packet.add_header('Origin-Platform-Name', platform.system())
    packet.add_header('Origin-Platform-Version', platform.platform())
Add optional Origin headers to message
42,357
def _send ( self , messagetype , packet ) : packet . validate ( ) data = packet . encode ( ) logger . debug ( 'To : %s:%s <%s>\n%s' , self . hostname , self . port , packet . __class__ , data ) s = socket . socket ( socket . AF_INET , socket . SOCK_STREAM ) s . settimeout ( self . socketTimeout ) try : s . connect ( ( self . hostname , self . port ) ) s . send ( data ) recv_data = s . recv ( 1024 ) while not recv_data . endswith ( gntp . shim . b ( "\r\n\r\n" ) ) : recv_data += s . recv ( 1024 ) except socket . error : exc = sys . exc_info ( ) [ 1 ] raise errors . NetworkError ( exc ) response = gntp . core . parse_gntp ( recv_data ) s . close ( ) logger . debug ( 'From : %s:%s <%s>\n%s' , self . hostname , self . port , response . __class__ , response ) if type ( response ) == gntp . core . GNTPOK : return True logger . error ( 'Invalid response: %s' , response . error ( ) ) return response . error ( )
Send the GNTP Packet
42,358
def local_path ( force_download = False ) : return cache . fetch ( filename = ALLELE_XML_FILENAME , url = ALLELE_XML_URL , decompress = ALLELE_XML_DECOMPRESS , force = force_download )
Downloads allele database from IEDB returns local path to XML file .
42,359
def delete ( ) : path = cache . local_path ( filename = ALLELE_XML_FILENAME , url = ALLELE_XML_URL , decompress = ALLELE_XML_DECOMPRESS ) os . remove ( path )
Deletes local XML file
42,360
def load_alleles ( ) : result = [ ] path = local_path ( ) etree = xml . etree . ElementTree . parse ( path ) for allele in etree . iterfind ( "MhcAlleleName" ) : name_element = allele . find ( "DisplayedRestriction" ) mhc_class_element = allele . find ( "Class" ) if name_element is None or mhc_class_element is None : continue name = name_element . text synonyms = set ( [ ] ) for synonym_element in allele . iterfind ( "Synonyms" ) : for synonym in synonym_element . text . split ( "," ) : synonyms . add ( synonym . strip ( ) ) mhc_class = mhc_class_element . text organism_element = allele . find ( "Organsim" ) if organism_element is None : organism = None else : organism = organism_element . text locus_element = allele . find ( "Locus" ) if locus_element is None : locus = None else : locus = locus_element . text allele_object = Allele ( name = name , mhc_class = mhc_class , locus = locus , organism = organism , synonyms = synonyms ) result . append ( allele_object ) return result
Parses the IEDB MhcAlleleName XML file and returns a list of Allele namedtuple objects containing information about that each allele s HLA class and source organism .
42,361
def load_alleles_dict ( ) : alleles = load_alleles ( ) result = { } for allele in alleles : for name in { allele . name } . union ( allele . synonyms ) : result [ name ] = allele return result
Create a dictionary mapping each unique allele name to a namedtuple containing information about that alleles class locus species &c .
42,362
async def async_run_command ( self , command , retry = False ) : if not self . is_connected : await self . async_connect ( ) try : result = await asyncio . wait_for ( self . _client . run ( "%s && %s" % ( _PATH_EXPORT_COMMAND , command ) ) , 9 ) except asyncssh . misc . ChannelOpenError : if not retry : await self . async_connect ( ) return self . async_run_command ( command , retry = True ) else : self . _connected = False _LOGGER . error ( "No connection to host" ) return [ ] except TimeoutError : del self . _client self . _connected = False _LOGGER . error ( "Host timeout." ) return [ ] self . _connected = True return result . stdout . split ( '\n' )
Run commands through an SSH connection .
42,363
async def async_connect ( self ) : kwargs = { 'username' : self . _username if self . _username else None , 'client_keys' : [ self . _ssh_key ] if self . _ssh_key else None , 'port' : self . _port , 'password' : self . _password if self . _password else None , 'known_hosts' : None } self . _client = await asyncssh . connect ( self . _host , ** kwargs ) self . _connected = True
Fetches the client or creates a new one .
42,364
async def async_run_command ( self , command , first_try = True ) : await self . async_connect ( ) try : with ( await self . _io_lock ) : self . _writer . write ( '{}\n' . format ( "%s && %s" % ( _PATH_EXPORT_COMMAND , command ) ) . encode ( 'ascii' ) ) data = ( ( await asyncio . wait_for ( self . _reader . readuntil ( self . _prompt_string ) , 9 ) ) . split ( b'\n' ) [ 1 : - 1 ] ) except ( BrokenPipeError , LimitOverrunError ) : if first_try : return await self . async_run_command ( command , False ) else : _LOGGER . warning ( "connection is lost to host." ) return [ ] except TimeoutError : _LOGGER . error ( "Host timeout." ) return [ ] finally : self . _writer . close ( ) return [ line . decode ( 'utf-8' ) for line in data ]
Run a command through a Telnet connection . Connect to the Telnet server if not currently connected otherwise use the existing connection .
42,365
async def async_connect ( self ) : self . _reader , self . _writer = await asyncio . open_connection ( self . _host , self . _port ) with ( await self . _io_lock ) : try : await asyncio . wait_for ( self . _reader . readuntil ( b'login: ' ) , 9 ) except asyncio . streams . IncompleteReadError : _LOGGER . error ( "Unable to read from router on %s:%s" % ( self . _host , self . _port ) ) return except TimeoutError : _LOGGER . error ( "Host timeout." ) self . _writer . write ( ( self . _username + '\n' ) . encode ( 'ascii' ) ) await self . _reader . readuntil ( b'Password: ' ) self . _writer . write ( ( self . _password + '\n' ) . encode ( 'ascii' ) ) self . _prompt_string = ( await self . _reader . readuntil ( b'#' ) ) . split ( b'\n' ) [ - 1 ] self . _connected = True
Connect to the ASUS - WRT Telnet server .
42,366
async def _parse_lines ( lines , regex ) : results = [ ] if inspect . iscoroutinefunction ( lines ) : lines = await lines for line in lines : if line : match = regex . search ( line ) if not match : _LOGGER . debug ( "Could not parse row: %s" , line ) continue results . append ( match . groupdict ( ) ) return results
Parse the lines using the given regular expression .
42,367
async def async_get_connected_devices ( self ) : devices = { } dev = await self . async_get_wl ( ) devices . update ( dev ) dev = await self . async_get_arp ( ) devices . update ( dev ) dev = await self . async_get_neigh ( devices ) devices . update ( dev ) if not self . mode == 'ap' : dev = await self . async_get_leases ( devices ) devices . update ( dev ) ret_devices = { } for key in devices : if not self . require_ip or devices [ key ] . ip is not None : ret_devices [ key ] = devices [ key ] return ret_devices
Retrieve data from ASUSWRT .
42,368
async def async_get_current_transfer_rates ( self , use_cache = True ) : now = datetime . utcnow ( ) data = await self . async_get_bytes_total ( use_cache ) if self . _rx_latest is None or self . _tx_latest is None : self . _latest_transfer_check = now self . _rx_latest = data [ 0 ] self . _tx_latest = data [ 1 ] return self . _latest_transfer_data time_diff = now - self . _latest_transfer_check if time_diff . total_seconds ( ) < 30 : return self . _latest_transfer_data if data [ 0 ] < self . _rx_latest : rx = data [ 0 ] else : rx = data [ 0 ] - self . _rx_latest if data [ 1 ] < self . _tx_latest : tx = data [ 1 ] else : tx = data [ 1 ] - self . _tx_latest self . _latest_transfer_check = now self . _rx_latest = data [ 0 ] self . _tx_latest = data [ 1 ] self . _latest_transfer_data = ( math . ceil ( rx / time_diff . total_seconds ( ) ) if rx > 0 else 0 , math . ceil ( tx / time_diff . total_seconds ( ) ) if tx > 0 else 0 ) return self . _latest_transfer_data
Gets current transfer rates calculated in per second in bytes .
42,369
async def async_current_transfer_human_readable ( self , use_cache = True ) : rx , tx = await self . async_get_current_transfer_rates ( use_cache ) return "%s/s" % convert_size ( rx ) , "%s/s" % convert_size ( tx )
Gets current transfer rates in a human readable format .
42,370
def load_dataframe ( mhc_class = None , hla = None , exclude_hla = None , human_only = False , peptide_length = None , assay_method = None , assay_group = None , only_standard_amino_acids = True , reduced_alphabet = None , nrows = None ) : path = local_path ( ) df = pd . read_csv ( path , header = [ 0 , 1 ] , skipinitialspace = True , nrows = nrows , low_memory = False , error_bad_lines = False , encoding = "latin-1" ) print ( df . head ( ) ) df = df . dropna ( axis = 1 , how = "all" ) n = len ( df ) epitope_column_key = ( "Epitope" , "Description" ) mhc_allele_column_key = ( "MHC" , "Allele Name" ) assay_group_column_key = ( "Assay" , "Assay Group" ) assay_method_column_key = ( "Assay" , "Method/Technique" ) epitopes = df [ epitope_column_key ] . str . upper ( ) null_epitope_seq = epitopes . isnull ( ) n_null = null_epitope_seq . sum ( ) if n_null > 0 : logging . info ( "Dropping %d null sequences" , n_null ) mask = ~ null_epitope_seq if only_standard_amino_acids : bad_epitope_seq = epitopes . str . contains ( bad_amino_acids , na = False ) . astype ( "bool" ) n_bad = bad_epitope_seq . sum ( ) if n_bad > 0 : logging . info ( "Dropping %d bad sequences" , n_bad ) mask &= ~ bad_epitope_seq if human_only : organism = df [ 'Host Organism Name' ] mask &= organism . str . startswith ( 'Homo sapiens' , na = False ) . astype ( 'bool' ) mhc = df [ mhc_allele_column_key ] if mhc_class is not None : if mhc_class == 1 : mhc_class = "I" elif mhc_class == 2 : mhc_class = "II" if mhc_class not in { "I" , "II" } : raise ValueError ( "Invalid MHC class: %s" % mhc_class ) allele_dict = load_alleles_dict ( ) mhc_class_mask = [ False ] * len ( df ) for i , allele_name in enumerate ( mhc ) : allele_object = allele_dict . get ( allele_name ) if allele_object and allele_object . mhc_class == mhc_class : mhc_class_mask [ i ] = True mask &= np . array ( mhc_class_mask ) if hla : mask &= df [ mhc_allele_column_key ] . str . 
contains ( hla , na = False ) if exclude_hla : mask &= ~ ( df [ mhc_allele_column_key ] . str . contains ( exclude_hla , na = False ) ) if assay_group : mask &= df [ assay_group_column_key ] . str . contains ( assay_group ) if assay_method : mask &= df [ assay_method_column_key ] . str . contains ( assay_method ) if peptide_length : assert peptide_length > 0 mask &= df [ epitope_column_key ] . str . len ( ) == peptide_length df = df [ mask ] logging . info ( "Returning %d / %d entries after filtering" , len ( df ) , n ) return df
Load IEDB T - cell data without aggregating multiple entries for same epitope
42,371
def load_dataframe ( mhc_class = None , hla = None , exclude_hla = None , human_only = False , peptide_length = None , assay_method = None , assay_group = None , only_standard_amino_acids = True , reduced_alphabet = None , warn_bad_lines = True , nrows = None ) : df = pd . read_csv ( local_path ( ) , header = [ 0 , 1 ] , skipinitialspace = True , nrows = nrows , low_memory = False , error_bad_lines = False , encoding = "latin-1" , warn_bad_lines = warn_bad_lines ) df = df . dropna ( axis = 1 , how = "all" ) n = len ( df ) epitope_column_key = ( "Epitope" , "Description" ) mhc_allele_column_key = ( "MHC" , "Allele Name" ) epitopes = df [ epitope_column_key ] = df [ epitope_column_key ] . str . upper ( ) null_epitope_seq = epitopes . isnull ( ) n_null = null_epitope_seq . sum ( ) if n_null > 0 : logging . info ( "Dropping %d null sequences" , n_null ) mask = ~ null_epitope_seq if only_standard_amino_acids : bad_epitope_seq = epitopes . str . contains ( bad_amino_acids , na = False ) . astype ( "bool" ) n_bad = bad_epitope_seq . sum ( ) if n_bad > 0 : logging . info ( "Dropping %d bad sequences" , n_bad ) mask &= ~ bad_epitope_seq if human_only : mask &= df [ mhc_allele_column_key ] . str . startswith ( "HLA" ) . astype ( "bool" ) if mhc_class == 1 : mask &= df [ "MHC" ] [ "MHC allele class" ] == "I" elif mhc_class == 2 : mask &= df [ "MHC" ] [ "MHC allele class" ] == "II" if hla : mask &= df [ mhc_allele_column_key ] . str . contains ( hla , na = False ) if exclude_hla : mask &= ~ ( df [ mhc_allele_column_key ] . str . contains ( exclude_hla , na = False ) ) if assay_group : mask &= df [ "Assay" ] [ "Assay Group" ] . str . contains ( assay_group ) if assay_method : mask &= df [ "Assay" ] [ "Method/Technique" ] . str . contains ( assay_method ) if peptide_length : assert peptide_length > 0 mask &= df [ epitope_column_key ] . str . len ( ) == peptide_length df = df [ mask ] . copy ( ) logging . 
info ( "Returning %d / %d entries after filtering" , len ( df ) , n ) return df
Load IEDB MHC data without aggregating multiple entries for the same epitope
42,372
def get_fieldsets ( self , request , obj = None ) : fieldsets = list ( super ( CreateUpdateAdmin , self ) . get_fieldsets ( request = request , obj = obj ) ) fields = set ( ) to_add = set ( ) for fs in fieldsets : fields = fields . union ( fs [ 1 ] [ 'fields' ] ) for k , v in self . ownership_info [ 'fields' ] . items ( ) : if ( hasattr ( self . model , k ) and k not in fields and ( not self . exclude or ( self . exclude and k not in self . exclude ) ) ) : if ( 'readonly' in v and not v [ 'readonly' ] ) or obj : to_add . add ( k ) if len ( to_add ) > 0 : fieldsets . append ( ( self . ownership_info [ 'label' ] , { 'fields' : tuple ( to_add ) } ) ) return tuple ( fieldsets )
Add ownership info fields in fieldset with proper separation .
42,373
def get_readonly_fields ( self , request , obj = None ) : fields = list ( super ( CreateUpdateAdmin , self ) . get_readonly_fields ( request = request , obj = obj ) ) for k , v in self . ownership_info [ 'fields' ] . items ( ) : if ( hasattr ( self . model , k ) and ( 'readonly' in v and v [ 'readonly' ] ) and k not in fields and ( not self . exclude or ( self . exclude and k not in self . exclude ) ) ) : fields . append ( k ) return tuple ( fields )
Makes created_by create_date & update_date readonly when editing .
42,374
def get_authorization ( self , request ) : from django . utils . six import text_type from rest_framework import HTTP_HEADER_ENCODING auth = request . data . get ( self . key , b'' ) or request . META . get ( self . header_key , b'' ) if isinstance ( auth , text_type ) : auth = auth . encode ( HTTP_HEADER_ENCODING ) return auth
This function extracts the authorization JWT string . It first looks for specified key in header and then looks for the same in body part .
42,375
def main ( args = None ) : retcode = 0 try : ci = CliInterface ( ) args = ci . parser . parse_args ( ) result = args . func ( args ) if result is not None : print ( result ) retcode = 0 except Exception : retcode = 1 traceback . print_exc ( ) sys . exit ( retcode )
Call the CLI interface and wait for the result .
42,376
def write_toc ( self , args ) : ordered = False if args . ordered_list_marker is not None : list_marker = args . ordered_list_marker ordered = True elif args . unordered_list_marker is not None : list_marker = args . unordered_list_marker else : list_marker = md_parser [ args . parser ] [ 'list' ] [ 'unordered' ] [ 'default_marker' ] toc_struct = build_multiple_tocs ( filenames = args . filename , ordered = ordered , no_links = args . no_links , no_indentation = args . no_indentation , no_list_coherence = args . no_list_coherence , keep_header_levels = int ( args . header_levels ) , parser = args . parser , list_marker = list_marker ) if args . in_place : write_strings_on_files_between_markers ( filenames = args . filename , strings = toc_struct , marker = args . toc_marker ) else : for toc in toc_struct : print ( toc , end = '' )
Write the table of contents .
42,377
def patch ( func = None , obj = None , name = None , avoid_doublewrap = True ) : if obj is None : if isinstance ( func , ( type , ModuleType ) ) : obj = func func = None elif isinstance ( func , ( list , tuple ) ) and all ( [ isinstance ( i , ( ModuleType , type ) ) for i in func ] ) : obj = func func = None if func is None : return functools . partial ( patch , obj = obj , name = name , avoid_doublewrap = avoid_doublewrap ) if name is None : name = func . __name__ if isinstance ( obj , ( list , tuple ) ) and all ( [ isinstance ( i , ( ModuleType , type ) ) for i in obj ] ) : return [ patch ( func = func , obj = o , name = name , avoid_doublewrap = avoid_doublewrap ) for o in obj ] if not isinstance ( obj , ( ModuleType , type ) ) : raise ValueError ( "Argument passed to @patch decorator must be a " "class or module, or a list of classes and modules" ) try : call = getattr ( obj , name ) except AttributeError : raise TypeError ( "%(func_repr)s does not exist" % { 'func_repr' : '.' . join ( filter ( None , [ getattr ( obj , '__module__' , None ) , obj . __name__ , func . __name__ ] , ) ) , } ) if avoid_doublewrap and getattr ( call , 'wrapper' , None ) is func : return try : original_callable = six . get_method_function ( call ) except AttributeError : original_callable = call @ six . wraps ( func ) def wrapper ( * args , ** kwargs ) : return func ( original_callable , * args , ** kwargs ) wrapper . original = call wrapper . wrapper = func if six . PY2 and inspect . isclass ( obj ) : if hasattr ( call , 'im_self' ) : if call . im_self : wrapper = classmethod ( wrapper ) else : wrapper = staticmethod ( wrapper ) setattr ( obj , name , wrapper ) return getattr ( obj , name )
Decorator for monkeypatching functions on modules and classes .
42,378
def get_mobile_number ( mobile ) : blanks = [ ' ' , '.' , ',' , '(' , ')' , '-' ] for b in blanks : mobile = mobile . replace ( b , '' ) return mobile
Returns a mobile number after removing blanks
42,379
def has_object_permission ( self , request , view , obj ) : return ( request . user . is_superuser or super ( IAWPOrSuperuser , self ) . has_object_permission ( request = request , view = view , obj = obj ) )
Checks if user is superuser or it has permission over object
42,380
def write_string_on_file_between_markers ( filename : str , string : str , marker : str ) : r if filename == '-' : raise StdinIsNotAFileToBeWritten final_string = marker + '\n\n' + string . rstrip ( ) + '\n\n' + marker + '\n' marker_line_positions = fpyutils . get_line_matches ( filename , marker , 2 , loose_matching = True ) if 1 in marker_line_positions : if 2 in marker_line_positions : fpyutils . remove_line_interval ( filename , marker_line_positions [ 1 ] , marker_line_positions [ 2 ] , filename ) else : fpyutils . remove_line_interval ( filename , marker_line_positions [ 1 ] , marker_line_positions [ 1 ] , filename ) fpyutils . insert_string_at_line ( filename , final_string , marker_line_positions [ 1 ] , filename , append = False )
r Write the table of contents on a single file .
42,381
def write_strings_on_files_between_markers ( filenames : list , strings : list , marker : str ) : r assert len ( filenames ) == len ( strings ) if len ( filenames ) > 0 : for f in filenames : assert isinstance ( f , str ) if len ( strings ) > 0 : for s in strings : assert isinstance ( s , str ) file_id = 0 for f in filenames : write_string_on_file_between_markers ( f , strings [ file_id ] , marker ) file_id += 1
r Write the table of contents on multiple files .
42,382
def build_toc ( filename : str , ordered : bool = False , no_links : bool = False , no_indentation : bool = False , no_list_coherence : bool = False , keep_header_levels : int = 3 , parser : str = 'github' , list_marker : str = '-' ) -> str : r toc = str ( ) header_type_counter = dict ( ) header_type_curr = 0 header_type_prev = 0 header_duplicate_counter = dict ( ) no_of_indentation_spaces_prev = 0 if ordered : list_marker_log = build_list_marker_log ( parser , list_marker ) if filename == '-' : f = sys . stdin else : f = open ( filename , 'r' ) line = f . readline ( ) if ordered : list_marker_log = build_list_marker_log ( parser , list_marker ) else : list_marker_log = list ( ) is_within_code_fence = False code_fence = None is_document_end = False if not no_indentation and not no_list_coherence : indentation_list = build_indentation_list ( parser ) while line : if filename != '-' : file_pointer_pos = f . tell ( ) if f . readline ( ) == str ( ) : is_document_end = True f . seek ( file_pointer_pos ) if is_within_code_fence : is_within_code_fence = not is_closing_code_fence ( line , code_fence , is_document_end , parser ) line = f . readline ( ) else : code_fence = is_opening_code_fence ( line , parser ) if code_fence is not None : is_within_code_fence = True line = f . 
readline ( ) if not is_within_code_fence or code_fence is None : header = get_md_header ( line , header_duplicate_counter , keep_header_levels , parser , no_links ) if header is not None : header_type_curr = header [ 'type' ] if ordered : increase_index_ordered_list ( header_type_counter , header_type_prev , header_type_curr , parser ) index = header_type_counter [ header_type_curr ] else : index = 1 if no_indentation : no_of_indentation_spaces_curr = 0 else : if not no_list_coherence : if not toc_renders_as_coherent_list ( header_type_curr , indentation_list , parser ) : raise TocDoesNotRenderAsCoherentList no_of_indentation_spaces_curr = compute_toc_line_indentation_spaces ( header_type_curr , header_type_prev , no_of_indentation_spaces_prev , parser , ordered , list_marker , list_marker_log , index ) toc_line_no_indent = build_toc_line_without_indentation ( header , ordered , no_links , index , parser , list_marker ) toc += build_toc_line ( toc_line_no_indent , no_of_indentation_spaces_curr ) + '\n' header_type_prev = header_type_curr no_of_indentation_spaces_prev = no_of_indentation_spaces_curr line = f . readline ( ) f . close ( ) return toc
r Build the table of contents of a single file .
42,383
def build_multiple_tocs ( filenames : list , ordered : bool = False , no_links : bool = False , no_indentation : bool = False , no_list_coherence : bool = False , keep_header_levels : int = 3 , parser : str = 'github' , list_marker : str = '-' ) -> list : r if len ( filenames ) > 0 : for f in filenames : assert isinstance ( f , str ) if len ( filenames ) == 0 : filenames . append ( '-' ) file_id = 0 toc_struct = list ( ) while file_id < len ( filenames ) : toc_struct . append ( build_toc ( filenames [ file_id ] , ordered , no_links , no_indentation , no_list_coherence , keep_header_levels , parser , list_marker ) ) file_id += 1 return toc_struct
r Parse files by line and build the table of contents of each file .
42,384
def increase_index_ordered_list ( header_type_count : dict , header_type_prev : int , header_type_curr : int , parser : str = 'github' ) : r assert header_type_prev >= 0 assert header_type_curr >= 1 if header_type_prev == 0 : header_type_prev = header_type_curr if ( header_type_curr not in header_type_count or header_type_prev < header_type_curr ) : header_type_count [ header_type_curr ] = 0 header_type_count [ header_type_curr ] += 1 if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' ) : if header_type_count [ header_type_curr ] > md_parser [ 'github' ] [ 'list' ] [ 'ordered' ] [ 'max_marker_number' ] : raise GithubOverflowOrderedListMarker
r Compute the current index for ordered list table of contents .
42,385
def build_list_marker_log ( parser : str = 'github' , list_marker : str = '.' ) -> list : r if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' or parser == 'redcarpet' ) : assert list_marker in md_parser [ parser ] [ 'list' ] [ 'ordered' ] [ 'closing_markers' ] list_marker_log = list ( ) if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' ) : list_marker_log = [ str ( md_parser [ 'github' ] [ 'list' ] [ 'ordered' ] [ 'min_marker_number' ] ) + list_marker for i in range ( 0 , md_parser [ 'github' ] [ 'header' ] [ 'max_levels' ] ) ] elif parser == 'redcarpet' : pass return list_marker_log
r Create a data structure that holds list marker information .
42,386
def compute_toc_line_indentation_spaces ( header_type_curr : int = 1 , header_type_prev : int = 0 , no_of_indentation_spaces_prev : int = 0 , parser : str = 'github' , ordered : bool = False , list_marker : str = '-' , list_marker_log : list = build_list_marker_log ( 'github' , '.' ) , index : int = 1 ) -> int : r assert header_type_curr >= 1 assert header_type_prev >= 0 assert no_of_indentation_spaces_prev >= 0 if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' or parser == 'redcarpet' ) : if ordered : assert list_marker in md_parser [ parser ] [ 'list' ] [ 'ordered' ] [ 'closing_markers' ] else : assert list_marker in md_parser [ parser ] [ 'list' ] [ 'unordered' ] [ 'bullet_markers' ] if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' ) : if ordered : assert len ( list_marker_log ) == md_parser [ 'github' ] [ 'header' ] [ 'max_levels' ] for e in list_marker_log : assert isinstance ( e , str ) assert index >= 1 if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' ) : if header_type_prev == 0 : no_of_indentation_spaces_curr = 0 elif header_type_curr == header_type_prev : no_of_indentation_spaces_curr = no_of_indentation_spaces_prev else : if ordered : list_marker_prev = str ( list_marker_log [ header_type_curr - 1 ] ) else : list_marker_prev = list_marker if header_type_curr > header_type_prev : no_of_indentation_spaces_curr = ( no_of_indentation_spaces_prev + len ( list_marker_prev ) + len ( ' ' ) ) elif header_type_curr < header_type_prev : no_of_indentation_spaces_curr = ( no_of_indentation_spaces_prev - ( len ( list_marker_prev ) + len ( ' ' ) ) ) if ordered : for i in range ( ( header_type_curr - 1 ) + 1 , md_parser [ 'github' ] [ 'header' ] [ 'max_levels' ] ) : list_marker_log [ i ] = str ( md_parser [ 'github' ] [ 'list' ] [ 'ordered' ] [ 'min_marker_number' ] ) + list_marker if ordered : list_marker_log [ header_type_curr - 1 ] 
= str ( index ) + list_marker elif parser == 'redcarpet' : no_of_indentation_spaces_curr = 4 * ( header_type_curr - 1 ) return no_of_indentation_spaces_curr
r Compute the number of indentation spaces for the TOC list element .
42,387
def build_toc_line_without_indentation ( header : dict , ordered : bool = False , no_links : bool = False , index : int = 1 , parser : str = 'github' , list_marker : str = '-' ) -> str : r assert 'type' in header assert 'text_original' in header assert 'text_anchor_link' in header assert isinstance ( header [ 'type' ] , int ) assert isinstance ( header [ 'text_original' ] , str ) assert isinstance ( header [ 'text_anchor_link' ] , str ) assert header [ 'type' ] >= 1 assert index >= 1 if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' or parser == 'redcarpet' ) : if ordered : assert list_marker in md_parser [ parser ] [ 'list' ] [ 'ordered' ] [ 'closing_markers' ] else : assert list_marker in md_parser [ parser ] [ 'list' ] [ 'unordered' ] [ 'bullet_markers' ] toc_line_no_indent = str ( ) if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' or parser == 'redcarpet' ) : if ordered : list_marker = str ( index ) + list_marker if no_links : line = header [ 'text_original' ] else : line = '[' + header [ 'text_original' ] + ']' + '(#' + header [ 'text_anchor_link' ] + ')' toc_line_no_indent = list_marker + ' ' + line return toc_line_no_indent
r Return a list element of the table of contents .
42,388
def build_toc_line ( toc_line_no_indent : str , no_of_indentation_spaces : int = 0 ) -> str : r assert no_of_indentation_spaces >= 0 indentation = no_of_indentation_spaces * ' ' toc_line = indentation + toc_line_no_indent return toc_line
r Build the TOC line .
42,389
def build_anchor_link ( header_text_trimmed : str , header_duplicate_counter : str , parser : str = 'github' ) -> str : r if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' ) : header_text_trimmed = header_text_trimmed . lower ( ) header_text_trimmed = re . sub ( r'[^\w\s\- ]' , '' , header_text_trimmed ) header_text_trimmed = header_text_trimmed . replace ( ' ' , '-' ) ht = header_text_trimmed if header_text_trimmed not in header_duplicate_counter : header_duplicate_counter [ header_text_trimmed ] = 0 if header_duplicate_counter [ header_text_trimmed ] > 0 : header_text_trimmed = header_text_trimmed + '-' + str ( header_duplicate_counter [ header_text_trimmed ] ) header_duplicate_counter [ ht ] += 1 return header_text_trimmed elif parser == 'redcarpet' : STRIPPED = " -&+$,/:;=?@\"#{}|^~[]`\\*()%.!'" header_text_trimmed_len = len ( header_text_trimmed ) inserted = 0 stripped = 0 header_text_trimmed_middle_stage = '' for i in range ( 0 , header_text_trimmed_len ) : if header_text_trimmed [ i ] == '<' : while i < header_text_trimmed_len and header_text_trimmed [ i ] != '>' : i += 1 elif header_text_trimmed [ i ] == '&' : while i < header_text_trimmed_len and header_text_trimmed [ i ] != ';' : i += 1 elif not curses . ascii . isascii ( header_text_trimmed [ i ] ) or STRIPPED . find ( header_text_trimmed [ i ] ) != - 1 : if inserted and not stripped : header_text_trimmed_middle_stage += '-' stripped = 1 else : header_text_trimmed_middle_stage += header_text_trimmed [ i ] . lower ( ) stripped = 0 inserted += 1 if stripped > 0 and inserted > 0 : header_text_trimmed_middle_stage = header_text_trimmed_middle_stage [ 0 : - 1 ] if inserted == 0 and header_text_trimmed_len > 0 : hash = 5381 for i in range ( 0 , header_text_trimmed_len ) : hash = ( ( hash << 5 ) + hash ) + ord ( header_text_trimmed [ i ] ) header_text_trimmed_middle_stage = 'part-' + '{0:x}' . format ( hash ) return header_text_trimmed_middle_stage
r Apply the specified slug rule to build the anchor link .
42,390
def get_md_header ( header_text_line : str , header_duplicate_counter : dict , keep_header_levels : int = 3 , parser : str = 'github' , no_links : bool = False ) -> dict : r result = get_atx_heading ( header_text_line , keep_header_levels , parser , no_links ) if result is None : return result else : header_type , header_text_trimmed = result header = { 'type' : header_type , 'text_original' : header_text_trimmed , 'text_anchor_link' : build_anchor_link ( header_text_trimmed , header_duplicate_counter , parser ) } return header
r Build a data structure with the elements needed to create a TOC line .
42,391
def is_valid_code_fence_indent ( line : str , parser : str = 'github' ) -> bool : r if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' ) : return len ( line ) - len ( line . lstrip ( ' ' ) ) <= md_parser [ 'github' ] [ 'code fence' ] [ 'min_marker_characters' ] elif parser == 'redcarpet' : return False
r Determine if the given line has valid indentation for a code block fence .
42,392
def is_opening_code_fence ( line : str , parser : str = 'github' ) : r if ( parser == 'github' or parser == 'cmark' or parser == 'gitlab' or parser == 'commonmarker' ) : markers = md_parser [ 'github' ] [ 'code fence' ] [ 'marker' ] marker_min_length = md_parser [ 'github' ] [ 'code fence' ] [ 'min_marker_characters' ] if not is_valid_code_fence_indent ( line ) : return None line = line . lstrip ( ' ' ) . rstrip ( '\n' ) if not line . startswith ( ( markers [ 0 ] * marker_min_length , markers [ 1 ] * marker_min_length ) ) : return None if line == len ( line ) * line [ 0 ] : info_string = str ( ) else : info_string = line . lstrip ( line [ 0 ] ) if markers [ 0 ] in info_string or markers [ 1 ] in info_string : return None if line . rstrip ( markers [ 0 ] ) != line and line . rstrip ( markers [ 1 ] ) != line : return None return line . rstrip ( info_string ) elif parser == 'redcarpet' : return None
r Determine if the given line is possibly the opening of a fenced code block .
42,393
def is_closing_code_fence(line: str,
                          fence: str,
                          is_document_end: bool = False,
                          parser: str = 'github') -> bool:
    r"""Determine if the given line is the end of a fenced code block.

    :parameter line: the candidate closing line.
    :parameter fence: the marker string returned by the opening-fence check.
    :parameter is_document_end: an open fence is implicitly closed at EOF.
    :parameter parser: decides the fence syntax rules.
    :returns: ``True`` when ``line`` closes the fence, ``False`` otherwise.
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker'):
        fence_chars = md_parser['github']['code fence']['marker']
        min_len = md_parser['github']['code fence']['min_marker_characters']

        if not is_valid_code_fence_indent(line):
            return False

        # Validate the opening fence we are trying to match against.
        fence = fence.lstrip(' ')
        if not fence.startswith((fence_chars[0], fence_chars[1])):
            return False
        if len(fence) < min_len:
            return False
        fence = fence.rstrip('\n').rstrip(' ')
        # The fence must be a homogeneous run of one marker character.
        if fence != len(fence) * fence[0]:
            return False

        # An unterminated fence is closed by the end of the document.
        if is_document_end:
            return True

        line = line.lstrip(' ')
        if not line.startswith(fence):
            return False
        line = line.rstrip('\n').rstrip(' ')
        # The closer must be at least as long as the opener, and homogeneous.
        if len(line) < len(fence):
            return False
        if line != len(line) * line[0]:
            return False

        return True
    elif parser == 'redcarpet':
        return False
Determine if the given line is the end of a fenced code block.
42,394
def build_indentation_list(parser: str = 'github') -> list:
    r"""Create a data structure that holds the state of indentations.

    :parameter parser: decides the length of the list.
    :returns: a list of ``max_levels`` ``False`` flags, one per possible
        header level; an empty list for unknown parsers (matching the
        original fall-through behaviour).
    """
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # Idiomatic replacement for the manual append loop: one flag per
        # header level, all initially unused.
        return [False] * md_parser[parser]['header']['max_levels']
    return list()
Create a data structure that holds the state of indentations.
42,395
def toc_renders_as_coherent_list(
        header_type_curr: int = 1,
        indentation_list: list = None,
        parser: str = 'github') -> bool:
    r"""Check if the TOC will render as a working list.

    :parameter header_type_curr: the current header level (>= 1).
    :parameter indentation_list: per-level state carried between calls;
        it is mutated in place. When ``None`` a fresh list is built.
    :parameter parser: decides the number of header levels.
    :returns: ``True`` if the TOC renders as a coherent list.
    :raises AssertionError: on invalid inputs.
    """
    # Bug fix: the original default was ``build_indentation_list('github')``,
    # evaluated once at definition time and then mutated in place below,
    # leaking state across calls that relied on the default. Use a None
    # sentinel so each default call gets a fresh state list.
    if indentation_list is None:
        indentation_list = build_indentation_list('github')

    assert header_type_curr >= 1
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        assert len(indentation_list) == md_parser[parser]['header'][
            'max_levels']
        for e in indentation_list:
            assert isinstance(e, bool)

    renders_as_list = True
    if parser in ('github', 'cmark', 'gitlab', 'commonmarker', 'redcarpet'):
        # Mark the current level as used and clear all deeper levels.
        indentation_list[header_type_curr - 1] = True
        for i in range(header_type_curr,
                       md_parser['github']['header']['max_levels']):
            indentation_list[i] = False

        # Walk upwards: every level above the current one must already be
        # in use, otherwise the list has a gap and will not render.
        i = header_type_curr - 1
        while i >= 0 and indentation_list[i]:
            i -= 1
        if i >= 0:
            renders_as_list = False

    return renders_as_list
Check if the TOC will render as a working list.
42,396
def is_owner(self, user):
    """Checks if user is the owner of object.

    Anonymous (unauthenticated) users can never be owners; otherwise
    ownership is decided by comparing the creator's id with the user's id.
    """
    if not user.is_authenticated:
        return False
    return self.created_by.id == user.id
Checks if the user is the owner of the object.
42,397
def run(xml_report_dir, xml_report_filter='TEST-', html_report_path='.',
        generate_exec_time_graphs=True, html_report_dir='report.th',
        initial_java_heap_size=None, maximum_java_heap_size=None):
    """Use UnitTH to generate a test history report.

    Invokes the bundled ``unitth.jar`` via ``java``, passing configuration
    as ``-D`` system properties, and raises ``CalledProcessError`` if the
    process exits non-zero.
    """
    cmd = ['java']
    # Optional JVM heap sizing flags are only emitted when provided.
    if initial_java_heap_size:
        cmd.append('-Xms{}'.format(initial_java_heap_size))
    if maximum_java_heap_size:
        cmd.append('-Xmx{}'.format(maximum_java_heap_size))
    cmd.extend([
        '-Dunitth.xml.report.filter={}'.format(xml_report_filter),
        '-Dunitth.html.report.path={}'.format(html_report_path),
        # Java expects a lowercase 'true'/'false' literal.
        '-Dunitth.generate.exectimegraphs={}'.format(
            '{}'.format(generate_exec_time_graphs).lower()),
        '-Dunitth.report.dir={}'.format(html_report_dir),
        '-jar',
        # Quoted because the command is joined into a shell string below.
        '"{}"'.format(resource_filename('unitth', 'lib/unitth/unitth.jar')),
        xml_report_dir,
    ])
    subprocess.check_call(' '.join(cmd), shell=True)
Use UnitTH to generate a test history report
42,398
def run_cmd(call, cmd, *, echo=True, **kwargs):
    """Run a command and echo it first.

    :param call: callable invoked as ``call(cmd, **kwargs)`` (e.g.
        ``subprocess.check_call``); its return value is returned.
    :param cmd: the command as a list of arguments.
    :param echo: when true, print the shell-quoted command before running.
    """
    # pipes.quote is deprecated and the pipes module was removed in
    # Python 3.13; shlex.quote is its documented, identical replacement.
    import shlex
    if echo:
        print('$> ' + ' '.join(map(shlex.quote, cmd)))
    return call(cmd, **kwargs)
Run a command and echo it first
42,399
def git_remote(git_repo):
    """Return the URL for remote git repository.

    Uses a token-authenticated HTTPS URL when a token is present in the
    environment, otherwise falls back to the SSH form.
    """
    token = os.getenv(GITHUB_TOKEN_KEY)
    return ('https://{0}@github.com/{1}'.format(token, git_repo)
            if token else 'git@github.com:{0}'.format(git_repo))
Return the URL for remote git repository .