idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
17,100
def register ( name , validator ) : if not isinstance ( validator , Validator ) : raise TypeError ( "Validator instance expected, %s given" % validator . __class__ ) _NAMED_VALIDATORS [ name ] = validator
Register a validator instance under the given name .
58
10
17,101
def accepts ( * * schemas ) : validate = parse ( schemas ) . validate @ decorator def validating ( func , * args , * * kwargs ) : validate ( inspect . getcallargs ( func , * args , * * kwargs ) , adapt = False ) return func ( * args , * * kwargs ) return validating
Create a decorator for validating function parameters .
76
10
17,102
def returns ( schema ) : validate = parse ( schema ) . validate @ decorator def validating ( func , * args , * * kwargs ) : ret = func ( * args , * * kwargs ) validate ( ret , adapt = False ) return ret return validating
Create a decorator for validating function return value .
59
11
17,103
def adapts ( * * schemas ) : validate = parse ( schemas ) . validate @ decorator def adapting ( func , * args , * * kwargs ) : adapted = validate ( inspect . getcallargs ( func , * args , * * kwargs ) , adapt = True ) argspec = inspect . getargspec ( func ) if argspec . varargs is argspec . keywords is None : # optimization for the common no varargs, no keywords case return func ( * * adapted ) adapted_varargs = adapted . pop ( argspec . varargs , ( ) ) adapted_keywords = adapted . pop ( argspec . keywords , { } ) if not adapted_varargs : # keywords only if adapted_keywords : adapted . update ( adapted_keywords ) return func ( * * adapted ) adapted_posargs = [ adapted [ arg ] for arg in argspec . args ] adapted_posargs . extend ( adapted_varargs ) return func ( * adapted_posargs , * * adapted_keywords ) return adapting
Create a decorator for validating and adapting function parameters .
222
12
17,104
def get_checksum_metadata_tag ( self ) : if not self . _checksums : print ( "Warning: No checksums have been computed for this file." ) return { str ( _hash_name ) : str ( _hash_value ) for _hash_name , _hash_value in self . _checksums . items ( ) }
Returns a map of checksum values by the name of the hashing function that produced it .
75
18
17,105
def compute_checksum ( self ) : if self . _filename . startswith ( "s3://" ) : print ( "Warning: Did not perform client-side checksumming for file in S3. To be implemented." ) pass else : checksumCalculator = self . ChecksumCalculator ( self . _filename ) self . _checksums = checksumCalculator . compute ( )
Calculates checksums for a given file .
88
10
17,106
def upload_files ( self , file_paths , file_size_sum = 0 , dcp_type = "data" , target_filename = None , use_transfer_acceleration = True , report_progress = False , sync = True ) : self . _setup_s3_agent_for_file_upload ( file_count = len ( file_paths ) , file_size_sum = file_size_sum , use_transfer_acceleration = use_transfer_acceleration ) pool = ThreadPool ( ) if report_progress : print ( "\nStarting upload of %s files to upload area %s" % ( len ( file_paths ) , self . uuid ) ) for file_path in file_paths : pool . add_task ( self . _upload_file , file_path , target_filename = target_filename , use_transfer_acceleration = use_transfer_acceleration , report_progress = report_progress , sync = sync ) pool . wait_for_completion ( ) if report_progress : number_of_errors = len ( self . s3agent . failed_uploads ) if number_of_errors == 0 : print ( "Completed upload of %d files to upload area %s\n" % ( self . s3agent . file_upload_completed_count , self . uuid ) ) else : error = "\nThe following files failed:" for k , v in self . s3agent . failed_uploads . items ( ) : error += "\n%s: [Exception] %s" % ( k , v ) error += "\nPlease retry or contact an hca administrator at data-help@humancellatlas.org for help.\n" raise UploadException ( error )
A function that takes in a list of file paths and other optional args for parallel file upload
386
18
17,107
def validation_status ( self , filename ) : return self . upload_service . api_client . validation_status ( area_uuid = self . uuid , filename = filename )
Get status and results of latest validation job for a file .
39
12
17,108
def _item_exists_in_bucket ( self , bucket , key , checksums ) : try : obj = self . target_s3 . meta . client . head_object ( Bucket = bucket , Key = key ) if obj and obj . containsKey ( 'Metadata' ) : if obj [ 'Metadata' ] == checksums : return True except ClientError : # An exception from calling `head_object` indicates that no file with the specified name could be found # in the specified bucket. return False
Returns true if the key already exists in the current bucket and the clientside checksum matches the file s checksums and false otherwise .
110
27
17,109
def upload_to_cloud ( file_handles , staging_bucket , replica , from_cloud = False ) : s3 = boto3 . resource ( "s3" ) file_uuids = [ ] key_names = [ ] abs_file_paths = [ ] if from_cloud : file_uuids , key_names = _copy_from_s3 ( file_handles [ 0 ] , s3 ) else : destination_bucket = s3 . Bucket ( staging_bucket ) for raw_fh in file_handles : file_size = os . path . getsize ( raw_fh . name ) multipart_chunksize = s3_multipart . get_s3_multipart_chunk_size ( file_size ) tx_cfg = TransferConfig ( multipart_threshold = s3_multipart . MULTIPART_THRESHOLD , multipart_chunksize = multipart_chunksize ) with ChecksummingBufferedReader ( raw_fh , multipart_chunksize ) as fh : file_uuid = str ( uuid . uuid4 ( ) ) key_name = "{}/{}" . format ( file_uuid , os . path . basename ( fh . raw . name ) ) destination_bucket . upload_fileobj ( fh , key_name , Config = tx_cfg , ExtraArgs = { 'ContentType' : _mime_type ( fh . raw . name ) , } ) sums = fh . get_checksums ( ) metadata = { "hca-dss-s3_etag" : sums [ "s3_etag" ] , "hca-dss-sha1" : sums [ "sha1" ] , "hca-dss-sha256" : sums [ "sha256" ] , "hca-dss-crc32c" : sums [ "crc32c" ] , } s3 . meta . client . put_object_tagging ( Bucket = destination_bucket . name , Key = key_name , Tagging = dict ( TagSet = encode_tags ( metadata ) ) ) file_uuids . append ( file_uuid ) key_names . append ( key_name ) abs_file_paths . append ( fh . raw . name ) return file_uuids , key_names , abs_file_paths
Upload files to cloud .
537
5
17,110
def download ( self , bundle_uuid , replica , version = "" , download_dir = "" , metadata_files = ( '*' , ) , data_files = ( '*' , ) , num_retries = 10 , min_delay_seconds = 0.25 ) : errors = 0 with concurrent . futures . ThreadPoolExecutor ( self . threads ) as executor : futures_to_dss_file = { executor . submit ( task ) : dss_file for dss_file , task in self . _download_tasks ( bundle_uuid , replica , version , download_dir , metadata_files , data_files , num_retries , min_delay_seconds ) } for future in concurrent . futures . as_completed ( futures_to_dss_file ) : dss_file = futures_to_dss_file [ future ] try : future . result ( ) except Exception as e : errors += 1 logger . warning ( 'Failed to download file %s version %s from replica %s' , dss_file . uuid , dss_file . version , dss_file . replica , exc_info = e ) if errors : raise RuntimeError ( '{} file(s) failed to download' . format ( errors ) )
Download a bundle and save it to the local filesystem as a directory .
280
14
17,111
def _download_file ( self , dss_file , dest_path , num_retries = 10 , min_delay_seconds = 0.25 ) : directory , _ = os . path . split ( dest_path ) if directory : try : os . makedirs ( directory ) except OSError as e : if e . errno != errno . EEXIST : raise with atomic_write ( dest_path , mode = "wb" , overwrite = True ) as fh : if dss_file . size == 0 : return download_hash = self . _do_download_file ( dss_file , fh , num_retries , min_delay_seconds ) if download_hash . lower ( ) != dss_file . sha256 . lower ( ) : # No need to delete what's been written. atomic_write ensures we're cleaned up logger . error ( "%s" , "File {}: GET FAILED. Checksum mismatch." . format ( dss_file . uuid ) ) raise ValueError ( "Expected sha256 {} Received sha256 {}" . format ( dss_file . sha256 . lower ( ) , download_hash . lower ( ) ) )
Attempt to download the data . If a retryable exception occurs we wait a bit and retry again . The delay increases each time we fail and decreases each time we successfully read a block . We set a quota for the number of failures that goes up with every successful block read and down with each failure .
265
62
17,112
def _do_download_file ( self , dss_file , fh , num_retries , min_delay_seconds ) : hasher = hashlib . sha256 ( ) delay = min_delay_seconds retries_left = num_retries while True : try : response = self . get_file . _request ( dict ( uuid = dss_file . uuid , version = dss_file . version , replica = dss_file . replica ) , stream = True , headers = { 'Range' : "bytes={}-" . format ( fh . tell ( ) ) } , ) try : if not response . ok : logger . error ( "%s" , "File {}: GET FAILED." . format ( dss_file . uuid ) ) logger . error ( "%s" , "Response: {}" . format ( response . text ) ) break consume_bytes = int ( fh . tell ( ) ) server_start = 0 content_range_header = response . headers . get ( 'Content-Range' , None ) if content_range_header is not None : cre = re . compile ( "bytes (\d+)-(\d+)" ) mo = cre . search ( content_range_header ) if mo is not None : server_start = int ( mo . group ( 1 ) ) consume_bytes -= server_start assert consume_bytes >= 0 if server_start > 0 and consume_bytes == 0 : logger . info ( "%s" , "File {}: Resuming at {}." . format ( dss_file . uuid , server_start ) ) elif consume_bytes > 0 : logger . info ( "%s" , "File {}: Resuming at {}. Dropping {} bytes to match" . format ( dss_file . uuid , server_start , consume_bytes ) ) while consume_bytes > 0 : bytes_to_read = min ( consume_bytes , 1024 * 1024 ) content = response . iter_content ( chunk_size = bytes_to_read ) chunk = next ( content ) if chunk : consume_bytes -= len ( chunk ) for chunk in response . iter_content ( chunk_size = 1024 * 1024 ) : if chunk : fh . write ( chunk ) hasher . update ( chunk ) retries_left = min ( retries_left + 1 , num_retries ) delay = max ( delay / 2 , min_delay_seconds ) break finally : response . close ( ) except ( ChunkedEncodingError , ConnectionError , ReadTimeout ) : if retries_left > 0 : logger . info ( "%s" , "File {}: GET FAILED. Attempting to resume." . format ( dss_file . uuid ) ) time . sleep ( delay ) delay *= 2 retries_left -= 1 continue raise return hasher . hexdigest ( )
Abstracts away complications for downloading a file handles retries and delays and computes its hash
620
18
17,113
def _write_output_manifest ( self , manifest , filestore_root ) : output = os . path . basename ( manifest ) fieldnames , source_manifest = self . _parse_manifest ( manifest ) if 'file_path' not in fieldnames : fieldnames . append ( 'file_path' ) with atomic_write ( output , overwrite = True ) as f : delimiter = b'\t' if USING_PYTHON2 else '\t' writer = csv . DictWriter ( f , fieldnames , delimiter = delimiter , quoting = csv . QUOTE_NONE ) writer . writeheader ( ) for row in source_manifest : row [ 'file_path' ] = self . _file_path ( row [ 'file_sha256' ] , filestore_root ) writer . writerow ( row ) if os . path . isfile ( output ) : logger . warning ( 'Overwriting manifest %s' , output ) logger . info ( 'Rewrote manifest %s with additional column containing path to downloaded files.' , output )
Adds the file path column to the manifest and writes the copy to the current directory . If the original manifest is in the current directory it is overwritten with a warning .
238
34
17,114
def hardlink ( source , link_name ) : if sys . version_info < ( 3 , ) and platform . system ( ) == 'Windows' : # pragma: no cover import ctypes create_hard_link = ctypes . windll . kernel32 . CreateHardLinkW create_hard_link . argtypes = [ ctypes . c_wchar_p , ctypes . c_wchar_p , ctypes . c_void_p ] create_hard_link . restype = ctypes . wintypes . BOOL res = create_hard_link ( link_name , source , None ) if res == 0 : raise ctypes . WinError ( ) else : try : os . link ( source , link_name ) except OSError as e : if e . errno != errno . EEXIST : raise else : # It's possible that the user created a different file with the same name as the # one we're trying to download. Thus we need to check the if the inode is different # and raise an error in this case. source_stat = os . stat ( source ) dest_stat = os . stat ( link_name ) # Check device first because different drives can have the same inode number if source_stat . st_dev != dest_stat . st_dev or source_stat . st_ino != dest_stat . st_ino : raise
Create a hardlink in a portable way
301
8
17,115
def request_with_retries_on_post_search ( self , session , url , query , json_input , stream , headers ) : # TODO: Revert this PR as soon as the appropriate swagger definitions have percolated up # to prod and merged; see https://github.com/HumanCellAtlas/data-store/pull/1961 status_code = 500 if '/v1/search' in url : retry_count = 10 else : retry_count = 1 while status_code in ( 500 , 502 , 503 , 504 ) and retry_count > 0 : try : retry_count -= 1 res = session . request ( self . http_method , url , params = query , json = json_input , stream = stream , headers = headers , timeout = self . client . timeout_policy ) status_code = res . status_code except SwaggerAPIException : if retry_count > 0 : pass else : raise return res
Submit a request and retry POST search requests specifically .
208
11
17,116
def refresh_swagger ( self ) : try : os . remove ( self . _get_swagger_filename ( self . swagger_url ) ) except EnvironmentError as e : logger . warn ( os . strerror ( e . errno ) ) else : self . __init__ ( )
Manually refresh the swagger document . This can help resolve errors communicate with the API .
64
18
17,117
def add_area ( self , uri ) : if uri . area_uuid not in self . _config . upload . areas : self . _config . upload . areas [ uri . area_uuid ] = { 'uri' : uri . uri } self . save ( )
Record information about a new Upload Area
64
7
17,118
def select_area ( self , area_uuid ) : self . _config . upload . current_area = area_uuid self . save ( )
Update the current area to be the area with this UUID .
33
13
17,119
def create_area ( self , area_uuid ) : response = self . _make_request ( 'post' , path = "/area/{id}" . format ( id = area_uuid ) , headers = { 'Api-Key' : self . auth_token } ) return response . json ( )
Create an Upload Area
68
4
17,120
def area_exists ( self , area_uuid ) : response = requests . head ( self . _url ( path = "/area/{id}" . format ( id = area_uuid ) ) ) return response . ok
Check if an Upload Area exists
49
6
17,121
def delete_area ( self , area_uuid ) : self . _make_request ( 'delete' , path = "/area/{id}" . format ( id = area_uuid ) , headers = { 'Api-Key' : self . auth_token } ) return True
Delete an Upload Area
62
4
17,122
def credentials ( self , area_uuid ) : response = self . _make_request ( "post" , path = "/area/{uuid}/credentials" . format ( uuid = area_uuid ) ) return response . json ( )
Get AWS credentials required to directly upload files to Upload Area in S3
56
14
17,123
def file_upload_notification ( self , area_uuid , filename ) : url_safe_filename = urlparse . quote ( filename ) path = ( "/area/{area_uuid}/{filename}" . format ( area_uuid = area_uuid , filename = url_safe_filename ) ) response = self . _make_request ( 'post' , path = path ) return response . ok
Notify Upload Service that a file has been placed in an Upload Area
90
14
17,124
def files_info ( self , area_uuid , file_list ) : path = "/area/{uuid}/files_info" . format ( uuid = area_uuid ) file_list = [ urlparse . quote ( filename ) for filename in file_list ] response = self . _make_request ( 'put' , path = path , json = file_list ) return response . json ( )
Get information about files
90
4
17,125
def validation_statuses ( self , area_uuid ) : path = "/area/{uuid}/validations" . format ( uuid = area_uuid ) result = self . _make_request ( 'get' , path ) return result . json ( )
Get count of validation statuses for all files in upload_area
59
13
17,126
def language_name ( self , text : str ) -> str : values = extract ( text ) input_fn = _to_func ( ( [ values ] , [ ] ) ) pos : int = next ( self . _classifier . predict_classes ( input_fn = input_fn ) ) LOGGER . debug ( "Predicted language position %s" , pos ) return sorted ( self . languages ) [ pos ]
Predict the programming language name of the given source code .
89
12
17,127
def scores ( self , text : str ) -> Dict [ str , float ] : values = extract ( text ) input_fn = _to_func ( ( [ values ] , [ ] ) ) prediction = self . _classifier . predict_proba ( input_fn = input_fn ) probabilities = next ( prediction ) . tolist ( ) sorted_languages = sorted ( self . languages ) return dict ( zip ( sorted_languages , probabilities ) )
A score for each language corresponding to the probability that the text is written in the given language . The score is a float value between 0 . 0 and 1 . 0
98
33
17,128
def probable_languages ( self , text : str , max_languages : int = 3 ) -> Tuple [ str , ... ] : scores = self . scores ( text ) # Sorted from the most probable language to the least probable sorted_scores = sorted ( scores . items ( ) , key = itemgetter ( 1 ) , reverse = True ) languages , probabilities = list ( zip ( * sorted_scores ) ) # Find the most distant consecutive languages. # A logarithmic scale is used here because the probabilities # are most of the time really close to zero rescaled_probabilities = [ log ( proba ) for proba in probabilities ] distances = [ rescaled_probabilities [ pos ] - rescaled_probabilities [ pos + 1 ] for pos in range ( len ( rescaled_probabilities ) - 1 ) ] max_distance_pos = max ( enumerate ( distances , 1 ) , key = itemgetter ( 1 ) ) [ 0 ] limit = min ( max_distance_pos , max_languages ) return languages [ : limit ]
List of most probable programming languages the list is ordered from the most probable to the least probable one .
232
20
17,129
def learn ( self , input_dir : str ) -> float : if self . is_default : LOGGER . error ( "Cannot learn using default model" ) raise GuesslangError ( 'Cannot learn using default "readonly" model' ) languages = self . languages LOGGER . info ( "Extract training data" ) extensions = [ ext for exts in languages . values ( ) for ext in exts ] files = search_files ( input_dir , extensions ) nb_files = len ( files ) chunk_size = min ( int ( CHUNK_PROPORTION * nb_files ) , CHUNK_SIZE ) LOGGER . debug ( "Evaluation files count: %d" , chunk_size ) LOGGER . debug ( "Training files count: %d" , nb_files - chunk_size ) batches = _pop_many ( files , chunk_size ) LOGGER . debug ( "Prepare evaluation data" ) evaluation_data = extract_from_files ( next ( batches ) , languages ) LOGGER . debug ( "Evaluation data count: %d" , len ( evaluation_data [ 0 ] ) ) accuracy = 0 total = ceil ( nb_files / chunk_size ) - 1 LOGGER . info ( "Start learning" ) for pos , training_files in enumerate ( batches , 1 ) : LOGGER . info ( "Step %.2f%%" , 100 * pos / total ) LOGGER . debug ( "Training data extraction" ) training_data = extract_from_files ( training_files , languages ) LOGGER . debug ( "Training data count: %d" , len ( training_data [ 0 ] ) ) steps = int ( FITTING_FACTOR * len ( training_data [ 0 ] ) / 100 ) LOGGER . debug ( "Fitting, steps count: %d" , steps ) self . _classifier . fit ( input_fn = _to_func ( training_data ) , steps = steps ) LOGGER . debug ( "Evaluation" ) accuracy = self . _classifier . evaluate ( input_fn = _to_func ( evaluation_data ) , steps = 1 ) [ 'accuracy' ] _comment ( accuracy ) return accuracy
Learn languages features from source files .
485
7
17,130
def main ( ) : parser = argparse . ArgumentParser ( description = __doc__ ) parser . add_argument ( 'reportfile' , type = argparse . FileType ( 'r' ) , help = "test report file generated by `guesslang --test TESTDIR`" ) parser . add_argument ( '-d' , '--debug' , default = False , action = 'store_true' , help = "show debug messages" ) args = parser . parse_args ( ) config_logging ( args . debug ) report = json . load ( args . reportfile ) graph_data = _build_graph ( report ) index_path = _prepare_resources ( graph_data ) webbrowser . open ( str ( index_path ) )
Report graph creator command line
164
5
17,131
def search_files ( source : str , extensions : List [ str ] ) -> List [ Path ] : files = [ path for path in Path ( source ) . glob ( '**/*' ) if path . is_file ( ) and path . suffix . lstrip ( '.' ) in extensions ] nb_files = len ( files ) LOGGER . debug ( "Total files found: %d" , nb_files ) if nb_files < NB_FILES_MIN : LOGGER . error ( "Too few source files" ) raise GuesslangError ( '{} source files found in {}. {} files minimum is required' . format ( nb_files , source , NB_FILES_MIN ) ) random . shuffle ( files ) return files
Retrieve files located the source directory and its subdirectories whose extension match one of the listed extensions .
162
21
17,132
def extract_from_files ( files : List [ Path ] , languages : Dict [ str , List [ str ] ] ) -> DataSet : enumerator = enumerate ( sorted ( languages . items ( ) ) ) rank_map = { ext : rank for rank , ( _ , exts ) in enumerator for ext in exts } with multiprocessing . Pool ( initializer = _process_init ) as pool : file_iterator = ( ( path , rank_map ) for path in files ) arrays = _to_arrays ( pool . starmap ( _extract_features , file_iterator ) ) LOGGER . debug ( "Extracted arrays count: %d" , len ( arrays [ 0 ] ) ) return arrays
Extract arrays of features from the given files .
158
10
17,133
def safe_read_file ( file_path : Path ) -> str : for encoding in FILE_ENCODINGS : try : return file_path . read_text ( encoding = encoding ) except UnicodeError : pass # Ignore encoding error raise GuesslangError ( 'Encoding not supported for {!s}' . format ( file_path ) )
Read a text file . Several text encodings are tried until the file content is correctly decoded .
73
21
17,134
def config_logging ( debug : bool = False ) -> None : if debug : level = 'DEBUG' tf_level = tf . logging . INFO else : level = 'INFO' tf_level = tf . logging . ERROR logging_config = config_dict ( 'logging.json' ) for logger in logging_config [ 'loggers' ] . values ( ) : logger [ 'level' ] = level logging . config . dictConfig ( logging_config ) tf . logging . set_verbosity ( tf_level )
Set - up application and tensorflow logging .
112
10
17,135
def config_dict ( name : str ) -> Dict [ str , Any ] : try : content = resource_string ( PACKAGE , DATADIR . format ( name ) ) . decode ( ) except DistributionNotFound as error : LOGGER . warning ( "Cannot load %s from packages: %s" , name , error ) content = DATA_FALLBACK . joinpath ( name ) . read_text ( ) return cast ( Dict [ str , Any ] , json . loads ( content ) )
Load a JSON configuration dict from Guesslang config directory .
109
11
17,136
def model_info ( model_dir : Optional [ str ] = None ) -> Tuple [ str , bool ] : if model_dir is None : try : model_dir = resource_filename ( PACKAGE , DATADIR . format ( 'model' ) ) except DistributionNotFound as error : LOGGER . warning ( "Cannot load model from packages: %s" , error ) model_dir = str ( DATA_FALLBACK . joinpath ( 'model' ) . absolute ( ) ) is_default_model = True else : is_default_model = False model_path = Path ( model_dir ) model_path . mkdir ( exist_ok = True ) LOGGER . debug ( "Using model: %s, default: %s" , model_path , is_default_model ) return ( model_dir , is_default_model )
Retrieve Guesslang model directory name and tells if it is the default model .
186
16
17,137
def format ( self , record : logging . LogRecord ) -> str : if platform . system ( ) != 'Linux' : # Avoid funny logs on Windows & MacOS return super ( ) . format ( record ) record . msg = ( self . STYLE [ record . levelname ] + record . msg + self . STYLE [ 'END' ] ) record . levelname = ( self . STYLE [ 'LEVEL' ] + record . levelname + self . STYLE [ 'END' ] ) return super ( ) . format ( record )
Format log records to produce colored messages .
119
8
17,138
def main ( ) : parser = argparse . ArgumentParser ( description = __doc__ ) parser . add_argument ( 'githubtoken' , help = "Github OAuth token, see https://developer.github.com/v3/oauth/" ) parser . add_argument ( 'destination' , help = "location of the downloaded repos" ) parser . add_argument ( '-n' , '--nbrepo' , help = "number of repositories per language" , type = int , default = 1000 ) parser . add_argument ( '-d' , '--debug' , default = False , action = 'store_true' , help = "show debug messages" ) args = parser . parse_args ( ) config_logging ( args . debug ) destination = Path ( args . destination ) nb_repos = args . nbrepo token = args . githubtoken languages = config_dict ( 'languages.json' ) destination . mkdir ( exist_ok = True ) for pos , language in enumerate ( sorted ( languages ) , 1 ) : LOGGER . info ( "Step %.2f%%, %s" , 100 * pos / len ( languages ) , language ) LOGGER . info ( "Fetch %d repos infos for language %s" , nb_repos , language ) repos = _retrieve_repo_details ( language , nb_repos , token ) LOGGER . info ( "%d repos details kept. Downloading" , len ( repos ) ) _download_repos ( language , repos , destination ) LOGGER . info ( "Language %s repos downloaded" , language ) LOGGER . debug ( "Exit OK" )
Github repositories downloaded command line
372
6
17,139
def retry ( default = None ) : def decorator ( func ) : """Retry decorator""" @ functools . wraps ( func ) def _wrapper ( * args , * * kw ) : for pos in range ( 1 , MAX_RETRIES ) : try : return func ( * args , * * kw ) except ( RuntimeError , requests . ConnectionError ) as error : LOGGER . warning ( "Failed: %s, %s" , type ( error ) , error ) # Wait a bit before retrying for _ in range ( pos ) : _rest ( ) LOGGER . warning ( "Request Aborted" ) return default return _wrapper return decorator
Retry functions after failures
145
5
17,140
def main ( ) : parser = argparse . ArgumentParser ( description = __doc__ ) parser . add_argument ( 'learn' , help = "learning source codes directory" ) parser . add_argument ( 'keywords' , help = "output keywords file, JSON" ) parser . add_argument ( '-n' , '--nbkeywords' , type = int , default = 10000 , help = "the number of keywords to keep" ) parser . add_argument ( '-d' , '--debug' , default = False , action = 'store_true' , help = "show debug messages" ) args = parser . parse_args ( ) config_logging ( args . debug ) learn_path = Path ( args . learn ) keywords_path = Path ( args . keywords ) nb_keywords = args . nbkeywords languages = config_dict ( 'languages.json' ) exts = { ext : lang for lang , exts in languages . items ( ) for ext in exts } term_count = Counter ( ) document_count = Counter ( ) pos = 0 LOGGER . info ( "Reading files form %s" , learn_path ) for pos , path in enumerate ( Path ( learn_path ) . glob ( '**/*' ) , 1 ) : if pos % STEP == 0 : LOGGER . debug ( "Processed %d" , pos ) gc . collect ( ) # Cleanup dirt if not path . is_file ( ) or not exts . get ( path . suffix . lstrip ( '.' ) ) : continue counter = _extract ( path ) term_count . update ( counter ) document_count . update ( counter . keys ( ) ) nb_terms = sum ( term_count . values ( ) ) nb_documents = pos - 1 if not nb_documents : LOGGER . error ( "No source files found in %s" , learn_path ) raise RuntimeError ( 'No source files in {}' . format ( learn_path ) ) LOGGER . info ( "%d unique terms found" , len ( term_count ) ) terms = _most_frequent ( ( term_count , nb_terms ) , ( document_count , nb_documents ) , nb_keywords ) keywords = { token : int ( hashlib . sha1 ( token . encode ( ) ) . hexdigest ( ) , 16 ) for token in terms } with keywords_path . open ( 'w' ) as keywords_file : json . dump ( keywords , keywords_file , indent = 2 , sort_keys = True ) LOGGER . info ( "%d keywords written into %s" , len ( keywords ) , keywords_path ) LOGGER . debug ( "Exit OK" )
Keywords generator command line
596
5
17,141
def main ( ) -> None : try : _real_main ( ) except GuesslangError as error : LOGGER . critical ( "Failed: %s" , error ) sys . exit ( - 1 ) except KeyboardInterrupt : LOGGER . critical ( "Cancelled!" ) sys . exit ( - 2 )
Run command line
67
3
17,142
def split ( text : str ) -> List [ str ] : return [ word for word in SEPARATOR . split ( text ) if word . strip ( ' \t' ) ]
Split a text into a list of tokens .
38
9
17,143
def main ( ) : parser = argparse . ArgumentParser ( description = __doc__ , formatter_class = argparse . RawDescriptionHelpFormatter ) parser . add_argument ( 'source' , help = "location of the downloaded repos" ) parser . add_argument ( 'destination' , help = "location of the extracted files" ) parser . add_argument ( '-t' , '--nb-test-files' , help = "number of testing files per language" , type = int , default = 5000 ) parser . add_argument ( '-l' , '--nb-learn-files' , help = "number of learning files per language" , type = int , default = 10000 ) parser . add_argument ( '-r' , '--remove' , help = "remove repos that cannot be read" , action = 'store_true' , default = False ) parser . add_argument ( '-d' , '--debug' , default = False , action = 'store_true' , help = "show debug messages" ) args = parser . parse_args ( ) config_logging ( args . debug ) source = Path ( args . source ) destination = Path ( args . destination ) nb_test = args . nb_test_files nb_learn = args . nb_learn_files remove = args . remove repos = _find_repos ( source ) split_repos = _split_repos ( repos , nb_test , nb_learn ) split_files = _find_files ( * split_repos , nb_test , nb_learn , remove ) _unzip_all ( * split_files , destination ) LOGGER . info ( "Files saved into %s" , destination ) LOGGER . debug ( "Exit OK" )
Files extractor command line
394
5
17,144
def combine_slices ( slice_datasets , rescale = None ) : if len ( slice_datasets ) == 0 : raise DicomImportException ( "Must provide at least one DICOM dataset" ) _validate_slices_form_uniform_grid ( slice_datasets ) voxels = _merge_slice_pixel_arrays ( slice_datasets , rescale ) transform = _ijk_to_patient_xyz_transform_matrix ( slice_datasets ) return voxels , transform
Given a list of pydicom datasets for an image series stitch them together into a three - dimensional numpy array . Also calculate a 4x4 affine transformation matrix that converts the ijk - pixel - indices into the xyz - coordinates in the DICOM patient s coordinate system .
124
61
17,145
def _validate_slices_form_uniform_grid ( slice_datasets ) : invariant_properties = [ 'Modality' , 'SOPClassUID' , 'SeriesInstanceUID' , 'Rows' , 'Columns' , 'PixelSpacing' , 'PixelRepresentation' , 'BitsAllocated' , 'BitsStored' , 'HighBit' , ] for property_name in invariant_properties : _slice_attribute_equal ( slice_datasets , property_name ) _validate_image_orientation ( slice_datasets [ 0 ] . ImageOrientationPatient ) _slice_ndarray_attribute_almost_equal ( slice_datasets , 'ImageOrientationPatient' , 1e-5 ) slice_positions = _slice_positions ( slice_datasets ) _check_for_missing_slices ( slice_positions )
Perform various data checks to ensure that the list of slices form a evenly - spaced grid of data . Some of these checks are probably not required if the data follows the DICOM specification however it seems pertinent to check anyway .
206
46
17,146
def parse_url ( cls , string ) : # pylint: disable=redefined-outer-name match = cls . URL_RE . match ( string ) if not match : raise InvalidKeyError ( cls , string ) return match . groupdict ( )
If it can be parsed as a version_guid with no preceding org + offering returns a dict with key version_guid and the value
58
29
17,147
def offering ( self ) : warnings . warn ( "Offering is no longer a supported property of Locator. Please use the course and run properties." , DeprecationWarning , stacklevel = 2 ) if not self . course and not self . run : return None elif not self . run and self . course : return self . course return "/" . join ( [ self . course , self . run ] )
Deprecated . Use course and run independently .
86
9
17,148
def make_usage_key_from_deprecated_string ( self , location_url ) : warnings . warn ( "make_usage_key_from_deprecated_string is deprecated! Please use make_usage_key" , DeprecationWarning , stacklevel = 2 ) return BlockUsageLocator . from_string ( location_url ) . replace ( run = self . run )
Deprecated mechanism for creating a UsageKey given a CourseKey and a serialized Location .
83
18
17,149
def _from_string ( cls , serialized ) : # Allow access to _from_string protected method course_key = CourseLocator . _from_string ( serialized ) # pylint: disable=protected-access parsed_parts = cls . parse_url ( serialized ) block_id = parsed_parts . get ( 'block_id' , None ) if block_id is None : raise InvalidKeyError ( cls , serialized ) return cls ( course_key , parsed_parts . get ( 'block_type' ) , block_id )
Requests CourseLocator to deserialize its part and then adds the local deserialization of block
124
21
17,150
def _parse_block_ref ( cls , block_ref , deprecated = False ) : if deprecated and block_ref is None : return None if isinstance ( block_ref , LocalId ) : return block_ref is_valid_deprecated = deprecated and cls . DEPRECATED_ALLOWED_ID_RE . match ( block_ref ) is_valid = cls . ALLOWED_ID_RE . match ( block_ref ) if is_valid or is_valid_deprecated : return block_ref else : raise InvalidKeyError ( cls , block_ref )
Given block_ref tries to parse it into a valid block reference .
129
14
17,151
def html_id ( self ) : if self . deprecated : id_fields = [ self . DEPRECATED_TAG , self . org , self . course , self . block_type , self . block_id , self . version_guid ] id_string = u"-" . join ( [ v for v in id_fields if v is not None ] ) return self . clean_for_html ( id_string ) else : return self . block_id
Return an id which can be used on an html page as an id attr of an html element . It is currently also persisted by some clients to identify blocks .
100
33
17,152
def to_deprecated_son ( self , prefix = '' , tag = 'i4x' ) : # This preserves the old SON keys ('tag', 'org', 'course', 'category', 'name', 'revision'), # because that format was used to store data historically in mongo # adding tag b/c deprecated form used it son = SON ( { prefix + 'tag' : tag } ) for field_name in ( 'org' , 'course' ) : # Temporary filtering of run field because deprecated form left it out son [ prefix + field_name ] = getattr ( self . course_key , field_name ) for ( dep_field_name , field_name ) in [ ( 'category' , 'block_type' ) , ( 'name' , 'block_id' ) ] : son [ prefix + dep_field_name ] = getattr ( self , field_name ) son [ prefix + 'revision' ] = self . course_key . branch return son
Returns a SON object that represents this location
215
9
17,153
def _from_deprecated_son ( cls , id_dict , run ) : course_key = CourseLocator ( id_dict [ 'org' ] , id_dict [ 'course' ] , run , id_dict [ 'revision' ] , deprecated = True , ) return cls ( course_key , id_dict [ 'category' ] , id_dict [ 'name' ] , deprecated = True )
Return the Location decoding this id_dict and run
92
10
17,154
def _from_string ( cls , serialized ) : # Allow access to _from_string protected method library_key = LibraryLocator . _from_string ( serialized ) # pylint: disable=protected-access parsed_parts = LibraryLocator . parse_url ( serialized ) block_id = parsed_parts . get ( 'block_id' , None ) if block_id is None : raise InvalidKeyError ( cls , serialized ) block_type = parsed_parts . get ( 'block_type' ) if block_type is None : raise InvalidKeyError ( cls , serialized ) return cls ( library_key , parsed_parts . get ( 'block_type' ) , block_id )
Requests LibraryLocator to deserialize its part and then adds the local deserialization of block
159
21
17,155
def for_branch ( self , branch ) : return self . replace ( library_key = self . library_key . for_branch ( branch ) )
Return a UsageLocator for the same block in a different branch of the library .
34
17
17,156
def for_version ( self , version_guid ) : return self . replace ( library_key = self . library_key . for_version ( version_guid ) )
Return a UsageLocator for the same block in a different version of the library .
38
17
17,157
def _strip_object ( key ) : if hasattr ( key , 'version_agnostic' ) and hasattr ( key , 'for_branch' ) : return key . for_branch ( None ) . version_agnostic ( ) else : return key
Strips branch and version info if the given key supports those attributes .
57
15
17,158
def _strip_value ( value , lookup = 'exact' ) : if lookup == 'in' : stripped_value = [ _strip_object ( el ) for el in value ] else : stripped_value = _strip_object ( value ) return stripped_value
Helper function to remove the branch and version information from the given value which could be a single object or a list .
57
23
17,159
def _deprecation_warning ( cls ) : if issubclass ( cls , Location ) : warnings . warn ( "Location is deprecated! Please use locator.BlockUsageLocator" , DeprecationWarning , stacklevel = 3 ) elif issubclass ( cls , AssetLocation ) : warnings . warn ( "AssetLocation is deprecated! Please use locator.AssetLocator" , DeprecationWarning , stacklevel = 3 ) else : warnings . warn ( "{} is deprecated!" . format ( cls ) , DeprecationWarning , stacklevel = 3 )
Display a deprecation warning for the given cls
124
11
17,160
def _check_location_part ( cls , val , regexp ) : cls . _deprecation_warning ( ) return CourseLocator . _check_location_part ( val , regexp )
Deprecated . See CourseLocator . _check_location_part
45
14
17,161
def _clean ( cls , value , invalid ) : cls . _deprecation_warning ( ) return BlockUsageLocator . _clean ( value , invalid )
Deprecated . See BlockUsageLocator . _clean
36
11
17,162
def _join_keys_v1 ( left , right ) : if left . endswith ( ':' ) or '::' in left : raise ValueError ( "Can't join a left string ending in ':' or containing '::'" ) return u"{}::{}" . format ( _encode_v1 ( left ) , _encode_v1 ( right ) )
Join two keys into a format separable by using _split_keys_v1 .
83
18
17,163
def _split_keys_v1 ( joined ) : left , _ , right = joined . partition ( '::' ) return _decode_v1 ( left ) , _decode_v1 ( right )
Split two keys out a string created by _join_keys_v1 .
46
16
17,164
def _split_keys_v2 ( joined ) : left , _ , right = joined . rpartition ( '::' ) return _decode_v2 ( left ) , _decode_v2 ( right )
Split two keys out a string created by _join_keys_v2 .
48
16
17,165
def refresher ( name , refreshers = CompletionRefresher . refreshers ) : def wrapper ( wrapped ) : refreshers [ name ] = wrapped return wrapped return wrapper
Decorator to add the decorated function to the dictionary of refreshers . Any function decorated with a
36
20
17,166
def refresh ( self , executor , callbacks , completer_options = None ) : if completer_options is None : completer_options = { } if self . is_refreshing ( ) : self . _restart_refresh . set ( ) return [ ( None , None , None , 'Auto-completion refresh restarted.' ) ] else : self . _completer_thread = threading . Thread ( target = self . _bg_refresh , args = ( executor , callbacks , completer_options ) , name = 'completion_refresh' ) self . _completer_thread . setDaemon ( True ) self . _completer_thread . start ( ) return [ ( None , None , None , 'Auto-completion refresh started in the background.' ) ]
Creates a SQLCompleter object and populates it with the relevant completion suggestions in a background thread .
177
22
17,167
def handle_cd_command ( arg ) : CD_CMD = 'cd' tokens = arg . split ( CD_CMD + ' ' ) directory = tokens [ - 1 ] if len ( tokens ) > 1 else None if not directory : return False , "No folder name was provided." try : os . chdir ( directory ) subprocess . call ( [ 'pwd' ] ) return True , None except OSError as e : return False , e . strerror
Handles a cd shell command by calling python s os . chdir .
103
15
17,168
def get_editor_query ( sql ) : sql = sql . strip ( ) # The reason we can't simply do .strip('\e') is that it strips characters, # not a substring. So it'll strip "e" in the end of the sql also! # Ex: "select * from style\e" -> "select * from styl". pattern = re . compile ( '(^\\\e|\\\e$)' ) while pattern . search ( sql ) : sql = pattern . sub ( '' , sql ) return sql
Get the query part of an editor command .
114
9
17,169
def delete_favorite_query ( arg , * * _ ) : usage = 'Syntax: \\fd name.\n\n' + favoritequeries . usage if not arg : return [ ( None , None , None , usage ) ] status = favoritequeries . delete ( arg ) return [ ( None , None , None , status ) ]
Delete an existing favorite query .
73
6
17,170
def execute_system_command ( arg , * * _ ) : usage = "Syntax: system [command].\n" if not arg : return [ ( None , None , None , usage ) ] try : command = arg . strip ( ) if command . startswith ( 'cd' ) : ok , error_message = handle_cd_command ( arg ) if not ok : return [ ( None , None , None , error_message ) ] return [ ( None , None , None , '' ) ] args = arg . split ( ' ' ) process = subprocess . Popen ( args , stdout = subprocess . PIPE , stderr = subprocess . PIPE ) output , error = process . communicate ( ) response = output if not error else error # Python 3 returns bytes. This needs to be decoded to a string. if isinstance ( response , bytes ) : encoding = locale . getpreferredencoding ( False ) response = response . decode ( encoding ) return [ ( None , None , None , response ) ] except OSError as e : return [ ( None , None , None , 'OSError: %s' % e . strerror ) ]
Execute a system shell command .
254
7
17,171
def need_completion_refresh ( queries ) : tokens = { 'use' , '\\u' , 'create' , 'drop' } for query in sqlparse . split ( queries ) : try : first_token = query . split ( ) [ 0 ] if first_token . lower ( ) in tokens : return True except Exception : return False
Determines if the completion needs a refresh by checking if the sql statement is an alter create drop or change db .
75
24
17,172
def is_mutating ( status ) : if not status : return False mutating = set ( [ 'insert' , 'update' , 'delete' , 'alter' , 'create' , 'drop' , 'replace' , 'truncate' , 'load' ] ) return status . split ( None , 1 ) [ 0 ] . lower ( ) in mutating
Determines if the statement is mutating based on the status .
79
14
17,173
def change_prompt_format ( self , arg , * * _ ) : if not arg : message = 'Missing required argument, format.' return [ ( None , None , None , message ) ] self . prompt = self . get_prompt ( arg ) return [ ( None , None , None , "Changed prompt format to %s" % arg ) ]
Change the prompt format .
76
5
17,174
def get_output_margin ( self , status = None ) : margin = self . get_reserved_space ( ) + self . get_prompt ( self . prompt ) . count ( '\n' ) + 1 if special . is_timing_enabled ( ) : margin += 1 if status : margin += 1 + status . count ( '\n' ) return margin
Get the output margin ( number of rows for the prompt footer and timing message .
81
17
17,175
def output ( self , output , status = None ) : if output : size = self . cli . output . get_size ( ) margin = self . get_output_margin ( status ) fits = True buf = [ ] output_via_pager = self . explicit_pager and special . is_pager_enabled ( ) for i , line in enumerate ( output , 1 ) : special . write_tee ( line ) special . write_once ( line ) if fits or output_via_pager : # buffering buf . append ( line ) if len ( line ) > size . columns or i > ( size . rows - margin ) : fits = False if not self . explicit_pager and special . is_pager_enabled ( ) : # doesn't fit, use pager output_via_pager = True if not output_via_pager : # doesn't fit, flush buffer for line in buf : click . secho ( line ) buf = [ ] else : click . secho ( line ) if buf : if output_via_pager : # sadly click.echo_via_pager doesn't accept generators click . echo_via_pager ( "\n" . join ( buf ) ) else : for line in buf : click . secho ( line ) if status : click . secho ( status )
Output text to stdout or a pager command . The status text is not outputted to pager or files . The message will be logged in the audit log if enabled . The message will be written to the tee file if enabled . The message will be written to the output file if enabled .
288
60
17,176
def _on_completions_refreshed ( self , new_completer ) : with self . _completer_lock : self . completer = new_completer # When cli is first launched we call refresh_completions before # instantiating the cli object. So it is necessary to check if cli # exists before trying the replace the completer object in cli. if self . cli : self . cli . current_buffer . completer = new_completer if self . cli : # After refreshing, redraw the CLI to clear the statusbar # "Refreshing completions..." indicator self . cli . request_redraw ( )
Swap the completer object in cli with the newly created completer .
149
16
17,177
def get_reserved_space ( self ) : reserved_space_ratio = .45 max_reserved_space = 8 _ , height = click . get_terminal_size ( ) return min ( int ( round ( height * reserved_space_ratio ) ) , max_reserved_space )
Get the number of lines to reserve for the completion menu .
67
12
17,178
def find_matches ( text , collection , start_only = False , fuzzy = True , casing = None ) : last = last_word ( text , include = 'most_punctuations' ) text = last . lower ( ) completions = [ ] if fuzzy : regex = '.*?' . join ( map ( escape , text ) ) pat = compile ( '(%s)' % regex ) for item in sorted ( collection ) : r = pat . search ( item . lower ( ) ) if r : completions . append ( ( len ( r . group ( ) ) , r . start ( ) , item ) ) else : match_end_limit = len ( text ) if start_only else None for item in sorted ( collection ) : match_point = item . lower ( ) . find ( text , 0 , match_end_limit ) if match_point >= 0 : completions . append ( ( len ( text ) , match_point , item ) ) if casing == 'auto' : casing = 'lower' if last and last [ - 1 ] . islower ( ) else 'upper' def apply_case ( kw ) : if casing == 'upper' : return kw . upper ( ) return kw . lower ( ) return ( Completion ( z if casing is None else apply_case ( z ) , - len ( text ) ) for x , y , z in sorted ( completions ) )
Find completion matches for the given text . Given the user s input text and a collection of available completions find completions matching the last word of the text . If start_only is True the text will match an available completion only at the beginning . Otherwise a completion is considered a match if the text appears anywhere within it . yields prompt_toolkit Completion instances for any matches found in the collection of available completions .
300
85
17,179
def log ( logger , level , message ) : if logger . parent . name != 'root' : logger . log ( level , message ) else : print ( message , file = sys . stderr )
Logs message to stderr if logging isn t initialized .
43
13
17,180
def read_config_file ( f ) : if isinstance ( f , basestring ) : f = os . path . expanduser ( f ) try : config = ConfigObj ( f , interpolation = False , encoding = 'utf8' ) except ConfigObjError as e : log ( LOGGER , logging . ERROR , "Unable to parse line {0} of config file " "'{1}'." . format ( e . line_number , f ) ) log ( LOGGER , logging . ERROR , "Using successfully parsed config values." ) return e . config except ( IOError , OSError ) as e : log ( LOGGER , logging . WARNING , "You don't have permission to read " "config file '{0}'." . format ( e . filename ) ) return None return config
Read a config file .
171
5
17,181
def read_config_files ( files ) : config = ConfigObj ( ) for _file in files : _config = read_config_file ( _file ) if bool ( _config ) is True : config . merge ( _config ) config . filename = _config . filename return config
Read and merge a list of config files .
60
9
17,182
def cli_bindings ( ) : key_binding_manager = KeyBindingManager ( enable_open_in_editor = True , enable_system_bindings = True , enable_auto_suggest_bindings = True , enable_search = True , enable_abort_and_exit_bindings = True ) @ key_binding_manager . registry . add_binding ( Keys . F2 ) def _ ( event ) : """ Enable/Disable SmartCompletion Mode. """ _logger . debug ( 'Detected F2 key.' ) buf = event . cli . current_buffer buf . completer . smart_completion = not buf . completer . smart_completion @ key_binding_manager . registry . add_binding ( Keys . F3 ) def _ ( event ) : """ Enable/Disable Multiline Mode. """ _logger . debug ( 'Detected F3 key.' ) buf = event . cli . current_buffer buf . always_multiline = not buf . always_multiline @ key_binding_manager . registry . add_binding ( Keys . F4 ) def _ ( event ) : """ Toggle between Vi and Emacs mode. """ _logger . debug ( 'Detected F4 key.' ) if event . cli . editing_mode == EditingMode . VI : event . cli . editing_mode = EditingMode . EMACS else : event . cli . editing_mode = EditingMode . VI @ key_binding_manager . registry . add_binding ( Keys . Tab ) def _ ( event ) : """ Force autocompletion at cursor. """ _logger . debug ( 'Detected <Tab> key.' ) b = event . cli . current_buffer if b . complete_state : b . complete_next ( ) else : event . cli . start_completion ( select_first = True ) @ key_binding_manager . registry . add_binding ( Keys . ControlSpace ) def _ ( event ) : """ Initialize autocompletion at cursor. If the autocompletion menu is not showing, display it with the appropriate completions for the context. If the menu is showing, select the next completion. """ _logger . debug ( 'Detected <C-Space> key.' ) b = event . cli . current_buffer if b . complete_state : b . complete_next ( ) else : event . cli . start_completion ( select_first = False ) @ key_binding_manager . registry . add_binding ( Keys . ControlJ , filter = HasSelectedCompletion ( ) ) def _ ( event ) : """ Makes the enter key work as the tab key only when showing the menu. """ _logger . debug ( 'Detected <C-J> key.' ) event . current_buffer . complete_state = None b = event . cli . current_buffer b . complete_state = None return key_binding_manager
Custom key bindings for cli .
630
7
17,183
def prompt ( * args , * * kwargs ) : try : return click . prompt ( * args , * * kwargs ) except click . Abort : return False
Prompt the user for input and handle any abort exceptions .
37
12
17,184
def run ( self , statement ) : # Remove spaces and EOL statement = statement . strip ( ) if not statement : # Empty string yield ( None , None , None , None ) # Split the sql into separate queries and run each one. components = sqlparse . split ( statement ) for sql in components : # Remove spaces, eol and semi-colons. sql = sql . rstrip ( ';' ) # \G is treated specially since we have to set the expanded output. if sql . endswith ( '\\G' ) : special . set_expanded_output ( True ) sql = sql [ : - 2 ] . strip ( ) cur = self . conn . cursor ( ) try : for result in special . execute ( cur , sql ) : yield result except special . CommandNotFound : # Regular SQL cur . execute ( sql ) yield self . get_result ( cur )
Execute the sql in the database and return the results .
188
12
17,185
def get_result ( self , cursor ) : title = headers = None # cursor.description is not None for queries that return result sets, # e.g. SELECT or SHOW. if cursor . description is not None : headers = [ x [ 0 ] for x in cursor . description ] rows = cursor . fetchall ( ) status = '%d row%s in set' % ( len ( rows ) , '' if len ( rows ) == 1 else 's' ) else : logger . debug ( 'No rows in result.' ) rows = None status = 'Query OK' return ( title , rows , headers , status )
Get the current result s data from the cursor .
131
10
17,186
def tables ( self ) : with self . conn . cursor ( ) as cur : cur . execute ( self . TABLES_QUERY ) for row in cur : yield row
Yields table names .
37
6
17,187
def table_columns ( self ) : with self . conn . cursor ( ) as cur : cur . execute ( self . TABLE_COLUMNS_QUERY % self . database ) for row in cur : yield row
Yields column names .
46
6
17,188
def create_toolbar_tokens_func ( get_is_refreshing , show_fish_help ) : token = Token . Toolbar def get_toolbar_tokens ( cli ) : result = [ ] result . append ( ( token , ' ' ) ) if cli . buffers [ DEFAULT_BUFFER ] . always_multiline : result . append ( ( token . On , '[F3] Multiline: ON ' ) ) else : result . append ( ( token . Off , '[F3] Multiline: OFF ' ) ) if cli . buffers [ DEFAULT_BUFFER ] . always_multiline : result . append ( ( token , ' (Semi-colon [;] will end the line)' ) ) if cli . editing_mode == EditingMode . VI : result . append ( ( token . On , 'Vi-mode ({})' . format ( _get_vi_mode ( cli ) ) ) ) if show_fish_help ( ) : result . append ( ( token , ' Right-arrow to complete suggestion' ) ) if get_is_refreshing ( ) : result . append ( ( token , ' Refreshing completions...' ) ) return result return get_toolbar_tokens
Return a function that generates the toolbar tokens .
278
9
17,189
def _get_vi_mode ( cli ) : return { InputMode . INSERT : 'I' , InputMode . NAVIGATION : 'N' , InputMode . REPLACE : 'R' , InputMode . INSERT_MULTIPLE : 'M' } [ cli . vi_state . input_mode ]
Get the current vi mode for display .
72
8
17,190
def export ( defn ) : globals ( ) [ defn . __name__ ] = defn __all__ . append ( defn . __name__ ) return defn
Decorator to explicitly mark functions that are exposed in a lib .
38
14
17,191
def execute ( cur , sql ) : command , verbose , arg = parse_special_command ( sql ) if ( command not in COMMANDS ) and ( command . lower ( ) not in COMMANDS ) : raise CommandNotFound try : special_cmd = COMMANDS [ command ] except KeyError : special_cmd = COMMANDS [ command . lower ( ) ] if special_cmd . case_sensitive : raise CommandNotFound ( 'Command not found: %s' % command ) # "help <SQL KEYWORD> is a special case. if command == 'help' and arg : return show_keyword_help ( cur = cur , arg = arg ) if special_cmd . arg_type == NO_QUERY : return special_cmd . handler ( ) elif special_cmd . arg_type == PARSED_QUERY : return special_cmd . handler ( cur = cur , arg = arg , verbose = verbose ) elif special_cmd . arg_type == RAW_QUERY : return special_cmd . handler ( cur = cur , query = sql )
Execute a special command and return the results . If the special command is not supported a KeyError will be raised .
234
24
17,192
def find_prev_keyword ( sql ) : if not sql . strip ( ) : return None , '' parsed = sqlparse . parse ( sql ) [ 0 ] flattened = list ( parsed . flatten ( ) ) logical_operators = ( 'AND' , 'OR' , 'NOT' , 'BETWEEN' ) for t in reversed ( flattened ) : if t . value == '(' or ( t . is_keyword and ( t . value . upper ( ) not in logical_operators ) ) : # Find the location of token t in the original parsed statement # We can't use parsed.token_index(t) because t may be a child token # inside a TokenList, in which case token_index thows an error # Minimal example: # p = sqlparse.parse('select * from foo where bar') # t = list(p.flatten())[-3] # The "Where" token # p.token_index(t) # Throws ValueError: not in list idx = flattened . index ( t ) # Combine the string values of all tokens in the original list # up to and including the target keyword token t, to produce a # query string with everything after the keyword token removed text = '' . join ( tok . value for tok in flattened [ : idx + 1 ] ) return t , text return None , ''
Find the last sql keyword in an SQL statement Returns the value of the last keyword and the text of the query with everything after the last keyword stripped
294
29
17,193
def _get_thumbnail_options ( self , context , instance ) : width , height = None , None subject_location = False placeholder_width = context . get ( 'width' , None ) placeholder_height = context . get ( 'height' , None ) if instance . use_autoscale and placeholder_width : # use the placeholder width as a hint for sizing width = int ( placeholder_width ) if instance . use_autoscale and placeholder_height : height = int ( placeholder_height ) elif instance . width : width = instance . width if instance . height : height = instance . height if instance . image : if instance . image . subject_location : subject_location = instance . image . subject_location if not height and width : # height was not externally defined: use ratio to scale it by the width height = int ( float ( width ) * float ( instance . image . height ) / float ( instance . image . width ) ) if not width and height : # width was not externally defined: use ratio to scale it by the height width = int ( float ( height ) * float ( instance . image . width ) / float ( instance . image . height ) ) if not width : # width is still not defined. fallback the actual image width width = instance . image . width if not height : # height is still not defined. fallback the actual image height height = instance . image . height return { 'size' : ( width , height ) , 'subject_location' : subject_location }
Return the size and options of the thumbnail that should be inserted
320
12
17,194
def create_image_plugin ( filename , image , parent_plugin , * * kwargs ) : from cmsplugin_filer_image . models import FilerImage from filer . models import Image image_plugin = FilerImage ( ) image_plugin . placeholder = parent_plugin . placeholder image_plugin . parent = CMSPlugin . objects . get ( pk = parent_plugin . id ) image_plugin . position = CMSPlugin . objects . filter ( parent = parent_plugin ) . count ( ) image_plugin . language = parent_plugin . language image_plugin . plugin_type = 'FilerImagePlugin' image . seek ( 0 ) image_model = Image . objects . create ( file = SimpleUploadedFile ( name = filename , content = image . read ( ) ) ) image_plugin . image = image_model image_plugin . save ( ) return image_plugin
Used for drag - n - drop image insertion with djangocms - text - ckeditor . Set TEXT_SAVE_IMAGE_FUNCTION = cmsplugin_filer_image . integrations . ckeditor . create_image_plugin to enable .
190
58
17,195
def rename_tables ( db , table_mapping , reverse = False ) : from django . db import connection if reverse : table_mapping = [ ( dst , src ) for src , dst in table_mapping ] table_names = connection . introspection . table_names ( ) for source , destination in table_mapping : if source in table_names and destination in table_names : print ( u" WARNING: not renaming {0} to {1}, because both tables already exist." . format ( source , destination ) ) elif source in table_names and destination not in table_names : print ( u" - renaming {0} to {1}" . format ( source , destination ) ) db . rename_table ( source , destination )
renames tables from source to destination name if the source exists and the destination does not exist yet .
163
20
17,196
def group_and_sort_statements ( stmt_list , ev_totals = None ) : def _count ( stmt ) : if ev_totals is None : return len ( stmt . evidence ) else : return ev_totals [ stmt . get_hash ( ) ] stmt_rows = defaultdict ( list ) stmt_counts = defaultdict ( lambda : 0 ) arg_counts = defaultdict ( lambda : 0 ) for key , s in _get_keyed_stmts ( stmt_list ) : # Update the counts, and add key if needed. stmt_rows [ key ] . append ( s ) # Keep track of the total evidence counts for this statement and the # arguments. stmt_counts [ key ] += _count ( s ) # Add up the counts for the arguments, pairwise for Complexes and # Conversions. This allows, for example, a complex between MEK, ERK, # and something else to lend weight to the interactions between MEK # and ERK. if key [ 0 ] == 'Conversion' : subj = key [ 1 ] for obj in key [ 2 ] + key [ 3 ] : arg_counts [ ( subj , obj ) ] += _count ( s ) else : arg_counts [ key [ 1 : ] ] += _count ( s ) # Sort the rows by count and agent names. def process_rows ( stmt_rows ) : for key , stmts in stmt_rows . items ( ) : verb = key [ 0 ] inps = key [ 1 : ] sub_count = stmt_counts [ key ] arg_count = arg_counts [ inps ] if verb == 'Complex' and sub_count == arg_count and len ( inps ) <= 2 : if all ( [ len ( set ( ag . name for ag in s . agent_list ( ) ) ) > 2 for s in stmts ] ) : continue new_key = ( arg_count , inps , sub_count , verb ) stmts = sorted ( stmts , key = lambda s : _count ( s ) + 1 / ( 1 + len ( s . agent_list ( ) ) ) , reverse = True ) yield new_key , verb , stmts sorted_groups = sorted ( process_rows ( stmt_rows ) , key = lambda tpl : tpl [ 0 ] , reverse = True ) return sorted_groups
Group statements by type and arguments and sort by prevalence .
536
11
17,197
def make_stmt_from_sort_key ( key , verb ) : def make_agent ( name ) : if name == 'None' or name is None : return None return Agent ( name ) StmtClass = get_statement_by_name ( verb ) inps = list ( key [ 1 ] ) if verb == 'Complex' : stmt = StmtClass ( [ make_agent ( name ) for name in inps ] ) elif verb == 'Conversion' : stmt = StmtClass ( make_agent ( inps [ 0 ] ) , [ make_agent ( name ) for name in inps [ 1 ] ] , [ make_agent ( name ) for name in inps [ 2 ] ] ) elif verb == 'ActiveForm' or verb == 'HasActivity' : stmt = StmtClass ( make_agent ( inps [ 0 ] ) , inps [ 1 ] , inps [ 2 ] ) else : stmt = StmtClass ( * [ make_agent ( name ) for name in inps ] ) return stmt
Make a Statement from the sort key .
230
8
17,198
def get_ecs_cluster_for_queue ( queue_name , batch_client = None ) : if batch_client is None : batch_client = boto3 . client ( 'batch' ) queue_resp = batch_client . describe_job_queues ( jobQueues = [ queue_name ] ) if len ( queue_resp [ 'jobQueues' ] ) == 1 : queue = queue_resp [ 'jobQueues' ] [ 0 ] else : raise BatchReadingError ( 'Error finding queue with name %s.' % queue_name ) compute_env_names = queue [ 'computeEnvironmentOrder' ] if len ( compute_env_names ) == 1 : compute_env_name = compute_env_names [ 0 ] [ 'computeEnvironment' ] else : raise BatchReadingError ( 'Error finding the compute environment name ' 'for %s.' % queue_name ) compute_envs = batch_client . describe_compute_environments ( computeEnvironments = [ compute_env_name ] ) [ 'computeEnvironments' ] if len ( compute_envs ) == 1 : compute_env = compute_envs [ 0 ] else : raise BatchReadingError ( "Error getting compute environment %s for %s. " "Got %d environments instead of 1." % ( compute_env_name , queue_name , len ( compute_envs ) ) ) ecs_cluster_name = os . path . basename ( compute_env [ 'ecsClusterArn' ] ) return ecs_cluster_name
Get the name of the ecs cluster using the batch client .
344
13
17,199
def tag_instances_on_cluster ( cluster_name , project = 'cwc' ) : # Get the relevant instance ids from the ecs cluster ecs = boto3 . client ( 'ecs' ) task_arns = ecs . list_tasks ( cluster = cluster_name ) [ 'taskArns' ] if not task_arns : return tasks = ecs . describe_tasks ( cluster = cluster_name , tasks = task_arns ) [ 'tasks' ] container_instances = ecs . describe_container_instances ( cluster = cluster_name , containerInstances = [ task [ 'containerInstanceArn' ] for task in tasks ] ) [ 'containerInstances' ] ec2_instance_ids = [ ci [ 'ec2InstanceId' ] for ci in container_instances ] # Instantiate each instance to tag as a resource and create project tag for instance_id in ec2_instance_ids : tag_instance ( instance_id , project = project ) return
Adds project tag to untagged instances in a given cluster .
227
12