idx (int64, 0–251k) | question (string, 53–3.53k chars) | target (string, 5–1.23k chars) | len_question (int64, 20–893) | len_target (int64, 3–238) |
|---|---|---|---|---|
250,100 | def update_snapshot_schedule ( cls , cluster_id_label , s3_location = None , frequency_unit = None , frequency_num = None , status = None ) : conn = Qubole . agent ( version = Cluster . api_version ) data = { } if s3_location is not None : data [ "s3_location" ] = s3_location if frequency_unit is not None : data [ "frequency_unit" ] = frequency_unit if frequency_num is not None : data [ "frequency_num" ] = frequency_num if status is not None : data [ "status" ] = status return conn . put ( cls . element_path ( cluster_id_label ) + "/snapshot_schedule" , data ) | Update the snapshot schedule | 168 | 4 |
250,101 | def set_spot_instance_settings ( self , maximum_bid_price_percentage = None , timeout_for_request = None , maximum_spot_instance_percentage = None ) : self . hadoop_settings [ 'spot_instance_settings' ] = { 'maximum_bid_price_percentage' : maximum_bid_price_percentage , 'timeout_for_request' : timeout_for_request , 'maximum_spot_instance_percentage' : maximum_spot_instance_percentage } | Purchase options for spot instances . Valid only when slave_request_type is hybrid or spot . | 113 | 19 |
250,102 | def set_stable_spot_instance_settings ( self , maximum_bid_price_percentage = None , timeout_for_request = None , allow_fallback = True ) : self . hadoop_settings [ 'stable_spot_instance_settings' ] = { 'maximum_bid_price_percentage' : maximum_bid_price_percentage , 'timeout_for_request' : timeout_for_request , 'allow_fallback' : allow_fallback } | Purchase options for stable spot instances . | 105 | 7 |
250,103 | def minimal_payload ( self ) : payload_dict = self . __dict__ payload_dict . pop ( "api_version" , None ) return util . _make_minimal ( payload_dict ) | This method can be used to create the payload which is sent while creating or updating a cluster . | 45 | 19 |
250,104 | def _handle_error ( response ) : code = response . status_code if 200 <= code < 400 : return if code == 400 : sys . stderr . write ( response . text + "\n" ) raise BadRequest ( response ) elif code == 401 : sys . stderr . write ( response . text + "\n" ) raise UnauthorizedAccess ( response ) elif code == 403 : sys . stderr . write ( response . text + "\n" ) raise ForbiddenAccess ( response ) elif code == 404 : sys . stderr . write ( response . text + "\n" ) raise ResourceNotFound ( response ) elif code == 405 : sys . stderr . write ( response . text + "\n" ) raise MethodNotAllowed ( response ) elif code == 409 : sys . stderr . write ( response . text + "\n" ) raise ResourceConflict ( response ) elif code == 422 : sys . stderr . write ( response . text + "\n" ) raise ResourceInvalid ( response ) elif code in ( 449 , 502 , 503 , 504 ) : sys . stderr . write ( response . text + "\n" ) raise RetryWithDelay ( response ) elif 401 <= code < 500 : sys . stderr . write ( response . text + "\n" ) raise ClientError ( response ) elif 500 <= code < 600 : sys . stderr . write ( response . text + "\n" ) raise ServerError ( response ) else : raise ConnectionError ( response ) | Raise exceptions in response to any http errors | 332 | 9 |
250,105 | def createTemplate ( data ) : conn = Qubole . agent ( ) return conn . post ( Template . rest_entity_path , data ) | Create a new template . | 31 | 5 |
250,106 | def editTemplate ( id , data ) : conn = Qubole . agent ( ) return conn . put ( Template . element_path ( id ) , data ) | Edit an existing template . | 34 | 5 |
250,107 | def viewTemplate ( id ) : conn = Qubole . agent ( ) return conn . get ( Template . element_path ( id ) ) | View an existing Template details . | 30 | 6 |
250,108 | def submitTemplate ( id , data = { } ) : conn = Qubole . agent ( ) path = str ( id ) + "/run" return conn . post ( Template . element_path ( path ) , data ) | Submit an existing Template . | 47 | 5 |
250,109 | def runTemplate ( id , data = { } ) : conn = Qubole . agent ( ) path = str ( id ) + "/run" res = conn . post ( Template . element_path ( path ) , data ) cmdType = res [ 'command_type' ] cmdId = res [ 'id' ] cmdClass = eval ( cmdType ) cmd = cmdClass . find ( cmdId ) while not Command . is_done ( cmd . status ) : time . sleep ( Qubole . poll_interval ) cmd = cmdClass . find ( cmd . id ) return Template . getResult ( cmdClass , cmd ) | Run an existing Template and wait for the Result . Prints result to stdout . | 134 | 17 |
250,110 | def listTemplates ( data = { } ) : conn = Qubole . agent ( ) url_path = Template . rest_entity_path page_attr = [ ] if "page" in data and data [ "page" ] is not None : page_attr . append ( "page=%s" % data [ "page" ] ) if "per_page" in data and data [ "per_page" ] is not None : page_attr . append ( "per_page=%s" % data [ "per_page" ] ) if page_attr : url_path = "%s?%s" % ( url_path , "&" . join ( page_attr ) ) return conn . get ( url_path ) | Fetch existing Templates details . | 160 | 7 |
250,111 | def edit ( args ) : tap = DbTap . find ( args . id ) options = { } if not args . name is None : options [ "db_name" ] = args . name if args . host is not None : options [ "db_host" ] = args . host if args . user is not None : options [ "db_user" ] = args . user if args . password is not None : options [ "db_passwd" ] = args . password if args . type is not None : options [ "db_type" ] = args . type if args . location is not None : options [ "db_location" ] = args . location if args . port is not None : options [ "port" ] = args . port tap = tap . edit ( * * options ) return json . dumps ( tap . attributes , sort_keys = True , indent = 4 ) | Carefully set up a dict | 190 | 5 |
250,112 | def create ( cls , name , config = None , kind = "spark" ) : conn = Qubole . agent ( ) return conn . post ( cls . rest_entity_path , data = { 'name' : name , 'config' : config , 'kind' : kind } ) | Create a new app . | 65 | 5 |
250,113 | def configure ( cls , api_token , api_url = "https://api.qubole.com/api/" , version = "v1.2" , poll_interval = 5 , skip_ssl_cert_check = False , cloud_name = "AWS" ) : cls . _auth = QuboleAuth ( api_token ) cls . api_token = api_token cls . version = version cls . baseurl = api_url if poll_interval < Qubole . MIN_POLL_INTERVAL : log . warn ( "Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % ( Qubole . MIN_POLL_INTERVAL , Qubole . MIN_POLL_INTERVAL ) ) cls . poll_interval = Qubole . MIN_POLL_INTERVAL else : cls . poll_interval = poll_interval cls . skip_ssl_cert_check = skip_ssl_cert_check cls . cloud_name = cloud_name . lower ( ) cls . cached_agent = None | Set parameters governing interaction with QDS | 246 | 7 |
250,114 | def get_cluster_request_parameters ( cluster_info , cloud_config , engine_config ) : cluster_request = { } cloud_config = util . _make_minimal ( cloud_config . __dict__ ) if bool ( cloud_config ) : cluster_request [ 'cloud_config' ] = cloud_config engine_config = util . _make_minimal ( engine_config . __dict__ ) if bool ( engine_config ) : cluster_request [ 'engine_config' ] = engine_config cluster_request . update ( util . _make_minimal ( cluster_info . __dict__ ) ) return cluster_request | Use this to return final minimal request from cluster_info cloud_config or engine_config objects Alternatively call util . _make_minimal if only one object needs to be implemented | 141 | 36 |
250,115 | def create ( cls , cluster_info ) : conn = Qubole . agent ( version = "v2" ) return conn . post ( cls . rest_entity_path , data = cluster_info ) | Create a new cluster using information provided in cluster_info . | 46 | 12 |
250,116 | def _download_to_local ( boto_conn , s3_path , fp , num_result_dir , delim = None ) : #Progress bar to display download progress def _callback ( downloaded , total ) : ''' Call function for upload. `downloaded`: File size already downloaded (int) `total`: Total file size to be downloaded (int) ''' if ( total is 0 ) or ( downloaded == total ) : return progress = downloaded * 100 / total sys . stderr . write ( '\r[{0}] {1}%' . format ( '#' * progress , progress ) ) sys . stderr . flush ( ) m = _URI_RE . match ( s3_path ) bucket_name = m . group ( 1 ) bucket = boto_conn . get_bucket ( bucket_name ) retries = 6 if s3_path . endswith ( '/' ) is False : #It is a file key_name = m . group ( 2 ) key_instance = bucket . get_key ( key_name ) while key_instance is None and retries > 0 : retries = retries - 1 log . info ( "Results file is not available on s3. Retry: " + str ( 6 - retries ) ) time . sleep ( 10 ) key_instance = bucket . get_key ( key_name ) if key_instance is None : raise Exception ( "Results file not available on s3 yet. This can be because of s3 eventual consistency issues." ) log . info ( "Downloading file from %s" % s3_path ) if delim is None : try : key_instance . get_contents_to_file ( fp ) # cb=_callback except boto . exception . S3ResponseError as e : if ( e . status == 403 ) : # SDK-191, boto gives an error while fetching the objects using versions which happens by default # in the get_contents_to_file() api. So attempt one without specifying version. log . warn ( "Access denied while fetching the s3 object. Retrying without specifying the version...." ) key_instance . open ( ) fp . write ( key_instance . read ( ) ) key_instance . close ( ) else : raise else : # Get contents as string. Replace parameters and write to file. _read_iteratively ( key_instance , fp , delim = delim ) else : #It is a folder key_prefix = m . group ( 2 ) bucket_paths = bucket . list ( key_prefix ) for one_path in bucket_paths : name = one_path . name # Eliminate _tmp_ files which ends with $folder$ if name . endswith ( '$folder$' ) : continue log . info ( "Downloading file from %s" % name ) if delim is None : one_path . get_contents_to_file ( fp ) # cb=_callback else : _read_iteratively ( one_path , fp , delim = delim ) | Downloads the contents of all objects in s3_path into fp | 665 | 15 |
250,117 | def cancel_id ( cls , id ) : conn = Qubole . agent ( ) data = { "status" : "kill" } return conn . put ( cls . element_path ( id ) , data ) | Cancels command denoted by this id | 48 | 9 |
250,118 | def get_log_id ( cls , id ) : conn = Qubole . agent ( ) r = conn . get_raw ( cls . element_path ( id ) + "/logs" ) return r . text | Fetches log for the command represented by this id | 49 | 11 |
250,119 | def get_log ( self ) : log_path = self . meta_data [ 'logs_resource' ] conn = Qubole . agent ( ) r = conn . get_raw ( log_path ) return r . text | Fetches log for the command represented by this object | 50 | 11 |
250,120 | def get_results ( self , fp = sys . stdout , inline = True , delim = None , fetch = True , qlog = None , arguments = [ ] ) : result_path = self . meta_data [ 'results_resource' ] conn = Qubole . agent ( ) include_header = "false" if len ( arguments ) == 1 : include_header = arguments . pop ( 0 ) if include_header not in ( 'true' , 'false' ) : raise ParseError ( "incude_header can be either true or false" ) r = conn . get ( result_path , { 'inline' : inline , 'include_headers' : include_header } ) if r . get ( 'inline' ) : raw_results = r [ 'results' ] encoded_results = raw_results . encode ( 'utf8' ) if sys . version_info < ( 3 , 0 , 0 ) : fp . write ( encoded_results ) else : import io if isinstance ( fp , io . TextIOBase ) : if hasattr ( fp , 'buffer' ) : fp . buffer . write ( encoded_results ) else : fp . write ( raw_results ) elif isinstance ( fp , io . BufferedIOBase ) or isinstance ( fp , io . RawIOBase ) : fp . write ( encoded_results ) else : # Can this happen? Don't know what's the right thing to do in this case. pass else : if fetch : storage_credentials = conn . get ( Account . credentials_rest_entity_path ) if storage_credentials [ 'region_endpoint' ] is not None : boto_conn = boto . connect_s3 ( aws_access_key_id = storage_credentials [ 'storage_access_key' ] , aws_secret_access_key = storage_credentials [ 'storage_secret_key' ] , security_token = storage_credentials [ 'session_token' ] , host = storage_credentials [ 'region_endpoint' ] ) else : boto_conn = boto . connect_s3 ( aws_access_key_id = storage_credentials [ 'storage_access_key' ] , aws_secret_access_key = storage_credentials [ 'storage_secret_key' ] , security_token = storage_credentials [ 'session_token' ] ) log . info ( "Starting download from result locations: [%s]" % "," . join ( r [ 'result_location' ] ) ) #fetch latest value of num_result_dir num_result_dir = Command . find ( self . id ) . num_result_dir # If column/header names are not able to fetch then use include header as true if include_header . lower ( ) == "true" and qlog is not None : write_headers ( qlog , fp ) for s3_path in r [ 'result_location' ] : # In Python 3, # If the delim is None, fp should be in binary mode because # boto expects it to be. # If the delim is not None, then both text and binary modes # work. _download_to_local ( boto_conn , s3_path , fp , num_result_dir , delim = delim ) else : fp . write ( "," . join ( r [ 'result_location' ] ) ) | Fetches the result for the command represented by this object | 758 | 12 |
250,121 | def pluralize ( singular ) : if singular in UNCOUNTABLES : return singular for i in IRREGULAR : if i [ 0 ] == singular : return i [ 1 ] for i in PLURALIZE_PATTERNS : if re . search ( i [ 0 ] , singular ) : return re . sub ( i [ 0 ] , i [ 1 ] , singular ) | Convert singular word to its plural form . | 81 | 9 |
250,122 | def singularize ( plural ) : if plural in UNCOUNTABLES : return plural for i in IRREGULAR : if i [ 1 ] == plural : return i [ 0 ] for i in SINGULARIZE_PATTERNS : if re . search ( i [ 0 ] , plural ) : return re . sub ( i [ 0 ] , i [ 1 ] , plural ) return plural | Convert plural word to its singular form . | 83 | 9 |
250,123 | def camelize ( word ) : return '' . join ( w [ 0 ] . upper ( ) + w [ 1 : ] for w in re . sub ( '[^A-Z^a-z^0-9^:]+' , ' ' , word ) . split ( ' ' ) ) | Convert a word from lower_with_underscores to CamelCase . | 63 | 16 |
250,124 | def _make_minimal ( dictionary ) : new_dict = { } for key , value in dictionary . items ( ) : if value is not None : if isinstance ( value , dict ) : new_value = _make_minimal ( value ) if new_value : new_dict [ key ] = new_value else : new_dict [ key ] = value return new_dict | This function removes all the keys whose value is either None or an empty dictionary . | 83 | 16 |
250,125 | def upload_profiler_report ( url , filename , config ) : try : logger . debug ( "Uploading profiler report to IOpipe" ) with open ( filename , "rb" ) as data : response = requests . put ( url , data = data , timeout = config [ "network_timeout" ] ) response . raise_for_status ( ) except Exception as e : logger . debug ( "Error while uploading profiler report: %s" , e ) if hasattr ( e , "response" ) : logger . debug ( e . response . content ) else : logger . debug ( "Profiler report uploaded successfully" ) finally : if os . path . isfile ( filename ) : os . remove ( filename ) | Uploads a profiler report to IOpipe | 154 | 10 |
250,126 | def read_pid_stat ( pid ) : return { "utime" : random . randint ( 0 , 999999999 ) , "stime" : random . randint ( 0 , 999999999 ) , "cutime" : random . randint ( 0 , 999999999 ) , "cstime" : random . randint ( 0 , 999999999 ) , } | Mocks read_pid_stat as this is a Linux - specific operation . | 82 | 16 |
250,127 | def read_stat ( ) : return [ { "times" : { "user" : random . randint ( 0 , 999999999 ) , "nice" : random . randint ( 0 , 999999999 ) , "sys" : random . randint ( 0 , 999999999 ) , "idle" : random . randint ( 0 , 999999999 ) , "irq" : random . randint ( 0 , 999999999 ) , } } ] | Mocks read_stat as this is a Linux - specific operation . | 100 | 14 |
250,128 | def load_plugins ( self , plugins ) : def instantiate ( plugin ) : return plugin ( ) if inspect . isclass ( plugin ) else plugin loaded_plugins = [ ] plugins_seen = [ ] # Iterate over plugins in reverse to permit users to override default # plugin config for plugin in reversed ( plugins ) : if not is_plugin ( plugin ) or plugin . name in plugins_seen : continue # Build the plugins list in reverse to restore original order loaded_plugins . insert ( 0 , instantiate ( plugin ) ) plugins_seen . append ( plugin . name ) return loaded_plugins | Loads plugins that match the Plugin interface and are instantiated . | 124 | 13 |
250,129 | def run_hooks ( self , name , event = None , context = None ) : hooks = { "pre:setup" : lambda p : p . pre_setup ( self ) , "post:setup" : lambda p : p . post_setup ( self ) , "pre:invoke" : lambda p : p . pre_invoke ( event , context ) , "post:invoke" : lambda p : p . post_invoke ( event , context ) , "pre:report" : lambda p : p . pre_report ( self . report ) , "post:report" : lambda p : p . post_report ( self . report ) , } if name in hooks : for p in self . plugins : if p . enabled : try : hooks [ name ] ( p ) except Exception as e : logger . error ( "IOpipe plugin %s hook raised error: %s" % ( name , str ( e ) ) ) logger . exception ( e ) | Runs plugin hooks for each registered plugin . | 200 | 9 |
250,130 | def wait_for_futures ( self ) : [ future for future in futures . as_completed ( self . futures ) ] self . futures = [ ] | Wait for all futures to complete . This should be done at the end of an invocation . | 35 | 19 |
250,131 | def validate_context ( self , context ) : return all ( [ hasattr ( context , attr ) for attr in [ "aws_request_id" , "function_name" , "function_version" , "get_remaining_time_in_millis" , "invoked_function_arn" , "log_group_name" , "log_stream_name" , "memory_limit_in_mb" , ] ] ) and callable ( context . get_remaining_time_in_millis ) | Checks to see if we re working with a valid lambda context object . | 117 | 15 |
250,132 | def patch_session_send ( context , http_filter ) : if Session is None : return def send ( self , * args , * * kwargs ) : id = ensure_utf8 ( str ( uuid . uuid4 ( ) ) ) with context . iopipe . mark ( id ) : response = original_session_send ( self , * args , * * kwargs ) trace = context . iopipe . mark . measure ( id ) context . iopipe . mark . delete ( id ) collect_metrics_for_response ( response , context , trace , http_filter ) return response Session . send = send | Monkey patches requests Session class if available . Overloads the send method to add tracing and metrics collection . | 137 | 21 |
250,133 | def patch_botocore_session_send ( context , http_filter ) : if BotocoreSession is None : return def send ( self , * args , * * kwargs ) : id = str ( uuid . uuid4 ( ) ) with context . iopipe . mark ( id ) : response = original_botocore_session_send ( self , * args , * * kwargs ) trace = context . iopipe . mark . measure ( id ) context . iopipe . mark . delete ( id ) collect_metrics_for_response ( response , context , trace , http_filter ) return response BotocoreSession . send = send | Monkey patches botocore s vendored requests if available . Overloads the Session class send method to add tracing and metric collection . | 145 | 27 |
250,134 | def collect_metrics_for_response ( http_response , context , trace , http_filter ) : http_response = copy . deepcopy ( http_response ) if http_filter is not None and callable ( http_filter ) : http_response = http_filter ( http_response ) if http_response is False : return request = None if hasattr ( http_response , "request" ) : parsed_url = None if hasattr ( http_response . request , "url" ) : parsed_url = urlparse ( http_response . request . url ) request_headers = [ ] if hasattr ( http_response . request , "headers" ) : request_headers = [ { "key" : ensure_utf8 ( k ) , "string" : ensure_utf8 ( v ) } for k , v in http_response . request . headers . items ( ) if k . lower ( ) in INCLUDE_HEADERS ] request = Request ( hash = ensure_utf8 ( getattr ( parsed_url , "fragment" , None ) ) , headers = request_headers , hostname = ensure_utf8 ( getattr ( parsed_url , "hostname" , None ) ) , method = ensure_utf8 ( getattr ( http_response . request , "method" , None ) ) , path = ensure_utf8 ( getattr ( parsed_url , "path" , None ) ) , # TODO: Determine if this is redundant pathname = ensure_utf8 ( getattr ( parsed_url , "path" , None ) ) , port = ensure_utf8 ( getattr ( parsed_url , "port" , None ) ) , protocol = ensure_utf8 ( getattr ( parsed_url , "scheme" , None ) ) , query = ensure_utf8 ( getattr ( parsed_url , "query" , None ) ) , url = ensure_utf8 ( getattr ( http_response . request , "url" , None ) ) , ) response_headers = [ ] if hasattr ( http_response , "headers" ) : response_headers = [ { "key" : ensure_utf8 ( k ) , "string" : ensure_utf8 ( v ) } for k , v in http_response . headers . items ( ) if k . lower ( ) in INCLUDE_HEADERS ] response = Response ( headers = response_headers , statusCode = ensure_utf8 ( getattr ( http_response , "status_code" , None ) ) , statusMessage = None , ) context . iopipe . mark . http_trace ( trace , request , response ) | Collects relevant metrics from a requests Response object and adds them to the IOpipe context . | 571 | 19 |
250,135 | def get_plugin_meta ( plugins ) : return [ { "name" : p . name , "version" : p . version , "homepage" : p . homepage , "enabled" : p . enabled , } for p in plugins if is_plugin ( p ) ] | Returns meta data about plugins . | 59 | 6 |
250,136 | def is_plugin ( plugin ) : try : return isinstance ( plugin , Plugin ) or issubclass ( plugin , Plugin ) except TypeError : return False | Returns true if the plugin implements the Plugin interface . | 33 | 10 |
250,137 | def with_metaclass ( meta , * bases ) : class metaclass ( meta ) : def __new__ ( cls , name , this_bases , d ) : return meta ( name , bases , d ) return type . __new__ ( metaclass , "temporary_class" , ( ) , { } ) | Python 2 and 3 compatible way to do meta classes | 72 | 10 |
250,138 | def extract_context_data ( self ) : data = { } for k , v in { # camel case names in the report to align with AWS standards "functionName" : "function_name" , "functionVersion" : "function_version" , "memoryLimitInMB" : "memory_limit_in_mb" , "invokedFunctionArn" : "invoked_function_arn" , "awsRequestId" : "aws_request_id" , "logGroupName" : "log_group_name" , "logStreamName" : "log_stream_name" , } . items ( ) : if hasattr ( self . context , v ) : data [ k ] = getattr ( self . context , v ) if ( hasattr ( self . context , "invoked_function_arn" ) and "AWS_SAM_LOCAL" in os . environ ) : data [ "invokedFunctionArn" ] = ( "arn:aws:lambda:local:0:function:%s" % data . get ( "functionName" , "unknown" ) ) if hasattr ( self . context , "get_remaining_time_in_millis" ) and callable ( self . context . get_remaining_time_in_millis ) : data [ "getRemainingTimeInMillis" ] = self . context . get_remaining_time_in_millis ( ) data [ "traceId" ] = os . getenv ( "_X_AMZN_TRACE_ID" , "" ) return data | Returns the contents of a AWS Lambda context . | 342 | 10 |
250,139 | def retain_error ( self , error , frame = None ) : if frame is None : stack = traceback . format_exc ( ) self . labels . add ( "@iopipe/error" ) else : stack = "\n" . join ( traceback . format_stack ( frame ) ) self . labels . add ( "@iopipe/timeout" ) details = { "name" : type ( error ) . __name__ , "message" : "{}" . format ( error ) , "stack" : stack , } self . report [ "errors" ] = details | Adds details of an error to the report . | 120 | 9 |
250,140 | def prepare ( self , error = None , frame = None ) : if error : self . retain_error ( error , frame ) self . report [ "environment" ] [ "host" ] [ "boot_id" ] = system . read_bootid ( ) # convert labels to list for sending self . report [ "labels" ] = list ( self . labels ) meminfo = system . read_meminfo ( ) self . report . update ( { "aws" : self . extract_context_data ( ) , "timestampEnd" : int ( time . time ( ) * 1000 ) , } ) self . report [ "environment" ] [ "os" ] . update ( { "cpus" : system . read_stat ( ) , "freemem" : meminfo [ "MemFree" ] , "hostname" : system . read_hostname ( ) , "totalmem" : meminfo [ "MemTotal" ] , "usedmem" : meminfo [ "MemTotal" ] - meminfo [ "MemFree" ] , } ) self . report [ "environment" ] [ "os" ] [ "linux" ] [ "pid" ] = { "self" : { "stat" : system . read_pid_stat ( "self" ) , "stat_start" : self . stat_start , "status" : system . read_pid_status ( "self" ) , } } self . report [ "disk" ] = system . read_disk ( ) self . report [ "duration" ] = int ( ( monotonic ( ) - self . start_time ) * 1e9 ) | Prepare the report to be sent to IOpipe . | 351 | 12 |
250,141 | def send ( self ) : if self . sent is True : return self . sent = True logger . debug ( "Sending report to IOpipe:" ) logger . debug ( json . dumps ( self . report , indent = 2 , sort_keys = True ) ) self . client . submit_future ( send_report , copy . deepcopy ( self . report ) , self . config ) | Sends the report to IOpipe . | 82 | 9 |
250,142 | def send_report ( report , config ) : headers = { "Authorization" : "Bearer {}" . format ( config [ "token" ] ) } url = "https://{host}{path}" . format ( * * config ) try : response = session . post ( url , json = report , headers = headers , timeout = config [ "network_timeout" ] ) response . raise_for_status ( ) except Exception as e : logger . debug ( "Error sending report to IOpipe: %s" % e ) else : logger . debug ( "Report sent to IOpipe successfully" ) | Sends the report to IOpipe s collector . | 129 | 11 |
250,143 | def upload_log_data ( url , stream_or_file , config ) : try : logger . debug ( "Uploading log data to IOpipe" ) if isinstance ( stream_or_file , StringIO ) : stream_or_file . seek ( 0 ) response = requests . put ( url , data = stream_or_file , timeout = config [ "network_timeout" ] ) else : with open ( stream_or_file , "rb" ) as data : response = requests . put ( url , data = data , timeout = config [ "network_timeout" ] ) response . raise_for_status ( ) except Exception as e : logger . debug ( "Error while uploading log data: %s" , e ) logger . exception ( e ) if hasattr ( e , "response" ) and hasattr ( e . response , "content" ) : logger . debug ( e . response . content ) else : logger . debug ( "Log data uploaded successfully" ) finally : if isinstance ( stream_or_file , str ) and os . path . exists ( stream_or_file ) : os . remove ( stream_or_file ) | Uploads log data to IOpipe . | 248 | 9 |
250,144 | def get_signer_hostname ( ) : region = os . getenv ( "AWS_REGION" , "" ) region = region if region and region in SUPPORTED_REGIONS else "us-west-2" return "signer.{region}.iopipe.com" . format ( region = region ) | Returns the IOpipe signer hostname for a region | 69 | 12 |
250,145 | def get_signed_request ( config , context , extension ) : url = "https://{hostname}/" . format ( hostname = get_signer_hostname ( ) ) try : logger . debug ( "Requesting signed request URL from %s" , url ) response = requests . post ( url , json = { "arn" : context . invoked_function_arn , "requestId" : context . aws_request_id , "timestamp" : int ( time . time ( ) * 1000 ) , "extension" : extension , } , headers = { "Authorization" : config [ "token" ] } , timeout = config [ "network_timeout" ] , ) response . raise_for_status ( ) except Exception as e : logger . debug ( "Error requesting signed request URL: %s" , e ) if hasattr ( e , "response" ) : logger . debug ( e . response . content ) else : response = response . json ( ) logger . debug ( "Signed request URL received for %s" , response [ "url" ] ) return response | Returns a signed request URL from IOpipe | 234 | 9 |
250,146 | def handler ( event , context ) : try : ip = requests . get ( "http://checkip.amazonaws.com/" ) except requests . RequestException as e : # Send some context about this error to Lambda Logs print ( e ) raise e return { "statusCode" : 200 , "body" : json . dumps ( { "message" : "hello world" , "location" : ip . text . replace ( "\n" , "" ) } ) , } | Sample pure Lambda function | 101 | 5 |
250,147 | def read_meminfo ( ) : data = { } with open ( "/proc/meminfo" , "rb" ) as meminfo_file : for row in meminfo_file : fields = row . split ( ) # Example content: # MemTotal: 3801016 kB # MemFree: 1840972 kB # MemAvailable: 3287752 kB # HugePages_Total: 0 data [ fields [ 0 ] . decode ( "ascii" ) [ : - 1 ] ] = int ( fields [ 1 ] ) * 1024 return data | Returns system memory usage information . | 118 | 6 |
250,148 | def read_pid_stat ( pid = "self" ) : with open ( "/proc/%s/stat" % ( pid , ) , "rb" ) as f : stat = f . readline ( ) . split ( ) return { "utime" : int ( stat [ 13 ] ) , "stime" : int ( stat [ 14 ] ) , "cutime" : int ( stat [ 15 ] ) , "cstime" : int ( stat [ 16 ] ) , } | Returns system process stat information . | 106 | 6 |
250,149 | def read_pid_status ( pid = "self" ) : data = { } with open ( "/proc/%s/status" % ( pid , ) , "rb" ) as status_file : for row in status_file : fields = row . split ( ) if fields and fields [ 0 ] in [ b"VmRSS:" , b"Threads:" , b"FDSize:" ] : try : data [ fields [ 0 ] . decode ( "ascii" ) [ : - 1 ] ] = int ( fields [ 1 ] ) except ValueError : data [ fields [ 0 ] . decode ( "ascii" ) [ : - 1 ] ] = fields [ 1 ] . decode ( "ascii" ) return data | Returns the system process status . | 161 | 7 |
250,150 | def read_stat ( ) : data = [ ] with open ( "/proc/stat" , "rb" ) as stat_file : for line in stat_file : cpu_stat = line . split ( ) if cpu_stat [ 0 ] [ : 3 ] != b"cpu" : break # First cpu line is aggregation of following lines, skip it if len ( cpu_stat [ 0 ] ) == 3 : continue data . append ( { "times" : { "user" : int ( cpu_stat [ 1 ] ) , "nice" : int ( cpu_stat [ 2 ] ) , "sys" : int ( cpu_stat [ 3 ] ) , "idle" : int ( cpu_stat [ 4 ] ) , "irq" : int ( cpu_stat [ 6 ] ) , } } ) return data | Returns the system stat information . | 176 | 6 |
250,151 | def set_config ( * * config ) : config . setdefault ( "debug" , bool ( strtobool ( os . getenv ( "IOPIPE_DEBUG" , "false" ) ) ) ) config . setdefault ( "enabled" , bool ( strtobool ( os . getenv ( "IOPIPE_ENABLED" , "true" ) ) ) ) config . setdefault ( "host" , get_hostname ( ) ) config . setdefault ( "install_method" , os . getenv ( "IOPIPE_INSTALL_METHOD" , "manual" ) ) config . setdefault ( "network_timeout" , os . getenv ( "IOPIPE_NETWORK_TIMEOUT" , 5000 ) ) config . setdefault ( "path" , get_collector_path ( ) ) config . setdefault ( "plugins" , [ ] ) config . setdefault ( "sync_http" , False ) config . setdefault ( "timeout_window" , os . getenv ( "IOPIPE_TIMEOUT_WINDOW" , 500 ) ) config . setdefault ( "token" , os . getenv ( "IOPIPE_TOKEN" ) or os . getenv ( "IOPIPE_CLIENTID" ) or "" ) if "client_id" in config : config [ "token" ] = config . pop ( "client_id" ) if "url" in config : url = config . pop ( "url" ) config [ "host" ] = get_hostname ( url ) config [ "path" ] = get_collector_path ( url ) if "." in str ( config [ "network_timeout" ] ) : warnings . warn ( "IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer" ) try : config [ "debug" ] = bool ( config [ "debug" ] ) except ValueError : config [ "debug" ] = False try : config [ "network_timeout" ] = int ( config [ "network_timeout" ] ) / 1000.0 except ValueError : config [ "network_timeout" ] = 5.0 if "." in str ( config [ "timeout_window" ] ) : warnings . warn ( "IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer" ) try : config [ "timeout_window" ] = int ( config [ "timeout_window" ] ) / 1000.0 except ValueError : config [ "timeout_window" ] = 0.5 return config | Returns IOpipe configuration options setting defaults as necessary . | 556 | 11 |
250,152 | def b ( s ) : return s if isinstance ( s , bytes ) else s . encode ( locale . getpreferredencoding ( ) ) | Encodes Unicode strings to byte strings if necessary . | 31 | 10 |
250,153 | def LogMsg ( msg ) : global headerlogged if headerlogged == 0 : print ( "{0:<8} {1:<90} {2}" . format ( "Time" , "MainThread" , "UpdateSNMPObjsThread" ) ) print ( "{0:-^120}" . format ( "-" ) ) headerlogged = 1 threadname = threading . currentThread ( ) . name funcname = sys . _getframe ( 1 ) . f_code . co_name if funcname == "<module>" : funcname = "Main code path" elif funcname == "LogNetSnmpMsg" : funcname = "net-snmp code" else : funcname = "{0}()" . format ( funcname ) if threadname == "MainThread" : logmsg = "{0} {1:<112.112}" . format ( time . strftime ( "%T" , time . localtime ( time . time ( ) ) ) , "{0}: {1}" . format ( funcname , msg ) ) else : logmsg = "{0} {1:>112.112}" . format ( time . strftime ( "%T" , time . localtime ( time . time ( ) ) ) , "{0}: {1}" . format ( funcname , msg ) ) print ( logmsg ) | Writes a formatted log message with a timestamp to stdout . | 287 | 13 |
250,154 | def UpdateSNMPObjs ( ) : global threadingString LogMsg ( "Beginning data update." ) data = "" # Obtain the data by calling an external command. We don't use # subprocess.check_output() here for compatibility with Python versions # older than 2.7. LogMsg ( "Calling external command \"sleep 5; date\"." ) proc = subprocess . Popen ( "sleep 5; date" , shell = True , env = { "LANG" : "C" } , stdout = subprocess . PIPE , stderr = subprocess . STDOUT ) output = proc . communicate ( ) [ 0 ] . splitlines ( ) [ 0 ] rc = proc . poll ( ) if rc != 0 : LogMsg ( "An error occurred executing the command: {0}" . format ( output ) ) return msg = "Updating \"threadingString\" object with data \"{0}\"." LogMsg ( msg . format ( output ) ) threadingString . update ( output ) LogMsg ( "Data update done, exiting thread." ) | Function that does the actual data update . | 229 | 8 |
250,155 | def getRegistered ( self , context = "" ) : myobjs = { } try : # Python 2.x objs_iterator = self . _objs [ context ] . iteritems ( ) except AttributeError : # Python 3.x objs_iterator = self . _objs [ context ] . items ( ) for oidstr , snmpobj in objs_iterator : myobjs [ oidstr ] = { "type" : type ( snmpobj ) . __name__ , "value" : snmpobj . value ( ) } return dict ( myobjs ) | Returns a dictionary with the currently registered SNMP objects . | 127 | 11 |
250,156 | def start ( self ) : if self . _status != netsnmpAgentStatus . CONNECTED and self . _status != netsnmpAgentStatus . RECONNECTING : self . _status = netsnmpAgentStatus . FIRSTCONNECT libnsa . init_snmp ( b ( self . AgentName ) ) if self . _status == netsnmpAgentStatus . CONNECTFAILED : msg = "Error connecting to snmpd instance at \"{0}\" -- " "incorrect \"MasterSocket\" or snmpd not running?" msg = msg . format ( self . MasterSocket ) raise netsnmpAgentException ( msg ) | Starts the agent . Among other things this means connecting to the master agent if configured that way . | 139 | 20 |
250,157 | def _adjust_trim_top ( self , canv , size ) : action = self . _scroll_action self . _scroll_action = None maxcol , maxrow = size trim_top = self . _trim_top canv_rows = canv . rows ( ) if trim_top < 0 : # Negative trim_top values use bottom of canvas as reference trim_top = canv_rows - maxrow + trim_top + 1 if canv_rows <= maxrow : self . _trim_top = 0 # Reset scroll position return def ensure_bounds ( new_trim_top ) : return max ( 0 , min ( canv_rows - maxrow , new_trim_top ) ) if action == SCROLL_LINE_UP : self . _trim_top = ensure_bounds ( trim_top - 1 ) elif action == SCROLL_LINE_DOWN : self . _trim_top = ensure_bounds ( trim_top + 1 ) elif action == SCROLL_PAGE_UP : self . _trim_top = ensure_bounds ( trim_top - maxrow + 1 ) elif action == SCROLL_PAGE_DOWN : self . _trim_top = ensure_bounds ( trim_top + maxrow - 1 ) elif action == SCROLL_TO_TOP : self . _trim_top = 0 elif action == SCROLL_TO_END : self . _trim_top = canv_rows - maxrow else : self . _trim_top = ensure_bounds ( trim_top ) # If the cursor was moved by the most recent keypress, adjust trim_top # so that the new cursor position is within the displayed canvas part. # But don't do this if the cursor is at the top/bottom edge so we can still scroll out if self . _old_cursor_coords is not None and self . _old_cursor_coords != canv . cursor : self . _old_cursor_coords = None curscol , cursrow = canv . cursor if cursrow < self . _trim_top : self . _trim_top = cursrow elif cursrow >= self . _trim_top + maxrow : self . _trim_top = max ( 0 , cursrow - maxrow + 1 ) | Adjust self . _trim_top according to self . _scroll_action | 512 | 16 |
250,158 | def rows_max ( self , size = None , focus = False ) : if size is not None : ow = self . _original_widget ow_size = self . _get_original_widget_size ( size ) sizing = ow . sizing ( ) if FIXED in sizing : self . _rows_max_cached = ow . pack ( ow_size , focus ) [ 1 ] elif FLOW in sizing : self . _rows_max_cached = ow . rows ( ow_size , focus ) else : raise RuntimeError ( 'Not a flow/box widget: %r' % self . _original_widget ) return self . _rows_max_cached | Return the number of rows for size | 145 | 7 |
250,159 | def scrolling_base_widget ( self ) : def orig_iter ( w ) : while hasattr ( w , 'original_widget' ) : w = w . original_widget yield w yield w def is_scrolling_widget ( w ) : return hasattr ( w , 'get_scrollpos' ) and hasattr ( w , 'rows_max' ) for w in orig_iter ( self ) : if is_scrolling_widget ( w ) : return w raise ValueError ( 'Not compatible to be wrapped by ScrollBar: %r' % w ) | Nearest original_widget that is compatible with the scrolling API | 121 | 12 |
250,160 | def ignore_after ( seconds , coro = None , * args , timeout_result = None ) : if coro : return _ignore_after_func ( seconds , False , coro , args , timeout_result ) return TimeoutAfter ( seconds , ignore = True ) | Execute the specified coroutine and return its result . Issue a cancellation request after seconds have elapsed . When a timeout occurs no exception is raised . Instead timeout_result is returned . | 58 | 36 |
250,161 | def _add_task ( self , task ) : if hasattr ( task , '_task_group' ) : raise RuntimeError ( 'task is already part of a group' ) if self . _closed : raise RuntimeError ( 'task group is closed' ) task . _task_group = self if task . done ( ) : self . _done . append ( task ) else : self . _pending . add ( task ) task . add_done_callback ( self . _on_done ) | Add an already existing task to the task group . | 107 | 10 |
250,162 | async def next_done ( self ) : if not self . _done and self . _pending : self . _done_event . clear ( ) await self . _done_event . wait ( ) if self . _done : return self . _done . popleft ( ) return None | Returns the next completed task . Returns None if no more tasks remain . A TaskGroup may also be used as an asynchronous iterator . | 63 | 26 |
250,163 | async def join ( self ) : def errored ( task ) : return not task . cancelled ( ) and task . exception ( ) try : if self . _wait in ( all , object ) : while True : task = await self . next_done ( ) if task is None : return if errored ( task ) : break if self . _wait is object : if task . cancelled ( ) or task . result ( ) is not None : return else : # any task = await self . next_done ( ) if task is None or not errored ( task ) : return finally : await self . cancel_remaining ( ) if errored ( task ) : raise task . exception ( ) | Wait for tasks in the group to terminate according to the wait policy for the group . | 145 | 17 |
250,164 | async def cancel_remaining ( self ) : self . _closed = True task_list = list ( self . _pending ) for task in task_list : task . cancel ( ) for task in task_list : with suppress ( CancelledError ) : await task | Cancel all remaining tasks . | 58 | 6 |
250,165 | async def _connect_one ( self , remote_address ) : loop = asyncio . get_event_loop ( ) for info in await loop . getaddrinfo ( str ( self . address . host ) , self . address . port , type = socket . SOCK_STREAM ) : # This object has state so is only good for one connection client = self . protocol ( remote_address , self . auth ) sock = socket . socket ( family = info [ 0 ] ) try : # A non-blocking socket is required by loop socket methods sock . setblocking ( False ) await loop . sock_connect ( sock , info [ 4 ] ) await self . _handshake ( client , sock , loop ) self . peername = sock . getpeername ( ) return sock except ( OSError , SOCKSProtocolError ) as e : exception = e # Don't close the socket because of an asyncio bug # see https://github.com/kyuupichan/aiorpcX/issues/8 return exception | Connect to the proxy and perform a handshake requesting a connection . | 220 | 12 |
250,166 | async def _connect ( self , remote_addresses ) : assert remote_addresses exceptions = [ ] for remote_address in remote_addresses : sock = await self . _connect_one ( remote_address ) if isinstance ( sock , socket . socket ) : return sock , remote_address exceptions . append ( sock ) strings = set ( f'{exc!r}' for exc in exceptions ) raise ( exceptions [ 0 ] if len ( strings ) == 1 else OSError ( f'multiple exceptions: {", ".join(strings)}' ) ) | Connect to the proxy and perform a handshake requesting a connection to each address in addresses . | 122 | 17 |
250,167 | async def _detect_proxy ( self ) : if self . protocol is SOCKS4a : remote_address = NetAddress ( 'www.apple.com' , 80 ) else : remote_address = NetAddress ( '8.8.8.8' , 53 ) sock = await self . _connect_one ( remote_address ) if isinstance ( sock , socket . socket ) : sock . close ( ) return True # SOCKSFailure indicates something failed, but that we are likely talking to a # proxy return isinstance ( sock , SOCKSFailure ) | Return True if it appears we can connect to a SOCKS proxy otherwise False . | 125 | 17 |
250,168 | async def auto_detect_at_host ( cls , host , ports , auth ) : for port in ports : proxy = await cls . auto_detect_at_address ( NetAddress ( host , port ) , auth ) if proxy : return proxy return None | Try to detect a SOCKS proxy on a host on one of the ports . | 59 | 17 |
250,169 | async def create_connection ( self ) : connector = self . proxy or self . loop return await connector . create_connection ( self . session_factory , self . host , self . port , * * self . kwargs ) | Initiate a connection . | 50 | 6 |
250,170 | def data_received ( self , framed_message ) : if self . verbosity >= 4 : self . logger . debug ( f'Received framed message {framed_message}' ) self . recv_size += len ( framed_message ) self . bump_cost ( len ( framed_message ) * self . bw_cost_per_byte ) self . framer . received_bytes ( framed_message ) | Called by asyncio when a message comes in . | 90 | 11 |
250,171 | def pause_writing ( self ) : if not self . is_closing ( ) : self . _can_send . clear ( ) self . transport . pause_reading ( ) | Transport calls when the send buffer is full . | 38 | 10 |
250,172 | def resume_writing ( self ) : if not self . _can_send . is_set ( ) : self . _can_send . set ( ) self . transport . resume_reading ( ) | Transport calls when the send buffer has room . | 42 | 10 |
250,173 | def connection_made ( self , transport ) : self . transport = transport # If the Socks proxy was used then _proxy and _remote_address are already set if self . _proxy is None : # This would throw if called on a closed SSL transport. Fixed in asyncio in # Python 3.6.1 and 3.5.4 peername = transport . get_extra_info ( 'peername' ) self . _remote_address = NetAddress ( peername [ 0 ] , peername [ 1 ] ) self . _task = spawn_sync ( self . _process_messages ( ) , loop = self . loop ) | Called by asyncio when a connection is established . | 135 | 11 |
250,174 | def connection_lost ( self , exc ) : # Work around uvloop bug; see https://github.com/MagicStack/uvloop/issues/246 if self . transport : self . transport = None self . closed_event . set ( ) # Release waiting tasks self . _can_send . set ( ) # Cancelling directly leads to self-cancellation problems for member # functions await-ing self.close() self . loop . call_soon ( self . _task . cancel ) | Called by asyncio when the connection closes . | 106 | 10 |
250,175 | def recalc_concurrency ( self ) : # Refund resource usage proportionally to elapsed time; the bump passed is negative now = time . time ( ) self . cost = max ( 0 , self . cost - ( now - self . _cost_time ) * self . cost_decay_per_sec ) self . _cost_time = now self . _cost_last = self . cost # Setting cost_hard_limit <= 0 means to not limit concurrency value = self . _incoming_concurrency . max_concurrent cost_soft_range = self . cost_hard_limit - self . cost_soft_limit if cost_soft_range <= 0 : return cost = self . cost + self . extra_cost ( ) self . _cost_fraction = max ( 0.0 , ( cost - self . cost_soft_limit ) / cost_soft_range ) target = max ( 0 , ceil ( ( 1.0 - self . _cost_fraction ) * self . initial_concurrent ) ) if abs ( target - value ) > 1 : self . logger . info ( f'changing task concurrency from {value} to {target}' ) self . _incoming_concurrency . set_target ( target ) | Call to recalculate sleeps and concurrency for the session . Called automatically if cost has drifted significantly . Otherwise can be called at regular intervals if desired . | 269 | 31 |
250,176 | async def close ( self , * , force_after = 30 ) : if self . transport : self . transport . close ( ) try : async with timeout_after ( force_after ) : await self . closed_event . wait ( ) except TaskTimeout : self . abort ( ) await self . closed_event . wait ( ) | Close the connection and return when closed . | 70 | 8 |
250,177 | async def send_request ( self , method , args = ( ) ) : message , event = self . connection . send_request ( Request ( method , args ) ) return await self . _send_concurrent ( message , event , 1 ) | Send an RPC request over the network . | 52 | 8 |
250,178 | async def send_notification ( self , method , args = ( ) ) : message = self . connection . send_notification ( Notification ( method , args ) ) await self . _send_message ( message ) | Send an RPC notification over the network . | 46 | 8 |
250,179 | async def close ( self ) : if self . server : self . server . close ( ) await self . server . wait_closed ( ) self . server = None | Close the listening socket . This does not close any ServerSession objects created to handle incoming connections . | 35 | 19 |
250,180 | def _message_to_payload ( cls , message ) : try : return json . loads ( message . decode ( ) ) except UnicodeDecodeError : message = 'messages must be encoded in UTF-8' except json . JSONDecodeError : message = 'invalid JSON' raise cls . _error ( cls . PARSE_ERROR , message , True , None ) | Returns a Python object or a ProtocolError . | 83 | 9 |
250,181 | def batch_message ( cls , batch , request_ids ) : assert isinstance ( batch , Batch ) if not cls . allow_batches : raise ProtocolError . invalid_request ( 'protocol does not permit batches' ) id_iter = iter ( request_ids ) rm = cls . request_message nm = cls . notification_message parts = ( rm ( request , next ( id_iter ) ) if isinstance ( request , Request ) else nm ( request ) for request in batch ) return cls . batch_message_from_parts ( parts ) | Convert a request Batch to a message . | 123 | 10 |
250,182 | def batch_message_from_parts ( cls , messages ) : # Comma-separate the messages and wrap the lot in square brackets middle = b', ' . join ( messages ) if not middle : raise ProtocolError . empty_batch ( ) return b'' . join ( [ b'[' , middle , b']' ] ) | Convert messages one per batch item into a batch message . At least one message must be passed . | 71 | 20 |
250,183 | def encode_payload ( cls , payload ) : try : return json . dumps ( payload ) . encode ( ) except TypeError : msg = f'JSON payload encoding error: {payload}' raise ProtocolError ( cls . INTERNAL_ERROR , msg ) from None | Encode a Python object as JSON and convert it to bytes . | 60 | 13 |
250,184 | def detect_protocol ( cls , message ) : main = cls . _message_to_payload ( message ) def protocol_for_payload ( payload ) : if not isinstance ( payload , dict ) : return JSONRPCLoose # Will error # Obey an explicit "jsonrpc" version = payload . get ( 'jsonrpc' ) if version == '2.0' : return JSONRPCv2 if version == '1.0' : return JSONRPCv1 # Now to decide between JSONRPCLoose and JSONRPCv1 if possible if 'result' in payload and 'error' in payload : return JSONRPCv1 return JSONRPCLoose if isinstance ( main , list ) : parts = set ( protocol_for_payload ( payload ) for payload in main ) # If all same protocol, return it if len ( parts ) == 1 : return parts . pop ( ) # If strict protocol detected, return it, preferring JSONRPCv2. # This means a batch of JSONRPCv1 will fail for protocol in ( JSONRPCv2 , JSONRPCv1 ) : if protocol in parts : return protocol # Will error if no parts return JSONRPCLoose return protocol_for_payload ( main ) | Attempt to detect the protocol from the message . | 276 | 9 |
250,185 | def receive_message ( self , message ) : if self . _protocol is JSONRPCAutoDetect : self . _protocol = JSONRPCAutoDetect . detect_protocol ( message ) try : item , request_id = self . _protocol . message_to_item ( message ) except ProtocolError as e : if e . response_msg_id is not id : return self . _receive_response ( e , e . response_msg_id ) raise if isinstance ( item , Request ) : item . send_result = partial ( self . _send_result , request_id ) return [ item ] if isinstance ( item , Notification ) : return [ item ] if isinstance ( item , Response ) : return self . _receive_response ( item . result , request_id ) assert isinstance ( item , list ) if all ( isinstance ( payload , dict ) and ( 'result' in payload or 'error' in payload ) for payload in item ) : return self . _receive_response_batch ( item ) else : return self . _receive_request_batch ( item ) | Call with an unframed message received from the network . | 239 | 12 |
250,186 | def cancel_pending_requests ( self ) : exception = CancelledError ( ) for _request , event in self . _requests . values ( ) : event . result = exception event . set ( ) self . _requests . clear ( ) | Cancel all pending requests . | 54 | 6 |
250,187 | def is_valid_hostname ( hostname ) : if not isinstance ( hostname , str ) : raise TypeError ( 'hostname must be a string' ) # strip exactly one dot from the right, if present if hostname and hostname [ - 1 ] == "." : hostname = hostname [ : - 1 ] if not hostname or len ( hostname ) > 253 : return False labels = hostname . split ( '.' ) # the TLD must be not all-numeric if re . match ( NUMERIC_REGEX , labels [ - 1 ] ) : return False return all ( LABEL_REGEX . match ( label ) for label in labels ) | Return True if hostname is valid otherwise False . | 147 | 10 |
250,188 | def classify_host ( host ) : if isinstance ( host , ( IPv4Address , IPv6Address ) ) : return host if is_valid_hostname ( host ) : return host return ip_address ( host ) | Host is an IPv4Address IPv6Address or a string . | 47 | 13 |
250,189 | def validate_port ( port ) : if not isinstance ( port , ( str , int ) ) : raise TypeError ( f'port must be an integer or string: {port}' ) if isinstance ( port , str ) and port . isdigit ( ) : port = int ( port ) if isinstance ( port , int ) and 0 < port <= 65535 : return port raise ValueError ( f'invalid port: {port}' ) | Validate port and return it as an integer . | 96 | 10 |
250,190 | def validate_protocol ( protocol ) : if not re . match ( PROTOCOL_REGEX , protocol ) : raise ValueError ( f'invalid protocol: {protocol}' ) return protocol . lower ( ) | Validate a protocol a string and return it . | 47 | 10 |
250,191 | def is_async_call ( func ) : while isinstance ( func , partial ) : func = func . func return inspect . iscoroutinefunction ( func ) | inspect . iscoroutinefunction that looks through partials . | 35 | 13 |
250,192 | def from_string ( cls , string , * , default_func = None ) : if not isinstance ( string , str ) : raise TypeError ( f'service must be a string: {string}' ) parts = string . split ( '://' , 1 ) if len ( parts ) == 2 : protocol , address = parts else : item , = parts protocol = None if default_func : if default_func ( item , ServicePart . HOST ) and default_func ( item , ServicePart . PORT ) : protocol , address = item , '' else : protocol , address = default_func ( None , ServicePart . PROTOCOL ) , item if not protocol : raise ValueError ( f'invalid service string: {string}' ) if default_func : default_func = partial ( default_func , protocol . lower ( ) ) address = NetAddress . from_string ( address , default_func = default_func ) return cls ( protocol , address ) | Construct a Service from a string . | 209 | 7 |
250,193 | def scrub ( self ) : LOG . info ( "Scrubbing out the nasty characters that break our parser." ) myfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] ) ) tmpfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] + '.tmp.gz' ) ) tmp = gzip . open ( tmpfile , 'wb' ) du = DipperUtil ( ) with gzip . open ( myfile , 'rb' ) as fh : filereader = io . TextIOWrapper ( fh , newline = "" ) for line in filereader : line = du . remove_control_characters ( line ) + '\n' tmp . write ( line . encode ( 'utf-8' ) ) tmp . close ( ) # TEC I do not like this at all. original data must be preserved as is. # also may be heavy handed as chars which do not break the parser # are stripped as well (i.e. tabs and newlines) # move the temp file LOG . info ( "Replacing the original data with the scrubbed file." ) shutil . move ( tmpfile , myfile ) return | The XML file seems to have mixed - encoding ; we scrub out the control characters from the file for processing . | 272 | 22 |
250,194 | def process_associations ( self , limit ) : myfile = '/' . join ( ( self . rawdir , self . files [ 'data' ] [ 'file' ] ) ) f = gzip . open ( myfile , 'rb' ) filereader = io . TextIOWrapper ( f , newline = "" ) filereader . readline ( ) # remove the xml declaration line for event , elem in ET . iterparse ( filereader ) : # iterparse is not deprecated self . process_xml_table ( elem , 'Article_Breed' , self . _process_article_breed_row , limit ) self . process_xml_table ( elem , 'Article_Phene' , self . _process_article_phene_row , limit ) self . process_xml_table ( elem , 'Breed_Phene' , self . _process_breed_phene_row , limit ) self . process_xml_table ( elem , 'Lida_Links' , self . _process_lida_links_row , limit ) self . process_xml_table ( elem , 'Phene_Gene' , self . _process_phene_gene_row , limit ) self . process_xml_table ( elem , 'Group_MPO' , self . _process_group_mpo_row , limit ) f . close ( ) return | Loop through the xml file and process the article - breed article - phene breed - phene phene - gene associations and the external links to LIDA . | 308 | 32 |
250,195 | def _process_article_phene_row ( self , row ) : # article_id, phene_id, added_by # look up the article in the hashmap phenotype_id = self . id_hash [ 'phene' ] . get ( row [ 'phene_id' ] ) article_id = self . id_hash [ 'article' ] . get ( row [ 'article_id' ] ) omia_id = self . _get_omia_id_from_phene_id ( phenotype_id ) if self . test_mode or omia_id not in self . test_ids [ 'disease' ] or phenotype_id is None or article_id is None : return # make a triple, where the article is about the phenotype self . graph . addTriple ( article_id , self . globaltt [ 'is_about' ] , phenotype_id ) return | Linking articles to species - specific phenes . | 198 | 10 |
250,196 | def filter_keep_phenotype_entry_ids ( self , entry ) : omim_id = str ( entry [ 'mimNumber' ] ) otype = self . globaltt [ 'obsolete' ] if omim_id in self . omim_type : otype = self . omim_type [ omim_id ] if otype == self . globaltt [ 'obsolete' ] and omim_id in self . omim_replaced : omim_id = self . omim_replaced [ omim_id ] otype = self . omim_type [ omim_id ] # else: # removed or multiple if otype not in ( self . globaltt [ 'Phenotype' ] , self . globaltt [ 'has_affected_feature' ] ) : omim_id = None return omim_id | doubt this should be kept | 186 | 6 |
250,197 | def make_spo ( sub , prd , obj ) : # To establish string as a curie and expand, # we use a global curie_map(.yaml) # sub are allways uri (unless a bnode) # prd are allways uri (unless prd is 'a') # should fail loudly if curie does not exist if prd == 'a' : prd = 'rdf:type' try : ( subcuri , subid ) = re . split ( r':' , sub ) except Exception : LOG . error ( "not a Subject Curie '%s'" , sub ) raise ValueError try : ( prdcuri , prdid ) = re . split ( r':' , prd ) except Exception : LOG . error ( "not a Predicate Curie '%s'" , prd ) raise ValueError objt = '' # object is a curie or bnode or literal [string|number] objcuri = None match = re . match ( CURIERE , obj ) if match is not None : try : ( objcuri , objid ) = re . split ( r':' , obj ) except ValueError : match = None if match is not None and objcuri in CURIEMAP : objt = CURIEMAP [ objcuri ] + objid . strip ( ) # allow unexpanded bnodes in object if objcuri != '_' or CURIEMAP [ objcuri ] != '_:b' : objt = '<' + objt + '>' elif obj . isnumeric ( ) : objt = '"' + obj + '"' else : # Literals may not contain the characters ", LF, CR '\' # except in their escaped forms. internal quotes as well. obj = obj . strip ( '"' ) . replace ( '\\' , '\\\\' ) . replace ( '"' , '\'' ) obj = obj . replace ( '\n' , '\\n' ) . replace ( '\r' , '\\r' ) objt = '"' + obj + '"' # allow unexpanded bnodes in subject if subcuri is not None and subcuri in CURIEMAP and prdcuri is not None and prdcuri in CURIEMAP : subjt = CURIEMAP [ subcuri ] + subid . strip ( ) if subcuri != '_' or CURIEMAP [ subcuri ] != '_:b' : subjt = '<' + subjt + '>' return subjt + ' <' + CURIEMAP [ prdcuri ] + prdid . strip ( ) + '> ' + objt + ' .' else : LOG . error ( 'Cant work with: <%s> %s , <%s> %s, %s' , subcuri , subid , prdcuri , prdid , objt ) return None | Decorates the three given strings as a line of ntriples | 643 | 14 |
250,198 | def write_spo ( sub , prd , obj ) : rcvtriples . append ( make_spo ( sub , prd , obj ) ) | write triples to a buffer in case we decide to drop them | 34 | 13 |
250,199 | def make_allele_by_consequence ( self , consequence , gene_id , gene_symbol ) : allele_id = None # Loss of function : Nonsense, frame-shifting indel, # essential splice site mutation, whole gene deletion or any other # mutation where functional analysis demonstrates clear reduction # or loss of function # All missense/in frame : Where all the mutations described in the data # source are either missense or in frame deletions and there is no # evidence favoring either loss-of-function, activating or # dominant negative effect # Dominant negative : Mutation within one allele of a gene that creates # a significantly greater deleterious effect on gene product # function than a monoallelic loss of function mutation # Activating : Mutation, usually missense that results in # a constitutive functional activation of the gene product # Increased gene dosage : Copy number variation that increases # the functional dosage of the gene # Cis-regulatory or promotor mutation : Mutation in cis-regulatory # elements that lies outwith the known transcription unit and # promotor of the controlled gene # Uncertain : Where the exact nature of the mutation is unclear or # not recorded type_id = self . resolve ( consequence , mandatory = False ) if type_id == consequence : LOG . warning ( "Consequence type unmapped: %s" , str ( consequence ) ) type_id = self . globaltt [ 'sequence_variant' ] # make the allele allele_id = '' . join ( ( gene_id , type_id ) ) allele_id = re . sub ( r':' , '' , allele_id ) allele_id = '_:' + allele_id # make this a BNode allele_label = ' ' . join ( ( consequence , 'allele in' , gene_symbol ) ) self . model . addIndividualToGraph ( allele_id , allele_label , type_id ) self . geno . addAlleleOfGene ( allele_id , gene_id ) return allele_id | Given a consequence label that describes a variation type create an anonymous variant of the specified gene as an instance of that consequence type . | 432 | 25 |
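
The rows above pair tokenized Python source (`question`) with its docstring summary (`target`), plus precomputed length columns. Below is a minimal sketch of how a table like this could be loaded and filtered with the Hugging Face `datasets` library; the repo id `user/code-docstring-pairs` is a placeholder, not this dataset's actual path.

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical repo id -- substitute the actual Hub path of this dataset.
ds = load_dataset("user/code-docstring-pairs", split="train")

# Use the precomputed length columns to keep short functions with
# non-trivial summaries.
short = ds.filter(lambda row: row["len_question"] <= 100 and row["len_target"] >= 5)

# Inspect one (code, summary) pair.
row = short[0]
print("CODE   :", row["question"][:120], "...")
print("SUMMARY:", row["target"])
```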