idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
248,600
def bucket_create(self, name, bucket_type='couchbase', bucket_password='', replicas=0, ram_quota=1024, flush_enabled=False):
    """Create a new bucket via the management REST API."""
    form = {
        'name': name,
        'bucketType': bucket_type,
        'authType': 'sasl',
        'saslPassword': bucket_password if bucket_password else '',
        'flushEnabled': int(flush_enabled),
        'ramQuotaMB': ram_quota,
    }
    # Replica count only applies to persistent/ephemeral bucket types
    if bucket_type in ('couchbase', 'membase', 'ephemeral'):
        form['replicaNumber'] = replicas
    return self.http_request(
        path='/pools/default/buckets',
        method='POST',
        content=self._mk_formstr(form),
        content_type='application/x-www-form-urlencoded')
Create a new bucket
206
4
248,601
def wait_ready(self, name, timeout=5.0, sleep_interval=0.2):
    """Poll a newly created bucket until every node reports healthy.

    Re-raises the last CouchbaseError once another sleep would pass
    the deadline.
    """
    deadline = time() + timeout
    while True:
        try:
            info = self.bucket_info(name).value
            for node in info['nodes']:
                if node['status'] != 'healthy':
                    raise NotReadyError.pyexc('Not all nodes are healthy')
            return  # every node healthy
        except E.CouchbaseError:
            # Give up if sleeping again would exceed the deadline
            if time() + sleep_interval > deadline:
                raise
            sleep(sleep_interval)
Wait for a newly created bucket to be ready .
122
10
248,602
def bucket_update(self, name, current, bucket_password=None, replicas=None, ram_quota=None, flush_enabled=None):
    """Update an existing bucket's settings, merging with its current config."""
    current = current.value
    form = {'authType': current['authType']}
    if 'saslPassword' in current:
        form['saslPassword'] = current['saslPassword']
    if bucket_password is not None:
        form['authType'] = 'sasl'
        form['saslPassword'] = bucket_password
    form['replicaNumber'] = (
        replicas if replicas is not None else current['replicaNumber'])
    if ram_quota:
        form['ramQuotaMB'] = ram_quota
    else:
        # Server reports the quota in bytes; the API expects megabytes
        form['ramQuotaMB'] = current['quota']['ram'] / 1024 / 1024
    if flush_enabled is not None:
        form['flushEnabled'] = int(flush_enabled)
    form['proxyPort'] = current['proxyPort']
    return self.http_request(
        path='/pools/default/buckets/' + name,
        method='POST',
        content_type='application/x-www-form-urlencoded',
        content=self._mk_formstr(form))
Update an existing bucket's settings.
298
7
248,603
def users_get(self, domain):
    """Retrieve the list of users defined for *domain*."""
    return self.http_request(path=self._get_management_path(domain),
                             method='GET')
Retrieve a list of users from the server .
40
10
248,604
def user_get(self, domain, userid):
    """Retrieve a single user identified by *userid* within *domain*."""
    return self.http_request(path=self._get_management_path(domain, userid),
                             method='GET')
Retrieve a user from the server
46
7
248,605
def user_upsert(self, domain, userid, password=None, roles=None, name=None):
    """Create or update a user with the given roles (and optional password/name).

    Raises ArgumentError when roles is empty/not a list, or when a
    password is supplied for an external auth domain.
    """
    if not roles or not isinstance(roles, list):
        raise E.ArgumentError("Roles must be a non-empty list")
    if password and domain == AuthDomain.External:
        raise E.ArgumentError("External domains must not have passwords")
    # A role may be a bare string or a (role, bucket) pair
    formatted = []
    for role in roles:
        if isinstance(role, basestring):
            formatted.append(role)
        else:
            formatted.append('{0}[{1}]'.format(*role))
    params = {'roles': ','.join(formatted)}
    if password:
        params['password'] = password
    if name:
        params['name'] = name
    return self.http_request(
        path=self._get_management_path(domain, userid),
        method='PUT',
        content_type='application/x-www-form-urlencoded',
        content=self._mk_formstr(params))
Upsert a user in the cluster
255
8
248,606
def convert_1x_args(bucket, **kwargs):
    """Convert 1.x constructor arguments to their 2.x equivalents.

    Pops legacy ``host``/``port`` keywords and, unless a connection
    string was already supplied, synthesizes ``connection_string``.
    """
    host = kwargs.pop('host', 'localhost')
    port = kwargs.pop('port', None)
    # Idiomatic membership test (was: `not 'connstr' in kwargs`);
    # only synthesize a connection string when none was given explicitly
    if 'connstr' not in kwargs and 'connection_string' not in kwargs:
        kwargs['connection_string'] = _build_connstr(host, port, bucket)
    return kwargs
Converts arguments for 1.x constructors to their 2.x forms
96
15
248,607
def parse(cls, ss):
    """Parse an existing connection string into its components."""
    up = urlparse(ss)
    path, query = up.path, up.query
    # Some urlparse variants leave the query attached to the path
    if '?' in path:
        path, _ = up.path.split('?')
    # Strip the leading slash; the remainder names the bucket
    bucket = path[1:] if path.startswith('/') else path
    return cls(bucket=bucket,
               options=parse_qs(query),
               hosts=up.netloc.split(','),
               scheme=up.scheme)
Parses an existing connection string
111
7
248,608
def encode(self):
    """Serialize this object back into a connection string."""
    # Each option value is a list (as produced by parse_qs); keep the first
    flat = dict((k, v[0]) for k, v in self.options.items())
    ss = '{0}://{1}'.format(self.scheme, ','.join(self.hosts))
    if self.bucket:
        ss += '/' + self.bucket
    # urlencode escapes '/', which connection strings expect literally
    return ss + '?' + urlencode(flat).replace('%2F', '/')
Encodes the current state of the object into a string .
114
12
248,609
def rc_to_exctype(cls, rc):
    """Map a libcouchbase error code to its exception class.

    Unknown codes get a freshly generated class which is cached for
    subsequent lookups.
    """
    if rc in _LCB_ERRNO_MAP:
        return _LCB_ERRNO_MAP[rc]
    newcls = _mk_lcberr(rc)
    _LCB_ERRNO_MAP[rc] = newcls
    return newcls
Map an error code to an exception
67
7
248,610
def split_results(self):
    """Split all_results into (successes, failures) dicts.

    Entries with an empty key receive a synthetic unique key so they
    are not collapsed into one another.
    """
    ok, fail = {}, {}
    counter = 0
    # Largest non-empty key, used to build unique names for empty keys
    nokey_prefix = ([""] + sorted(filter(bool, self.all_results.keys())))[-1]
    for key, result in self.all_results.items():
        if not key:
            key = nokey_prefix + ":nokey:" + str(counter)
            counter += 1
        target = ok if getattr(result, 'success', True) else fail
        target[key] = result
    return ok, fail
Convenience method to separate failed and successful results .
138
11
248,611
def add(self, itm, **options):
    """Register *itm* together with its options.

    When no options are given, None is stored instead of an empty dict.
    """
    self._d[itm] = options if options else None
Convenience method to add an item together with a series of options .
30
15
248,612
def deprecate_module_attribute(mod, deprecated):
    """Return a proxy for *mod* that warns on access to deprecated attributes."""
    deprecated = set(deprecated)

    def _maybe_warn(attr):
        # Warn on each access; delegation to the module still proceeds
        if attr in deprecated:
            warnings.warn("Property %s is deprecated" % attr)

    class _Proxy(object):
        def __getattr__(self, attr):
            _maybe_warn(attr)
            return getattr(mod, attr)

        def __setattr__(self, attr, value):
            _maybe_warn(attr)
            return setattr(mod, attr, value)

    return _Proxy()
Return a wrapped object that warns about deprecated accesses
120
10
248,613
def get(self, path_or_index, default=None):
    """Resolve a sub-document path/index and return (err, value).

    When an error occurred, *default* is substituted for the value.
    """
    err, value = self._resolve(path_or_index)
    if err:
        value = default
    return err, value
Get details about a given result
43
6
248,614
def query(self, *args, **kwargs):
    """Reimplemented from base class; requires an AsyncViewBase-derived itercls.

    Raises ArgumentError when itercls is missing or not derived from
    AsyncViewBase. (Previously, a missing itercls caused a confusing
    TypeError from issubclass(None, ...) instead of the intended
    ArgumentError.)
    """
    itercls = kwargs.get('itercls', None)
    if itercls is None or not issubclass(itercls, AsyncViewBase):
        raise ArgumentError.pyexc("itercls must be defined "
                                  "and must be derived from AsyncViewBase")
    return super(AsyncBucket, self).query(*args, **kwargs)
Reimplemented from base class .
91
8
248,615
def _gen_3spec(op, path, xattr=False):
    """Build a value-less Spec tuple for the underlying C extension."""
    # Only the XATTR flag applies to value-less operations
    flags = _P.SDSPEC_F_XATTR if xattr else 0
    return Spec(op, path, flags)
Returns a Spec tuple suitable for passing to the underlying C extension . This variant is called for operations that lack an input value .
47
25
248,616
def upsert(path, value, create_parents=False, **kwargs):
    """Spec for creating or replacing a dictionary path."""
    return _gen_4spec(LCB_SDCMD_DICT_UPSERT, path, value,
                      create_path=create_parents, **kwargs)
Create or replace a dictionary path .
60
7
248,617
def array_append(path, *values, **kwargs):
    """Spec appending *values* to the end of an array."""
    create = kwargs.pop('create_parents', False)
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_LAST, path,
                      MultiValue(*values), create_path=create, **kwargs)
Add new values to the end of an array .
73
10
248,618
def array_prepend(path, *values, **kwargs):
    """Spec prepending *values* at the beginning of an array."""
    create = kwargs.pop('create_parents', False)
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_FIRST, path,
                      MultiValue(*values), create_path=create, **kwargs)
Add new values to the beginning of an array .
74
10
248,619
def array_insert(path, *values, **kwargs):
    """Spec inserting *values* at the position encoded in *path*."""
    return _gen_4spec(LCB_SDCMD_ARRAY_INSERT, path,
                      MultiValue(*values), **kwargs)
Insert items at a given position within an array .
52
10
248,620
def array_addunique(path, value, create_parents=False, **kwargs):
    """Spec adding *value* to an array only when not already present."""
    return _gen_4spec(LCB_SDCMD_ARRAY_ADD_UNIQUE, path, value,
                      create_path=create_parents, **kwargs)
Add a new value to an array if the value does not exist .
64
14
248,621
def counter(path, delta, create_parents=False, **kwargs):
    """Spec incrementing/decrementing a numeric path by a non-zero *delta*."""
    if not delta:
        raise ValueError("Delta must be positive or negative!")
    return _gen_4spec(LCB_SDCMD_COUNTER, path, delta,
                      create_path=create_parents, **kwargs)
Increment or decrement a counter in a document .
73
11
248,622
def add_results(self, *rvs, **kwargs):
    """Fold the mutation tokens of the given results into this state.

    Returns True when every result contributed a token; with quiet=True
    a token-less result yields False instead of raising.
    """
    if not rvs:
        raise MissingTokenError.pyexc(message='No results passed')
    quiet = kwargs.get('quiet')
    for rv in rvs:
        token = rv._mutinfo
        if not token:
            if quiet:
                return False
            raise MissingTokenError.pyexc(
                message='Result does not contain token')
        self._add_scanvec(token)
    return True
Changes the state to reflect the mutation which yielded the given result .
102
13
248,623
def add_all(self, bucket, quiet=False):
    """Add every mutation token tracked by *bucket* to this state.

    Returns True when any token was added; raises MissingTokenError when
    the bucket holds none (unless *quiet*).
    """
    added = False
    for token in bucket._mutinfo():
        added = True
        self._add_scanvec(token)
    if not (added or quiet):
        raise MissingTokenError('Bucket object contains no tokens!')
    return added
Ensures the query result is consistent with all prior mutations performed by a given bucket .
64
18
248,624
def _assign_kwargs ( self , kwargs ) : for k in kwargs : if not hasattr ( self , k ) : raise AttributeError ( k , 'Not valid for' , self . __class__ . __name__ ) setattr ( self , k , kwargs [ k ] )
Assigns all keyword arguments to a given instance raising an exception if one of the keywords is not already the name of a property .
69
27
248,625
def _mk_range_bucket ( name , n1 , n2 , r1 , r2 ) : d = { } if r1 is not None : d [ n1 ] = r1 if r2 is not None : d [ n2 ] = r2 if not d : raise TypeError ( 'Must specify at least one range boundary!' ) d [ 'name' ] = name return d
Create a named range specification for encoding .
86
8
248,626
def add_range(self, name, start=None, end=None):
    """Add a named date range (at least one of start/end) to this facet."""
    spec = _mk_range_bucket(name, 'start', 'end', start, end)
    self._ranges.append(spec)
    return self
Adds a date range to the given facet .
51
9
248,627
def add_range(self, name, min=None, max=None):
    """Add a named numeric range (at least one of min/max) to this facet."""
    spec = _mk_range_bucket(name, 'min', 'max', min, max)
    self._ranges.append(spec)
    return self
Add a numeric range .
51
5
248,628
def mk_kwargs(cls, kwargs):
    """Pop the arguments this class recognizes out of *kwargs*.

    Recognized keys are removed from *kwargs* in place and returned
    in a new dict.
    """
    recognized = ('row_factory', 'body', 'parent')
    return {k: kwargs.pop(k) for k in recognized if k in kwargs}
Pop recognized arguments from a keyword list .
65
8
248,629
def _set_named_args ( self , * * kv ) : for k in kv : self . _body [ '${0}' . format ( k ) ] = kv [ k ] return self
Set a named parameter in the query . The named field must exist in the query itself .
46
18
248,630
def consistent_with(self, state):
    """Require the query to be consistent with the given mutation state."""
    # Mutually exclusive with any other already-set consistency level
    if self.consistency not in (UNBOUNDED, NOT_BOUNDED, 'at_plus'):
        raise TypeError(
            'consistent_with not valid with other consistency options')
    if not state:
        raise TypeError('Passed empty or invalid state', state)
    self.consistency = 'at_plus'
    self._body['scan_vectors'] = state._sv
Indicate that the query should be consistent with one or more mutations .
97
14
248,631
def timeout(self):
    """Per-query timeout in seconds, parsed from the body's '<n>s' string."""
    raw = self._body.get('timeout', '0s')
    # Strip the trailing unit character ('s') before converting
    return float(raw[:-1])
Optional per - query timeout . If set this will limit the amount of time in which the query can be executed and waited for .
37
26
248,632
def _is_ready(self):
    """Poll the deferred query until success, failure, or timeout.

    Returns True on success; raises InternalError on failure and
    TimeoutError once finish_time has passed.
    """
    while not self.finish_time or time.time() < self.finish_time:
        status = self._poll_deferred()
        if status == 'success':
            return True
        if status == 'failed':
            raise couchbase.exceptions.InternalError("Failed exception")
        time.sleep(self.interval)
    raise couchbase.exceptions.TimeoutError("Deferred query timed out")
Return True if and only if final result has been received optionally blocking until this is the case or the timeout is exceeded .
97
24
248,633
def package_version(self):
    """Return the PEP 440 version, appending a dev segment when past a tag."""
    version = self.base_version
    if self.ncommits:
        # Local version identifier: .devN+<sha>
        version += '.dev{0}+{1}'.format(self.ncommits, self.sha)
    return version
Returns the well formed PEP - 440 version
53
9
248,634
def download_and_bootstrap(src, name, prereq=None):
    """Download a bootstrap script from *src* and run it, unless *prereq*
    (a Python snippet run in a subprocess) already succeeds."""
    if prereq:
        # Prerequisite passing means nothing needs installing
        if os.system('{0} -c "{1}"'.format(PY_EXE, prereq)) == 0:
            return
    ulp = urllib2.urlopen(src)
    with open(name, "wb") as fp:
        fp.write(ulp.read())
    rv = os.system("{0} {1}".format(PY_EXE, name))
    assert rv == 0
Download and install something if prerequisite fails
145
7
248,635
def _register_opt ( parser , * args , * * kwargs ) : try : # Flake8 3.x registration parser . add_option ( * args , * * kwargs ) except ( optparse . OptionError , TypeError ) : # Flake8 2.x registration parse_from_config = kwargs . pop ( 'parse_from_config' , False ) option = parser . add_option ( * args , * * kwargs ) if parse_from_config : parser . config_options . append ( option . get_opt_string ( ) . lstrip ( '-' ) )
Handler to register an option for both Flake8 3 . x and 2 . x .
132
17
248,636
def dict_to_hashable(d):
    """Return an immutable, hashable representation of dict *d*.

    Lists become tuples and nested dicts recurse, so any two dicts with
    equal content map to equal frozensets (and unequal content to
    unequal ones), usable as dict keys or set members.
    """
    # d.items() works identically on Python 2 and 3; six is not needed
    return frozenset(
        (k, tuple(v) if isinstance(v, list)
         else (dict_to_hashable(v) if isinstance(v, dict) else v))
        for k, v in d.items()
    )
Takes a dict and returns an immutable hashable version of that dict that can be used as a key in dicts or as a set value . Any two dicts passed in with the same content are guaranteed to return the same value . Any two dicts passed in with different content are guaranteed to return different values . Performs comparatively to repr .
67
70
248,637
def run(self, request):
    """Return documentation for one action (when action_name is given)
    or for every action on the server."""
    action_name = request.body.get('action_name')
    if action_name:
        return self._get_response_for_single_action(action_name)
    return self._get_response_for_all_actions()
Introspects all of the actions on the server and returns their documentation .
64
16
248,638
def _make_middleware_stack ( middleware , base ) : for ware in reversed ( middleware ) : base = ware ( base ) return base
Given a list of in - order middleware callables middleware and a base function base chains them together so each middleware is fed the function below and returns the top level ready to call .
32
39
248,639
def send_request(self, job_request, message_expiry_in_seconds=None):
    """Send a JobRequest through the request middleware stack and return
    the (locally incremented) request ID."""
    request_id = self.request_counter
    self.request_counter += 1
    meta = {}
    wrapper = self._make_middleware_stack(
        [m.request for m in self.middleware],
        self._base_send_request,
    )
    try:
        with self.metrics.timer('client.send.including_middleware',
                                resolution=TimerResolution.MICROSECONDS):
            wrapper(request_id, meta, job_request, message_expiry_in_seconds)
        return request_id
    finally:
        # Flush metrics whether or not the send succeeded
        self.metrics.commit()
Send a JobRequest and return a request ID .
149
10
248,640
def get_all_responses(self, receive_timeout_in_seconds=None):
    """Yield (request_id, response) pairs until the transport is drained."""
    wrapper = self._make_middleware_stack(
        [m.response for m in self.middleware],
        self._get_response,
    )
    try:
        while True:
            with self.metrics.timer('client.receive.including_middleware',
                                    resolution=TimerResolution.MICROSECONDS):
                request_id, response = wrapper(receive_timeout_in_seconds)
            # A None response signals there is nothing left to receive
            if response is None:
                break
            yield request_id, response
    finally:
        self.metrics.commit()
Receive all available responses from the transport as a generator .
130
12
248,641
def call_action(self, service_name, action, body=None, **kwargs):
    """Send a single-action job request and block for its result."""
    future = self.call_action_future(service_name, action, body, **kwargs)
    return future.result()
Build and send a single job request with one action .
51
11
248,642
def call_actions(self, service_name, actions, expansions=None,
                 raise_job_errors=True, raise_action_errors=True,
                 timeout=None, **kwargs):
    """Send one job request containing *actions* and block for its result."""
    future = self.call_actions_future(
        service_name, actions, expansions, raise_job_errors,
        raise_action_errors, timeout, **kwargs)
    return future.result()
Build and send a single job request with one or more actions .
85
13
248,643
def call_actions_parallel(self, service_name, actions, **kwargs):
    """Send one single-action job per action in parallel and block until
    every response has arrived."""
    future = self.call_actions_parallel_future(service_name, actions, **kwargs)
    return future.result()
Build and send multiple job requests to one service each job with one action to be executed in parallel and return once all responses have been received .
51
28
248,644
def call_jobs_parallel(self, jobs, expansions=None, raise_job_errors=True,
                       raise_action_errors=True, catch_transport_errors=False,
                       timeout=None, **kwargs):
    """Send multiple job requests in parallel and block for all responses."""
    future = self.call_jobs_parallel_future(
        jobs,
        expansions=expansions,
        raise_job_errors=raise_job_errors,
        raise_action_errors=raise_action_errors,
        catch_transport_errors=catch_transport_errors,
        timeout=timeout,
        **kwargs)
    return future.result()
Build and send multiple job requests to one or more services each with one or more actions to be executed in parallel and return once all responses have been received .
122
31
248,645
def send_request(self, service_name, actions, switches=None, correlation_id=None,
                 continue_on_error=False, context=None, control_extra=None,
                 message_expiry_in_seconds=None, suppress_response=False):
    """Assemble a JobRequest from the given pieces and hand it to the
    service's transport handler; returns the request ID."""
    control_extra = control_extra.copy() if control_extra else {}
    # Mirror the message expiry into the control timeout unless set explicitly
    if message_expiry_in_seconds and 'timeout' not in control_extra:
        control_extra['timeout'] = message_expiry_in_seconds
    handler = self._get_handler(service_name)
    control = self._make_control_header(
        continue_on_error=continue_on_error,
        control_extra=control_extra,
        suppress_response=suppress_response,
    )
    context = self._make_context_header(
        switches=switches,
        correlation_id=correlation_id,
        context_extra=context,
    )
    job_request = JobRequest(actions=actions, control=control, context=context or {})
    return handler.send_request(job_request, message_expiry_in_seconds)
Build and send a JobRequest and return a request ID .
243
12
248,646
def get_all_responses(self, service_name, receive_timeout_in_seconds=None):
    """Delegate to the service's handler and return its response generator."""
    handler = self._get_handler(service_name)
    return handler.get_all_responses(receive_timeout_in_seconds)
Receive all available responses from the service as a generator .
56
12
248,647
def get_reloader(main_module_name, watch_modules, signal_forks=False):
    """Factory for the appropriate reloader implementation.

    Prefers the inotify-based reloader when pyinotify is available,
    otherwise falls back to polling. Call main() on the result.
    """
    cls = _PyInotifyReloader if USE_PY_INOTIFY else _PollingReloader
    return cls(main_module_name, watch_modules, signal_forks)
Don't instantiate a reloader directly. Instead, call this method to get a reloader, and then call main on that reloader.
83
27
248,648
def ext_hook(self, code, data):
    """Decode one of our custom MessagePack extension types."""
    if code == self.EXT_DATETIME:
        # Big-endian signed 64-bit integer of microseconds since the epoch
        microseconds = self.STRUCT_DATETIME.unpack(data)[0]
        return datetime.datetime.utcfromtimestamp(microseconds / 1000000.0)
    if code == self.EXT_DATE:
        # Big-endian unsigned short plus two unsigned chars (y, m, d)
        return datetime.date(*self.STRUCT_DATE.unpack(data))
    if code == self.EXT_TIME:
        # Three unsigned chars (h, m, s) plus an unsigned 32-bit int
        return datetime.time(*self.STRUCT_TIME.unpack(data))
    if code == self.EXT_DECIMAL:
        # Length-prefixed UTF-8 string representation of the decimal
        obj_len = self.STRUCT_DECIMAL_LENGTH.unpack(data[:2])[0]
        obj_decoder = struct.Struct(str('!{}s'.format(obj_len)))
        return decimal.Decimal(obj_decoder.unpack(data[2:])[0].decode('utf-8'))
    if code == self.EXT_CURRINT:
        # 3-char ASCII currency code plus signed 64-bit minor-unit value
        code, minor_value = self.STRUCT_CURRINT.unpack(data)
        return currint.Amount.from_code_and_minor(code.decode('ascii'), minor_value)
    raise TypeError('Cannot decode unknown extension type {} from MessagePack'.format(code))
Decodes our custom extension types
410
6
248,649
def send_request_message(self, request_id, meta, body, _=None):
    """Handle a client request synchronously, in-thread.

    Message expiry is unsupported: the server processes the request
    immediately, before this method returns.
    """
    self._current_request = (request_id, meta, body)
    try:
        self.server.handle_next_request()
    finally:
        # Always clear the slot, even if handling raised
        self._current_request = None
Receives a request from the client and handles and dispatches it in-thread. message_expiry_in_seconds is not supported. Messages do not expire, as the server handles the request immediately in the same thread before this method returns. This method blocks until the server has completed handling the request.
61
63
248,650
def send_response_message(self, request_id, meta, body):
    """Queue the response for later retrieval by the client side."""
    self.response_messages.append((request_id, meta, body))
Add the response to the deque .
37
8
248,651
def StatusActionFactory(version, build=None, base_class=BaseStatusAction):  # noqa
    """Create a service-specific status action class carrying the service's
    version and optional build identifier."""
    attributes = {str('_version'): version, str('_build'): build}
    return type(str('StatusAction'), (base_class,), attributes)
A factory for creating a new status action class specific to a service .
64
14
248,652
def make_middleware_stack(middleware, base):
    """Wrap *base* with each middleware callable, first item outermost."""
    stack = base
    for factory in reversed(middleware):
        stack = factory(stack)
    return stack
Given a list of in - order middleware callable objects middleware and a base function base chains them together so each middleware is fed the function below and returns the top level ready to call .
31
40
248,653
def process_job(self, job_request):
    """Validate the job request, wrap it with job middleware, and execute it.

    Returns a JobResponse; a JobError becomes an error response and any
    other exception is delegated to handle_job_exception.
    """
    try:
        # Validate JobRequest message
        validation_errors = [
            Error(
                code=error.code,
                message=error.message,
                field=error.pointer,
            )
            for error in (JobRequestSchema.errors(job_request) or [])
        ]
        if validation_errors:
            raise JobError(errors=validation_errors)

        # Add the client object in case a middleware wishes to use it
        job_request['client'] = self.make_client(job_request['context'])

        # Add the async event loop in case a middleware wishes to use it
        job_request['async_event_loop'] = self._async_event_loop
        if hasattr(self, '_async_event_loop_thread'):
            job_request['run_coroutine'] = self._async_event_loop_thread.run_coroutine
        else:
            job_request['run_coroutine'] = None

        # Build set of middleware + job handler, then run job
        wrapper = self.make_middleware_stack(
            [m.job for m in self.middleware],
            self.execute_job,
        )
        job_response = wrapper(job_request)
        # Propagate the correlation ID from request to response
        if 'correlation_id' in job_request['context']:
            job_response.context['correlation_id'] = job_request['context']['correlation_id']
    except JobError as e:
        self.metrics.counter('server.error.job_error').increment()
        job_response = JobResponse(
            errors=e.errors,
        )
    except Exception as e:
        # Send an error response if no middleware caught this.
        # Formatting the error might itself error, so try to catch that
        self.metrics.counter('server.error.unhandled_error').increment()
        return self.handle_job_exception(e)

    return job_response
Validate execute and run the job request wrapping it with any applicable job middleware .
428
17
248,654
def handle_job_exception(self, exception, variables=None):
    """Build a last-ditch JobResponse describing an unhandled exception.

    Formats the error and traceback defensively (formatting itself may
    fail) and optionally includes repr()s of local *variables*.
    """
    # Get the error and traceback if we can
    # noinspection PyBroadException
    try:
        error_str, traceback_str = six.text_type(exception), traceback.format_exc()
    except Exception:
        self.metrics.counter('server.error.error_formatting_failure').increment()
        error_str, traceback_str = 'Error formatting error', traceback.format_exc()

    # Log what happened
    self.logger.exception(exception)
    if not isinstance(traceback_str, six.text_type):
        try:
            # Try to decode a bytes traceback as UTF-8 (presumably Python 2) — TODO confirm
            traceback_str = traceback_str.decode('utf-8')
        except UnicodeDecodeError:
            traceback_str = 'UnicodeDecodeError: Traceback could not be decoded'

    # Make a bare bones job response
    error_dict = {
        'code': ERROR_CODE_SERVER_ERROR,
        'message': 'Internal server error: %s' % error_str,
        'traceback': traceback_str,
    }
    if variables is not None:
        # noinspection PyBroadException
        try:
            error_dict['variables'] = {key: repr(value) for key, value in variables.items()}
        except Exception:
            self.metrics.counter('server.error.variable_formatting_failure').increment()
            error_dict['variables'] = 'Error formatting variables'

    return JobResponse(errors=[error_dict])
Makes and returns a last - ditch error response .
342
11
248,655
def execute_job(self, job_request):
    """Run each action in the job, collecting one ActionResponse per action
    into a JobResponse; stops early on error unless continue_on_error."""
    # Run the Job's Actions
    job_response = JobResponse()
    job_switches = RequestSwitchSet(job_request['context']['switches'])
    for i, raw_action_request in enumerate(job_request['actions']):
        action_request = EnrichedActionRequest(
            action=raw_action_request['action'],
            body=raw_action_request.get('body', None),
            switches=job_switches,
            context=job_request['context'],
            control=job_request['control'],
            client=job_request['client'],
            async_event_loop=job_request['async_event_loop'],
            run_coroutine=job_request['run_coroutine'],
        )
        action_in_class_map = action_request.action in self.action_class_map
        if action_in_class_map or action_request.action in ('status', 'introspect'):
            # Get action to run
            if action_in_class_map:
                action = self.action_class_map[action_request.action](self.settings)
            elif action_request.action == 'introspect':
                from pysoa.server.action.introspection import IntrospectionAction
                action = IntrospectionAction(server=self)
            else:
                # Lazily build and cache the default status action class
                if not self._default_status_action_class:
                    from pysoa.server.action.status import make_default_status_action_class
                    self._default_status_action_class = make_default_status_action_class(self.__class__)
                action = self._default_status_action_class(self.settings)
            # Wrap it in middleware
            wrapper = self.make_middleware_stack(
                [m.action for m in self.middleware],
                action,
            )
            # Execute the middleware stack
            try:
                action_response = wrapper(action_request)
            except ActionError as e:
                # Error: an error was thrown while running the Action (or Action middleware)
                action_response = ActionResponse(
                    action=action_request.action,
                    errors=e.errors,
                )
        else:
            # Error: Action not found.
            action_response = ActionResponse(
                action=action_request.action,
                errors=[Error(
                    code=ERROR_CODE_UNKNOWN,
                    message='The action "{}" was not found on this server.'.format(action_request.action),
                    field='action',
                )],
            )
        job_response.actions.append(action_response)
        if (action_response.errors and
                not job_request['control'].get('continue_on_error', False)):
            # Quit running Actions if an error occurred and continue_on_error is False
            break
    return job_response
Processes and runs the action requests contained in the job and returns a JobResponse .
625
17
248,656
def handle_shutdown_signal(self, *_):
    """SIGINT/SIGTERM handler: request graceful shutdown; a second signal
    forces an immediate exit."""
    if self.shutting_down:
        self.logger.warning('Received double interrupt, forcing shutdown')
        sys.exit(1)
    self.logger.warning('Received interrupt, initiating shutdown')
    self.shutting_down = True
Handles the reception of a shutdown signal .
68
9
248,657
def harakiri(self, *_):
    """SIGALRM handler: begin graceful shutdown when a request stalls;
    force-exit if a previous shutdown attempt already timed out."""
    grace = self.settings['harakiri']['shutdown_grace']
    if self.shutting_down:
        self.logger.warning('Graceful shutdown failed after {}s. Exiting now!'.format(grace))
        sys.exit(1)
    else:
        self.logger.warning('No activity during {}s, triggering harakiri with grace {}s'.format(
            self.settings['harakiri']['timeout'],
            grace,
        ))
        self.shutting_down = True
        # Give the graceful shutdown a deadline of its own
        signal.alarm(grace)
Handles the reception of a timeout signal indicating that a request has been processing for too long as defined by the Harakiri settings .
164
27
248,658
def run(self):
    """Run the server loop: set up, install signal handlers, and process
    requests until shutdown is requested or a fatal error occurs."""
    self.logger.info(
        'Service "{service}" server starting up, pysoa version {pysoa}, listening on transport {transport}.'.format(
            service=self.service_name,
            pysoa=pysoa.version.__version__,
            transport=self.transport,
        )
    )
    self.setup()
    self.metrics.commit()
    if self._async_event_loop_thread:
        self._async_event_loop_thread.start()
    self._create_heartbeat_file()
    signal.signal(signal.SIGINT, self.handle_shutdown_signal)
    signal.signal(signal.SIGTERM, self.handle_shutdown_signal)
    signal.signal(signal.SIGALRM, self.harakiri)
    # noinspection PyBroadException
    try:
        while not self.shutting_down:
            # reset harakiri timeout
            signal.alarm(self.settings['harakiri']['timeout'])
            # Get, process, and execute the next JobRequest
            self.handle_next_request()
            self.metrics.commit()
    except MessageReceiveError:
        self.logger.exception('Error receiving message from transport; shutting down')
    except Exception:
        self.metrics.counter('server.error.unknown').increment()
        self.logger.exception('Unhandled server error; shutting down')
    finally:
        # Always flush metrics and tear down resources on the way out
        self.metrics.commit()
        self.logger.info('Server shutting down')
        if self._async_event_loop_thread:
            self._async_event_loop_thread.join()
        self._close_django_caches(shutdown=True)
        self._delete_heartbeat_file()
        self.logger.info('Server shutdown complete')
Starts the server run loop and returns after the server shuts down due to a shutdown - request Harakiri signal or unhandled exception . See the documentation for Server . main for full details on the chain of Server method calls .
394
46
248,659
def emit(self, record):
    """Emit a record, carefully respecting the connection's maximum length.

    Short messages are sent in one packet. Longer messages are either
    truncated (OVERFLOW_BEHAVIOR_TRUNCATE) or chunked into multiple
    packets, each repeating the priority/prefix/preamble and carrying
    "(cont'd)" continuation markers, so no data is lost by exceeding
    the MTU of the connection (self.maximum_length).
    """
    # noinspection PyBroadException
    try:
        formatted_message = self.format(record)
        encoded_message = formatted_message.encode('utf-8')

        prefix = suffix = b''
        if getattr(self, 'ident', False):
            prefix = self.ident.encode('utf-8') if isinstance(self.ident, six.text_type) else self.ident
        if getattr(self, 'append_nul', True):
            suffix = '\000'.encode('utf-8')

        priority = '<{:d}>'.format(
            self.encodePriority(self.facility, self.mapPriority(record.levelname))
        ).encode('utf-8')

        message_length = len(encoded_message)
        # Budget available for the message body after fixed framing bytes.
        message_length_limit = self.maximum_length - len(prefix) - len(suffix) - len(priority)

        if message_length < message_length_limit:
            parts = [priority + prefix + encoded_message + suffix]
        elif self.overflow == self.OVERFLOW_BEHAVIOR_TRUNCATE:
            truncated_message, _ = self._cleanly_slice_encoded_string(encoded_message, message_length_limit)
            parts = [priority + prefix + truncated_message + suffix]
        else:
            # This can't work perfectly, but it's pretty unusual for a message to go before machine-parseable parts
            # in the formatted record. So we split the record on the message part. Everything before the split
            # becomes the preamble and gets repeated every packet. Everything after the split gets chunked. There's
            # no reason to match on more than the first 40 characters of the message--the chances of that matching
            # the wrong part of the record are astronomical.
            try:
                index = formatted_message.index(record.getMessage()[:40])
                start_of_message, to_chunk = formatted_message[:index], formatted_message[index:]
            except (TypeError, ValueError):
                # We can't locate the message in the formatted record? That's unfortunate. Let's make something up.
                start_of_message, to_chunk = '{} '.format(formatted_message[:30]), formatted_message[30:]
            start_of_message = start_of_message.encode('utf-8')
            to_chunk = to_chunk.encode('utf-8')

            # 12 is the length of "... (cont'd)" in bytes
            chunk_length_limit = message_length_limit - len(start_of_message) - 12

            i = 1
            parts = []
            remaining_message = to_chunk
            while remaining_message:
                message_id = b''
                subtractor = 0
                if i > 1:
                    # If this is not the first message, we determine message # so that we can subtract that length
                    message_id = '{}'.format(i).encode('utf-8')
                    # 14 is the length of "(cont'd #) ..." in bytes
                    subtractor = 14 + len(message_id)
                chunk, remaining_message = self._cleanly_slice_encoded_string(
                    remaining_message,
                    chunk_length_limit - subtractor,
                )
                if i > 1:
                    # If this is not the first message, we prepend the chunk to indicate continuation
                    chunk = b"(cont'd #" + message_id + b') ...' + chunk
                i += 1
                if remaining_message:
                    # If this is not the last message, we append the chunk to indicate continuation
                    chunk = chunk + b"... (cont'd)"
                parts.append(priority + prefix + start_of_message + chunk + suffix)

        self._send(parts)
    except Exception:
        self.handleError(record)
Emits a record . The record is sent carefully according to the following rules to ensure that data is not lost by exceeding the MTU of the connection .
824
31
248,660
def add_expansion(self, expansion_node):
    """Add a child expansion node to this node's expansions.

    If a child with the same name already exists, the new node's own
    children are merged into that existing child instead of replacing it.
    """
    current = self.get_expansion(expansion_node.name)
    if not current:
        # No name collision: store the node directly.
        self._expansions[expansion_node.name] = expansion_node
    else:
        # Same name already present: merge grandchildren recursively.
        for grandchild in expansion_node.expansions:
            current.add_expansion(grandchild)
Add a child expansion node to the type node's expansions.
118
12
248,661
def find_objects(self, obj):
    """Recursively collect every dict in ``obj`` whose '_type' equals self.type."""
    matches = []
    if isinstance(obj, dict):
        # obj is a dictionary, so it is a potential match.
        if obj.get('_type') == self.type:
            # The dict itself matches; do not descend into it.
            matches.append(obj)
        else:
            # Not a match: check each value of the dictionary instead.
            for value in six.itervalues(obj):
                matches.extend(self.find_objects(value))
    elif isinstance(obj, list):
        # Check each element of the list for matches.
        for item in obj:
            matches.extend(self.find_objects(item))
    return matches
Find all objects in obj that match the type of the type node .
164
14
248,662
def to_dict(self):
    """Return this tree node as {type: [expansion strings]}."""
    strings = []
    for child in self.expansions:
        strings += child.to_strings()
    return {self.type: strings}
Convert the tree node to its dictionary representation .
46
10
248,663
def to_strings(self):
    """Return the dotted expansion strings rooted at this node's name.

    A leaf node yields just its own name; an interior node yields
    '<name>.<child string>' for every string of every child.
    """
    if not self.expansions:
        return [self.name]
    strings = []
    for child in self.expansions:
        for suffix in child.to_strings():
            strings.append('{}.{}'.format(self.name, suffix))
    return strings
Convert the expansion node to a list of expansion strings .
67
12
248,664
def dict_to_trees(self, expansion_dict):
    """Convert an expansion dictionary to a list of expansion trees.

    ``expansion_dict`` maps a node type to a list of dotted expansion
    strings (e.g. 'author.address'); each dotted segment becomes an
    ExpansionNode configured from self.type_expansions and
    self.type_routes. Raises ValueError when a configured expansion's
    destination_field equals its source_field.
    """
    trees = []
    for node_type, expansion_list in six.iteritems(expansion_dict):
        type_node = TypeNode(node_type=node_type)
        for expansion_string in expansion_list:
            # Walk/extend the tree one dotted segment at a time, reusing
            # existing children so shared prefixes are built only once.
            expansion_node = type_node
            for expansion_name in expansion_string.split('.'):
                child_expansion_node = expansion_node.get_expansion(expansion_name)
                if not child_expansion_node:
                    type_expansion = self.type_expansions[expansion_node.type][expansion_name]
                    type_route = self.type_routes[type_expansion['route']]
                    if type_expansion['destination_field'] == type_expansion['source_field']:
                        raise ValueError(
                            'Expansion configuration destination_field error: '
                            'destination_field can not have the same name as the source_field: '
                            '{}'.format(type_expansion['source_field'])
                        )
                    child_expansion_node = ExpansionNode(
                        node_type=type_expansion['type'],
                        name=expansion_name,
                        source_field=type_expansion['source_field'],
                        destination_field=type_expansion['destination_field'],
                        service=type_route['service'],
                        action=type_route['action'],
                        request_field=type_route['request_field'],
                        response_field=type_route['response_field'],
                        raise_action_errors=type_expansion.get('raise_action_errors', False),
                    )
                    expansion_node.add_expansion(child_expansion_node)
                expansion_node = child_expansion_node
        trees.append(type_node)
    return trees
Convert an expansion dictionary to a list of expansion trees .
413
12
248,665
def trees_to_dict(trees_list):
    """Merge the to_dict() of every tree into one expansion dictionary."""
    merged = {}
    for tree in trees_list:
        for node_type, strings in tree.to_dict().items():
            merged[node_type] = strings
    return merged
Convert a list of TreeNode objects to an expansion dictionary.
37
13
248,666
def _get_service_names ( self ) : master_info = None connection_errors = [ ] for sentinel in self . _sentinel . sentinels : # Unfortunately, redis.sentinel.Sentinel does not support sentinel_masters, so we have to step # through all of its connections manually try : master_info = sentinel . sentinel_masters ( ) break except ( redis . ConnectionError , redis . TimeoutError ) as e : connection_errors . append ( 'Failed to connect to {} due to error: "{}".' . format ( sentinel , e ) ) continue if master_info is None : raise redis . ConnectionError ( 'Could not get master info from Sentinel\n{}:' . format ( '\n' . join ( connection_errors ) ) ) return list ( master_info . keys ( ) )
Get a list of service names from Sentinel. Tries Sentinel hosts until one succeeds; if none succeeds, raises a ConnectionError.
186
25
248,667
def timid_relpath(arg):
    """Convert ``arg`` to a relative path, but only when that shortens it."""
    # TODO-TEST: unit tests
    from os.path import isabs, relpath, sep
    if not isabs(arg):
        return arg
    candidate = relpath(arg)
    # Only prefer the relative form when it has strictly fewer components.
    if candidate.count(sep) + 1 < arg.count(sep):
        return candidate
    return arg
convert an argument to a relative path carefully
65
9
248,668
def ensure_virtualenv(args, return_values):
    """Ensure we have a valid virtualenv at the path named in ``args``.

    Monkeypatches virtualenv.adjust_options (a documented extension point)
    to munge options, remove any invalidated pre-existing venv, or exit
    early when the existing venv is still valid; then invokes
    virtualenv.main() with ``args`` as its argv. Stores the venv path in
    ``return_values.venv_path`` as a side effect.
    """
    def adjust_options(options, args):
        # TODO-TEST: proper error message with no arguments
        venv_path = return_values.venv_path = args[0]
        if venv_path == DEFAULT_VIRTUALENV_PATH or options.prompt == '<dirname>':
            from os.path import abspath, basename, dirname
            options.prompt = '(%s)' % basename(dirname(abspath(venv_path)))
        # end of option munging.

        # there are two python interpreters involved here:
        # 1) the interpreter we're instructing virtualenv to copy
        if options.python is None:
            source_python = None
        else:
            source_python = virtualenv.resolve_interpreter(options.python)
        # 2) the interpreter virtualenv will create
        destination_python = venv_python(venv_path)
        if exists(destination_python):
            reason = invalid_virtualenv_reason(venv_path, source_python, destination_python, options)
            if reason:
                info('Removing invalidated virtualenv. (%s)' % reason)
                run(('rm', '-rf', venv_path))
            else:
                info('Keeping valid virtualenv from previous run.')
                raise SystemExit(0)  # looks good! we're done here.

    # this is actually a documented extension point:
    # http://virtualenv.readthedocs.org/en/latest/reference.html#adjust_options
    import virtualenv
    virtualenv.adjust_options = adjust_options

    from sys import argv
    argv[:] = ('virtualenv',) + args
    info(colorize(argv))
    raise_on_failure(virtualenv.main)
    # There might not be a venv_path if doing something like "venv= --version"
    # and not actually asking virtualenv to make a venv.
    if return_values.venv_path is not None:
        run(('rm', '-rf', join(return_values.venv_path, 'local')))
Ensure we have a valid virtualenv .
467
9
248,669
def touch(filename, timestamp):
    """Set the atime/mtime of ``filename``; a None timestamp means "now"."""
    from os import utime
    times = None if timestamp is None else (timestamp, timestamp)  # (atime, mtime)
    utime(filename, times)
set the mtime of a file
39
7
248,670
def pip_faster(venv_path, pip_command, install, bootstrap_deps):
    """Bootstrap pip-faster inside the venv, then run the install command."""
    # Activate the target virtualenv in-process.
    execfile_(venv_executable(venv_path, 'activate_this.py'))

    # disable a useless warning
    # FIXME: ensure a "true SSLContext" is available
    from os import environ
    environ['PIP_DISABLE_PIP_VERSION_CHECK'] = '1'

    # The bootstrap always runs: an executable being present doesn't imply
    # the right version, and pip validates versions quickly anyway.
    run(('pip', 'install') + bootstrap_deps)
    run(pip_command + install)
install and run pip - faster
166
6
248,671
def raise_on_failure(mainfunc):
    """Call ``mainfunc`` and raise if, and only if, it fails."""
    try:
        failures = mainfunc()
    except CalledProcessError as error:
        exit(error.returncode)
    except SystemExit as error:
        # Swallow a zero/None exit; re-raise anything meaningful.
        if error.code:
            raise
    except KeyboardInterrupt:  # I don't plan to test-cover this. :pragma:nocover:
        exit(1)
    else:
        if failures:
            exit(failures)
raise if and only if mainfunc fails
82
8
248,672
def cache_installed_wheels(index_url, installed_packages):
    """Store every cacheable installed wheel back into the cache.

    After installation, pip tells us what it installed and from where;
    each cacheable package's wheel is saved under ``index_url``.
    """
    for package in installed_packages:
        if _can_be_cached(package):
            _store_wheel_in_cache(package.link.path, index_url)
After installation pip tells us what it installed and from where .
63
12
248,673
def pip(args):
    """Run pip in-process, echoing the colorized command line first."""
    from sys import stdout
    command = ('pip',) + args
    stdout.write(colorize(command))
    stdout.write('\n')
    stdout.flush()
    return pipmodule._internal.main(list(args))
Run pip in - process .
60
6
248,674
def dist_to_req(dist):
    """Make a pip FrozenRequirement from a pkg_resources distribution object.

    Temporarily normalizes the casing/dashes in the requirement name (via
    ``dist.key``) while building the FrozenRequirement, then restores the
    original ``project_name``.
    """
    try:  # :pragma:nocover: (pip>=10)
        from pip._internal.operations.freeze import FrozenRequirement
    except ImportError:  # :pragma:nocover: (pip<10)
        from pip import FrozenRequirement

    # normalize the casing, dashes in the req name
    orig_name, dist.project_name = dist.project_name, dist.key
    try:
        return FrozenRequirement.from_dist(dist, [])
    finally:
        # Bug fix: restore the original name even if from_dist raises,
        # so a failure here no longer leaves the dist mutated.
        dist.project_name = orig_name
Make a pip . FrozenRequirement from a pkg_resources distribution object
135
15
248,675
def req_cycle(req):
    """Return True if this requirement's ``comes_from`` chain revisits a name."""
    req_class = req.__class__
    seen = {req.name}
    node = req.comes_from
    # Walk up the chain of parent requirements, watching for repeats.
    while isinstance(node, req_class):
        if node.name in seen:
            return True
        seen.add(node.name)
        node = node.comes_from
    return False
is this requirement cyclic?
65
6
248,676
def pretty_req(req):
    """Return a more readable copy of a pip requirement.

    The copy drops its ``link`` and ``satisfied_by`` attributes, at the
    expense of removing some of the original's data.
    """
    from copy import copy
    readable = copy(req)
    readable.link = None
    readable.satisfied_by = None
    return readable
return a copy of a pip requirement that is a bit more readable at the expense of removing some of its data
32
22
248,677
def trace_requirements(requirements):
    """Given an iterable of pip InstallRequirements, return the set of
    required packages, given their transitive requirements.

    Performs a breadth-first traversal of the dependency graph against a
    fresh working set; version conflicts are collected and raised as a
    single InstallationError after the traversal completes.
    """
    requirements = tuple(pretty_req(r) for r in requirements)
    working_set = fresh_working_set()

    # breadth-first traversal:
    from collections import deque
    queue = deque(requirements)
    # Track what has already been enqueued to avoid re-processing.
    queued = {_package_req_to_pkg_resources_req(req.req) for req in queue}
    errors = []
    result = []
    while queue:
        req = queue.popleft()
        logger.debug('tracing: %s', req)
        try:
            dist = working_set.find_normalized(_package_req_to_pkg_resources_req(req.req))
        except pkg_resources.VersionConflict as conflict:
            # Record the conflict but keep tracing with the conflicting dist.
            dist = conflict.args[0]
            errors.append('Error: version conflict: {} ({}) <-> {}'.format(dist, timid_relpath(dist.location), req))
        assert dist is not None, 'Should be unreachable in pip8+'
        result.append(dist_to_req(dist))

        # TODO: pip does no validation of extras. should we?
        extras = [extra for extra in req.extras if extra in dist.extras]
        for sub_req in sorted(dist.requires(extras=extras), key=lambda req: req.key):
            sub_req = InstallRequirement(sub_req, req)
            if req_cycle(sub_req):
                logger.warning('Circular dependency! %s', sub_req)
                continue
            elif sub_req.req in queued:
                logger.debug('already queued: %s', sub_req)
                continue
            else:
                logger.debug('adding sub-requirement %s', sub_req)
                queue.append(sub_req)
                queued.add(sub_req.req)
    if errors:
        raise InstallationError('\n'.join(errors))
    return result
given an iterable of pip InstallRequirements return the set of required packages given their transitive requirements .
416
20
248,678
def patch(attrs, updates):
    """Apply (attr, value) pairs from ``updates`` to ``attrs``.

    Returns a dict of the original values, suitable for undoing the patch.
    """
    previous = {}
    for attr, new_value in updates:
        previous[attr] = attrs[attr]
        attrs[attr] = new_value
    return previous
Perform a set of updates to a attribute dictionary return the original values .
43
15
248,679
def patched(attrs, updates):
    """A context in which ``attrs`` temporarily holds the values in ``updates``.

    Yields the dict of original values; restores them on exit, even on error.
    """
    previous = patch(attrs, updates.items())
    try:
        yield previous
    finally:
        # Undo every value we clobbered.
        patch(attrs, previous.items())
A context in which some attributes temporarily have a modified value .
39
12
248,680
def pipfaster_packagefinder():
    """Patch pip's base command module to use FasterPackageFinder, which
    provides a short-circuited search when the requirement is pinned and
    appears on disk."""
    # A poor man's dependency injection: monkeypatch :(
    try:  # :pragma:nocover: pip>=18.1
        from pip._internal.cli import base_command
    except ImportError:  # :pragma:nocover: pip<18.1
        from pip._internal import basecommand as base_command
    return patched(vars(base_command), {'PackageFinder': FasterPackageFinder})
Provide a short - circuited search when the requirement is pinned and appears on disk .
106
19
248,681
def pipfaster_download_cacher(index_urls):
    """Patch pip's _download_http_url so downloaded files land in our cache.

    Vanilla pip stores a cache of the http session in its cache, not the
    wheel files themselves; we intercept the download and save those files.
    """
    from pip._internal import download
    original = download._download_http_url
    replacement = get_patched_download_http_url(original, index_urls)
    return patched(vars(download), {'_download_http_url': replacement})
vanilla pip stores a cache of the http session in its cache and not the wheel files . We intercept the download and save those files into our cache
79
30
248,682
def run(self, options, args):
    """Run the install command with download caching and optional pruning.

    Wraps the parent install run in pipfaster_download_cacher, caches any
    wheels installed from the primary index, traces transitive
    requirements, and (with --prune) uninstalls previously-installed
    packages that are no longer required.
    """
    if options.prune:
        previously_installed = pip_get_installed()

    index_urls = [options.index_url] + options.extra_index_urls
    with pipfaster_download_cacher(index_urls):
        requirement_set = super(FasterInstallCommand, self).run(
            options, args,
        )

    required = requirement_set.requirements.values()

    # With extra_index_urls we don't know where the wheel is from
    if not options.extra_index_urls:
        cache_installed_wheels(options.index_url, requirement_set.successfully_downloaded)

    if not options.ignore_dependencies:
        # transitive requirements, previously installed, are also required
        # this has a side-effect of finding any missing / conflicting requirements
        required = trace_requirements(required)

    if not options.prune:
        return requirement_set

    extraneous = (
        reqnames(previously_installed) -
        reqnames(required) -
        # the stage1 bootstrap packages
        reqnames(trace_requirements([install_req_from_line('venv-update')])) -
        # See #186
        frozenset(('pkg-resources',))
    )

    if extraneous:
        extraneous = sorted(extraneous)
        pip(('uninstall', '--yes') + tuple(extraneous))
update install options with caching values
302
6
248,683
def setEncoder(self, encoder):
    """Set the client's encoder.

    ``encoder`` should be an instance of a json.JSONEncoder class; a
    falsy value installs a default json.JSONEncoder.
    """
    self._encoder = encoder if encoder else json.JSONEncoder()
    self._encode = self._encoder.encode
Sets the client's encoder. `encoder` should be an instance of a json.JSONEncoder class.
52
21
248,684
def setDecoder(self, decoder):
    """Set the client's decoder.

    ``decoder`` should be an instance of a json.JSONDecoder class; a
    falsy value installs a default json.JSONDecoder.
    """
    self._decoder = decoder if decoder else json.JSONDecoder()
    self._decode = self._decoder.decode
Sets the client's decoder. `decoder` should be an instance of a json.JSONDecoder class.
52
21
248,685
def jsondel(self, name, path=Path.rootPath()):
    """Delete the JSON value stored at key ``name`` under ``path``."""
    resolved_path = str_path(path)
    return self.execute_command('JSON.DEL', name, resolved_path)
Deletes the JSON value stored at key name under path
42
11
248,686
def jsonget(self, name, *args):
    """Get the object stored as a JSON value at key ``name``.

    ``args`` is zero or more paths; defaults to the root path. Returns
    None when the key does not exist.
    """
    pieces = [name]
    if not args:
        pieces.append(Path.rootPath())
    else:
        pieces.extend(str_path(p) for p in args)
    # A missing key comes back as None, which the JSONDecoder cannot
    # decode and reports as a TypeError.
    try:
        return self.execute_command('JSON.GET', *pieces)
    except TypeError:
        return None
Get the object stored as a JSON value at key name args is zero or more paths and defaults to root path
105
22
248,687
def jsonmget(self, path, *args):
    """Get the JSON values under ``path`` from each of the keys in ``args``."""
    # The path goes last on the wire, after all the key names.
    pieces = list(args)
    pieces.append(str_path(path))
    return self.execute_command('JSON.MGET', *pieces)
Gets the objects stored as a JSON values under path from keys args
51
14
248,688
def jsonset(self, name, path, obj, nx=False, xx=False):
    """Set the JSON value at key ``name`` under ``path`` to ``obj``.

    ``nx``: if True, only set the value if it does not already exist.
    ``xx``: if True, only set the value if it already exists.

    Raises ValueError when both ``nx`` and ``xx`` are given, since they
    are mutually exclusive.
    """
    # Bug fix: validate the existential modifiers up front (fail fast,
    # before encoding) and raise ValueError rather than a bare Exception.
    if nx and xx:
        raise ValueError('nx and xx are mutually exclusive: use one, the '
                         'other or neither - but not both')
    pieces = [name, str_path(path), self._encode(obj)]
    if nx:
        pieces.append('NX')
    elif xx:
        pieces.append('XX')
    return self.execute_command('JSON.SET', *pieces)
Set the JSON value at key `name` under `path` to `obj`. `nx`: if set to True, set the value only if it does not exist. `xx`: if set to True, set the value only if it exists.
123
37
248,689
def jsontype(self, name, path=Path.rootPath()):
    """Get the type of the JSON value under ``path`` at key ``name``."""
    resolved_path = str_path(path)
    return self.execute_command('JSON.TYPE', name, resolved_path)
Gets the type of the JSON value under path from key name
41
13
248,690
def jsonstrappend(self, name, string, path=Path.rootPath()):
    """Append ``string`` to the string JSON value under ``path`` at key ``name``."""
    pieces = [name, str_path(path), self._encode(string)]
    return self.execute_command('JSON.STRAPPEND', *pieces)
Appends to the string JSON value under path at key name the provided string
55
15
248,691
def jsonstrlen(self, name, path=Path.rootPath()):
    """Return the length of the string JSON value under ``path`` at key ``name``."""
    resolved_path = str_path(path)
    return self.execute_command('JSON.STRLEN', name, resolved_path)
Returns the length of the string JSON value under path at key name
43
13
248,692
def jsonarrappend(self, name, path=Path.rootPath(), *args):
    """Append the objects in ``args`` to the array under ``path`` at key ``name``."""
    pieces = [name, str_path(path)]
    pieces.extend(self._encode(obj) for obj in args)
    return self.execute_command('JSON.ARRAPPEND', *pieces)
Appends the objects args to the array under the path in key name
72
14
248,693
def jsonarrindex(self, name, path, scalar, start=0, stop=-1):
    """Return the index of ``scalar`` in the JSON array under ``path`` at
    key ``name``. The search can be limited using the optional inclusive
    ``start`` and exclusive ``stop`` indices."""
    encoded_scalar = self._encode(scalar)
    return self.execute_command('JSON.ARRINDEX', name, str_path(path), encoded_scalar, start, stop)
Returns the index of scalar in the JSON array under path at key name . The search can be limited using the optional inclusive start and exclusive stop indices .
62
31
248,694
def jsonarrinsert(self, name, path, index, *args):
    """Insert the objects in ``args`` into the array under ``path`` at key
    ``name``, at position ``index``."""
    pieces = [name, str_path(path), index]
    pieces.extend(self._encode(obj) for obj in args)
    return self.execute_command('JSON.ARRINSERT', *pieces)
Inserts the objects args to the array at index index under the path in key name
67
17
248,695
def jsonarrlen(self, name, path=Path.rootPath()):
    """Return the length of the array JSON value under ``path`` at key ``name``."""
    resolved_path = str_path(path)
    return self.execute_command('JSON.ARRLEN', name, resolved_path)
Returns the length of the array JSON value under path at key name
43
13
248,696
def jsonarrpop(self, name, path=Path.rootPath(), index=-1):
    """Pop the element at ``index`` in the array JSON value under ``path``
    at key ``name``; defaults to the last element."""
    resolved_path = str_path(path)
    return self.execute_command('JSON.ARRPOP', name, resolved_path, index)
Pops the element at index in the array JSON value under path at key name
50
16
248,697
def jsonarrtrim(self, name, path, start, stop):
    """Trim the array JSON value under ``path`` at key ``name`` to the
    inclusive range given by ``start`` and ``stop``."""
    resolved_path = str_path(path)
    return self.execute_command('JSON.ARRTRIM', name, resolved_path, start, stop)
Trim the array JSON value under path at key name to the inclusive range given by start and stop
45
20
248,698
def jsonobjkeys(self, name, path=Path.rootPath()):
    """Return the key names in the dictionary JSON value under ``path`` at key ``name``."""
    resolved_path = str_path(path)
    return self.execute_command('JSON.OBJKEYS', name, resolved_path)
Returns the key names in the dictionary JSON value under path at key name
44
14
248,699
def jsonobjlen(self, name, path=Path.rootPath()):
    """Return the number of keys in the dictionary JSON value under ``path`` at key ``name``."""
    resolved_path = str_path(path)
    return self.execute_command('JSON.OBJLEN', name, resolved_path)
Returns the length of the dictionary JSON value under path at key name
44
13