idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
24,800
def find_outliers(group, delta):
    """Given a list of values, find those that are apart from the rest by
    more than ``delta``.

    The values are sorted; the first adjacent gap larger than ``delta``
    splits them into two groups, and the smaller side of the gap is
    treated as the outliers.  Returns the original indexes of the
    outliers, or an empty list when no such gap exists.
    """
    ordered = sorted(enumerate(group), key=lambda pair: pair[1])
    n = len(ordered)
    for gap_at in range(n - 1):
        if ordered[gap_at + 1][1] - ordered[gap_at][1] > delta:
            # The smaller side of the gap is the outlier group.
            if gap_at < n - gap_at:
                lo, hi = 0, gap_at + 1
            else:
                lo, hi = gap_at + 1, n
            return [ordered[k][0] for k in range(lo, hi)]
    return []
given a list of values find those that are apart from the rest by delta . the indexes for the outliers is returned if any .
24,801
def get_matching(content, match):
    """Keep only the lines of *content* that contain *match*.

    An empty *match* returns the content unchanged.
    """
    if match == "":
        return content
    kept = (line for line in content.split("\n") if match in line)
    return "\n".join(kept)
Filters out lines that don't include match.
24,802
def load_remote_db(self):
    """Load the remote S3 DB file into /tmp.

    Skips the download when the md5 of the existing local copy matches
    the remote object's ETag (signalled by a 304 ClientError), and
    rewrites settings so NAME points at the /tmp copy afterwards.
    NOTE(review): assumes the S3 ETag is the plain md5 of the object
    (true only for non-multipart uploads) — confirm.
    """
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version),
    )
    # Only fetch when NAME has not yet been redirected to /tmp.
    if '/tmp/' not in self.settings_dict['NAME']:
        try:
            etag = ''
            if os.path.isfile('/tmp/' + self.settings_dict['NAME']):
                # md5 of the local copy doubles as the conditional-GET ETag.
                m = hashlib.md5()
                with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
                    m.update(f.read())
                etag = m.hexdigest()
            obj = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['NAME'])
            # Conditional GET: raises ClientError 304 when unchanged.
            obj_bytes = obj.get(IfNoneMatch=etag)["Body"]
            with open('/tmp/' + self.settings_dict['NAME'], 'wb') as f:
                f.write(obj_bytes.read())
            # Re-hash the freshly written file to detect later changes.
            m = hashlib.md5()
            with open('/tmp/' + self.settings_dict['NAME'], 'rb') as f:
                m.update(f.read())
            self.db_hash = m.hexdigest()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "304":
                logging.debug("ETag matches md5 of local copy, using local copy of DB!")
                self.db_hash = etag
            else:
                logging.debug("Couldn't load remote DB object.")
        except Exception as e:
            # Best-effort: fall through to creating an empty local DB.
            logging.debug(e)
    if '/tmp/' not in self.settings_dict['NAME']:
        # Remember the remote key, then point NAME at the local copy.
        self.settings_dict['REMOTE_NAME'] = self.settings_dict['NAME']
        self.settings_dict['NAME'] = '/tmp/' + self.settings_dict['NAME']
    if not os.path.isfile(self.settings_dict['NAME']):
        # Ensure the file exists so SQLite can open it.
        open(self.settings_dict['NAME'], 'a').close()
    logging.debug("Loaded remote DB!")
Load remote S3 DB
24,803
def close(self, *args, **kwargs):
    """Close the engine and copy the SQLite file back to S3 if it changed.

    Compares the file's md5 against the hash captured at load time and
    skips the upload when they match.  Upload failures are logged and
    swallowed (best effort).
    """
    super(DatabaseWrapper, self).close(*args, **kwargs)
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version),
    )
    try:
        with open(self.settings_dict['NAME'], 'rb') as f:
            fb = f.read()
        m = hashlib.md5()
        m.update(fb)
        if self.db_hash == m.hexdigest():
            logging.debug("Database unchanged, not saving to remote DB!")
            return
        bytesIO = BytesIO()
        bytesIO.write(fb)
        bytesIO.seek(0)
        s3_object = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['REMOTE_NAME'])
        # boto3 resource actions accept keyword arguments only; the stray
        # positional 'rb' argument raised TypeError on every upload.
        result = s3_object.put(Body=bytesIO)
    except Exception as e:
        logging.debug(e)
    logging.debug("Saved to remote DB!")
Engine closed copy file to DB if it has changed
24,804
def id(self):
    """Return a unique id for the detected chip, if any.

    Honors the BLINKA_FORCECHIP override, then probes for an FT232H via
    libftdi when BLINKA_FT232H is set, then falls back to platform-based
    detection.
    """
    try:
        return os.environ['BLINKA_FORCECHIP']
    except KeyError:  # no forced chip, continue with auto-detection
        pass
    try:
        if os.environ['BLINKA_FT232H']:
            import ftdi1 as ftdi
            try:
                ctx = None
                ctx = ftdi.new()
                count, _ = ftdi.usb_find_all(ctx, 0, 0)
                if count < 0:
                    # Fixed: the original applied '%' to a lone int (a
                    # TypeError on this path) and referenced the
                    # non-existent self._ctx instead of the local ctx.
                    raise RuntimeError(
                        'ftdi_usb_find_all returned error %d: %s'
                        % (count, ftdi.get_error_string(ctx))
                    )
                if count == 0:
                    # Fixed missing space between the concatenated parts.
                    raise RuntimeError(
                        'BLINKA_FT232H environment variable '
                        'set, but no FT232H device found'
                    )
            finally:
                # Always free the libftdi context once allocated.
                if ctx is not None:
                    ftdi.free(ctx)
            return FT232H
    except KeyError:  # BLINKA_FT232H not set
        pass
    platform = sys.platform
    if platform == "linux" or platform == "linux2":
        return self._linux_id()
    if platform == "esp8266":
        return ESP8266
    if platform == "samd21":
        return SAMD21
    if platform == "pyboard":
        return STM32
    return None
Return a unique id for the detected chip if any .
24,805
def _linux_id(self):
    """Attempt to detect the CPU of a computer running the Linux kernel."""
    hardware = self.detector.get_cpuinfo_field("Hardware")
    if hardware is None:
        # No "Hardware" field: check x86 vendors, then Tegra device trees.
        detected = None
        vendor_id = self.detector.get_cpuinfo_field("vendor_id")
        if vendor_id in ("GenuineIntel", "AuthenticAMD"):
            detected = GENERIC_X86
        compatible = self.detector.get_device_compatible()
        if compatible and 'tegra' in compatible:
            if 'cv' in compatible or 'nano' in compatible:
                detected = T210
            elif 'quill' in compatible:
                detected = T186
            elif 'xavier' in compatible:
                detected = T194
        return detected
    if hardware in ("BCM2708", "BCM2709", "BCM2835"):
        return BCM2XXX
    if "AM33XX" in hardware:
        return AM33XX
    if "sun8i" in hardware:
        return SUN8I
    if "ODROIDC" in hardware:
        return S805
    if "ODROID-C2" in hardware:
        return S905
    if "SAMA5" in hardware:
        return SAMA5
    return None
Attempt to detect the CPU on a computer running the Linux kernel .
24,806
def id(self):
    """Return a unique id for the detected board, if any.

    Honors the BLINKA_FORCEBOARD override, then maps the detected chip
    id to a board, delegating to chip-specific helpers where a single
    chip can appear on several boards.
    """
    try:
        return os.environ['BLINKA_FORCEBOARD']
    except KeyError:  # no override requested
        pass
    chip_id = self.detector.chip.id
    # Chips that need a further probe to tell boards apart.
    if chip_id == ap_chip.BCM2XXX:
        return self._pi_id()
    if chip_id == ap_chip.AM33XX:
        return self._beaglebone_id()
    if chip_id == ap_chip.SUN8I:
        return self._armbian_id()
    if chip_id == ap_chip.SAMA5:
        return self._sama5_id()
    if chip_id in (ap_chip.T210, ap_chip.T186, ap_chip.T194):
        return self._tegra_id()
    # Chips with a one-to-one board mapping.
    static_boards = {
        ap_chip.GENERIC_X86: GENERIC_LINUX_PC,
        ap_chip.ESP8266: FEATHER_HUZZAH,
        ap_chip.SAMD21: FEATHER_M0_EXPRESS,
        ap_chip.STM32: PYBOARD,
        ap_chip.S805: ODROID_C1,
        ap_chip.S905: ODROID_C2,
        ap_chip.FT232H: FTDI_FT232H,
    }
    return static_boards.get(chip_id)
Return a unique id for the detected board if any .
24,807
def _pi_id(self):
    """Try to detect the id of a Raspberry Pi from its revision code."""
    rev_code = self._pi_rev_code()
    if not rev_code:
        return None
    # First model whose known revision codes include ours, else None.
    matches = (model for model, codes in _PI_REV_CODES.items() if rev_code in codes)
    return next(matches, None)
Try to detect id of a Raspberry Pi .
24,808
def _pi_rev_code(self):
    """Attempt to find the Raspberry Pi revision code for this board."""
    # Only Broadcom BCM2xxx chips can be Raspberry Pis.
    is_pi_chip = self.detector.chip.id == ap_chip.BCM2XXX
    return self.detector.get_cpuinfo_field('Revision') if is_pi_chip else None
Attempt to find a Raspberry Pi revision code for this board .
24,809
def _beaglebone_id(self):
    """Try to detect the id of a Beaglebone from its EEPROM header."""
    try:
        with open("/sys/bus/nvmem/devices/0-00500/nvmem", "rb") as eeprom:
            header = eeprom.read(16)
    except FileNotFoundError:
        return None
    # Valid Beaglebone EEPROMs start with this magic marker.
    if not header.startswith(b'\xaaU3\xee'):
        return None
    id_string = header[4:].decode("ascii")
    for model, bb_ids in _BEAGLEBONE_BOARD_IDS.items():
        if any(id_string == bb_id[1] for bb_id in bb_ids):
            return model
    return None
Try to detect id of a Beaglebone .
24,810
def _tegra_id(self):
    """Try to detect the id of an aarch64 Jetson board."""
    board_value = self.detector.get_device_model()
    # Ordered substring markers: the first hit wins.
    for marker, board in (
        ('tx1', JETSON_TX1),
        ('quill', JETSON_TX2),
        ('xavier', JETSON_XAVIER),
        ('nano', JETSON_NANO),
    ):
        if marker in board_value:
            return board
    return None
Try to detect the id of an aarch64 board.
24,811
def any_embedded_linux(self):
    """Check whether the current board is any embedded Linux device."""
    return (
        self.any_raspberry_pi
        or self.any_beaglebone
        or self.any_orange_pi
        or self.any_giant_board
        or self.any_jetson_board
    )
Check whether the current board is any embedded Linux device .
24,812
async def middleware(request, handler):
    """Main middleware function: wraps every aiohttp request in an
    X-Ray segment, records http metadata, propagates the trace header,
    and records exceptions raised by the handler.
    """
    xray_header = construct_xray_header(request.headers)
    # Segment name comes from the Host header (port stripped) or config.
    name = calculate_segment_name(request.headers['host'].split(':', 1)[0], xray_recorder)
    sampling_req = {
        'host': request.headers['host'],
        'method': request.method,
        'path': request.path,
        'service': name,
    }
    sampling_decision = calculate_sampling_decision(
        trace_header=xray_header,
        recorder=xray_recorder,
        sampling_req=sampling_req,
    )
    segment = xray_recorder.begin_segment(
        name=name,
        traceid=xray_header.root,
        parent_id=xray_header.parent,
        sampling=sampling_decision,
    )
    segment.save_origin_trace_header(xray_header)
    segment.put_http_meta(http.URL, str(request.url))
    segment.put_http_meta(http.METHOD, request.method)
    if 'User-Agent' in request.headers:
        segment.put_http_meta(http.USER_AGENT, request.headers['User-Agent'])
    # Client IP: prefer X-Forwarded-For, then remote_addr, then peername.
    if 'X-Forwarded-For' in request.headers:
        segment.put_http_meta(http.CLIENT_IP, request.headers['X-Forwarded-For'])
        segment.put_http_meta(http.X_FORWARDED_FOR, True)
    elif 'remote_addr' in request.headers:
        segment.put_http_meta(http.CLIENT_IP, request.headers['remote_addr'])
    else:
        segment.put_http_meta(http.CLIENT_IP, request.remote)
    try:
        response = await handler(request)
    except HTTPException as exc:
        # aiohttp HTTP exceptions double as responses; re-raise after
        # letting the finally block record their status.
        response = exc
        raise
    except Exception as err:
        # Unexpected failure: record a 500 plus the exception stack.
        response = None
        segment.put_http_meta(http.STATUS, 500)
        stack = stacktrace.get_stacktrace(limit=xray_recorder.max_trace_back)
        segment.add_exception(err, stack)
        raise
    finally:
        if response is not None:
            segment.put_http_meta(http.STATUS, response.status)
            if 'Content-Length' in response.headers:
                length = int(response.headers['Content-Length'])
                segment.put_http_meta(http.CONTENT_LENGTH, length)
            # Echo the trace header back to the caller.
            header_str = prepare_response_header(xray_header, segment)
            response.headers[http.XRAY_HEADER] = header_str
        # The segment is always ended, even on an exception path.
        xray_recorder.end_segment()
    return response
Main middleware function deals with all the X - Ray segment logic
24,813
def to_id(self):
    """Convert this TraceId object to its string form
    (version-delimiter-hexstart-delimiter-number)."""
    parts = (
        TraceId.VERSION,
        TraceId.DELIMITER,
        format(self.start_time, 'x'),
        TraceId.DELIMITER,
        self.__number,
    )
    return ''.join(str(part) for part in parts)
Convert TraceId object to a string .
24,814
def unpatch():
    """Unpatch any previously patched modules.

    This operation is idempotent.
    """
    _PATCHED_MODULES.discard('httplib')
    setattr(httplib, PATCH_FLAG, False)
    # Remove every wrapper installed by patch().
    for klass, attr in (
        (httplib.HTTPConnection, '_send_request'),
        (httplib.HTTPConnection, 'getresponse'),
        (httplib.HTTPResponse, 'read'),
    ):
        unwrap(klass, attr)
Unpatch any previously patched modules . This operation is idempotent .
24,815
def task_factory(loop, coro):
    """Task factory that copies the X-Ray ``context`` attribute from the
    current task onto newly created tasks so tracing survives task
    switches.
    """
    task = asyncio.Task(coro, loop=loop)
    # _source_traceback is only set when the loop runs in debug mode;
    # guard with getattr so normal mode does not raise AttributeError.
    source_tb = getattr(task, '_source_traceback', None)
    if source_tb:
        # Drop the frame pointing at this factory itself.
        del source_tb[-1]
    # asyncio.Task.current_task() was removed in Python 3.9; the
    # module-level asyncio.current_task() is the supported spelling.
    current_task = asyncio.current_task(loop=loop)
    if current_task is not None and hasattr(current_task, 'context'):
        setattr(task, 'context', current_task.context)
    return task
Task factory function
24,816
def match(self, sampling_req):
    """Determine whether this sampling rule applies to the incoming
    request based on some of the request's parameters.

    Any None (or missing) parameter is considered an implicit match.
    """
    if sampling_req is None:
        return False
    checks = (
        (self._host, sampling_req.get('host', None)),
        (self._method, sampling_req.get('method', None)),
        (self._path, sampling_req.get('path', None)),
        (self._service, sampling_req.get('service', None)),
        (self._service_type, sampling_req.get('service_type', None)),
    )
    return all(not candidate or wildcard_match(pattern, candidate)
               for pattern, candidate in checks)
Determines whether or not this sampling rule applies to the incoming request based on some of the request's parameters. Any None parameter provided will be considered an implicit match.
24,817
def merge(self, rule):
    """Migrate all stateful attributes from the old rule into this one,
    then detach the old rule's reservoir."""
    with self._lock:
        for src, dst in (
            ('request_count', '_request_count'),
            ('borrow_count', '_borrow_count'),
            ('sampled_count', '_sampled_count'),
            ('reservoir', '_reservoir'),
        ):
            setattr(self, dst, getattr(rule, src))
        # The old rule gives up ownership of its reservoir.
        rule.reservoir = None
Migrate all stateful attributes from the old rule
24,818
def borrow_or_take(self, now, can_borrow):
    """Decide whether to borrow or take one quota from the reservoir.

    Returns False when it can do neither.  Thread-safe wrapper around
    the lock-free ``_borrow_or_take``.
    """
    with self._lock:
        decision = self._borrow_or_take(now, can_borrow)
    return decision
Decide whether to borrow or take one quota from the reservoir . Return False if it can neither borrow nor take . This method is thread - safe .
24,819
def close(self, end_time=None):
    """Close the trace entity: record ``end_time`` (defaulting to the
    current time) and clear the in-progress flag."""
    self._check_ended()
    self.end_time = end_time if end_time else time.time()
    self.in_progress = False
Close the trace entity by setting end_time and flip the in progress flag to False .
24,820
def add_subsegment(self, subsegment):
    """Add the input subsegment as a child of this entity."""
    self._check_ended()
    # Reparent the child before registering it.
    subsegment.parent_id = self.id
    self.subsegments.append(subsegment)
Add input subsegment as a child subsegment .
24,821
def put_http_meta(self, key, value):
    """Add http related metadata under 'request' or 'response'.

    ``None`` values are dropped.  A status value is also fed to
    ``apply_status_code`` so fault/error flags stay in sync; string
    statuses are coerced to int first.
    """
    self._check_ended()
    if value is None:
        return
    if key == http.STATUS:
        if isinstance(value, string_types):
            value = int(value)
        self.apply_status_code(value)
    if key in http.request_keys:
        self.http.setdefault('request', {})[key] = value
    elif key in http.response_keys:
        self.http.setdefault('response', {})[key] = value
    else:
        log.warning("ignoring unsupported key %s in http meta.", key)
Add http related metadata .
24,822
def put_annotation(self, key, value):
    """Annotate segment or subsegment with a key-value pair.

    Annotations are indexed by X-Ray for later search queries; invalid
    keys or values are dropped with a warning.
    """
    self._check_ended()
    if not isinstance(key, string_types):
        log.warning("ignoring non string type annotation key with type %s.", type(key))
        return
    if not isinstance(value, annotation_value_types):
        log.warning("ignoring unsupported annotation value type %s.", type(value))
        return
    has_invalid_char = any(ch not in _valid_annotation_key_characters for ch in key)
    if has_invalid_char:
        log.warning("ignoring annnotation with unsupported characters in key: '%s'.", key)
        return
    self.annotations[key] = value
Annotate segment or subsegment with a key - value pair . Annotations will be indexed for later search query .
24,823
def put_metadata(self, key, value, namespace='default'):
    """Add metadata to the segment or subsegment under a namespace.

    Metadata is not indexed for search but can later be retrieved via
    the BatchGetTraces API.  The 'AWS.' namespace prefix is reserved.
    """
    self._check_ended()
    if not isinstance(namespace, string_types):
        log.warning("ignoring non string type metadata namespace")
        return
    if namespace.startswith('AWS.'):
        log.warning("Prefix 'AWS.' is reserved, drop metadata with namespace %s", namespace)
        return
    bucket = self.metadata.get(namespace, None)
    if bucket:
        bucket[key] = value
    else:
        self.metadata[namespace] = {key: value}
Add metadata to segment or subsegment . Metadata is not indexed but can be later retrieved by BatchGetTraces API .
24,824
def add_exception(self, exception, stack, remote=False):
    """Add an exception (with its stack trace) to this trace entity and
    flag the entity as faulted."""
    self._check_ended()
    self.add_fault_flag()
    # An exception already recorded elsewhere is referenced by id only.
    if hasattr(exception, '_recorded'):
        setattr(self, 'cause', getattr(exception, '_cause_id'))
        return
    self.cause['exceptions'] = [Throwable(exception, stack, remote)]
    self.cause['working_directory'] = os.getcwd()
Add an exception to trace entities .
24,825
def serialize(self):
    """Serialize to a JSON document accepted by the X-Ray backend
    service, using jsonpickle.  Returns None when serialization fails.
    """
    document = None
    try:
        document = jsonpickle.encode(self, unpicklable=False)
    except Exception:
        log.exception("got an exception during serialization")
    return document
Serialize to JSON document that can be accepted by the X - Ray backend service . It uses jsonpickle to perform serialization .
24,826
def _delete_empty_properties(self, properties):
    """Delete empty optional properties before serialization so the
    output JSON carries no keys with empty values."""
    optional = ('parent_id', 'subsegments', 'aws', 'http', 'cause',
                'annotations', 'metadata')
    for attr in optional:
        if not getattr(self, attr):
            del properties[attr]
    # These are internal bookkeeping fields, never serialized.
    properties.pop(ORIGIN_TRACE_HEADER_ATTR_KEY, None)
    del properties['sampled']
Delete empty properties before serialization to avoid extra keys with empty values in the output json .
24,827
def reload_settings(*args, **kwargs):
    """Reload X-Ray user settings upon Django server hot restart."""
    global settings
    setting = kwargs['setting']
    value = kwargs['value']
    # Only react to changes within our own settings namespace.
    if setting == XRAY_NAMESPACE:
        settings = XRaySettings(value)
Reload X - Ray user settings upon Django server hot restart
24,828
def add_subsegment(self, subsegment):
    """Add the input subsegment as a child subsegment and bump the
    segment's reference and total-subsegment counters."""
    super(Segment, self).add_subsegment(subsegment)
    self.increment()
Add input subsegment as a child subsegment and increment reference counter and total subsegments counter .
24,829
def remove_subsegment(self, subsegment):
    """Remove the reference to the input subsegment and shrink the
    segment's subsegment size counter."""
    super(Segment, self).remove_subsegment(subsegment)
    self.decrement_subsegments_size()
Remove the reference of input subsegment .
24,830
def set_user(self, user):
    """Set the user of this segment.

    One segment can only have one user; the user is indexed and can be
    queried later.
    """
    super(Segment, self)._check_ended()
    self.user = user
set user of a segment . One segment can only have one user . User is indexed and can be later queried .
24,831
def set_rule_name(self, rule_name):
    """Record the name of the matched centralized sampling rule.

    For recorder internal use only.
    """
    xray_meta = self.aws.get('xray', None)
    if not xray_meta:
        xray_meta = {}
        self.aws['xray'] = xray_meta
    xray_meta['sampling_rule_name'] = rule_name
Add the matched centralized sampling rule name if a segment is sampled because of that rule . This method should be only used by the recorder .
24,832
def inject_trace_header(headers, entity):
    """Extract trace id, entity id and sampling decision from *entity*
    and inject them into the outgoing *headers* mapping."""
    if not entity:
        return
    # Subsegments inherit the origin header from their parent segment.
    if hasattr(entity, 'type') and entity.type == 'subsegment':
        origin_header = entity.parent_segment.get_origin_trace_header()
    else:
        origin_header = entity.get_origin_trace_header()
    to_insert = TraceHeader(
        root=entity.trace_id,
        parent=entity.id,
        sampled=entity.sampled,
        data=origin_header.data if origin_header else None,
    )
    headers[http.XRAY_HEADER] = to_insert.to_header_str()
Extract trace id entity id and sampling decision from the input entity and inject these information to headers .
24,833
def calculate_sampling_decision(trace_header, recorder, sampling_req):
    """Return 1 (or the matched rule name) when the request should be
    sampled, and 0 when it should not.

    A decision carried by the incoming trace header always wins; when
    absent, sampling is on for a recorder with sampling disabled, and
    otherwise delegated to the recorder's sampler.
    """
    upstream = trace_header.sampled
    if upstream is not None and upstream != '?':
        return upstream
    if not recorder.sampling:
        return 1
    decision = recorder.sampler.should_trace(sampling_req)
    return decision if decision else 0
Return 1 or the matched rule name if we should sample, and 0 if we should not. The sampling decision coming from trace_header always has the highest precedence. If the trace_header doesn't contain a sampling decision, then it checks if sampling is enabled or not in the recorder. If not enabled, it returns 1. Otherwise it uses user-defined sampling rules to decide.
24,834
def construct_xray_header(headers):
    """Construct a TraceHeader from the incoming request's headers.

    Always returns a TraceHeader object, even when no tracing header is
    present in the request.
    """
    header_str = headers.get(http.XRAY_HEADER) or headers.get(http.ALT_XRAY_HEADER)
    return TraceHeader.from_header_str(header_str) if header_str else TraceHeader()
Construct a TraceHeader object from the dictionary headers of the incoming request. This method should always return a TraceHeader object regardless of the tracing header's presence in the incoming request.
24,835
def calculate_segment_name(host_name, recorder):
    """Return the segment name based on recorder configuration and the
    input host name.

    Helper for web framework middleware where a host name is available
    from the incoming request's headers.
    """
    naming = recorder.dynamic_naming
    if naming:
        return naming.get_name(host_name)
    return recorder.service
Returns the segment name based on recorder configuration and input host name. This is a helper generally used in web framework middleware where a host name is available from the incoming request's headers.
24,836
def prepare_response_header(origin_header, segment):
    """Prepare the trace header string for the response.

    The sampling decision is echoed back only when the caller explicitly
    requested one ('?' in the original header).
    """
    sampled_requested = bool(origin_header) and origin_header.sampled == '?'
    if sampled_requested:
        new_header = TraceHeader(root=segment.trace_id, sampled=segment.sampled)
    else:
        new_header = TraceHeader(root=segment.trace_id)
    return new_header.to_header_str()
Prepare a trace header to be inserted into response based on original header and the request segment .
24,837
def to_snake_case(name):
    """Convert the input string to a snake_cased string."""
    # Two passes: first split before a capital followed by lowercase,
    # then split runs of capitals, finally lowercase everything.
    partially_converted = first_cap_re.sub(r'\1_\2', name)
    return all_cap_re.sub(r'\1_\2', partially_converted).lower()
Convert the input string to snake - cased string .
24,838
def patch():
    """Patch botocore clients so they generate subsegments when calling
    AWS services.

    Idempotent via the _xray_enabled flag on botocore.client.
    """
    if hasattr(botocore.client, '_xray_enabled'):
        return
    setattr(botocore.client, '_xray_enabled', True)
    wrappers = (
        ('botocore.client', 'BaseClient._make_api_call', _xray_traced_botocore),
        ('botocore.endpoint', 'Endpoint.prepare_request', inject_header),
    )
    for module, attr, wrapper in wrappers:
        wrapt.wrap_function_wrapper(module, attr, wrapper)
Patch botocore client so it generates subsegments when calling AWS services .
24,839
def configure(self, sampling=None, plugins=None,
              context_missing=None, sampling_rules=None,
              daemon_address=None, service=None,
              context=None, emitter=None, streaming=None,
              dynamic_naming=None, streaming_threshold=None,
              max_trace_back=None, sampler=None,
              stream_sql=True):
    """Configure the global X-Ray recorder.

    Every argument is optional; only the provided settings are updated.
    Environment variables (tracing name, daemon address, context
    missing) take precedence over the values passed here.
    """
    if sampling is not None:
        self.sampling = sampling
    if sampler:
        self.sampler = sampler
    if service:
        # Env var wins over the configured service name.
        self.service = os.getenv(TRACING_NAME_KEY, service)
    if sampling_rules:
        self._load_sampling_rules(sampling_rules)
    if emitter:
        self.emitter = emitter
    if daemon_address:
        # Env var wins over the configured daemon address.
        self.emitter.set_daemon_address(os.getenv(DAEMON_ADDR_KEY, daemon_address))
    if context:
        self.context = context
    if context_missing:
        self.context.context_missing = os.getenv(CONTEXT_MISSING_KEY, context_missing)
    if dynamic_naming:
        self.dynamic_naming = dynamic_naming
    if streaming:
        self.streaming = streaming
    if streaming_threshold:
        self.streaming_threshold = streaming_threshold
    if type(max_trace_back) == int and max_trace_back >= 0:
        self.max_trace_back = max_trace_back
    if stream_sql is not None:
        self.stream_sql = stream_sql
    if plugins:
        plugin_modules = get_plugin_modules(plugins)
        for plugin in plugin_modules:
            plugin.initialize()
            if plugin.runtime_context:
                self._aws_metadata[plugin.SERVICE_NAME] = plugin.runtime_context
                self._origin = plugin.ORIGIN
    # Passing an empty plugins iterable explicitly resets the metadata.
    elif plugins is not None:
        self._aws_metadata = copy.deepcopy(XRAY_META)
        self._origin = None
    # The default sampler needs daemon/context/origin to poll rules.
    if type(self.sampler).__name__ == 'DefaultSampler':
        self.sampler.load_settings(DaemonConfig(daemon_address), self.context, self._origin)
Configure global X - Ray recorder .
24,840
def begin_segment(self, name=None, traceid=None, parent_id=None, sampling=None):
    """Begin a segment on the current thread and return it.

    The recorder keeps only one segment at a time; beginning a second
    segment without closing the first overwrites it.
    """
    seg_name = name or self.service
    if not seg_name:
        raise SegmentNameMissingException("Segment name is required.")
    # A globally disabled SDK forces a negative sampling decision.
    if not global_sdk_config.sdk_enabled():
        sampling = 0
    if sampling == 0:
        decision = False
    elif sampling:
        decision = sampling
    elif self.sampling:
        decision = self._sampler.should_trace()
    else:
        decision = True
    if decision:
        segment = Segment(name=seg_name, traceid=traceid, parent_id=parent_id)
        self._populate_runtime_context(segment, decision)
    else:
        segment = DummySegment(seg_name)
    self.context.put_segment(segment)
    return segment
Begin a segment on the current thread and return it . The recorder only keeps one segment at a time . Create the second one without closing existing one will overwrite it .
24,841
def end_segment(self, end_time=None):
    """End the current segment and send it to the X-Ray daemon once it
    and all of its subsegments are closed."""
    self.context.end_segment(end_time)
    segment = self.current_segment()
    if not segment:
        return
    if segment.ready_to_send():
        self._send_segment()
End the current segment and send it to X - Ray daemon if it is ready to send . Ready means segment and all its subsegments are closed .
24,842
def current_segment(self):
    """Return the currently active segment for this thread.

    When the active entity is a subsegment, its parent segment is
    returned instead.
    """
    entity = self.get_trace_entity()
    return entity.parent_segment if self._is_subsegment(entity) else entity
Return the currently active segment . In a multithreading environment this will make sure the segment returned is the one created by the same thread .
24,843
def begin_subsegment(self, name, namespace='local'):
    """Begin a new subsegment.

    The new subsegment becomes the child of the latest open subsegment,
    or of the current open segment when none is open.  Returns None when
    no segment is active at all.
    """
    segment = self.current_segment()
    if not segment:
        log.warning("No segment found, cannot begin subsegment %s." % name)
        return None
    if segment.sampled:
        subsegment = Subsegment(name, namespace, segment)
    else:
        # Unsampled segments get a lightweight dummy child.
        subsegment = DummySubsegment(segment, name)
    self.context.put_subsegment(subsegment)
    return subsegment
Begin a new subsegment. If there is an open subsegment, the newly created subsegment will be the child of the latest opened subsegment. If not, it will be the child of the current open segment.
24,844
def end_subsegment(self, end_time=None):
    """End the current active subsegment.

    When this closes the last open entity under the parent segment the
    whole segment is sent; otherwise eligible closed subsegments are
    streamed out.
    """
    if not self.context.end_subsegment(end_time):
        return
    if self.current_segment().ready_to_send():
        self._send_segment()
    else:
        self.stream_subsegments()
End the current active subsegment . If this is the last one open under its parent segment the entire segment will be sent .
24,845
def put_annotation(self, key, value):
    """Annotate the current active trace entity with a key-value pair.

    Annotations are indexed for later search queries.  No-op when there
    is no sampled active entity.
    """
    entity = self.get_trace_entity()
    if not entity or not entity.sampled:
        return
    entity.put_annotation(key, value)
Annotate current active trace entity with a key - value pair . Annotations will be indexed for later search query .
24,846
def put_metadata(self, key, value, namespace='default'):
    """Add metadata to the current active trace entity.

    Metadata is not indexed but can later be retrieved via the
    BatchGetTraces API.  No-op when there is no sampled active entity.
    """
    entity = self.get_trace_entity()
    if not entity or not entity.sampled:
        return
    entity.put_metadata(key, value, namespace)
Add metadata to the current active trace entity . Metadata is not indexed but can be later retrieved by BatchGetTraces API .
24,847
def stream_subsegments(self):
    """Stream all closed subsegments to the daemon and drop references
    to the parent segment.  No-op when the current segment is not
    eligible for streaming (e.g. not sampled)."""
    segment = self.current_segment()
    if not self.streaming.is_eligible(segment):
        return
    self.streaming.stream(segment, self._stream_subsegment_out)
Stream all closed subsegments to the daemon and remove reference to the parent segment . No - op for a not sampled segment .
24,848
def _send_segment ( self ) : segment = self . current_segment ( ) if not segment : return if segment . sampled : self . emitter . send_entity ( segment ) self . clear_trace_entities ( )
Send the current segment to X - Ray daemon if it is present and sampled then clean up context storage . The emitter will handle failures .
24,849
def applies(self, host, method, path):
    """Determine whether this local sampling rule applies to the
    incoming request based on some of the request's parameters.

    Any None parameter provided is considered an implicit match.
    """
    pairs = ((self.host, host), (self.method, method), (self.path, path))
    return all(not candidate or wildcard_match(pattern, candidate)
               for pattern, candidate in pairs)
Determines whether or not this sampling rule applies to the incoming request based on some of the request's parameters. Any None parameters provided will be considered an implicit match.
24,850
def ready(self):
    """Configure the global X-Ray recorder from Django settings under
    the XRAY_RECORDER namespace.

    Django may call this twice during server startup (base command plus
    reload command), so everything here must stay idempotent.
    """
    if not settings.AWS_XRAY_TRACING_NAME:
        raise SegmentNameMissingException('Segment name is required.')
    xray_recorder.configure(
        daemon_address=settings.AWS_XRAY_DAEMON_ADDRESS,
        sampling=settings.SAMPLING,
        sampling_rules=settings.SAMPLING_RULES,
        context_missing=settings.AWS_XRAY_CONTEXT_MISSING,
        plugins=settings.PLUGINS,
        service=settings.AWS_XRAY_TRACING_NAME,
        dynamic_naming=settings.DYNAMIC_NAMING,
        streaming_threshold=settings.STREAMING_THRESHOLD,
        max_trace_back=settings.MAX_TRACE_BACK,
        stream_sql=settings.STREAM_SQL,
    )
    if settings.PATCH_MODULES:
        # Optionally wrap the auto-patching work in its own segment.
        if settings.AUTO_PATCH_PARENT_SEGMENT_NAME is not None:
            with xray_recorder.in_segment(settings.AUTO_PATCH_PARENT_SEGMENT_NAME):
                patch(settings.PATCH_MODULES, ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
        else:
            patch(settings.PATCH_MODULES, ignore_module_patterns=settings.IGNORE_MODULE_PATTERNS)
    if settings.AUTO_INSTRUMENT:
        # Best-effort: built-in DB/template patching may legitimately fail
        # (e.g. unsupported backend), so failures are only logged.
        try:
            patch_db()
        except Exception:
            log.debug('failed to patch Django built-in database')
        try:
            patch_template()
        except Exception:
            log.debug('failed to patch Django built-in template engine')
Configure global XRay recorder based on django settings under XRAY_RECORDER namespace . This method could be called twice during server startup because of base command and reload command . So this function must be idempotent
24,851
def start(self):
    """Start the rule poller and target poller exactly once.

    No-op when the SDK is globally disabled.
    """
    if not global_sdk_config.sdk_enabled():
        return
    with self._lock:
        if self._started:
            return
        self._rule_poller.start()
        self._target_poller.start()
        self._started = True
Start rule poller and target poller once X - Ray daemon address and context manager is in place .
24,852
def should_trace(self, sampling_req=None):
    """Return the matched centralized sampling rule name when the
    sampler decides to sample.

    Falls back to the local sampler's should_trace when no centralized
    rule matches.  The request dict is filled in with this sampler's
    origin as the service type when missing.
    """
    if not global_sdk_config.sdk_enabled():
        return False
    if not self._started:
        self.start()  # only happens once
    now = int(time.time())
    # Preserve original semantics: an empty (but non-None) request dict
    # is left untouched.
    if sampling_req and not sampling_req.get('service_type', None):
        sampling_req['service_type'] = self._origin
    elif sampling_req is None:
        sampling_req = {'service_type': self._origin}
    matched_rule = self._cache.get_matched_rule(sampling_req, now)
    if matched_rule:
        log.debug('Rule %s is selected to make a sampling decision.', matched_rule.name)
        return self._process_matched_rule(matched_rule, now)
    log.info('No effective centralized sampling rule match. Fallback to local rules.')
    return self._local_sampler.should_trace(sampling_req)
Return the matched sampling rule name if the sampler finds one and decide to sample . If no sampling rule matched it falls back to the local sampler s should_trace implementation . All optional arguments are extracted from incoming requests by X - Ray middleware to perform path based sampling .
24,853
def patch():
    """Patch PynamoDB so it generates subsegments when calling DynamoDB.

    Idempotent via the _xray_enabled flag on the vendored requests
    sessions module.
    """
    import pynamodb  # ensure pynamodb is importable before patching
    sessions = botocore.vendored.requests.sessions
    if hasattr(sessions, '_xray_enabled'):
        return
    setattr(sessions, '_xray_enabled', True)
    wrapt.wrap_function_wrapper(
        'botocore.vendored.requests.sessions',
        'Session.send',
        _xray_traced_pynamodb,
    )
Patch PynamoDB so it generates subsegments when calling DynamoDB.
24,854
def end_segment(self, end_time=None):
    """End the current active segment.

    When the active entity is a subsegment, its parent segment is the
    one that gets closed.
    """
    entity = self.get_trace_entity()
    if not entity:
        log.warning("No segment to end")
        return
    target = entity.parent_segment if self._is_subsegment(entity) else entity
    target.close(end_time)
End the current active segment .
24,855
def put_subsegment(self, subsegment):
    """Store a subsegment created by xray_recorder into the context.

    The new subsegment becomes the child of the currently active entity
    (an already-open subsegment, or the segment itself).
    """
    entity = self.get_trace_entity()
    if not entity:
        log.warning("Active segment or subsegment not found. Discarded %s." % subsegment.name)
        return
    entity.add_subsegment(subsegment)
    self._local.entities.append(subsegment)
Store the subsegment created by xray_recorder to the context . If you put a new subsegment while there is already an open subsegment the new subsegment becomes the child of the existing subsegment .
24,856
def end_subsegment(self, end_time=None):
    """Close the current active subsegment and pop it from storage.

    Returns False when the active entity is not a subsegment.
    """
    candidate = self.get_trace_entity()
    if not self._is_subsegment(candidate):
        log.warning("No subsegment to end.")
        return False
    candidate.close(end_time)
    self._local.entities.pop()
    return True
End the current active segment . Return False if there is no subsegment to end .
24,857
def handle_context_missing(self):
    """Called whenever there is no trace entity to access or mutate.

    Always logs the problem; additionally raises when configured with
    the RUNTIME_ERROR strategy.
    """
    log.error(MISSING_SEGMENT_MSG)
    if self.context_missing == 'RUNTIME_ERROR':
        raise SegmentNotFoundException(MISSING_SEGMENT_MSG)
Called whenever there is no trace entity to access or mutate .
24,858
def check_in_lambda():
    """Return None when the SDK is not loaded in an AWS Lambda worker.

    Otherwise drop a touch file (to signal SDK initialization) and
    return a LambdaContext.
    """
    if not os.getenv(LAMBDA_TASK_ROOT_KEY):
        return None
    try:
        os.mkdir(TOUCH_FILE_DIR)
    except OSError:
        log.debug('directory %s already exists', TOUCH_FILE_DIR)
    try:
        # Create the touch file and refresh its timestamp.
        open(TOUCH_FILE_PATH, 'w+').close()
        os.utime(TOUCH_FILE_PATH, None)
    except (IOError, OSError):
        log.warning("Unable to write to %s. Failed to signal SDK initialization." % TOUCH_FILE_PATH)
    return LambdaContext()
Return None if SDK is not loaded in AWS Lambda worker . Otherwise drop a touch file and return a lambda context .
24,859
def _refresh_context(self):
    """Compare the stored facade segment's trace id to the Lambda trace
    header from the environment; re-initialize the context (and drop
    stored subsegments) when the invocation changed.  This prevents
    resource leaking across Lambda invocations."""
    header_str = os.getenv(LAMBDA_TRACE_HEADER_KEY)
    trace_header = TraceHeader.from_header_str(header_str)
    if not global_sdk_config.sdk_enabled():
        trace_header._sampled = False
    segment = getattr(self._local, 'segment', None)
    if segment and (not trace_header.root or trace_header.root == segment.trace_id):
        # Same invocation environment; keep the current facade segment.
        return
    self._initialize_context(trace_header)
Get current facade segment . To prevent resource leaking in Lambda worker every time there is segment present we compare its trace id to current environment variables . If it is different we create a new facade segment and clean up subsegments stored .
24,860
def _initialize_context(self, trace_header):
    """Create a facade segment based on environment variables set by
    AWS Lambda and initialize storage for subsegments.

    Sampling is forced off when the SDK is disabled; otherwise it
    follows the incoming trace header (None when undecided).
    """
    if not global_sdk_config.sdk_enabled():
        decision = False
    elif trace_header.sampled == 0:
        decision = False
    elif trace_header.sampled == 1:
        decision = True
    else:
        decision = None
    facade = FacadeSegment(
        name='facade',
        traceid=trace_header.root,
        entityid=trace_header.parent,
        sampled=decision,
    )
    self._local.segment = facade
    self._local.entities = []
Create a facade segment based on environment variables set by AWS Lambda and initialize storage for subsegments .
24,861
def set_sdk_enabled(cls, value):
    """Modify the enabled flag.

    If the AWS_XRAY_SDK_ENABLED environment variable is set it takes
    precedence over ``value``; otherwise the flag is set from ``value``.
    A non-bool ``value`` defaults the flag to True with a warning.

    :param bool value: desired enabled state when the env var is absent.
    """
    if cls.XRAY_ENABLED_KEY in os.environ:
        # Env var wins; any string other than 'false' enables the SDK.
        cls.__SDK_ENABLED = str(os.getenv(cls.XRAY_ENABLED_KEY, 'true')).lower() != 'false'
    else:
        if type(value) == bool:
            cls.__SDK_ENABLED = value
        else:
            cls.__SDK_ENABLED = True
            log.warning("Invalid parameter type passed into set_sdk_enabled(). Defaulting to True...")
Modifies the enabled flag if the AWS_XRAY_SDK_ENABLED environment variable is not set otherwise set the enabled flag to be equal to the environment variable . If the env variable is an invalid string boolean it will default to true .
24,862
def _xray_register_type_fix(wrapped, instance, args, kwargs):
    """Send the actual connection or cursor to register_type.

    register_type does not accept the X-Ray proxy objects, so when the
    second positional argument is a traced connection/cursor it is
    replaced with the underlying wrapped object before delegating.
    """
    call_args = list(copy.copy(args))
    second = call_args[1] if len(call_args) == 2 else None
    if isinstance(second, (XRayTracedConn, XRayTracedCursor)):
        call_args[1] = second.__wrapped__
    return wrapped(*call_args, **kwargs)
Send the actual connection or cursor to register type .
24,863
def from_header_str(cls, header):
    """Create a TraceHeader object from a tracing header string
    extracted from an incoming HTTP request's headers.

    Recognized keys (Root, Parent, Sampled) populate the dedicated
    fields; other keys (except Self) go into the data dict. A malformed
    header is logged and yields an empty TraceHeader.
    """
    if not header:
        return cls()
    try:
        known = {}
        extra = {}
        for part in header.strip().split(HEADER_DELIMITER):
            pieces = part.split('=')
            key = pieces[0]
            if key in (ROOT, PARENT, SAMPLE):
                known[key] = pieces[1]
            elif key != SELF:
                extra[key] = pieces[1]
        return cls(
            root=known.get(ROOT),
            parent=known.get(PARENT),
            sampled=known.get(SAMPLE),
            data=extra,
        )
    except Exception:
        log.warning("malformed tracing header %s, ignore.", header)
        return cls()
Create a TraceHeader object from a tracing header string extracted from a http request headers .
24,864
def to_header_str(self):
    """Convert to a tracing header string suitable for injection into
    outgoing HTTP request headers.

    Only fields that are set are emitted; extra data keys are appended
    after the well-known ones.
    """
    parts = []
    if self.root:
        parts.append('{}={}'.format(ROOT, self.root))
    if self.parent:
        parts.append('{}={}'.format(PARENT, self.parent))
    if self.sampled is not None:
        parts.append('{}={}'.format(SAMPLE, self.sampled))
    for key in (self.data or {}):
        parts.append('{}={}'.format(key, self.data[key]))
    return HEADER_DELIMITER.join(parts)
Convert to a tracing header string that can be injected to outgoing http request headers .
24,865
def get_name(self, host_name):
    """Return the segment name derived from the request host name.

    The host name itself is used when it matches the configured
    wildcard pattern; otherwise the fallback name is returned.
    """
    matched = wildcard_match(self._pattern, host_name)
    return host_name if matched else self._fallback
Returns the segment name based on the input host name .
24,866
def set_daemon_address(self, address):
    """Set the UDP ip and port from the raw daemon address string using
    DaemonConfig utilities. A falsy address is ignored.
    """
    if not address:
        return
    config = DaemonConfig(address)
    self._ip = config.udp_ip
    self._port = config.udp_port
Set up UDP ip and port from the raw daemon address string using DaemonConfig class utilities .
24,867
def patch():
    """Patch the aiobotocore client so it generates X-Ray subsegments
    when calling AWS services.

    Idempotent: a marker attribute on aiobotocore.client guards against
    double-patching.
    """
    if hasattr(aiobotocore.client, '_xray_enabled'):
        return
    setattr(aiobotocore.client, '_xray_enabled', True)
    wrapt.wrap_function_wrapper(
        'aiobotocore.client',
        'AioBaseClient._make_api_call',
        _xray_traced_aiobotocore,
    )
    # Also inject the trace header into outgoing requests.
    wrapt.wrap_function_wrapper(
        'aiobotocore.endpoint',
        'AioEndpoint.prepare_request',
        inject_header,
    )
Patch aiobotocore client so it generates subsegments when calling AWS services .
24,868
def is_eligible(self, segment):
    """Return True when the segment's children may be streamed out.

    A segment is eligible if it exists, is sampled, and its total
    subsegment count exceeds the streaming threshold.
    """
    has_sampled = bool(segment) and bool(segment.sampled)
    return has_sampled and segment.get_total_subsegments_size() > self.streaming_threshold
A segment is eligible to have its children subsegments streamed if it is sampled and it breaches streaming threshold .
24,869
def stream(self, entity, callback):
    """Stream out all eligible children of the input entity.

    Serialized under the internal lock so concurrent streamers do not
    interleave.
    """
    with self._lock:
        self._stream(entity, callback)
Stream out all eligible children of the input entity .
24,870
def parse_bind(bind):
    """Parse a SQLAlchemy bind (Connection or Engine) and build SQL
    trace metadata: database type, sanitized URL and user.

    The password, if present, is stripped from the reported URL.
    Returns None when the engine repr does not look like ``Engine(...)``.
    """
    engine = bind.engine if isinstance(bind, Connection) else bind
    match = re.match(r"Engine\((.*?)\)", str(engine))
    if match is None:
        return None
    url = urlparse(match.group(1))
    # Register the scheme so urlparse handles the netloc correctly.
    uses_netloc.append(url.scheme)
    if url.password is None:
        safe_url = url.geturl()
    else:
        # Drop the password: keep only user@host[:port].
        host_info = url.netloc.rpartition('@')[-1]
        safe_url = url._replace(netloc='{}@{}'.format(url.username, host_info)).geturl()
    metadata = {'database_type': url.scheme, 'url': safe_url}
    if url.username is not None:
        metadata['user'] = '{}'.format(url.username)
    return metadata
Parses a connection string and creates SQL trace metadata
24,871
def add_subsegment(self, subsegment):
    """Add the input subsegment as a child subsegment and increment the
    reference counter and total subsegments counter of the parent
    segment.
    """
    super(Subsegment, self).add_subsegment(subsegment)
    self.parent_segment.increment()
Add input subsegment as a child subsegment and increment reference counter and total subsegments counter of the parent segment .
24,872
def remove_subsegment(self, subsegment):
    """Remove the input subsegment from child subsegments and decrement
    the parent segment's total subsegments counter.
    """
    super(Subsegment, self).remove_subsegment(subsegment)
    self.parent_segment.decrement_subsegments_size()
Remove input subsegment from child subsegments and decrement parent segment total subsegments count .
24,873
def close(self, end_time=None):
    """Close the subsegment by setting end_time and flipping the
    in-progress flag to False; also decrement the parent segment's
    reference counter by 1.

    :param float end_time: epoch seconds; current time when None.
    """
    super(Subsegment, self).close(end_time)
    self.parent_segment.decrement_ref_counter()
Close the trace entity by setting end_time and flip the in progress flag to False . Also decrement parent segment s ref counter by 1 .
24,874
def fetch_sampling_rules(self):
    """Use the X-Ray botocore client to get the centralized sampling
    rules from the X-Ray service. The call is proxied and signed by the
    X-Ray daemon.

    :return: list of SamplingRule objects built from valid rule
        records; invalid records are skipped.
    """
    new_rules = []
    resp = self._xray_client.get_sampling_rules()
    records = resp['SamplingRuleRecords']
    for record in records:
        rule_def = record['SamplingRule']
        if self._is_rule_valid(rule_def):
            rule = SamplingRule(name=rule_def['RuleName'],
                                priority=rule_def['Priority'],
                                rate=rule_def['FixedRate'],
                                reservoir_size=rule_def['ReservoirSize'],
                                host=rule_def['Host'],
                                service=rule_def['ServiceName'],
                                method=rule_def['HTTPMethod'],
                                path=rule_def['URLPath'],
                                service_type=rule_def['ServiceType'])
            new_rules.append(rule)
    return new_rules
Use X - Ray botocore client to get the centralized sampling rules from X - Ray service . The call is proxied and signed by X - Ray Daemon .
24,875
def setup_xray_client(self, ip, port, client):
    """Set up the X-Ray client from ip/port, or adopt a preset client.

    When ``client`` is provided, ``ip`` and ``port`` are ignored.
    """
    self._xray_client = client or self._create_xray_client(ip, port)
Setup the xray client based on ip and port . If a preset client is specified ip and port will be ignored .
24,876
def _dt_to_epoch(self, dt):
    """Convert an offset-aware datetime to POSIX time (int seconds)."""
    if PY2:
        # Python 2 has no datetime.timestamp(); diff against the epoch
        # expressed in the same tzinfo instead.
        epoch = datetime(1970, 1, 1).replace(tzinfo=dt.tzinfo)
        return int((dt - epoch).total_seconds())
    return int(dt.timestamp())
Convert a offset - aware datetime to POSIX time .
24,877
def get_stacktrace(limit=None):
    """Get a full stacktrace for the current state of execution.

    Combines the active call stack with the traceback of the exception
    currently being handled, if any.

    :param int limit: ``None`` keeps everything; ``0`` returns ``[]``;
        a positive n keeps the n innermost frames; a negative n keeps
        that many outermost frames.
    """
    if limit == 0:
        return []
    # Drop the two innermost frames: this helper and its caller
    # (assumed to be SDK plumbing — TODO confirm against call sites).
    frames = traceback.extract_stack()[:-2]
    exc_tb = sys.exc_info()[2]
    if exc_tb is not None:
        frames = frames + traceback.extract_tb(exc_tb)
    if limit is None:
        return frames
    return frames[-limit:] if limit > 0 else frames[:abs(limit)]
Get a full stacktrace for the current state of execution .
24,878
def should_trace(self, sampling_req=None):
    """Return True if the sampler decides to sample this request.

    Custom rules are consulted first, in order; the first matching
    rule wins. Without a request, or with no match, the default
    sampling rule applies.
    """
    if sampling_req is None:
        return self._should_trace(self._default_rule)
    host = sampling_req.get('host')
    method = sampling_req.get('method')
    path = sampling_req.get('path')
    matched = next((r for r in self._rules if r.applies(host, method, path)), None)
    if matched is not None:
        return self._should_trace(matched)
    return self._should_trace(self._default_rule)
Return True if the sampler decides to sample based on input information and sampling rules . It will first check if any custom rule should be applied ; if not , it falls back to the default sampling rule .
24,879
def process_exception(self, request, exception):
    """Add exception information and a 500 status to the current
    segment (or subsegment when running inside Lambda).
    """
    if self.in_lambda_ctx:
        segment = xray_recorder.current_subsegment()
    else:
        segment = xray_recorder.current_segment()
    segment.put_http_meta(http.STATUS, 500)
    stack = stacktrace.get_stacktrace(limit=xray_recorder._max_trace_back)
    segment.add_exception(exception, stack)
Add exception information and fault flag to the current segment .
24,880
def take(self):
    """Consume one trace from the per-second reservoir.

    Returns True while quota remains within the current wall-clock
    second, False once it is exhausted. The usage counter resets
    whenever the second ticks over.
    """
    with self._lock:
        current_sec = int(time.time())
        if current_sec != self.this_sec:
            # New second: reset the usage window.
            self.used_this_sec = 0
            self.this_sec = current_sec
        if self.used_this_sec >= self.traces_per_sec:
            return False
        self.used_this_sec += 1
        return True
Returns True if there are segments left within the current second otherwise return False .
24,881
def get_default_connection():
    """Return the default datastore connection for the calling thread.

    Connections are cached per thread id. On first use in a thread the
    connection options are populated from the environment (project
    endpoint and credentials) under the global lock.
    """
    tid = id(threading.current_thread())
    conn = _conn_holder.get(tid)
    if not conn:
        with (_rlock):
            # Fill in options lazily, once per process.
            if 'project_endpoint' not in _options and 'project_id' not in _options:
                _options['project_endpoint'] = helper.get_project_endpoint_from_env()
            if 'credentials' not in _options:
                _options['credentials'] = helper.get_credentials_from_env()
            _conn_holder[tid] = conn = connection.Datastore(**_options)
    return conn
Returns the default datastore connection .
24,882
def get_all(cls):
    """Query for all Todo items ordered by creation date.

    :return: list of Todo objects built from the query results.
    """
    req = datastore.RunQueryRequest()
    q = req.query
    set_kind(q, kind='Todo')
    add_property_orders(q, 'created')
    resp = datastore.run_query(req)
    todos = [Todo.from_proto(r.entity) for r in resp.batch.entity_results]
    return todos
Query for all Todo items ordered by creation date .
24,883
def archive(cls):
    """Delete all Todo items that are done, inside a transaction.

    Runs a keys-only query for done Todos under the default todo list,
    then deletes each returned key in the transactional commit.

    :return: empty string (handler convention).
    """
    req = datastore.BeginTransactionRequest()
    resp = datastore.begin_transaction(req)
    tx = resp.transaction
    req = datastore.RunQueryRequest()
    req.read_options.transaction = tx
    q = req.query
    set_kind(q, kind='Todo')
    # Keys-only projection: only keys are needed to issue deletes.
    add_projection(q, '__key__')
    set_composite_filter(
        q.filter, datastore.CompositeFilter.AND,
        set_property_filter(datastore.Filter(), 'done', datastore.PropertyFilter.EQUAL, True),
        set_property_filter(datastore.Filter(), '__key__', datastore.PropertyFilter.HAS_ANCESTOR, default_todo_list.key))
    resp = datastore.run_query(req)
    req = datastore.CommitRequest()
    req.transaction = tx
    for result in resp.batch.entity_results:
        req.mutations.add().delete.CopyFrom(result.entity.key)
    resp = datastore.commit(req)
    return ''
Delete all Todo items that are done .
24,884
def save(self):
    """Update or insert this Todo item (non-transactional upsert).

    For a new item the id assigned by datastore is copied back onto
    the object.

    :return: self, for chaining.
    """
    req = datastore.CommitRequest()
    req.mode = datastore.CommitRequest.NON_TRANSACTIONAL
    req.mutations.add().upsert.CopyFrom(self.to_proto())
    resp = datastore.commit(req)
    if not self.id:
        # Datastore allocated an id: take it from the mutation result key.
        self.id = resp.mutation_results[0].key.path[-1].id
    return self
Update or insert a Todo item .
24,885
def Get(self, project_id):
    """Return the emulator instance for project_id, creating and
    caching one on first request.
    """
    try:
        return self._emulators[project_id]
    except KeyError:
        emulator = self._emulators[project_id] = self.Create(project_id)
        return emulator
Returns an existing emulator instance for the provided project_id .
24,886
def Create(self, project_id, start_options=None, deadline=10):
    """Create a fresh (uncached) emulator instance.

    :param project_id: datastore project the emulator serves.
    :param start_options: extra command-line options passed through to
        the emulator process.
    :param deadline: seconds to wait for the emulator to start.
    :return: a new DatastoreEmulator.
    """
    return DatastoreEmulator(self._emulator_cmd, self._working_directory, project_id, deadline, start_options)
Creates an emulator instance .
24,887
def _WaitForStartup(self, deadline):
    """Poll the emulator's HTTP endpoint until it responds or the
    deadline (seconds) elapses.

    Uses exponential backoff starting at 50ms. Returns True when a 200
    response is seen, False on timeout.
    """
    start = time.time()
    sleep = 0.05

    def Elapsed():
        return time.time() - start

    while True:
        try:
            response, _ = self._http.request(self._host)
            if response.status == 200:
                logging.info('emulator responded after %f seconds', Elapsed())
                return True
        except (socket.error, httplib.ResponseNotReady):
            # Server not accepting connections yet; keep polling.
            pass
        if Elapsed() >= deadline:
            return False
        else:
            time.sleep(sleep)
            sleep *= 2
Waits for the emulator to start .
24,888
def Clear(self):
    """Clear all data from the emulator instance via its /reset
    endpoint.

    :return: True on success; None (after logging a warning) otherwise.
    """
    headers = {'Content-length': '0'}
    response, _ = self._http.request('%s/reset' % self._host, method='POST', headers=headers)
    if response.status == 200:
        return True
    else:
        logging.warning('failed to clear emulator; response was: %s', response)
Clears all data from the emulator instance .
24,889
def Stop(self):
    """Stop the emulator instance.

    Sends a POST to the /shutdown endpoint (best-effort: a non-200
    response is only logged), marks the instance stopped, and removes
    its temporary working directory. No-op if already stopped.
    """
    if not self.__running:
        return
    logging.info('shutting down the emulator running at %s', self._host)
    headers = {'Content-length': '0'}
    response, _ = self._http.request('%s/shutdown' % self._host, method='POST', headers=headers)
    if response.status != 200:
        logging.warning('failed to shut down emulator; response: %s', response)
    self.__running = False
    # Remove the emulator's temporary state directory.
    shutil.rmtree(self._tmp_dir)
Stops the emulator instance .
24,890
def _call_method(self, method, req, resp_class):
    """Call the given RPC method over HTTP with a protobuf payload.

    :param method: RPC method name, appended to the base URL.
    :param req: request protobuf message; serialized as the POST body.
    :param resp_class: response protobuf class to parse the reply into.
    :return: the parsed resp_class instance.
    :raises: the RPC error built by _make_rpc_error when the HTTP
        status is not 200.
    """
    payload = req.SerializeToString()
    headers = {
        'Content-Type': 'application/x-protobuf',
        'Content-Length': str(len(payload)),
        'X-Goog-Api-Format-Version': '2'
    }
    response, content = self._http.request('%s:%s' % (self._url, method), method='POST', body=payload, headers=headers)
    if response.status != 200:
        raise _make_rpc_error(method, response, content)
    resp = resp_class()
    resp.ParseFromString(content)
    return resp
_call_method calls the given RPC method over HTTP .
24,891
def get_credentials_from_env():
    """Get credentials from environment variables.

    Resolution order: no credentials for test-stub or emulator use, a
    service-account private key file, then Google Application Default
    Credentials. (Python 2 code: note the ``except X, e`` syntax.)
    """
    if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV):
        logging.info('connecting without credentials because %s is set.',
                     _DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV)
        return None
    if os.getenv(_DATASTORE_EMULATOR_HOST_ENV):
        logging.info('connecting without credentials because %s is set.',
                     _DATASTORE_EMULATOR_HOST_ENV)
        return None
    if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV)
            and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)):
        with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f:
            key = f.read()
        credentials = client.SignedJwtAssertionCredentials(
            os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE)
        logging.info('connecting using private key file.')
        return credentials
    try:
        credentials = client.GoogleCredentials.get_application_default()
        credentials = credentials.create_scoped(SCOPE)
        logging.info('connecting using Google Application Default Credentials.')
        return credentials
    except client.ApplicationDefaultCredentialsError, e:
        logging.error('Unable to find any credentials to use. '
                      'If you are running locally, make sure to set the '
                      '%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV)
        raise e
Get credentials from environment variables .
24,892
def get_project_endpoint_from_env(project_id=None, host=None):
    """Get the Datastore project endpoint from environment variables.

    Resolution order: explicit URL override, local emulator host, then
    the given (or default googleapis) host.

    :raises ValueError: when no project id is given or configured.
    """
    project_id = project_id or os.getenv(_DATASTORE_PROJECT_ID_ENV)
    if not project_id:
        raise ValueError('project_id was not provided. Either pass it in '
                         'directly or set DATASTORE_PROJECT_ID.')
    if os.getenv(_DATASTORE_HOST_ENV):
        logging.warning('Ignoring value of environment variable DATASTORE_HOST. '
                        'To point datastore to a host running locally, use the '
                        'environment variable DATASTORE_EMULATOR_HOST')
    override = os.getenv(_DATASTORE_URL_OVERRIDE_ENV)
    if override:
        return '%s/projects/%s' % (override, project_id)
    emulator_host = os.getenv(_DATASTORE_EMULATOR_HOST_ENV)
    if emulator_host:
        return 'http://%s/%s/projects/%s' % (emulator_host, API_VERSION, project_id)
    return 'https://%s/%s/projects/%s' % (host or GOOGLEAPIS_HOST, API_VERSION, project_id)
Get Datastore project endpoint from environment variables .
24,893
def add_key_path(key_proto, *path_elements):
    """Add path elements to the given datastore.Key proto message.

    Elements are consumed as (kind, id_or_name) pairs; a trailing kind
    with no id/name is allowed (incomplete key element).

    :param key_proto: datastore.Key proto to mutate.
    :param path_elements: alternating kind (str) and id (int/long) or
        name (str) values. (Python 2 code: uses ``long``/``basestring``.)
    :return: key_proto for chaining, or None when the path ends with a
        bare kind.
    :raises TypeError: when an id/name has an unsupported type.
    """
    for i in range(0, len(path_elements), 2):
        pair = path_elements[i:i + 2]
        elem = key_proto.path.add()
        elem.kind = pair[0]
        if len(pair) == 1:
            # Trailing kind with no id/name: incomplete key element.
            return
        id_or_name = pair[1]
        if isinstance(id_or_name, (int, long)):
            elem.id = id_or_name
        elif isinstance(id_or_name, basestring):
            elem.name = id_or_name
        else:
            raise TypeError(
                'Expected an integer id or string name as argument %d; '
                'received %r (a %s).' % (i + 2, id_or_name, type(id_or_name)))
    return key_proto
Add path elements to the given datastore . Key proto message .
24,894
def add_properties(entity_proto, property_dict, exclude_from_indexes=None):
    """Add values from property_dict to the given datastore.Entity
    proto.

    :param entity_proto: datastore.Entity proto to mutate.
    :param property_dict: mapping of property name -> python value.
    :param exclude_from_indexes: optional bool applied to every value.
    """
    for name, value in property_dict.iteritems():
        set_property(entity_proto.properties, name, value, exclude_from_indexes)
Add values to the given datastore . Entity proto message .
24,895
def set_property(property_map, name, value, exclude_from_indexes=None):
    """Set a named property value in the given datastore property map.

    Delegates conversion of the python value to set_value.
    """
    set_value(property_map[name], value, exclude_from_indexes)
Set property value in the given datastore . Property proto message .
24,896
def set_value(value_proto, value, exclude_from_indexes=None):
    """Set the corresponding datastore.Value field for the given python
    value.

    Lists/tuples become array values (recursively). Python 2 code:
    ``unicode`` maps to string_value while ``str`` maps to blob_value,
    and the bool check must precede int since bool subclasses int.

    :param value_proto: datastore.Value proto; cleared then overwritten.
    :param value: python value, or a pre-built Value/Key/Entity proto.
    :param exclude_from_indexes: optional bool; only set when not None.
    :raises TypeError: for unsupported value types.
    """
    value_proto.Clear()
    if isinstance(value, (list, tuple)):
        for sub_value in value:
            set_value(value_proto.array_value.values.add(), sub_value, exclude_from_indexes)
        return  # exclude_from_indexes is not applied to the array itself
    if isinstance(value, entity_pb2.Value):
        value_proto.MergeFrom(value)
    elif isinstance(value, unicode):
        value_proto.string_value = value
    elif isinstance(value, str):
        value_proto.blob_value = value
    elif isinstance(value, bool):
        value_proto.boolean_value = value
    elif isinstance(value, (int, long)):
        value_proto.integer_value = value
    elif isinstance(value, float):
        value_proto.double_value = value
    elif isinstance(value, datetime.datetime):
        to_timestamp(value, value_proto.timestamp_value)
    elif isinstance(value, entity_pb2.Key):
        value_proto.key_value.CopyFrom(value)
    elif isinstance(value, entity_pb2.Entity):
        value_proto.entity_value.CopyFrom(value)
    else:
        raise TypeError('value type: %r not supported' % (value,))
    if exclude_from_indexes is not None:
        value_proto.exclude_from_indexes = exclude_from_indexes
Set the corresponding datastore . Value _value field for the given arg .
24,897
def get_value(value_proto):
    """Get the python object equivalent for the given value proto.

    Native scalar fields are returned directly; timestamps are
    converted to datetime, arrays recursively; an unset/unknown kind
    yields None.
    """
    field = value_proto.WhichOneof('value_type')
    if field in __native_value_types:
        return getattr(value_proto, field)
    if field == 'timestamp_value':
        return from_timestamp(value_proto.timestamp_value)
    if field == 'array_value':
        return [get_value(sub_value) for sub_value in value_proto.array_value.values]
    return None
Gets the python object equivalent for the given value proto .
24,898
def get_property_dict(entity_proto):
    """Convert a datastore.Entity proto to a dict mapping property
    name -> datastore.Value proto.
    """
    return {prop.key: prop.value for prop in entity_proto.property}
Convert datastore . Entity to a dict of property name - > datastore . Value .
24,899
def set_kind(query_proto, kind):
    """Set the kind constraint for the given datastore.Query proto.

    Any previously set kinds are cleared first, so the query ends up
    with exactly one kind expression.
    """
    del query_proto.kind[:]
    query_proto.kind.add().name = kind
Set the kind constraint for the given datastore . Query proto message .