idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
223,500
def ReadTriggers ( self , collection_link , options = None ) : if options is None : options = { } return self . QueryTriggers ( collection_link , None , options )
Reads all triggers in a collection .
42
8
223,501
def CreateTrigger ( self , collection_link , trigger , options = None ) : if options is None : options = { } collection_id , path , trigger = self . _GetContainerIdWithPathForTrigger ( collection_link , trigger ) return self . Create ( trigger , path , 'triggers' , collection_id , None , options )
Creates a trigger in a collection .
74
8
223,502
def UpsertTrigger ( self , collection_link , trigger , options = None ) : if options is None : options = { } collection_id , path , trigger = self . _GetContainerIdWithPathForTrigger ( collection_link , trigger ) return self . Upsert ( trigger , path , 'triggers' , collection_id , None , options )
Upserts a trigger in a collection .
76
10
223,503
def ReadTrigger ( self , trigger_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( trigger_link ) trigger_id = base . GetResourceIdOrFullNameFromLink ( trigger_link ) return self . Read ( path , 'triggers' , trigger_id , None , options )
Reads a trigger .
77
5
223,504
def ReadUserDefinedFunctions ( self , collection_link , options = None ) : if options is None : options = { } return self . QueryUserDefinedFunctions ( collection_link , None , options )
Reads all user defined functions in a collection .
46
10
223,505
def QueryUserDefinedFunctions ( self , collection_link , query , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( collection_link , 'udfs' ) collection_id = base . GetResourceIdOrFullNameFromLink ( collection_link ) def fetch_fn ( options ) : return self . __QueryFeed ( path , 'udfs' , collection_id , lambda r : r [ 'UserDefinedFunctions' ] , lambda _ , b : b , query , options ) , self . last_response_headers return query_iterable . QueryIterable ( self , query , options , fetch_fn )
Queries user defined functions in a collection .
146
9
223,506
def CreateUserDefinedFunction ( self , collection_link , udf , options = None ) : if options is None : options = { } collection_id , path , udf = self . _GetContainerIdWithPathForUDF ( collection_link , udf ) return self . Create ( udf , path , 'udfs' , collection_id , None , options )
Creates a user defined function in a collection .
81
10
223,507
def UpsertUserDefinedFunction ( self , collection_link , udf , options = None ) : if options is None : options = { } collection_id , path , udf = self . _GetContainerIdWithPathForUDF ( collection_link , udf ) return self . Upsert ( udf , path , 'udfs' , collection_id , None , options )
Upserts a user defined function in a collection .
83
12
223,508
def ReadUserDefinedFunction ( self , udf_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( udf_link ) udf_id = base . GetResourceIdOrFullNameFromLink ( udf_link ) return self . Read ( path , 'udfs' , udf_id , None , options )
Reads a user defined function .
84
7
223,509
def ReadStoredProcedures ( self , collection_link , options = None ) : if options is None : options = { } return self . QueryStoredProcedures ( collection_link , None , options )
Reads all store procedures in a collection .
46
9
223,510
def CreateStoredProcedure ( self , collection_link , sproc , options = None ) : if options is None : options = { } collection_id , path , sproc = self . _GetContainerIdWithPathForSproc ( collection_link , sproc ) return self . Create ( sproc , path , 'sprocs' , collection_id , None , options )
Creates a stored procedure in a collection .
83
9
223,511
def UpsertStoredProcedure ( self , collection_link , sproc , options = None ) : if options is None : options = { } collection_id , path , sproc = self . _GetContainerIdWithPathForSproc ( collection_link , sproc ) return self . Upsert ( sproc , path , 'sprocs' , collection_id , None , options )
Upserts a stored procedure in a collection .
85
11
223,512
def ReadStoredProcedure ( self , sproc_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( sproc_link ) sproc_id = base . GetResourceIdOrFullNameFromLink ( sproc_link ) return self . Read ( path , 'sprocs' , sproc_id , None , options )
Reads a stored procedure .
86
6
223,513
def ReadConflicts ( self , collection_link , feed_options = None ) : if feed_options is None : feed_options = { } return self . QueryConflicts ( collection_link , None , feed_options )
Reads conflicts .
48
4
223,514
def ReadConflict ( self , conflict_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( conflict_link ) conflict_id = base . GetResourceIdOrFullNameFromLink ( conflict_link ) return self . Read ( path , 'conflicts' , conflict_id , None , options )
Reads a conflict .
77
5
223,515
def DeleteContainer ( self , collection_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( collection_link ) collection_id = base . GetResourceIdOrFullNameFromLink ( collection_link ) return self . DeleteResource ( path , 'colls' , collection_id , None , options )
Deletes a collection .
77
5
223,516
def ReplaceItem ( self , document_link , new_document , options = None ) : CosmosClient . __ValidateResource ( new_document ) path = base . GetPathFromLink ( document_link ) document_id = base . GetResourceIdOrFullNameFromLink ( document_link ) # Python's default arguments are evaluated once when the function is defined, not each time the function is called (like it is in say, Ruby). # This means that if you use a mutable default argument and mutate it, you will and have mutated that object for all future calls to the function as well. # So, using a non-mutable deafult in this case(None) and assigning an empty dict(mutable) inside the function so that it remains local # For more details on this gotcha, please refer http://docs.python-guide.org/en/latest/writing/gotchas/ if options is None : options = { } # Extract the document collection link and add the partition key to options collection_link = base . GetItemContainerLink ( document_link ) options = self . _AddPartitionKey ( collection_link , new_document , options ) return self . Replace ( new_document , path , 'docs' , document_id , None , options )
Replaces a document and returns it .
272
8
223,517
def CreateAttachment ( self , document_link , attachment , options = None ) : if options is None : options = { } document_id , path = self . _GetItemIdWithPathForAttachment ( attachment , document_link ) return self . Create ( attachment , path , 'attachments' , document_id , None , options )
Creates an attachment in a document .
73
8
223,518
def UpsertAttachment ( self , document_link , attachment , options = None ) : if options is None : options = { } document_id , path = self . _GetItemIdWithPathForAttachment ( attachment , document_link ) return self . Upsert ( attachment , path , 'attachments' , document_id , None , options )
Upserts an attachment in a document .
75
10
223,519
def CreateAttachmentAndUploadMedia ( self , document_link , readable_stream , options = None ) : if options is None : options = { } document_id , initial_headers , path = self . _GetItemIdWithPathForAttachmentMedia ( document_link , options ) return self . Create ( readable_stream , path , 'attachments' , document_id , initial_headers , options )
Creates an attachment and upload media .
87
8
223,520
def UpsertAttachmentAndUploadMedia ( self , document_link , readable_stream , options = None ) : if options is None : options = { } document_id , initial_headers , path = self . _GetItemIdWithPathForAttachmentMedia ( document_link , options ) return self . Upsert ( readable_stream , path , 'attachments' , document_id , initial_headers , options )
Upserts an attachment and upload media .
89
10
223,521
def ReadAttachment ( self , attachment_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( attachment_link ) attachment_id = base . GetResourceIdOrFullNameFromLink ( attachment_link ) return self . Read ( path , 'attachments' , attachment_id , None , options )
Reads an attachment .
77
5
223,522
def ReadAttachments ( self , document_link , options = None ) : if options is None : options = { } return self . QueryAttachments ( document_link , None , options )
Reads all attachments in a document .
40
8
223,523
def QueryAttachments ( self , document_link , query , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( document_link , 'attachments' ) document_id = base . GetResourceIdOrFullNameFromLink ( document_link ) def fetch_fn ( options ) : return self . __QueryFeed ( path , 'attachments' , document_id , lambda r : r [ 'Attachments' ] , lambda _ , b : b , query , options ) , self . last_response_headers return query_iterable . QueryIterable ( self , query , options , fetch_fn )
Queries attachments in a document .
140
7
223,524
def ReadMedia ( self , media_link ) : default_headers = self . default_headers path = base . GetPathFromLink ( media_link ) media_id = base . GetResourceIdOrFullNameFromLink ( media_link ) attachment_id = base . GetAttachmentIdFromMediaId ( media_id ) headers = base . GetHeaders ( self , default_headers , 'get' , path , attachment_id , 'media' , { } ) # ReadMedia will always use WriteEndpoint since it's not replicated in readable Geo regions request = request_object . _RequestObject ( 'media' , documents . _OperationType . Read ) result , self . last_response_headers = self . __Get ( path , request , headers ) return result
Reads a media .
164
5
223,525
def UpdateMedia ( self , media_link , readable_stream , options = None ) : if options is None : options = { } initial_headers = dict ( self . default_headers ) # Add required headers slug and content-type in case the body is a stream if options . get ( 'slug' ) : initial_headers [ http_constants . HttpHeaders . Slug ] = options [ 'slug' ] if options . get ( 'contentType' ) : initial_headers [ http_constants . HttpHeaders . ContentType ] = ( options [ 'contentType' ] ) else : initial_headers [ http_constants . HttpHeaders . ContentType ] = ( runtime_constants . MediaTypes . OctetStream ) path = base . GetPathFromLink ( media_link ) media_id = base . GetResourceIdOrFullNameFromLink ( media_link ) attachment_id = base . GetAttachmentIdFromMediaId ( media_id ) headers = base . GetHeaders ( self , initial_headers , 'put' , path , attachment_id , 'media' , options ) # UpdateMedia will use WriteEndpoint since it uses PUT operation request = request_object . _RequestObject ( 'media' , documents . _OperationType . Update ) result , self . last_response_headers = self . __Put ( path , request , readable_stream , headers ) self . _UpdateSessionIfRequired ( headers , result , self . last_response_headers ) return result
Updates a media and returns it .
325
8
223,526
def ReplaceAttachment ( self , attachment_link , attachment , options = None ) : if options is None : options = { } CosmosClient . __ValidateResource ( attachment ) path = base . GetPathFromLink ( attachment_link ) attachment_id = base . GetResourceIdOrFullNameFromLink ( attachment_link ) return self . Replace ( attachment , path , 'attachments' , attachment_id , None , options )
Replaces an attachment and returns it .
91
8
223,527
def DeleteAttachment ( self , attachment_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( attachment_link ) attachment_id = base . GetResourceIdOrFullNameFromLink ( attachment_link ) return self . DeleteResource ( path , 'attachments' , attachment_id , None , options )
Deletes an attachment .
78
5
223,528
def ReplaceTrigger ( self , trigger_link , trigger , options = None ) : if options is None : options = { } CosmosClient . __ValidateResource ( trigger ) trigger = trigger . copy ( ) if trigger . get ( 'serverScript' ) : trigger [ 'body' ] = str ( trigger [ 'serverScript' ] ) elif trigger . get ( 'body' ) : trigger [ 'body' ] = str ( trigger [ 'body' ] ) path = base . GetPathFromLink ( trigger_link ) trigger_id = base . GetResourceIdOrFullNameFromLink ( trigger_link ) return self . Replace ( trigger , path , 'triggers' , trigger_id , None , options )
Replaces a trigger and returns it .
153
8
223,529
def DeleteTrigger ( self , trigger_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( trigger_link ) trigger_id = base . GetResourceIdOrFullNameFromLink ( trigger_link ) return self . DeleteResource ( path , 'triggers' , trigger_id , None , options )
Deletes a trigger .
78
5
223,530
def ReplaceUserDefinedFunction ( self , udf_link , udf , options = None ) : if options is None : options = { } CosmosClient . __ValidateResource ( udf ) udf = udf . copy ( ) if udf . get ( 'serverScript' ) : udf [ 'body' ] = str ( udf [ 'serverScript' ] ) elif udf . get ( 'body' ) : udf [ 'body' ] = str ( udf [ 'body' ] ) path = base . GetPathFromLink ( udf_link ) udf_id = base . GetResourceIdOrFullNameFromLink ( udf_link ) return self . Replace ( udf , path , 'udfs' , udf_id , None , options )
Replaces a user defined function and returns it .
171
10
223,531
def DeleteUserDefinedFunction ( self , udf_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( udf_link ) udf_id = base . GetResourceIdOrFullNameFromLink ( udf_link ) return self . DeleteResource ( path , 'udfs' , udf_id , None , options )
Deletes a user defined function .
85
7
223,532
def ExecuteStoredProcedure ( self , sproc_link , params , options = None ) : if options is None : options = { } initial_headers = dict ( self . default_headers ) initial_headers . update ( { http_constants . HttpHeaders . Accept : ( runtime_constants . MediaTypes . Json ) } ) if params and not type ( params ) is list : params = [ params ] path = base . GetPathFromLink ( sproc_link ) sproc_id = base . GetResourceIdOrFullNameFromLink ( sproc_link ) headers = base . GetHeaders ( self , initial_headers , 'post' , path , sproc_id , 'sprocs' , options ) # ExecuteStoredProcedure will use WriteEndpoint since it uses POST operation request = request_object . _RequestObject ( 'sprocs' , documents . _OperationType . ExecuteJavaScript ) result , self . last_response_headers = self . __Post ( path , request , params , headers ) return result
Executes a store procedure .
231
6
223,533
def ReplaceStoredProcedure ( self , sproc_link , sproc , options = None ) : if options is None : options = { } CosmosClient . __ValidateResource ( sproc ) sproc = sproc . copy ( ) if sproc . get ( 'serverScript' ) : sproc [ 'body' ] = str ( sproc [ 'serverScript' ] ) elif sproc . get ( 'body' ) : sproc [ 'body' ] = str ( sproc [ 'body' ] ) path = base . GetPathFromLink ( sproc_link ) sproc_id = base . GetResourceIdOrFullNameFromLink ( sproc_link ) return self . Replace ( sproc , path , 'sprocs' , sproc_id , None , options )
Replaces a stored procedure and returns it .
173
9
223,534
def DeleteStoredProcedure ( self , sproc_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( sproc_link ) sproc_id = base . GetResourceIdOrFullNameFromLink ( sproc_link ) return self . DeleteResource ( path , 'sprocs' , sproc_id , None , options )
Deletes a stored procedure .
87
6
223,535
def DeleteConflict ( self , conflict_link , options = None ) : if options is None : options = { } path = base . GetPathFromLink ( conflict_link ) conflict_id = base . GetResourceIdOrFullNameFromLink ( conflict_link ) return self . DeleteResource ( path , 'conflicts' , conflict_id , None , options )
Deletes a conflict .
78
5
223,536
def ReplaceOffer ( self , offer_link , offer ) : CosmosClient . __ValidateResource ( offer ) path = base . GetPathFromLink ( offer_link ) offer_id = base . GetResourceIdOrFullNameFromLink ( offer_link ) return self . Replace ( offer , path , 'offers' , offer_id , None , None )
Replaces an offer and returns it .
78
8
223,537
def ReadOffer ( self , offer_link ) : path = base . GetPathFromLink ( offer_link ) offer_id = base . GetResourceIdOrFullNameFromLink ( offer_link ) return self . Read ( path , 'offers' , offer_id , None , { } )
Reads an offer .
65
5
223,538
def GetDatabaseAccount ( self , url_connection = None ) : if url_connection is None : url_connection = self . url_connection initial_headers = dict ( self . default_headers ) headers = base . GetHeaders ( self , initial_headers , 'get' , '' , # path '' , # id '' , # type { } ) request = request_object . _RequestObject ( 'databaseaccount' , documents . _OperationType . Read , url_connection ) result , self . last_response_headers = self . __Get ( '' , request , headers ) database_account = documents . DatabaseAccount ( ) database_account . DatabasesLink = '/dbs/' database_account . MediaLink = '/media/' if ( http_constants . HttpHeaders . MaxMediaStorageUsageInMB in self . last_response_headers ) : database_account . MaxMediaStorageUsageInMB = ( self . last_response_headers [ http_constants . HttpHeaders . MaxMediaStorageUsageInMB ] ) if ( http_constants . HttpHeaders . CurrentMediaStorageUsageInMB in self . last_response_headers ) : database_account . CurrentMediaStorageUsageInMB = ( self . last_response_headers [ http_constants . HttpHeaders . CurrentMediaStorageUsageInMB ] ) database_account . ConsistencyPolicy = result . get ( constants . _Constants . UserConsistencyPolicy ) # WritableLocations and ReadableLocations fields will be available only for geo-replicated database accounts if constants . _Constants . WritableLocations in result : database_account . _WritableLocations = result [ constants . _Constants . WritableLocations ] if constants . _Constants . ReadableLocations in result : database_account . _ReadableLocations = result [ constants . _Constants . ReadableLocations ] if constants . _Constants . EnableMultipleWritableLocations in result : database_account . _EnableMultipleWritableLocations = result [ constants . _Constants . EnableMultipleWritableLocations ] self . _useMultipleWriteLocations = self . connection_policy . UseMultipleWriteLocations and database_account . _EnableMultipleWritableLocations return database_account
Gets database account info .
489
6
223,539
def Create ( self , body , path , type , id , initial_headers , options = None ) : if options is None : options = { } initial_headers = initial_headers or self . default_headers headers = base . GetHeaders ( self , initial_headers , 'post' , path , id , type , options ) # Create will use WriteEndpoint since it uses POST operation request = request_object . _RequestObject ( type , documents . _OperationType . Create ) result , self . last_response_headers = self . __Post ( path , request , body , headers ) # update session for write request self . _UpdateSessionIfRequired ( headers , result , self . last_response_headers ) return result
Creates a Azure Cosmos resource and returns it .
153
10
223,540
def Replace ( self , resource , path , type , id , initial_headers , options = None ) : if options is None : options = { } initial_headers = initial_headers or self . default_headers headers = base . GetHeaders ( self , initial_headers , 'put' , path , id , type , options ) # Replace will use WriteEndpoint since it uses PUT operation request = request_object . _RequestObject ( type , documents . _OperationType . Replace ) result , self . last_response_headers = self . __Put ( path , request , resource , headers ) # update session for request mutates data on server side self . _UpdateSessionIfRequired ( headers , result , self . last_response_headers ) return result
Replaces a Azure Cosmos resource and returns it .
159
10
223,541
def Read ( self , path , type , id , initial_headers , options = None ) : if options is None : options = { } initial_headers = initial_headers or self . default_headers headers = base . GetHeaders ( self , initial_headers , 'get' , path , id , type , options ) # Read will use ReadEndpoint since it uses GET operation request = request_object . _RequestObject ( type , documents . _OperationType . Read ) result , self . last_response_headers = self . __Get ( path , request , headers ) return result
Reads a Azure Cosmos resource and returns it .
123
10
223,542
def DeleteResource ( self , path , type , id , initial_headers , options = None ) : if options is None : options = { } initial_headers = initial_headers or self . default_headers headers = base . GetHeaders ( self , initial_headers , 'delete' , path , id , type , options ) # Delete will use WriteEndpoint since it uses DELETE operation request = request_object . _RequestObject ( type , documents . _OperationType . Delete ) result , self . last_response_headers = self . __Delete ( path , request , headers ) # update session for request mutates data on server side self . _UpdateSessionIfRequired ( headers , result , self . last_response_headers ) return result
Deletes a Azure Cosmos resource and returns it .
157
10
223,543
def __Get ( self , path , request , headers ) : return synchronized_request . SynchronizedRequest ( self , request , self . _global_endpoint_manager , self . connection_policy , self . _requests_session , 'GET' , path , None , None , headers )
Azure Cosmos GET http request .
63
7
223,544
def __Post ( self , path , request , body , headers ) : return synchronized_request . SynchronizedRequest ( self , request , self . _global_endpoint_manager , self . connection_policy , self . _requests_session , 'POST' , path , body , query_params = None , headers = headers )
Azure Cosmos POST http request .
71
7
223,545
def __Delete ( self , path , request , headers ) : return synchronized_request . SynchronizedRequest ( self , request , self . _global_endpoint_manager , self . connection_policy , self . _requests_session , 'DELETE' , path , request_data = None , query_params = None , headers = headers )
Azure Cosmos DELETE http request .
75
9
223,546
def QueryFeed ( self , path , collection_id , query , options , partition_key_range_id = None ) : return self . __QueryFeed ( path , 'docs' , collection_id , lambda r : r [ 'Documents' ] , lambda _ , b : b , query , options , partition_key_range_id ) , self . last_response_headers
Query Feed for Document Collection resource .
81
7
223,547
def __QueryFeed ( self , path , type , id , result_fn , create_fn , query , options = None , partition_key_range_id = None ) : if options is None : options = { } if query : __GetBodiesFromQueryResult = result_fn else : def __GetBodiesFromQueryResult ( result ) : if result is not None : return [ create_fn ( self , body ) for body in result_fn ( result ) ] else : # If there is no change feed, the result data is empty and result is None. # This case should be interpreted as an empty array. return [ ] initial_headers = self . default_headers . copy ( ) # Copy to make sure that default_headers won't be changed. if query is None : # Query operations will use ReadEndpoint even though it uses GET(for feed requests) request = request_object . _RequestObject ( type , documents . _OperationType . ReadFeed ) headers = base . GetHeaders ( self , initial_headers , 'get' , path , id , type , options , partition_key_range_id ) result , self . last_response_headers = self . __Get ( path , request , headers ) return __GetBodiesFromQueryResult ( result ) else : query = self . __CheckAndUnifyQueryFormat ( query ) initial_headers [ http_constants . HttpHeaders . IsQuery ] = 'true' if ( self . _query_compatibility_mode == CosmosClient . _QueryCompatibilityMode . Default or self . _query_compatibility_mode == CosmosClient . _QueryCompatibilityMode . Query ) : initial_headers [ http_constants . HttpHeaders . ContentType ] = runtime_constants . MediaTypes . QueryJson elif self . _query_compatibility_mode == CosmosClient . _QueryCompatibilityMode . SqlQuery : initial_headers [ http_constants . HttpHeaders . ContentType ] = runtime_constants . MediaTypes . SQL else : raise SystemError ( 'Unexpected query compatibility mode.' ) # Query operations will use ReadEndpoint even though it uses POST(for regular query operations) request = request_object . _RequestObject ( type , documents . _OperationType . SqlQuery ) headers = base . 
GetHeaders ( self , initial_headers , 'post' , path , id , type , options , partition_key_range_id ) result , self . last_response_headers = self . __Post ( path , request , query , headers ) return __GetBodiesFromQueryResult ( result )
Query for more than one Azure Cosmos resources .
558
9
223,548
def __CheckAndUnifyQueryFormat ( self , query_body ) : if ( self . _query_compatibility_mode == CosmosClient . _QueryCompatibilityMode . Default or self . _query_compatibility_mode == CosmosClient . _QueryCompatibilityMode . Query ) : if not isinstance ( query_body , dict ) and not isinstance ( query_body , six . string_types ) : raise TypeError ( 'query body must be a dict or string.' ) if isinstance ( query_body , dict ) and not query_body . get ( 'query' ) : raise ValueError ( 'query body must have valid query text with key "query".' ) if isinstance ( query_body , six . string_types ) : return { 'query' : query_body } elif ( self . _query_compatibility_mode == CosmosClient . _QueryCompatibilityMode . SqlQuery and not isinstance ( query_body , six . string_types ) ) : raise TypeError ( 'query body must be a string.' ) else : raise SystemError ( 'Unexpected query compatibility mode.' ) return query_body
Checks and unifies the format of the query body .
243
12
223,549
def _UpdateSessionIfRequired ( self , request_headers , response_result , response_headers ) : '''if this request was made with consistency level as session, then update the session''' if response_result is None or response_headers is None : return is_session_consistency = False if http_constants . HttpHeaders . ConsistencyLevel in request_headers : if documents . ConsistencyLevel . Session == request_headers [ http_constants . HttpHeaders . ConsistencyLevel ] : is_session_consistency = True if is_session_consistency : # update session self . session . update_session ( response_result , response_headers )
Updates session if necessary .
151
6
223,550
def GetResourceIdOrFullNameFromLink ( resource_link ) : # For named based, the resource link is the full name if IsNameBased ( resource_link ) : return TrimBeginningAndEndingSlashes ( resource_link ) # Padding the resource link with leading and trailing slashes if not already if resource_link [ - 1 ] != '/' : resource_link = resource_link + '/' if resource_link [ 0 ] != '/' : resource_link = '/' + resource_link # The path will be in the form of # /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/ or # /[resourceType]/[resourceId]/ .... /[resourceType]/ # The result of split will be in the form of # ["", [resourceType], [resourceId] ... ,[resourceType], [resourceId], ""] # In the first case, to extract the resourceId it will the element # before last ( at length -2 ) and the the type will before it # ( at length -3 ) # In the second case, to extract the resource type it will the element # before last ( at length -2 ) path_parts = resource_link . split ( "/" ) if len ( path_parts ) % 2 == 0 : # request in form # /[resourceType]/[resourceId]/ .... /[resourceType]/[resourceId]/. return str ( path_parts [ - 2 ] ) return None
Gets resource id or full name from resource link .
324
11
223,551
def GetAttachmentIdFromMediaId ( media_id ) : altchars = '+-' if not six . PY2 : altchars = altchars . encode ( 'utf-8' ) # altchars for '+' and '/'. We keep '+' but replace '/' with '-' buffer = base64 . b64decode ( str ( media_id ) , altchars ) resoure_id_length = 20 attachment_id = '' if len ( buffer ) > resoure_id_length : # We are cutting off the storage index. attachment_id = base64 . b64encode ( buffer [ 0 : resoure_id_length ] , altchars ) if not six . PY2 : attachment_id = attachment_id . decode ( 'utf-8' ) else : attachment_id = media_id return attachment_id
Gets attachment id from media id .
192
8
223,552
def GetPathFromLink ( resource_link , resource_type = '' ) : resource_link = TrimBeginningAndEndingSlashes ( resource_link ) if IsNameBased ( resource_link ) : # Replace special characters in string using the %xx escape. For example, space(' ') would be replaced by %20 # This function is intended for quoting the path section of the URL and excludes '/' to be quoted as that's the default safe char resource_link = urllib_quote ( resource_link ) # Padding leading and trailing slashes to the path returned both for name based and resource id based links if resource_type : return '/' + resource_link + '/' + resource_type + '/' else : return '/' + resource_link + '/'
Gets path from resource link with optional resource type
165
10
223,553
def IsNameBased ( link ) : if not link : return False # trimming the leading "/" if link . startswith ( '/' ) and len ( link ) > 1 : link = link [ 1 : ] # Splitting the link(separated by "/") into parts parts = link . split ( '/' ) # First part should be "dbs" if len ( parts ) == 0 or not parts [ 0 ] or not parts [ 0 ] . lower ( ) == 'dbs' : return False # The second part is the database id(ResourceID or Name) and cannot be empty if len ( parts ) < 2 or not parts [ 1 ] : return False # Either ResourceID or database name databaseID = parts [ 1 ] # Length of databaseID(in case of ResourceID) is always 8 if len ( databaseID ) != 8 : return True return not IsValidBase64String ( str ( databaseID ) )
Finds whether the link is name based or not
196
10
223,554
def IsDatabaseLink ( link ) : if not link : return False # trimming the leading and trailing "/" from the input string link = TrimBeginningAndEndingSlashes ( link ) # Splitting the link(separated by "/") into parts parts = link . split ( '/' ) if len ( parts ) != 2 : return False # First part should be "dbs" if not parts [ 0 ] or not parts [ 0 ] . lower ( ) == 'dbs' : return False # The second part is the database id(ResourceID or Name) and cannot be empty if not parts [ 1 ] : return False return True
Finds whether the link is a database Self Link or a database ID based link
134
16
223,555
def GetItemContainerInfo ( self_link , alt_content_path , id_from_response ) : self_link = TrimBeginningAndEndingSlashes ( self_link ) + '/' index = IndexOfNth ( self_link , '/' , 4 ) if index != - 1 : collection_id = self_link [ 0 : index ] if 'colls' in self_link : # this is a collection request index_second_slash = IndexOfNth ( alt_content_path , '/' , 2 ) if index_second_slash == - 1 : collection_name = alt_content_path + '/colls/' + urllib_quote ( id_from_response ) return collection_id , collection_name else : collection_name = alt_content_path return collection_id , collection_name else : raise ValueError ( 'Response Not from Server Partition, self_link: {0}, alt_content_path: {1}, id: {2}' . format ( self_link , alt_content_path , id_from_response ) ) else : raise ValueError ( 'Unable to parse document collection link from ' + self_link )
Given the self link and alt_content_path from the reponse header and result extract the collection name and collection id
259
24
223,556
def GetItemContainerLink ( link ) : link = TrimBeginningAndEndingSlashes ( link ) + '/' index = IndexOfNth ( link , '/' , 4 ) if index != - 1 : return link [ 0 : index ] else : raise ValueError ( 'Unable to parse document collection link from ' + link )
Gets the document collection link
71
6
223,557
def IndexOfNth ( s , value , n ) : remaining = n for i in xrange ( 0 , len ( s ) ) : if s [ i ] == value : remaining -= 1 if remaining == 0 : return i return - 1
Gets the index of Nth occurance of a given character in a string
51
16
223,558
def TrimBeginningAndEndingSlashes(path):
    """Remove at most one leading and one trailing '/' from *path*.

    Deliberately strips a single slash per side (unlike str.strip('/'),
    which would remove whole runs of slashes).
    """
    trimmed = path[1:] if path.startswith('/') else path
    return trimmed[:-1] if trimmed.endswith('/') else trimmed
Trims beginning and ending slashes
80
7
223,559
def _RequestBodyFromData(data):
    """Serialize *data* into a request body.

    Strings and readable streams pass through untouched; dict/list/tuple
    values are JSON-encoded; anything else yields None.
    """
    if isinstance(data, six.string_types) or _IsReadableStream(data):
        return data
    if isinstance(data, (dict, list, tuple)):
        serialized = json.dumps(data, separators=(',', ':'))
        # on Python 2 json.dumps returns a byte str; normalize to unicode
        return serialized.decode('utf-8') if six.PY2 else serialized
    return None
Gets request body from data .
102
7
223,560
def _Request(global_endpoint_manager, request, connection_policy, requests_session, path, request_options, request_body):
    """Makes one http request using the requests module.

    :param global_endpoint_manager: used to refresh and resolve the service endpoint.
    :param request: request descriptor (may carry an endpoint_override).
    :param documents.ConnectionPolicy connection_policy: timeouts and SSL settings.
    :param requests_session: requests.Session used to issue the call.
    :param str path: resource path appended to the base url (may be empty).
    :param dict request_options: method, path and headers for the request.
    :param request_body: serialized body or stream, or None.
    :return: tuple of (result, headers); for media streams, (raw response, headers).
    :raises errors.HTTPFailure: on status codes >= 400.
    :raises errors.JSONParseFailure: when a non-media body is not valid JSON.
    """
    is_media = request_options['path'].find('media') > -1
    is_media_stream = is_media and connection_policy.MediaReadMode == documents.MediaReadMode.Streamed
    # media requests get their own (typically longer) timeout
    connection_timeout = (connection_policy.MediaRequestTimeout
                          if is_media
                          else connection_policy.RequestTimeout)
    # Every request tries to perform a refresh
    global_endpoint_manager.refresh_endpoint_list(None)
    if (request.endpoint_override):
        base_url = request.endpoint_override
    else:
        base_url = global_endpoint_manager.resolve_service_endpoint(request)
    if path:
        resource_url = base_url + path
    else:
        resource_url = base_url
    parse_result = urlparse(resource_url)
    # The requests library now expects header values to be strings only starting 2.11,
    # and will raise an error on validation if they are not, so casting all header values to strings.
    request_options['headers'] = {header: str(value) for header, value in request_options['headers'].items()}
    # We are disabling the SSL verification for local emulator(localhost/127.0.0.1) or if the user
    # has explicitly specified to disable SSL verification.
    is_ssl_enabled = (parse_result.hostname != 'localhost'
                      and parse_result.hostname != '127.0.0.1'
                      and not connection_policy.DisableSSLVerification)
    if connection_policy.SSLConfiguration:
        # explicit SSL configuration: verify against the given CA bundle and
        # present the client certificate/key pair
        ca_certs = connection_policy.SSLConfiguration.SSLCaCerts
        cert_files = (connection_policy.SSLConfiguration.SSLCertFile,
                      connection_policy.SSLConfiguration.SSLKeyFile)
        response = requests_session.request(request_options['method'],
                                            resource_url,
                                            data=request_body,
                                            headers=request_options['headers'],
                                            # timeout is configured in ms; requests wants seconds
                                            timeout=connection_timeout / 1000.0,
                                            stream=is_media_stream,
                                            verify=ca_certs,
                                            cert=cert_files)
    else:
        response = requests_session.request(request_options['method'],
                                            resource_url,
                                            data=request_body,
                                            headers=request_options['headers'],
                                            timeout=connection_timeout / 1000.0,
                                            stream=is_media_stream,
                                            # If SSL is disabled, verify = false
                                            verify=is_ssl_enabled)
    headers = dict(response.headers)
    # In case of media stream response, return the response to the user and the user
    # will need to handle reading the response.
    if is_media_stream:
        return (response.raw, headers)
    data = response.content
    if not six.PY2:
        # python 3 compatible: convert data from byte to unicode string
        data = data.decode('utf-8')
    if response.status_code >= 400:
        raise errors.HTTPFailure(response.status_code, data, headers)
    result = None
    if is_media:
        # media payloads are returned verbatim, not JSON-decoded
        result = data
    else:
        if len(data) > 0:
            try:
                result = json.loads(data)
            except:
                raise errors.JSONParseFailure(data)
    return (result, headers)
Makes one http request using the requests module .
711
10
223,561
def SynchronizedRequest(client, request, global_endpoint_manager, connection_policy, requests_session, method, path, request_data, query_params, headers):
    """Performs one synchronized http request according to the parameters.

    :param client: document client instance (passed through to the retry logic).
    :param request: request descriptor forwarded to _Request.
    :param global_endpoint_manager: endpoint resolver/refresher.
    :param connection_policy: connection policy (timeouts, SSL).
    :param requests_session: requests.Session to issue the call with.
    :param str method: HTTP verb.
    :param str path: resource path.
    :param request_data: body payload; JSON-serializable object, string or stream.
    :param dict query_params: optional query string parameters.
    :param dict headers: request headers (mutated to set Content-Length).
    :return: tuple of (result, headers) from _Request.
    :raises errors.UnexpectedDataType: when request_data cannot be serialized.
    """
    request_body = None
    if request_data:
        request_body = _RequestBodyFromData(request_data)
        if not request_body:
            raise errors.UnexpectedDataType('parameter data must be a JSON object, string or' +
                                            ' readable stream.')
    request_options = {}
    request_options['path'] = path
    request_options['method'] = method
    if query_params:
        request_options['path'] += '?' + urlencode(query_params)
    request_options['headers'] = headers
    # set Content-Length explicitly for string bodies; streams carry their own
    if request_body and (type(request_body) is str or type(request_body) is six.text_type):
        request_options['headers'][http_constants.HttpHeaders.ContentLength] = (len(request_body))
    elif request_body is None:
        request_options['headers'][http_constants.HttpHeaders.ContentLength] = 0
    # Pass _Request function with it's parameters to retry_utility's Execute method that wraps the call with retries
    return retry_utility._Execute(client,
                                  global_endpoint_manager,
                                  _Request,
                                  request,
                                  connection_policy,
                                  requests_session,
                                  path,
                                  request_options,
                                  request_body)
Performs one synchronized http request according to the parameters .
319
11
223,562
def get_range_by_effective_partition_key(self, effective_partition_key_value):
    """Gets the partition key range containing the given effective partition key.

    :param str effective_partition_key_value: effective partition key (EPK) value.
    :return: the containing partition key range, or None for the exclusive
        maximum EPK (which no range contains).
    """
    # the minimum inclusive EPK always belongs to the first range
    if _CollectionRoutingMap.MinimumInclusiveEffectivePartitionKey == effective_partition_key_value:
        return self._orderedPartitionKeyRanges[0]
    # the maximum EPK is exclusive, so it is contained by no range
    if _CollectionRoutingMap.MaximumExclusiveEffectivePartitionKey == effective_partition_key_value:
        return None
    # (min-bound, min-exclusive-flag) tuples so bisect respects inclusivity
    sortedLow = [(r.min, not r.isMinInclusive) for r in self._orderedRanges]
    index = bisect.bisect_right(sortedLow, (effective_partition_key_value, True))
    # bisect_right lands one past the candidate range; step back to it
    if (index > 0):
        index = index - 1
    return self._orderedPartitionKeyRanges[index]
Gets the range containing the given partition key
167
9
223,563
def get_range_by_partition_key_range_id(self, partition_key_range_id):
    """Gets the partition key range for the given partition key range id.

    Returns None when the id is unknown.
    """
    entry = self._rangeById.get(partition_key_range_id)
    # each entry is a (range, info) pair; callers only need the range
    return entry[0] if entry is not None else None
Gets the partition key range given the partition key range id
57
12
223,564
def get_overlapping_ranges(self, provided_partition_key_ranges):
    """Gets the partition key ranges overlapping the provided range(s).

    :param provided_partition_key_ranges: a single routing_range._Range or a
        list of them.
    :return: list of overlapping partition key ranges, deduplicated and
        sorted by their MinInclusive bound.
    """
    # normalize a single range into a one-element list
    if isinstance(provided_partition_key_ranges, routing_range._Range):
        return self.get_overlapping_ranges([provided_partition_key_ranges])
    minToPartitionRange = {}
    # parallel (bound, inclusivity-flag) arrays for binary search over ranges
    sortedLow = [(r.min, not r.isMinInclusive) for r in self._orderedRanges]
    sortedHigh = [(r.max, r.isMaxInclusive) for r in self._orderedRanges]
    for providedRange in provided_partition_key_ranges:
        # find the candidate window [minIndex, maxIndex] of possibly-overlapping ranges
        minIndex = bisect.bisect_right(sortedLow, (providedRange.min, not providedRange.isMinInclusive))
        if minIndex > 0:
            minIndex = minIndex - 1
        maxIndex = bisect.bisect_left(sortedHigh, (providedRange.max, providedRange.isMaxInclusive))
        if maxIndex >= len(sortedHigh):
            maxIndex = maxIndex - 1
        # confirm actual overlap for each candidate; the MinInclusive key
        # deduplicates ranges matched by several provided ranges
        for i in xrange(minIndex, maxIndex + 1):
            if routing_range._Range.overlaps(self._orderedRanges[i], providedRange):
                minToPartitionRange[self._orderedPartitionKeyRanges[i][_PartitionKeyRange.MinInclusive]] = self._orderedPartitionKeyRanges[i]
    overlapping_partition_key_ranges = list(minToPartitionRange.values())

    def getKey(r):
        return r[_PartitionKeyRange.MinInclusive]
    overlapping_partition_key_ranges.sort(key=getKey)
    return overlapping_partition_key_ranges
Gets the partition key ranges overlapping the provided ranges
385
10
223,565
def GetAuthorizationHeader(cosmos_client, verb, path, resource_id_or_fullname, is_name_based, resource_type, headers):
    """Gets the authorization header.

    Dispatches to master-key HMAC signing when the client has a master key,
    otherwise falls back to resource-token lookup.
    """
    # The token generation logic lower-cases the ResourceID (the other signed
    # fields are lower cased too), but a name-based "ID" must be used verbatim.
    if resource_id_or_fullname is not None and not is_name_based:
        resource_id_or_fullname = resource_id_or_fullname.lower()
    if cosmos_client.master_key:
        return __GetAuthorizationTokenUsingMasterKey(verb,
                                                     resource_id_or_fullname,
                                                     resource_type,
                                                     headers,
                                                     cosmos_client.master_key)
    if cosmos_client.resource_tokens:
        return __GetAuthorizationTokenUsingResourceTokens(cosmos_client.resource_tokens,
                                                          path,
                                                          resource_id_or_fullname)
Gets the authorization header .
207
6
223,566
def __GetAuthorizationTokenUsingMasterKey(verb, resource_id_or_fullname, resource_type, headers, master_key):
    """Gets the authorization token by HMAC-SHA256 signing with the master key.

    :param str verb: HTTP verb.
    :param str resource_id_or_fullname: resource id (lower-cased by caller) or name.
    :param str resource_type: resource type segment ('docs', 'colls', ...).
    :param dict headers: request headers; XDate/HttpDate are part of the signature.
    :param str master_key: base64-encoded account master key.
    :return: token string of the form 'type=master&ver=1.0&sig=...'.
    """
    # decodes the master key which is encoded in base64
    key = base64.b64decode(master_key)
    # Skipping lower casing of resource_id_or_fullname since it may now contain "ID" of the resource as part of the fullname
    text = '{verb}\n{resource_type}\n{resource_id_or_fullname}\n{x_date}\n{http_date}\n'.format(
        verb=(verb.lower() or ''),
        resource_type=(resource_type.lower() or ''),
        resource_id_or_fullname=(resource_id_or_fullname or ''),
        x_date=headers.get(http_constants.HttpHeaders.XDate, '').lower(),
        http_date=headers.get(http_constants.HttpHeaders.HttpDate, '').lower())
    if six.PY2:
        body = text.decode('utf-8')
        digest = hmac.new(key, body, sha256).digest()
        signature = digest.encode('base64')
    else:
        # python 3 support
        body = text.encode('utf-8')
        digest = hmac.new(key, body, sha256).digest()
        signature = base64.encodebytes(digest).decode('utf-8')
    master_token = 'master'
    token_version = '1.0'
    # signature[:-1] drops the trailing newline added by the base64 encoders
    return 'type={type}&ver={ver}&sig={sig}'.format(type=master_token,
                                                    ver=token_version,
                                                    sig=signature[:-1])
Gets the authorization token using master_key .
400
10
223,567
def __GetAuthorizationTokenUsingResourceTokens(resource_tokens, path, resource_id_or_fullname):
    """Get the authorization token using resource_tokens.

    Looks up a token by exact resource id/name first, then walks the request
    path from the end looking for any segment we hold a token for.
    """
    if not (resource_tokens and len(resource_tokens) > 0):
        return None
    # For database account access(through GetDatabaseAccount API), path and
    # resource_id_or_fullname are '', so any token is accepted by the service
    if not path and not resource_id_or_fullname:
        return next(six.itervalues(resource_tokens))
    if resource_tokens.get(resource_id_or_fullname):
        return resource_tokens[resource_id_or_fullname]
    # walk the path segments from the end, skipping resource-type keywords,
    # and return the token of the deepest id/name we hold a token for
    path_parts = path.split('/') if path else []
    resource_types = ['dbs', 'colls', 'docs', 'sprocs', 'udfs', 'triggers',
                      'users', 'permissions', 'attachments', 'media',
                      'conflicts', 'offers']
    for one_part in reversed(path_parts):
        if one_part not in resource_types and one_part in resource_tokens:
            return resource_tokens[one_part]
    return None
Get the authorization token using resource_tokens .
319
11
223,568
def parse_session_token(response_headers):
    """Extracts the session token from response headers and parses it.

    :param dict response_headers: response headers.
    :return: dict mapping partition/collection id to its VectorSessionToken.
    :raises HTTPFailure: when a token fragment cannot be parsed.
    """
    # extract session token from response header
    session_token = ''
    if http_constants.HttpHeaders.SessionToken in response_headers:
        session_token = response_headers[http_constants.HttpHeaders.SessionToken]
    id_to_sessionlsn = {}
    # BUGFIX: the original used `is not ''` — identity comparison against a
    # string literal is implementation-dependent and a SyntaxWarning on
    # Python 3.8+; use value equality instead.
    if session_token != '':
        # extract id, lsn from the token. For p-collection, the token
        # will be a concatenation of pairs for each collection
        token_pairs = session_token.split(',')
        for token_pair in token_pairs:
            tokens = token_pair.split(':')
            if (len(tokens) == 2):
                id = tokens[0]
                sessionToken = VectorSessionToken.create(tokens[1])
                if sessionToken is None:
                    raise HTTPFailure(http_constants.StatusCodes.INTERNAL_SERVER_ERROR,
                                      "Could not parse the received session token: %s" % tokens[1])
                id_to_sessionlsn[id] = sessionToken
    return id_to_sessionlsn
Extracts the session token from response headers and parses it
244
11
223,569
def generate_vector_color_map(self):
    """Generate color stops array for use with match expression in mapbox template"""
    # if join data specified as filename or URL, parse JSON to list of Python dicts
    if type(self.data) == str:
        self.data = geojson_to_dict_list(self.data)
    # build [join-key, color] pairs linking each data row to a vector feature
    return [
        [row[self.data_join_property],
         color_map(row[self.color_property], self.color_stops, self.color_default)]
        for row in self.data
    ]
Generate color stops array for use with match expression in mapbox template
167
14
223,570
def generate_vector_numeric_map(self, numeric_property):
    """Generate stops array for use with match expression in mapbox template.

    :param str numeric_property: attribute prefix ('radius', 'line_width', ...)
        used to look up the *_property, *_stops and *_default attributes.
    :return: list of [join-key, mapped-value] pairs.
    """
    # CLEANUP: removed the dead `match_width = numeric_stops` assignment
    # (guarded by `if function_type == 'match'`) — the value was never read.
    vector_stops = []
    lookup_property = getattr(self, '{}_property'.format(numeric_property))
    numeric_stops = getattr(self, '{}_stops'.format(numeric_property))
    default = getattr(self, '{}_default'.format(numeric_property))
    # if join data specified as filename or URL, parse JSON to list of Python dicts
    if type(self.data) == str:
        self.data = geojson_to_dict_list(self.data)
    for row in self.data:
        # map value to JSON feature using the numeric property
        value = numeric_map(row[lookup_property], numeric_stops, default)
        # link to vector feature using data_join_property (from JSON object)
        vector_stops.append([row[self.data_join_property], value])
    return vector_stops
Generate stops array for use with match expression in mapbox template
261
13
223,571
def check_vector_template(self):
    """Determines if features are defined as vector source based on MapViz arguments."""
    use_vector = self.vector_url is not None and self.vector_layer_name is not None
    self.vector_source = use_vector
    if use_vector:
        # switch to the vector flavor of the current template
        self.template = 'vector_' + self.template
Determines if features are defined as vector source based on MapViz arguments .
59
17
223,572
def as_iframe(self, html_data):
    """Build the HTML iframe representation for the mapviz."""
    # srcdoc is wrapped in double quotes, so swap inner double quotes for singles
    escaped = html_data.replace('"', "'")
    template = ('<iframe id="{div_id}", srcdoc="{srcdoc}" style="width: {width}; '
                'height: {height};"></iframe>')
    return template.format(div_id=self.div_id,
                           srcdoc=escaped,
                           width=self.width,
                           height=self.height)
Build the HTML representation for the mapviz .
97
10
223,573
def add_unique_template_variables(self, options):
    """Update map template variables specific to circle visual"""
    options.update({
        'geojson_data': json.dumps(self.data, ensure_ascii=False),
        'colorProperty': self.color_property,
        'colorType': self.color_function_type,
        'colorStops': self.color_stops,
        'strokeWidth': self.stroke_width,
        'strokeColor': self.stroke_color,
        'radius': self.radius,
        'defaultColor': self.color_default,
        'highlightColor': self.highlight_color,
    })
    if self.vector_source:
        # vector tiles need an explicit match-expression color map
        options.update(vectorColorStops=self.generate_vector_color_map())
Update map template variables specific to circle visual
146
8
223,574
def add_unique_template_variables(self, options):
    """Update map template variables specific to graduated circle visual"""
    options.update({
        'colorProperty': self.color_property,
        'colorStops': self.color_stops,
        'colorType': self.color_function_type,
        'radiusType': self.radius_function_type,
        'defaultColor': self.color_default,
        'defaultRadius': self.radius_default,
        'radiusProperty': self.radius_property,
        'radiusStops': self.radius_stops,
        'strokeWidth': self.stroke_width,
        'strokeColor': self.stroke_color,
        'highlightColor': self.highlight_color,
    })
    if self.vector_source:
        # vector tiles need explicit match-expression stop arrays
        options.update({
            'vectorColorStops': self.generate_vector_color_map(),
            'vectorRadiusStops': self.generate_vector_numeric_map('radius'),
        })
Update map template variables specific to graduated circle visual
183
9
223,575
def add_unique_template_variables(self, options):
    """Update map template variables specific to a clustered circle visual"""
    options.update({
        'colorStops': self.color_stops,
        'colorDefault': self.color_default,
        'radiusStops': self.radius_stops,
        'clusterRadius': self.clusterRadius,
        'clusterMaxZoom': self.clusterMaxZoom,
        'strokeWidth': self.stroke_width,
        'strokeColor': self.stroke_color,
        'radiusDefault': self.radius_default,
        'highlightColor': self.highlight_color,
    })
Update map template variables specific to a clustered circle visual
111
10
223,576
def add_unique_template_variables(self, options):
    """Update map template variables specific to image visual"""
    options.update({'image': self.image, 'coordinates': self.coordinates})
Update map template variables specific to image visual
34
8
223,577
def add_unique_template_variables(self, options):
    """Update map template variables specific to a raster visual"""
    # the JS template expects the literal string 'undefined' when bounds are unset
    bounds = self.tiles_bounds if self.tiles_bounds else 'undefined'
    options.update({
        'tiles_url': self.tiles_url,
        'tiles_size': self.tiles_size,
        'tiles_minzoom': self.tiles_minzoom,
        'tiles_maxzoom': self.tiles_maxzoom,
        'tiles_bounds': bounds,
    })
Update map template variables specific to a raster visual
94
10
223,578
def add_unique_template_variables(self, options):
    """Update map template variables specific to linestring visual"""
    # translate the line_stroke style name/shorthand into a dash-array;
    # unknown values fall back to a solid line
    dash_arrays = {
        'dashed': [6, 4], '--': [6, 4],
        'dotted': [0.5, 4], ':': [0.5, 4],
        'dash dot': [6, 4, 0.5, 4], '-.': [6, 4, 0.5, 4],
        'solid': [1, 0], '-': [1, 0],
    }
    self.line_dash_array = dash_arrays.get(self.line_stroke, [1, 0])
    # common variables for vector and geojson-based linestring maps
    options.update({
        'colorStops': self.color_stops,
        'colorProperty': self.color_property,
        'colorType': self.color_function_type,
        'defaultColor': self.color_default,
        'lineColor': self.color_default,
        'lineDashArray': self.line_dash_array,
        'lineStroke': self.line_stroke,
        'widthStops': self.line_width_stops,
        'widthProperty': self.line_width_property,
        'widthType': self.line_width_function_type,
        'defaultWidth': self.line_width_default,
        'highlightColor': self.highlight_color,
    })
    if self.vector_source:
        # vector-based linestring map variables (defaults, then data-driven maps)
        options.update({
            'vectorColorStops': [[0, self.color_default]],
            'vectorWidthStops': [[0, self.line_width_default]],
        })
        if self.color_property:
            options.update(vectorColorStops=self.generate_vector_color_map())
        if self.line_width_property:
            options.update(vectorWidthStops=self.generate_vector_numeric_map('line_width'))
    else:
        # geojson-based linestring map variables
        options.update(geojson_data=json.dumps(self.data, ensure_ascii=False))
Update map template variables specific to linestring visual
493
10
223,579
def row_to_geojson(row, lon, lat, precision, date_format='epoch'):
    """Convert a pandas dataframe row to a geojson format object.

    Converts all datetimes to epoch seconds (via pandas' json serialization).
    """
    # Let pandas handle json serialization
    row_json = json.loads(row.to_json(date_format=date_format, date_unit='s'))
    coords = (round(row_json[lon], precision), round(row_json[lat], precision))
    # all non-coordinate columns become feature properties
    properties = {key: value for key, value in row_json.items() if key not in (lon, lat)}
    return geojson.Feature(geometry=geojson.Point(coords), properties=properties)
Convert a pandas dataframe row to a geojson format object . Converts all datetimes to epoch seconds .
143
25
223,580
def scale_between(minval, maxval, numStops):
    """Scale a min and max value to equal interval domain with numStops discrete values"""
    # degenerate request: just return the two endpoints
    if numStops < 2:
        return [minval, maxval]
    if maxval < minval:
        raise ValueError()
    step = float(maxval - minval) / float(numStops)
    # note: numStops values starting at minval; maxval itself is not included
    return [round(minval + step * i, 2) for i in range(numStops)]
Scale a min and max value to equal interval domain with numStops discrete values
98
16
223,581
def create_radius_stops(breaks, min_radius, max_radius):
    """Convert data breaks into a [break, radius] stops ramp"""
    # spread the radii evenly between min_radius and max_radius
    radii = scale_between(min_radius, max_radius, len(breaks))
    return [[b, radii[i]] for i, b in enumerate(breaks)]
Convert a data breaks into a radius ramp
79
9
223,582
def create_weight_stops(breaks):
    """Convert data breaks into a heatmap-weight ramp"""
    # weights are spread evenly between 0 and 1
    weights = scale_between(0, 1, len(breaks))
    return [[b, weights[i]] for i, b in enumerate(breaks)]
Convert data breaks into a heatmap - weight ramp
67
11
223,583
def create_color_stops(breaks, colors='RdYlGn', color_ramps=color_ramps):
    """Convert a list of breaks into color stops.

    *colors* is either a colorBrewer ramp name or a custom list of color
    values (RGB, RGBA, HSL, CSS text or HEX); see www.colorbrewer2.org.
    """
    num_breaks = len(breaks)
    if isinstance(colors, list):
        # custom color list: must line up 1:1 with breaks and contain valid colors
        if len(colors) == 0 or len(colors) != num_breaks:
            raise ValueError('custom color list must be of same length as breaks list')
        for color in colors:
            # Check if color is valid string
            try:
                Colour(color)
            except:
                raise ValueError('The color code {color} is in the wrong format'.format(color=color))
        return [[b, colors[i]] for i, b in enumerate(breaks)]
    # named colorBrewer ramp
    if colors not in color_ramps.keys():
        raise ValueError('color does not exist in colorBrewer!')
    try:
        ramp = color_ramps[colors][num_breaks]
    except KeyError:
        raise ValueError("Color ramp {} does not have a {} breaks".format(colors, num_breaks))
    return [[b, ramp[i]] for i, b in enumerate(breaks)]
Convert a list of breaks into color stops using colors from colorBrewer or a custom list of color values in RGB RGBA HSL CSS text or HEX format . See www . colorbrewer2 . org for a list of color options to pass
270
52
223,584
def numeric_map(lookup, numeric_stops, default=0.0):
    """Return a value for *lookup*, matched exactly or interpolated from stops.

    :param lookup: key or numeric value to map; non-numeric lookups can only
        match a stop exactly.
    :param numeric_stops: list of [stop, value] pairs.
    :param default: returned when no match or interpolation is possible.
    """
    # if no numeric_stops, use default
    if len(numeric_stops) == 0:
        return default
    # dictionary to lookup value from match-type numeric_stops
    match_map = dict((x, y) for (x, y) in numeric_stops)
    # if lookup matches stop exactly, return corresponding stop (first priority)
    # (includes non-numeric numeric_stop "keys" for finding value by match)
    if lookup in match_map.keys():
        return match_map.get(lookup)
    # if lookup value numeric, map value by interpolating from scale
    if isinstance(lookup, (int, float, complex)):
        # try ordering stops
        try:
            stops, values = zip(*sorted(numeric_stops))
        # if not all stops are numeric, attempt looking up as if categorical stops
        except TypeError:
            return match_map.get(lookup, default)
        # for interpolation, all stops must be numeric
        if not all(isinstance(x, (int, float, complex)) for x in stops):
            return default
        # check if lookup value in stops bounds
        if float(lookup) <= stops[0]:
            return values[0]
        elif float(lookup) >= stops[-1]:
            return values[-1]
        # check if lookup value matches any stop value
        elif float(lookup) in stops:
            return values[stops.index(lookup)]
        # interpolation required
        else:
            # identify bounding stop values
            lower = max([stops[0]] + [x for x in stops if x < lookup])
            upper = min([stops[-1]] + [x for x in stops if x > lookup])
            # values from bounding stops
            lower_value = values[stops.index(lower)]
            upper_value = values[stops.index(upper)]
            # compute linear "relative distance" from lower bound to upper bound
            distance = (lookup - lower) / (upper - lower)
            # return interpolated value
            return lower_value + distance * (upper_value - lower_value)
    # default value catch-all
    return default
Return a number value interpolated from given numeric_stops
463
12
223,585
def load_yaml_from_docstring(docstring):
    """Loads YAML from docstring."""
    lines = trim_docstring(docstring).split("\n")
    # find the '---' marker that starts the YAML block; no marker means no YAML
    cut_from = None
    for index, line in enumerate(lines):
        if line.strip().startswith("---"):
            cut_from = index
            break
    if cut_from is None:
        return {}
    yaml_string = dedent("\n".join(lines[cut_from:]))
    return yaml.safe_load(yaml_string) or {}
Loads YAML from docstring .
137
9
223,586
def load_operations_from_docstring(docstring):
    """Return a dictionary of OpenAPI operations parsed from a docstring."""
    doc_data = load_yaml_from_docstring(docstring)
    # keep only HTTP-method keys and x- vendor extensions
    operations = {}
    for key, val in iteritems(doc_data):
        if key in PATH_KEYS or key.startswith("x-"):
            operations[key] = val
    return operations
Return a dictionary of OpenAPI operations parsed from a docstring .
68
14
223,587
def get_fields(schema, exclude_dump_only=False):
    """Return fields from schema.

    Accepts either a schema instance (with `fields`) or class (with
    `_declared_fields`), warns about Meta-only fields, and filters excluded
    ones.
    """
    if hasattr(schema, "fields"):
        fields = schema.fields
    elif hasattr(schema, "_declared_fields"):
        # work on a copy so the class-level declared fields stay untouched
        fields = copy.deepcopy(schema._declared_fields)
    else:
        raise ValueError(
            "{!r} doesn't have either `fields` or `_declared_fields`.".format(schema)
        )
    Meta = getattr(schema, "Meta", None)
    warn_if_fields_defined_in_meta(fields, Meta)
    return filter_excluded_fields(fields, Meta, exclude_dump_only)
Return fields from schema
141
4
223,588
def warn_if_fields_defined_in_meta(fields, Meta):
    """Warn the user that fields defined in Meta.fields or Meta.additional are ignored."""
    meta_fields = set(getattr(Meta, "fields", set()))
    meta_additional = set(getattr(Meta, "additional", set()))
    if not (meta_fields or meta_additional):
        return
    declared = set(fields.keys())
    # only warn when Meta names fields that were never explicitly declared
    if meta_fields > declared or meta_additional > declared:
        warnings.warn(
            "Only explicitly-declared fields will be included in the Schema Object. "
            "Fields defined in Meta.fields or Meta.additional are ignored."
        )
Warns user that fields defined in Meta . fields or Meta . additional will be ignored
140
18
223,589
def filter_excluded_fields(fields, Meta, exclude_dump_only):
    """Filter fields that should be ignored in the OpenAPI spec."""
    excluded = list(getattr(Meta, "exclude", []))
    if exclude_dump_only:
        # dump_only fields are response-only; drop them for request bodies
        excluded.extend(getattr(Meta, "dump_only", []))
    return OrderedDict(
        (name, field) for name, field in fields.items() if name not in excluded
    )
Filter fields that should be ignored in the OpenAPI spec
97
11
223,590
def get_unique_schema_name(components, name, counter=0):
    """Generate a unique name based on *name* and names already in the spec.

    Appends/increments a numeric suffix until the name no longer collides
    with components._schemas; warns once on the first collision.
    """
    if name not in components._schemas:
        return name
    if not counter:
        # first time through recursion: tell the user once
        warnings.warn(
            "Multiple schemas resolved to the name {}. The name has been modified. "
            "Either manually add each of the schemas with a different name or "
            "provide a custom schema_name_resolver.".format(name),
            UserWarning,
        )
    else:
        # subsequent recursions: strip the previously-added suffix before retrying
        name = name[:-len(str(counter))]
    counter += 1
    return get_unique_schema_name(components, name + str(counter), counter)
Function to generate a unique name based on the provided name and names already in the spec . Will append a number to the name to make it unique if the name is already in the spec .
144
38
223,591
def build_reference(component_type, openapi_major_version, component_name):
    """Return a $ref path to the given component."""
    # OpenAPI 3 nests reusable objects under '#/components/...'; v2 keeps
    # its subsections (definitions, parameters, ...) at the top level
    prefix = "components/" if openapi_major_version >= 3 else ""
    subsection = COMPONENT_SUBSECTIONS[openapi_major_version][component_type]
    return {"$ref": "#/{}{}/{}".format(prefix, subsection, component_name)}
Return path to reference
82
4
223,592
def deepupdate(original, update):
    """Recursively merge *original* into *update* (in place) and return *update*.

    Keys present only in original are copied over; nested dicts are merged.
    Non-dict values already present in update win at each level.
    """
    for key in original:
        value = original[key]
        if key in update:
            # recurse only when original holds a nested dict at this key
            if isinstance(value, dict):
                deepupdate(value, update[key])
        else:
            update[key] = value
    return update
Recursively update a dict .
54
7
223,593
def _observed_name(field, name):
    """Adjust field name to reflect dump_to/load_from (marshmallow 2) or data_key (3)."""
    if MARSHMALLOW_VERSION_INFO[0] >= 3:
        return field.data_key or name
    # marshmallow 2: use getattr in case we're running against older versions
    dump_to = getattr(field, "dump_to", None)
    load_from = getattr(field, "load_from", None)
    return dump_to or load_from or name
Adjust field name to reflect dump_to and load_from attributes .
96
14
223,594
def map_to_openapi_type(self, *args):
    """Decorator to set mapping for custom fields.

    Accepts either a single core marshmallow field type (whose existing
    mapping is reused) or an explicit (type, format) pair.
    """
    if len(args) == 1 and args[0] in self.field_mapping:
        # inherit the mapping of an already-known field type
        mapping = self.field_mapping[args[0]]
    elif len(args) == 2:
        mapping = args
    else:
        raise TypeError("Pass core marshmallow field type or (type, fmt) pair.")

    def decorator(field_type):
        self.field_mapping[field_type] = mapping
        return field_type

    return decorator
Decorator to set mapping for custom fields .
126
10
223,595
def field2type_and_format(self, field):
    """Return the dictionary of OpenAPI type and format based on the field type."""
    # Walk the MRO so subclasses of mapped field types resolve to the
    # nearest mapped ancestor; unmapped types warn and fall back to string.
    type_, fmt = "string", None
    for field_class in type(field).__mro__:
        if field_class in self.field_mapping:
            type_, fmt = self.field_mapping[field_class]
            break
    else:
        warnings.warn(
            "Field of type {} does not inherit from marshmallow.Field.".format(type(field)),
            UserWarning,
        )
    ret = {"type": type_}
    if fmt:
        ret["format"] = fmt
    return ret
Return the dictionary of OpenAPI type and format based on the field type
147
14
223,596
def field2default(self, field):
    """Return the dictionary containing the field's default value.

    An explicit `doc_default` metadata entry wins; otherwise a non-callable,
    non-missing `missing` value is used.
    """
    if "doc_default" in field.metadata:
        return {"default": field.metadata["doc_default"]}
    ret = {}
    default = field.missing
    # callables (factories) cannot be represented as a static default
    if default is not marshmallow.missing and not callable(default):
        ret["default"] = default
    return ret
Return the dictionary containing the field s default value
75
9
223,597
def field2choices(self, field, **kwargs):
    """Return the dictionary of OpenAPI field attributes for valid choices definition."""
    attributes = {}
    # validators exposing `comparable` (e.g. Equal) take precedence
    comparable = [
        validator.comparable
        for validator in field.validators
        if hasattr(validator, "comparable")
    ]
    if comparable:
        attributes["enum"] = comparable
        return attributes
    choices = [
        OrderedSet(validator.choices)
        for validator in field.validators
        if hasattr(validator, "choices")
    ]
    if choices:
        # intersect all choice sets so only universally-valid values remain
        attributes["enum"] = list(functools.reduce(operator.and_, choices))
    return attributes
Return the dictionary of OpenAPI field attributes for valid choices definition
123
12
223,598
def field2read_only(self, field, **kwargs):
    """Return the dictionary of OpenAPI field attributes for a dump_only field."""
    # dump_only fields are serialized but never loaded -> readOnly in OpenAPI
    return {"readOnly": True} if field.dump_only else {}
Return the dictionary of OpenAPI field attributes for a dump_only field .
40
15
223,599
def field2write_only(self, field, **kwargs):
    """Return the dictionary of OpenAPI field attributes for a load_only field."""
    # writeOnly only exists in OpenAPI 3+
    if field.load_only and self.openapi_version.major >= 3:
        return {"writeOnly": True}
    return {}
Return the dictionary of OpenAPI field attributes for a load_only field .
51
15