idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
223,400
def _path_column ( self ) : path_column = self . _csv_kwargs . get ( 'include_path_column' ) if path_column is None : # if path column name is not set by user, set to a unique string to # avoid conflicts path_column = unique_string ( ) self . _csv_kwargs [ 'include_path_column' ] = path_column elif isinstance ( path_column , bool ) : path_column = 'path' self . _csv_kwargs [ 'include_path_column' ] = path_column return path_column
Set include_path_column in csv_kwargs and returns path column name
130
17
223,401
def _open_dataset(self, urlpath):
    """Open the dataset with dask and, when a pattern is set, use the pattern
    fields to add new columns.

    Parameters
    ----------
    urlpath : str
        Location of the csv data, possibly a glob/pattern.
    """
    import dask.dataframe
    if self.pattern is None:
        # No pattern: a plain read_csv is all that is needed.
        self._dataframe = dask.dataframe.read_csv(
            urlpath, storage_options=self._storage_options, **self._csv_kwargs)
        return
    # Pattern support relies on include_path_column, added in dask 0.19.0.
    # NOTE(review): this is a lexicographic string comparison of versions —
    # it would misorder e.g. '0.9.0' vs '0.19.0'; confirm DASK_VERSION format.
    if not (DASK_VERSION >= '0.19.0'):
        raise ValueError("Your version of dask is '{}'. "
                         "The ability to include filenames in read_csv output "
                         "(``include_path_column``) was added in 0.19.0, so "
                         "pattern urlpaths are not supported.".format(DASK_VERSION))
    # Record whether the user asked for the path column BEFORE _path_column()
    # (which inserts one into _csv_kwargs); if they did not, the column is
    # only needed internally and is dropped again below.
    drop_path_column = 'include_path_column' not in self._csv_kwargs
    path_column = self._path_column()
    self._dataframe = dask.dataframe.read_csv(
        urlpath, storage_options=self._storage_options, **self._csv_kwargs)
    # add the new columns to the dataframe
    self._set_pattern_columns(path_column)
    if drop_path_column:
        self._dataframe = self._dataframe.drop([path_column], axis=1)
Open dataset using dask and use pattern fields to set new columns
283
13
223,402
def do_search(self, arg=None):
    """Search every cat with the panel's inputs; report non-empty hits and
    hide the panel when anything was found."""
    results = [
        cat.search(self.inputs.text, depth=self.inputs.depth)
        for cat in self.cats
    ]
    hits = [res for res in results if len(list(res)) > 0]
    if len(hits) > 0:
        self.done_callback(hits)
        self.visible = False
Do search and close panel
95
5
223,403
def _persist(source, path, component=None, storage_options=None, **kwargs):
    """Save an array source to a local persistent zarr store.

    Parameters
    ----------
    source : data source
        Must provide ``to_dask()`` or, failing that, ``read()``.
    path : str
        Location of the zarr store to write.
    component : str or None
        Zarr component (group path) to write to and read back from.
    storage_options : dict or None
        Passed through to the filesystem layer.

    Returns
    -------
    ZarrArraySource pointing at the newly written data.
    """
    from dask.array import to_zarr, from_array
    from ..source.zarr import ZarrArraySource
    try:
        arr = source.to_dask()
    except NotImplementedError:
        # Source has no dask representation: load eagerly, then chunk.
        arr = from_array(source.read(), chunks=-1).rechunk('auto')
    # BUG FIX: forward the caller's ``component`` instead of hard-coding
    # None, so the array is written to the same component the returned
    # ZarrArraySource reads from.
    to_zarr(arr, path, component=component, storage_options=storage_options,
            **kwargs)
    source = ZarrArraySource(path, storage_options, component)
    return source
Save array to local persistent store
141
6
223,404
def source(self, source):
    """Propagate a new source to the base view, then refresh the selector's
    options (if a selector is attached)."""
    BaseView.source.fset(self, source)
    if not self.select:
        return
    self.select.options = self.options
When the source gets updated, update the options in the selector
34
12
223,405
def get_file(f, decoder, read):
    """Open an OpenFile-like object and return its contents as a list.

    With no decoder the file's lines are returned.  Otherwise the decoder is
    applied to the whole contents (or to the open handle when ``read`` is
    falsy) and the result is wrapped in a list unless already a list/tuple.
    """
    with f as fh:
        if decoder is None:
            return list(fh)
        payload = fh.read() if read else fh
        decoded = decoder(payload)
        return decoded if isinstance(decoded, (tuple, list)) else [decoded]
Serializable function to take an OpenFile object and read lines
70
12
223,406
def get_case_insensitive(self, dictionary, key, default=None):
    """Case-insensitive lookup of *key* in *dictionary*.

    Returns the value of the first key that matches ignoring case, or
    *default* when no key matches.
    """
    needle = key.lower()
    return next(
        (value for k, value in dictionary.items() if k.lower() == needle),
        default)
Case-insensitive search of a dictionary for a key.
56
10
223,407
def url(self):
    """Path to local catalog file (directory joined with the current
    selection's first value)."""
    selected = self.main.value[0]
    return os.path.join(self.path, selected)
Path to local catalog file
26
5
223,408
def validate(self, arg=None):
    """Check that the entered path is a directory; clear or set the
    validator's error icon accordingly."""
    self.validator.object = None if os.path.isdir(self.path) else ICONS['error']
Check that inputted path is valid - set validator accordingly
48
12
223,409
def add_cat(self, arg=None):
    """Hand the current cat to the done callback and hide the panel;
    surface any failure via the validator before re-raising."""
    try:
        self.done_callback(self.cat)
    except Exception as e:
        self.validator.object = ICONS['error']
        raise e
    self.visible = False
Add cat and close panel
51
5
223,410
def tab_change(self, event):
    """On tab switch clear any error; enable the widget when the url tab
    (index 1) becomes active."""
    self.remove_error()
    if event.new != 1:
        return
    self.widget.disabled = False
When tab changes remove error and enable widget if on url tab
31
12
223,411
def callback(self, cats):
    """Toggle cat-dependent widgets to match whether any cats are selected,
    then forward *cats* to the done callback."""
    has_cats = bool(cats)
    if not has_cats:
        # close search if it is visible
        self.search.visible = False
    for widget in (self.search_widget, self.remove_widget):
        enable_widget(widget, has_cats)
    if self.done_callback:
        self.done_callback(cats)
When a catalog is selected enable widgets that depend on that condition and do done_callback
71
17
223,412
def on_click_search_widget(self, event):
    """Show/hide the search panel; when shown, link the selector's value
    through to the panel's cats."""
    self.search.cats = self.cats
    self.search.visible = event.new
    if not self.search.visible:
        return
    link = self.select.widget.link(self.search, value='cats')
    self.search.watchers.append(link)
When the search control is toggled set visibility and hand down cats
67
14
223,413
def no_duplicates_constructor(loader, node, deep=False):
    """YAML mapping constructor that rejects duplicate keys while loading."""
    seen = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        value = loader.construct_object(value_node, deep=deep)
        if key in seen:
            # Import lazily so yaml loading works without intake on path
            # until a duplicate is actually encountered.
            from intake.catalog.exceptions import DuplicateKeyError
            raise DuplicateKeyError("while constructing a mapping",
                                    node.start_mark,
                                    "found duplicate key (%s)" % key,
                                    key_node.start_mark)
        seen[key] = value
    return loader.construct_mapping(node, deep)
Check for duplicate keys while loading YAML
139
9
223,414
def classname(ob):
    """Return the fully qualified class name, ``package.module.Class``.

    Accepts either a class or an instance.
    """
    import inspect
    cls = ob if inspect.isclass(ob) else ob.__class__
    return '.'.join([cls.__module__, cls.__name__])
Get the object's class's name as package.module.Class
68
13
223,415
def pretty_describe(object, nestedness=0, indent=2):
    """Maintain dict ordering, but render a prettier multi-line string.

    Parameters
    ----------
    object : any
        Non-dicts are simply stringified; dicts are rendered one key per
        line, nested dicts indented one level deeper.
    nestedness : int
        Current nesting level (controls the leading indent).
    indent : int
        Spaces per nesting level.
    """
    if not isinstance(object, dict):
        return str(object)
    sep = f'\n{" " * nestedness * indent}'
    # BUG FIX: propagate ``indent`` into the recursive call; previously a
    # non-default indent was silently ignored below the first level.
    out = sep.join(
        f'{k}: {pretty_describe(v, nestedness + 1, indent)}'
        for k, v in object.items())
    if nestedness > 0 and out:
        return f'{sep}{out}'
    return out
Maintain dict ordering - but make string version prettier
106
11
223,416
def add(self, *args, **kwargs):
    """Delegate to the catalog selector's ``add`` (add to list of cats)."""
    selector = self.cat.select
    return selector.add(*args, **kwargs)
Add to list of cats
33
5
223,417
def coerce_to_list(items, preprocess=None):
    """Given an instance or list, coerce to list; optionally map
    *preprocess* over every element."""
    out = items if isinstance(items, list) else [items]
    if preprocess:
        out = [preprocess(item) for item in out]
    return out
Given an instance or list coerce to list .
49
10
223,418
def _repr_mimebundle_ ( self , * args , * * kwargs ) : try : if self . logo : p = pn . Row ( self . logo_panel , self . panel , margin = 0 ) return p . _repr_mimebundle_ ( * args , * * kwargs ) else : return self . panel . _repr_mimebundle_ ( * args , * * kwargs ) except : raise RuntimeError ( "Panel does not seem to be set up properly" )
Display in a notebook or a server
117
7
223,419
def unwatch(self):
    """Get rid of any lingering param watchers and drop them from the list."""
    if self.watchers is None:
        return
    detached = []
    for watcher in self.watchers:
        watcher.inst.param.unwatch(watcher)
        detached.append(watcher)
    self.watchers = [w for w in self.watchers if w not in detached]
Get rid of any lingering watchers and remove from list
72
11
223,420
def _create_options(self, items):
    """Build an OrderedDict of ``item.name -> item`` from a list or a
    single instance (run through ``self.preprocess``)."""
    return OrderedDict(
        (item.name, item)
        for item in coerce_to_list(items, self.preprocess))
Helper method to create options from list or instance .
46
10
223,421
def options(self, new):
    """Replace the widget's options from a list or single named instance,
    selecting the first option."""
    opts = self._create_options(new)
    first = list(opts.values())[:1]
    if self.widget.value:
        # A selection exists: set options and value in one call.
        self.widget.set_param(options=opts, value=first)
    else:
        self.widget.options = opts
        self.widget.value = first
Set options from list or instance of named item
80
9
223,422
def add(self, items):
    """Add *items* to the widget's options, renaming label clashes to
    ``name_N``, and select the first newly added item."""
    options = self._create_options(items)
    # FIX: iterate over a snapshot — the loop pops from and inserts into
    # ``options``, and mutating a dict while iterating its live .items()
    # view is undefined (can raise RuntimeError or skip entries).
    for k, v in list(options.items()):
        if k in self.labels and v not in self.items:
            # Same label but a different item: find a free suffixed name.
            options.pop(k)
            count = 0
            while f'{k}_{count}' in self.labels:
                count += 1
            options[f'{k}_{count}'] = v
    self.widget.options.update(options)
    self.widget.param.trigger('options')
    self.widget.value = list(options.values())[:1]
Add items to options
125
4
223,423
def remove(self, items):
    """Remove *items* (instance or list) from the widget's options."""
    targets = coerce_to_list(items)
    kept = {label: item
            for label, item in self.options.items()
            if item not in targets}
    self.widget.options = kept
    self.widget.param.trigger('options')
Remove items from options
66
4
223,424
def selected(self, new):
    """Set the widget selection from a list or single object/name;
    names are resolved through ``self.options``."""
    def resolve(item):
        # Strings are option labels; anything else is used as-is.
        return self.options[item] if isinstance(item, str) else item

    self.widget.value = coerce_to_list(new, resolve)
Set selected from list or instance of object or name .
54
11
223,425
def source(self, source):
    """Store the active source; a list collapses to its first element
    (or None when empty)."""
    if isinstance(source, list):
        source = source[0] if source else None
    self._source = source
When the source gets updated update the select widget
50
9
223,426
def callback(self, sources):
    """Toggle source-dependent widgets to match whether any sources are
    selected, then forward *sources* to the done callback."""
    has_sources = bool(sources)
    if not has_sources:
        # Nothing selected: make sure the plot toggle is off.
        self.plot_widget.value = False
    enable_widget(self.plot_widget, has_sources)
    if self.done_callback:
        self.done_callback(sources)
When a source is selected enable widgets that depend on that condition and do done_callback
54
17
223,427
def on_click_plot_widget(self, event):
    """Show/hide the plot panel; when shown, link the selector's value
    through to the panel's source."""
    self.plot.source = self.sources
    self.plot.visible = event.new
    if not self.plot.visible:
        return
    link = self.select.widget.link(self.plot, value='source')
    self.plot.watchers.append(link)
When the plot control is toggled set visibility and hand down source
67
14
223,428
def sanitize_path(path):
    """Utility for cleaning up paths so they compare consistently across
    filesystems."""
    protocol = infer_storage_options(path)['protocol']
    if protocol in ('http', 'https'):
        # Most FSs remove the protocol but not HTTPFS. We need to strip
        # it to match properly.
        stripped = path.replace("{}://".format(protocol), '')
        path = os.path.normpath(stripped)
    elif protocol == 'file':
        # Remove trailing slashes from file paths, then drop colons
        # (e.g. Windows drive letters).
        path = os.path.normpath(path)
        path = path.replace(':', '')
    # Otherwise we just make sure that path is posix
    return make_path_posix(path)
Utility for cleaning up paths .
158
7
223,429
def _download(file_in, file_out, blocksize, output=False):
    """Read from the input file and write to the output file in blocks.

    Parameters
    ----------
    file_in, file_out : OpenFile-like
        Context managers yielding readable/writable handles.
    blocksize : int
        Bytes copied per read.
    output : bool
        When True, show a tqdm progress bar (silently disabled if tqdm is
        not installed).
    """
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore')
        if output:
            try:
                from tqdm.autonotebook import tqdm
            except ImportError:
                logger.warn("Cache progress bar requires tqdm to be installed:"
                            " conda/pip install tqdm")
                output = False
        if output:
            try:
                file_size = file_in.fs.size(file_in.path)
                pbar_disabled = False
            except ValueError as err:
                # Size unknown (e.g. streaming FS): keep the bar disabled.
                logger.debug(
                    "File system error requesting size: {}".format(err))
                file_size = 0
                pbar_disabled = True
            # Claim a free slot in the shared ``display`` set so concurrent
            # downloads render their bars on distinct lines.
            for i in range(100):
                if i not in display:
                    display.add(i)
                    out = i
                    break
            pbar = tqdm(total=file_size // 2 ** 20, leave=False,
                        disable=pbar_disabled, position=out,
                        desc=os.path.basename(file_out.path),
                        mininterval=0.1,
                        bar_format=r'{n}/|/{l_bar}')
        logger.debug("Caching {}".format(file_in.path))
        # Copy in blocks; progress is tracked in MiB.
        with file_in as f1:
            with file_out as f2:
                data = True
                while data:
                    data = f1.read(blocksize)
                    f2.write(data)
                    if output:
                        pbar.update(len(data) // 2 ** 20)
    if output:
        try:
            pbar.update(pbar.total - pbar.n)  # force to full
            pbar.close()
        except Exception as e:
            logger.debug('tqdm exception: %s' % e)
        finally:
            # Release the display slot for the next download.
            display.remove(out)
Read from input and write to output file in blocks
391
10
223,430
def make_caches(driver, specs, catdir=None, cache_dir=None,
                storage_options=None):
    """Create Cache objects from the cache_specs provided in the catalog
    yaml file.

    Parameters
    ----------
    driver : str
        Name of the plugin that can load the catalog data.
    specs : list of dict or None
        Cache specs; each must have a ``'type'`` key used to pick the
        Cache class from the registry (falling back to FileCache).
    catdir, cache_dir : str, optional
        Locations passed through to each Cache.
    storage_options : dict, optional
        Passed through to each Cache.  Defaults to an empty dict.
    """
    # FIX: the default was the mutable literal ``{}``, shared across calls.
    if storage_options is None:
        storage_options = {}
    if specs is None:
        return []
    return [registry.get(spec['type'], FileCache)(
                driver, spec, catdir=catdir, cache_dir=cache_dir,
                storage_options=storage_options)
            for spec in specs]
Creates Cache objects from the cache_specs provided in the catalog yaml file
85
17
223,431
def load(self, urlpath, output=None, **kwargs):
    """Download data from *urlpath* into the cache (unless caching is
    disabled) and return the list of cached paths."""
    if conf.get('cache_disabled', False):
        # Caching switched off globally: hand back the original url.
        return [urlpath]
    if output is None:
        output = conf.get('cache_download_progress', True)
    self.output = output
    cache_paths = self._from_metadata(urlpath)
    if cache_paths is None:
        # Not cached yet: fetch, then re-read metadata for the paths.
        files_in, files_out = self._make_files(urlpath)
        self._load(files_in, files_out, urlpath)
        cache_paths = self._from_metadata(urlpath)
    return cache_paths
Downloads data from a given url generates a hashed filename logs metadata and caches it locally .
140
19
223,432
def _load(self, files_in, files_out, urlpath, meta=True):
    """Download a set of files in parallel via dask.delayed.

    Parameters
    ----------
    files_in, files_out : sequences of OpenFile-like
        Paired source/destination files.
    urlpath : str
        Original url, used both for logging and to skip entries whose
        output path was not munged.
    meta : bool
        When True, record cache metadata for each downloaded file.

    Returns
    -------
    list of str: the output paths (including skipped ones).
    """
    import dask
    out = []
    outnames = []
    for file_in, file_out in zip(files_in, files_out):
        cache_path = file_out.path
        outnames.append(cache_path)
        # If `_munge_path` did not find a match we want to avoid
        # writing to the urlpath.
        if cache_path == urlpath:
            continue
        if not os.path.isfile(cache_path):
            logger.debug("Caching file: {}".format(file_in.path))
            logger.debug("Original path: {}".format(urlpath))
            logger.debug("Cached at: {}".format(cache_path))
            if meta:
                self._log_metadata(urlpath, file_in.path, cache_path)
            # Build a lazy download task; all tasks run together below.
            ddown = dask.delayed(_download)
            out.append(ddown(file_in, file_out, self.blocksize, self.output))
    dask.compute(*out)
    return outnames
Download a set of files
247
5
223,433
def clear_cache(self, urlpath):
    """Clear cached files and metadata recorded for *urlpath*."""
    entries = self._metadata.pop(urlpath, [])  # ignore if missing
    for entry in entries:
        cache_path = entry['cache_path']
        try:
            os.remove(cache_path)
        except (OSError, IOError):
            pass
        fn = os.path.dirname(cache_path)
        try:
            # Remove the (now hopefully empty) per-url cache directory.
            os.rmdir(fn)
        except (OSError, IOError):
            logger.debug("Failed to remove cache directory: %s" % fn)
Clears cache and metadata for a given urlpath .
131
11
223,434
def clear_all ( self ) : for urlpath in self . _metadata . keys ( ) : self . clear_cache ( urlpath ) # Safely clean up anything else. if not os . path . isdir ( self . _cache_dir ) : return for subdir in os . listdir ( self . _cache_dir ) : try : fn = posixpath . join ( self . _cache_dir , subdir ) if os . path . isdir ( fn ) : shutil . rmtree ( fn ) if os . path . isfile ( fn ) : os . remove ( fn ) except ( OSError , IOError ) as e : logger . warning ( str ( e ) )
Clears all cache and metadata .
152
7
223,435
def write_json(dictionary, filename):
    """Write *dictionary* to *filename* as pretty-printed, key-sorted JSON."""
    with open(filename, 'w') as fh:
        json.dump(dictionary, fh, indent=4, sort_keys=True)
    print('--> Wrote ' + os.path.basename(filename))
Write dictionary to JSON
62
4
223,436
def compare(dicts):
    """Intersect the dicts: for every key common to all of them, collect
    the members shared by all of them."""
    shared_keys = reduce(lambda x, y: x & y, map(dict.keys, dicts))
    return {
        key: list(reduce(lambda x, y: x & y, [set(d[key]) for d in dicts]))
        for key in shared_keys
    }
Compare by iteration
85
3
223,437
def sort_common_members():
    """Sort the keys and members of common_members.json, dropping modules
    that have no common members, and rewrite the file."""
    filename = PREFIX + '/common_members.json'
    data = read_json(filename)
    cleaned = {}
    for key in sorted(data):
        members = data[key]
        # Only add modules which have common members
        if len(members) > 0:
            cleaned[key] = sorted(members)
    print('--> Sorted/cleaned ' + os.path.basename(filename))
    write_json(cleaned, filename)
Sorts the keys and members
155
6
223,438
def generate_common_members():
    """Generate common_members.json from the per-binding JSON dumps."""
    names = ['PySide', 'PySide2', 'PyQt4', 'PyQt5']
    dicts = [read_json(PREFIX + '/%s.json' % name) for name in names]
    write_json(compare(dicts), PREFIX + '/common_members.json')
Generate JSON with commonly shared members
136
7
223,439
def parse(fname):
    """Return blocks of code from a Markdown file as a list of dicts.

    Each dict has ``header`` (sanitised section title), ``binding``
    (first comment line of the fenced block) and ``body`` (remaining
    lines).  Blocks marked "untested" or pinned to the other Python
    major version are skipped.
    """
    blocks = list()
    with io.open(fname, "r", encoding="utf-8") as f:
        in_block = False
        current_block = None
        current_header = ""
        for line in f:
            # Doctests are within a quadruple hashtag header.
            if line.startswith("#### "):
                current_header = line.rstrip()
            # The actual test is within a fenced block.
            if line.startswith("```"):
                in_block = False
            if in_block:
                current_block.append(line)
            # Opening fence: start collecting, seeded with the header line.
            if line.startswith("```python"):
                in_block = True
                current_block = list()
                current_block.append(current_header)
                blocks.append(current_block)
    tests = list()
    for block in blocks:
        header = (block[0].strip("# ")  # Remove Markdown
                          .rstrip()     # Remove newline
                          .lower())     # PEP08
        header = re.sub(r"\W", "_", header)
        # Adding "untested" anywhere in the first line of
        # the doctest excludes it from the test.
        if "untested" in block[1].lower():
            continue
        data = re.sub(" ", "", block[1])  # Remove spaces
        data = (data.strip("#")
                    .rstrip()             # Remove newline
                    .split(","))
        # First line of the block is "# binding[, PythonN]".
        binding, doctest_version = (data + [None])[:2]
        # Run tests on both Python 2 and 3, unless explicitly stated
        if doctest_version is not None:
            if doctest_version not in ("Python2", "Python3"):
                raise SyntaxError(
                    "Invalid Python version:\n%s\n"
                    "Python version must follow binding, e.g.\n"
                    "# PyQt5, Python3" % doctest_version)
            active_version = "Python%i" % sys.version_info[0]
            if doctest_version != active_version:
                continue
        tests.append({"header": header,
                      "binding": binding,
                      "body": block[2:]})
    return tests
Return blocks of code as list of dicts
487
9
223,440
def _qInstallMessageHandler(handler):
    """Install a message handler that works in all bindings.

    Wraps *handler* so it always receives ``(msgType, logContext, msg)``
    regardless of whether the active binding is Qt4- or Qt5-based.
    Passing a falsy *handler* uninstalls (it is forwarded unchanged).
    """
    def messageOutputHandler(*args):
        # In Qt4 bindings, message handlers are passed 2 arguments
        # In Qt5 bindings, message handlers are passed 3 arguments
        # The first argument is a QtMsgType
        # The last argument is the message to be printed
        # The Middle argument (if passed) is a QMessageLogContext
        if len(args) == 3:
            msgType, logContext, msg = args
        elif len(args) == 2:
            msgType, msg = args
            logContext = None
        else:
            raise TypeError(
                "handler expected 2 or 3 arguments, got {0}".format(len(args)))
        if isinstance(msg, bytes):
            # In python 3, some bindings pass a bytestring, which cannot be
            # used elsewhere. Decoding a python 2 or 3 bytestring object will
            # consistently return a unicode object.
            msg = msg.decode()
        handler(msgType, logContext, msg)

    # Keep None/falsy handlers as-is so installation can be cleared.
    passObject = messageOutputHandler if handler else handler
    if Qt.IsPySide or Qt.IsPyQt4:
        return Qt._QtCore.qInstallMsgHandler(passObject)
    elif Qt.IsPySide2 or Qt.IsPyQt5:
        return Qt._QtCore.qInstallMessageHandler(passObject)
Install a message handler that works in all bindings
279
9
223,441
def _import_sub_module ( module , name ) : module = __import__ ( module . __name__ + "." + name ) for level in name . split ( "." ) : module = getattr ( module , level ) return module
import_sub_module will mimic the function of importlib . import_module
52
16
223,442
def _setup(module, extras):
    """Install common submodules of the chosen binding onto the Qt shim.

    For each common member (plus *extras*) the real submodule is stored as
    ``Qt._<name>`` and, for non-extras, a fresh placeholder module is bound
    as ``Qt.<name>``.
    """
    Qt.__binding__ = module.__name__
    for name in list(_common_members) + extras:
        try:
            submodule = _import_sub_module(module, name)
        except ImportError:
            try:
                # For extra modules like sip and shiboken that may not be
                # children of the binding.
                submodule = __import__(name)
            except ImportError:
                # Not available in this binding/environment: skip it.
                continue
        setattr(Qt, "_" + name, submodule)
        if name not in extras:
            # Store reference to original binding,
            # but don't store speciality modules
            # such as uic or QtUiTools
            setattr(Qt, name, _new_module(name))
Install common submodules
148
4
223,443
def _build_compatibility_members ( binding , decorators = None ) : decorators = decorators or dict ( ) # Allow optional site-level customization of the compatibility members. # This method does not need to be implemented in QtSiteConfig. try : import QtSiteConfig except ImportError : pass else : if hasattr ( QtSiteConfig , 'update_compatibility_decorators' ) : QtSiteConfig . update_compatibility_decorators ( binding , decorators ) _QtCompat = type ( "QtCompat" , ( object , ) , { } ) for classname , bindings in _compatibility_members [ binding ] . items ( ) : attrs = { } for target , binding in bindings . items ( ) : namespaces = binding . split ( '.' ) try : src_object = getattr ( Qt , "_" + namespaces [ 0 ] ) except AttributeError as e : _log ( "QtCompat: AttributeError: %s" % e ) # Skip reassignment of non-existing members. # This can happen if a request was made to # rename a member that didn't exist, for example # if QtWidgets isn't available on the target platform. continue # Walk down any remaining namespace getting the object assuming # that if the first namespace exists the rest will exist. for namespace in namespaces [ 1 : ] : src_object = getattr ( src_object , namespace ) # decorate the Qt method if a decorator was provided. if target in decorators . get ( classname , [ ] ) : # staticmethod must be called on the decorated method to # prevent a TypeError being raised when the decorated method # is called. src_object = staticmethod ( decorators [ classname ] [ target ] ( src_object ) ) attrs [ target ] = src_object # Create the QtCompat class and install it into the namespace compat_class = type ( classname , ( _QtCompat , ) , attrs ) setattr ( Qt . QtCompat , classname , compat_class )
Apply binding to QtCompat
437
5
223,444
def _convert ( lines ) : def parse ( line ) : line = line . replace ( "from PySide2 import" , "from Qt import QtCompat," ) line = line . replace ( "QtWidgets.QApplication.translate" , "QtCompat.translate" ) if "QtCore.SIGNAL" in line : raise NotImplementedError ( "QtCore.SIGNAL is missing from PyQt5 " "and so Qt.py does not support it: you " "should avoid defining signals inside " "your ui files." ) return line parsed = list ( ) for line in lines : line = parse ( line ) parsed . append ( line ) return parsed
Convert compiled . ui file from PySide2 to Qt . py
152
15
223,445
def update_compatibility_decorators(binding, decorators):
    """This optional function is called by Qt.py to modify the decorators
    applied to QtCompat namespace objects.

    Installs a distinct ``windowTitleDecorator`` for QWidget and for
    QMainWindow so tests can verify decoration happened.
    """
    def _make(template):
        def decorator(some_function):
            def wrapper(*args, **kwargs):
                # Modifies the returned value so we can test that the
                # decorator works.
                return template.format(some_function(*args, **kwargs))
            # preserve docstring and name of original function
            wrapper.__doc__ = some_function.__doc__
            wrapper.__name__ = some_function.__name__
            return wrapper
        return decorator

    decorators.setdefault("QWidget", {})["windowTitleDecorator"] = \
        _make("Test: {}")
    # Assign a different decorator for the same method name on each class
    decorators.setdefault("QMainWindow", {})["windowTitleDecorator"] = \
        _make("QMainWindow Test: {}")
This optional function is called by Qt . py to modify the decorators applied to QtCompat namespace objects .
309
21
223,446
def load_ui_type(uifile):
    """Pyside equivalent for the loadUiType function in PyQt.

    Compiles the .ui file with pysideuic and returns ``(form_class,
    base_class)``.

    NOTE(review): Python 2 only — relies on ``cStringIO`` and the
    ``exec ... in frame`` statement; under Python 3 that line parses as a
    membership test and ``frame`` is never populated. Confirm before use
    on Python 3.
    """
    import pysideuic
    import xml.etree.ElementTree as ElementTree
    from cStringIO import StringIO
    parsed = ElementTree.parse(uifile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text
    with open(uifile, 'r') as f:
        o = StringIO()
        frame = {}
        pysideuic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec(pyc) in frame
        # Fetch the base_class and form class based on their type in
        # the xml from designer
        form_class = frame['Ui_%s' % form_class]
        base_class = eval('QtWidgets.%s' % widget_class)
    return form_class, base_class
Pyside equivalent for the loadUiType function in PyQt .
218
16
223,447
def pyside_load_ui(uifile, base_instance=None):
    """Provide PyQt4.uic.loadUi functionality to PySide.

    Without *base_instance*, a new widget combining the compiled form
    class and its base class is created.  With one, its class is replaced
    by such a combined type (it must already inherit the base class).
    """
    form_class, base_class = load_ui_type(uifile)
    if not base_instance:
        typeName = form_class.__name__
        finalType = type(typeName, (form_class, base_class), {})
        base_instance = finalType()
    else:
        if not isinstance(base_instance, base_class):
            raise RuntimeError(
                'The base_instance passed to loadUi does not inherit from'
                ' needed base type (%s)' % type(base_class))
        typeName = type(base_instance).__name__
        # Rebind the instance's class to a combined form/original type.
        base_instance.__class__ = type(typeName,
                                       (form_class, type(base_instance)),
                                       {})
    base_instance.setupUi(base_instance)
    return base_instance
Provide PyQt4 . uic . loadUi functionality to PySide
191
17
223,448
def ExplicitlyExcludeFromIndex(client, database_id):
    """The default index policy on a DocumentContainer will AUTOMATICALLY
    index ALL documents added.  This method demonstrates using an indexing
    directive to exclude one specific document from the index while all
    others keep being indexed automatically.
    """
    try:
        DeleteContainerIfExists(client, database_id, COLLECTION_ID)
        database_link = GetDatabaseLink(database_id)
        # collections = Query_Entities(client, 'collection', parent_link = database_link)
        # print(collections)

        # Create a collection with default index policy (i.e. automatic = true)
        created_Container = client.CreateContainer(database_link,
                                                   {"id": COLLECTION_ID})
        print(created_Container)
        print("\n" + "-" * 25 + "\n1. Collection created with index policy")
        print_dictionary_items(created_Container["indexingPolicy"])

        # Create a document and query on it immediately.
        # Will work as automatic indexing is still True
        collection_link = GetContainerLink(database_id, COLLECTION_ID)
        doc = client.CreateItem(collection_link,
                                {"id": "doc1", "orderId": "order1"})
        print("\n" + "-" * 25 + "Document doc1 created with order1" + "-" * 25)
        print(doc)
        query = {"query": "SELECT * FROM r WHERE r.orderId=@orderNo",
                 "parameters": [{"name": "@orderNo", "value": "order1"}]}
        QueryDocumentsWithCustomQuery(client, collection_link, query)

        # Now, create a document but this time explicitly exclude it from the collection using IndexingDirective
        # Then query for that document
        # Should NOT find it, because we excluded it from the index
        # BUT, the document is there and doing a ReadDocument by Id will prove it
        doc2 = client.CreateItem(
            collection_link,
            {"id": "doc2", "orderId": "order2"},
            {'indexingDirective': documents.IndexingDirective.Exclude})
        print("\n" + "-" * 25 + "Document doc2 created with order2" + "-" * 25)
        print(doc2)
        query = {"query": "SELECT * FROM r WHERE r.orderId=@orderNo",
                 "parameters": [{"name": "@orderNo", "value": "order2"}]}
        QueryDocumentsWithCustomQuery(client, collection_link, query)
        # Direct read by id still succeeds even though the doc is unindexed.
        docRead = client.ReadItem(
            GetDocumentLink(database_id, COLLECTION_ID, "doc2"))
        print("Document read by ID: \n", docRead["id"])

        # Cleanup
        client.DeleteContainer(collection_link)
        print("\n")
    except errors.HTTPFailure as e:
        if e.status_code == 409:
            print("Entity already exists")
        elif e.status_code == 404:
            print("Entity doesn't exist")
        else:
            raise
The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added . There may be scenarios where you want to exclude a specific doc from the index even though all other documents are being indexed automatically . This method demonstrates how to use an index directive to control this
645
56
223,449
def UseRangeIndexesOnStrings(client, database_id):
    """Showing how range queries (ORDER BY, >=) can be performed even on
    strings by adding a range index on a string path.
    """
    try:
        DeleteContainerIfExists(client, database_id, COLLECTION_ID)
        database_link = GetDatabaseLink(database_id)
        # collections = Query_Entities(client, 'collection', parent_link = database_link)
        # print(collections)

        # Use range indexes on strings

        # This is how you can specify a range index on strings (and numbers) for all properties.
        # This is the recommended indexing policy for collections. i.e. precision -1
        #indexingPolicy = {
        #    'indexingPolicy': {
        #        'includedPaths': [
        #            {
        #                'indexes': [
        #                    {
        #                        'kind': documents.IndexKind.Range,
        #                        'dataType': documents.DataType.String,
        #                        'precision': -1
        #                    }
        #                ]
        #            }
        #        ]
        #    }
        #}

        # For demo purposes, we are going to use the default (range on numbers, hash on strings) for the whole document (/* )
        # and just include a range index on strings for the "region".
        collection_definition = {
            'id': COLLECTION_ID,
            'indexingPolicy': {
                'includedPaths': [
                    {
                        'path': '/region/?',
                        'indexes': [
                            {
                                'kind': documents.IndexKind.Range,
                                'dataType': documents.DataType.String,
                                'precision': -1
                            }
                        ]
                    },
                    {
                        'path': '/*'
                    }
                ]
            }
        }
        created_Container = client.CreateContainer(database_link,
                                                   collection_definition)
        print(created_Container)
        print("\n" + "-" * 25 + "\n6. Collection created with index policy")
        print_dictionary_items(created_Container["indexingPolicy"])

        collection_link = GetContainerLink(database_id, COLLECTION_ID)
        client.CreateItem(collection_link, {"id": "doc1", "region": "USA"})
        client.CreateItem(collection_link, {"id": "doc2", "region": "UK"})
        client.CreateItem(collection_link, {"id": "doc3", "region": "Armenia"})
        client.CreateItem(collection_link, {"id": "doc4", "region": "Egypt"})

        # Now ordering against region is allowed. You can run the following query
        query = {"query": "SELECT * FROM r ORDER BY r.region"}
        message = "Documents ordered by region"
        QueryDocumentsWithCustomQuery(client, collection_link, query, message)

        # You can also perform filters against string comparison like >= 'UK'. Note that you can perform a prefix query,
        # the equivalent of LIKE 'U%' (is >= 'U' AND < 'U')
        query = {"query": "SELECT * FROM r WHERE r.region >= 'U'"}
        message = "Documents with region begining with U"
        QueryDocumentsWithCustomQuery(client, collection_link, query, message)

        # Cleanup
        client.DeleteContainer(collection_link)
        print("\n")
    except errors.HTTPFailure as e:
        if e.status_code == 409:
            print("Entity already exists")
        elif e.status_code == 404:
            print("Entity doesn't exist")
        else:
            raise
Showing how range queries can be performed even on strings .
735
12
223,450
def ResolveForCreate(self, document):
    """Resolve the collection for creating *document* based on its
    partition key.

    Raises ValueError when *document* is None or when no range in the
    partition map contains its key.
    """
    if document is None:
        raise ValueError("document is None.")
    partition_key = self.partition_key_extractor(document)
    containing_range = self._GetContainingRange(partition_key)
    if containing_range is None:
        raise ValueError(
            "A containing range for " + str(partition_key) +
            " doesn't exist in the partition map.")
    return self.partition_map.get(containing_range)
Resolves the collection for creating the document based on the partition key .
105
14
223,451
def _GetContainingRange ( self , partition_key ) : for keyrange in self . partition_map . keys ( ) : if keyrange . Contains ( partition_key ) : return keyrange return None
Gets the containing range based on the partition key .
44
11
223,452
def _GetIntersectingRanges(self, partition_key):
    """Gets the intersecting ranges based on the partition key.

    *partition_key* may be None (match everything), a single
    ``prange.Range``, a bare value (treated as a point range), or a list
    mixing any of those.  Returns the map's ranges that intersect; a None
    anywhere means all ranges are returned (as a list).
    """
    partitionkey_ranges = set()
    intersecting_ranges = set()
    if partition_key is None:
        # No key at all: every range matches.
        return list(self.partition_map.keys())
    if isinstance(partition_key, prange.Range):
        partitionkey_ranges.add(partition_key)
    elif isinstance(partition_key, list):
        for key in partition_key:
            if key is None:
                # A None member widens the query to all ranges.
                return list(self.partition_map.keys())
            elif isinstance(key, prange.Range):
                partitionkey_ranges.add(key)
            else:
                # Bare value: wrap as a degenerate [key, key] range.
                partitionkey_ranges.add(prange.Range(key, key))
    else:
        partitionkey_ranges.add(prange.Range(partition_key, partition_key))
    # Collect every map range intersecting any of the requested ranges.
    for partitionKeyRange in partitionkey_ranges:
        for keyrange in self.partition_map.keys():
            if keyrange.Intersect(partitionKeyRange):
                intersecting_ranges.add(keyrange)
    return intersecting_ranges
Gets the intersecting ranges based on the partition key .
238
12
223,453
def _create_execution_context(self):
    """Instantiate the internal query execution context for this query."""
    if not hasattr(self, '_database_link'):
        return execution_dispatcher._ProxyQueryExecutionContext(
            self._client, self._collection_link, self._query,
            self._options, self._fetch_function)
    # presence of _database_link indicates a client-side partitioning query
    return base_execution_context._MultiCollectionQueryExecutionContext(
        self._client, self._options, self._database_link,
        self._query, self._partition_key)
Instantiates the appropriate internal query execution context, based on whether a client-side partitioning query is being executed.
125
10
223,454
def _Execute(client, global_endpoint_manager, function, *args, **kwargs):
    """Executes ``function`` with the passed parameters, applying all retry policies.

    Loops until the call succeeds, a retry policy declines to retry, or an
    unhandled error escapes. Throttle-related response headers are always set
    on ``client.last_response_headers`` before returning or re-raising.
    """
    # instantiate all retry policies here to be applied for each request execution
    endpointDiscovery_retry_policy = endpoint_discovery_retry_policy._EndpointDiscoveryRetryPolicy(client.connection_policy, global_endpoint_manager, *args)
    resourceThrottle_retry_policy = resource_throttle_retry_policy._ResourceThrottleRetryPolicy(
        client.connection_policy.RetryOptions.MaxRetryAttemptCount,
        client.connection_policy.RetryOptions.FixedRetryIntervalInMilliseconds,
        client.connection_policy.RetryOptions.MaxWaitTimeInSeconds)
    defaultRetry_policy = default_retry_policy._DefaultRetryPolicy(*args)
    sessionRetry_policy = session_retry_policy._SessionRetryPolicy(client.connection_policy.EnableEndpointDiscovery, global_endpoint_manager, *args)
    while True:
        try:
            # the request object (when present in args) rides along so the
            # endpoint manager can resolve the endpoint per attempt
            if args:
                result = _ExecuteFunction(function, global_endpoint_manager, *args, **kwargs)
            else:
                result = _ExecuteFunction(function, *args, **kwargs)
            if not client.last_response_headers:
                client.last_response_headers = {}
            # setting the throttle related response headers before returning the result
            client.last_response_headers[HttpHeaders.ThrottleRetryCount] = resourceThrottle_retry_policy.current_retry_attempt_count
            client.last_response_headers[HttpHeaders.ThrottleRetryWaitTimeInMs] = resourceThrottle_retry_policy.cummulative_wait_time_in_milliseconds
            return result
        except errors.HTTPFailure as e:
            # pick the retry policy that matches the failure class
            retry_policy = None
            if (e.status_code == StatusCodes.FORBIDDEN
                    and e.sub_status == SubStatusCodes.WRITE_FORBIDDEN):
                retry_policy = endpointDiscovery_retry_policy
            elif e.status_code == StatusCodes.TOO_MANY_REQUESTS:
                retry_policy = resourceThrottle_retry_policy
            elif e.status_code == StatusCodes.NOT_FOUND and e.sub_status and e.sub_status == SubStatusCodes.READ_SESSION_NOTAVAILABLE:
                retry_policy = sessionRetry_policy
            else:
                retry_policy = defaultRetry_policy
            # If the chosen policy declines to retry, set the throttle related
            # response headers and re-throw the exception back.
            # args[0] is the request. It needs to be modified for write forbidden exception
            if not (retry_policy.ShouldRetry(e)):
                if not client.last_response_headers:
                    client.last_response_headers = {}
                client.last_response_headers[HttpHeaders.ThrottleRetryCount] = resourceThrottle_retry_policy.current_retry_attempt_count
                client.last_response_headers[HttpHeaders.ThrottleRetryWaitTimeInMs] = resourceThrottle_retry_policy.cummulative_wait_time_in_milliseconds
                if len(args) > 0 and args[0].should_clear_session_token_on_session_read_failure:
                    client.session.clear_session_token(client.last_response_headers)
                raise
            else:
                # Wait for retry_after_in_milliseconds time before the next retry
                time.sleep(retry_policy.retry_after_in_milliseconds / 1000.0)
Executes the function with the passed parameters, applying all retry policies
834
13
223,455
def _GetDatabaseAccount(self):
    """Get the database account, preferring the default endpoint.

    If the default endpoint fails, each preferred location's locational
    endpoint is tried in order; returns None if every endpoint fails.
    """
    try:
        return self._GetDatabaseAccountStub(self.DefaultEndpoint)
    except errors.HTTPFailure:
        pass
    # The default endpoint did not answer (for any non-globaldb related
    # reason); probe the locational endpoint built from each preferred
    # location until one yields the database account.
    for location_name in self.PreferredLocations:
        locational_endpoint = _GlobalEndpointManager.GetLocationalEndpoint(self.DefaultEndpoint, location_name)
        try:
            return self._GetDatabaseAccountStub(locational_endpoint)
        except errors.HTTPFailure:
            pass
    return None
Gets the database account, trying the default endpoint first; if that doesn't succeed, tries the endpoints for the preferred locations in the order they are specified, returning None if none of them yields the database account.
205
37
223,456
def CompareTo(self, other_hash_value):
    """Compare this object's hash value with ``other_hash_value``.

    The hash byte arrays store the most significant byte at the END
    (as produced by ComputeHash), so comparison walks both arrays from
    the last byte backwards.

    Returns -1, 0 or 1 for less-than, equal, greater-than.
    Raises ValueError when the lengths differ.
    """
    if len(self.hash_value) != len(other_hash_value):
        raise ValueError("Length of hashes doesn't match.")
    # zip over the reversed arrays replaces the original index arithmetic
    # (and the Python-2-only xrange) with an O(n) pairwise walk from the MSB.
    for mine, theirs in zip(reversed(self.hash_value), reversed(other_hash_value)):
        if mine < theirs:
            return -1
        if mine > theirs:
            return 1
    return 0
Compares the passed hash value with the hash value of this object
190
13
223,457
def ComputeHash(self, key):
    """Compute the MurmurHash3 hash of ``key``, packed as a 4-byte little-endian bytearray."""
    if key is None:
        raise ValueError("key is None.")
    return bytearray(pack('I', self._ComputeHash(key)))
Computes the hash of the value passed using MurmurHash3 algorithm .
55
15
223,458
def _ComputeHash ( key , seed = 0x0 ) : def fmix ( h ) : h ^= h >> 16 h = ( h * 0x85ebca6b ) & 0xFFFFFFFF h ^= h >> 13 h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF h ^= h >> 16 return h length = len ( key ) nblocks = int ( length / 4 ) h1 = seed c1 = 0xcc9e2d51 c2 = 0x1b873593 # body for block_start in xrange ( 0 , nblocks * 4 , 4 ) : k1 = key [ block_start + 3 ] << 24 | key [ block_start + 2 ] << 16 | key [ block_start + 1 ] << 8 | key [ block_start + 0 ] k1 = c1 * k1 & 0xFFFFFFFF k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32 k1 = ( c2 * k1 ) & 0xFFFFFFFF h1 ^= k1 h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined _ROTL32 h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF # tail tail_index = nblocks * 4 k1 = 0 tail_size = length & 3 if tail_size >= 3 : k1 ^= key [ tail_index + 2 ] << 16 if tail_size >= 2 : k1 ^= key [ tail_index + 1 ] << 8 if tail_size >= 1 : k1 ^= key [ tail_index + 0 ] if tail_size != 0 : k1 = ( k1 * c1 ) & 0xFFFFFFFF k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # _ROTL32 k1 = ( k1 * c2 ) & 0xFFFFFFFF h1 ^= k1 return fmix ( h1 ^ length )
Computes the hash of the value passed using MurmurHash3 algorithm with the seed value .
449
19
223,459
def create(cls, session_token):
    """Parse ``session_token`` and build the vector session token.

    Returns None whenever the token is empty or any segment is malformed.
    """
    if not session_token:
        return None
    segments = session_token.split(cls.segment_separator)
    if len(segments) < 2:
        return None
    try:
        version = int(segments[0])
        global_lsn = int(segments[1])
    except ValueError:
        return None
    local_lsn_by_region = {}
    for region_segment in segments[2:]:
        pieces = region_segment.split(cls.region_progress_separator)
        if len(pieces) != 2:
            return None
        try:
            local_lsn_by_region[int(pieces[0])] = int(pieces[1])
        except ValueError:
            return None
    return VectorSessionToken(version, global_lsn, local_lsn_by_region, session_token)
Parses session token and creates the vector session token
261
11
223,460
def _ConstructPartitions(self, collection_links, partitions_per_node):
    """Build and sort the consistent-hash ring partitions for the collection nodes.

    Each node contributes ``partitions_per_node`` entries; each successive
    entry's hash is derived by re-hashing the previous hash value.
    """
    ring = []
    for collection_node in collection_links:
        hash_value = self.hash_generator.ComputeHash(self._GetBytes(collection_node))
        for _ in range(partitions_per_node):
            ring.append(partition._Partition(hash_value, collection_node))
            hash_value = self.hash_generator.ComputeHash(hash_value)
    ring.sort()
    return ring
Constructs the partitions in the consistent ring by assigning them to collection nodes using the hashing algorithm and then finally sorting the partitions based on the hash value .
163
30
223,461
def _FindPartition(self, key):
    """Find the ring partition owning ``key`` (byte-array form of the partition key)."""
    return self._LowerBoundSearch(self.partitions, self.hash_generator.ComputeHash(key))
Finds the partition from the byte array representation of the partition key .
44
14
223,462
def _GetSerializedPartitionList(self):
    """Return the ring as (node, hash-as-uint32) tuples; helper for test code."""
    return [(part.node, unpack("<L", part.hash_value)[0]) for part in self.partitions]
Gets the serialized version of the ConsistentRing . Added this helper for the test code .
59
20
223,463
def _GetBytes(partition_key):
    """Return the UTF-8 bytes of a string partition key; reject any other type."""
    if not isinstance(partition_key, six.string_types):
        raise ValueError("Unsupported " + str(type(partition_key)) + " for partitionKey.")
    return bytearray(partition_key, encoding='utf-8')
Gets the bytes representing the value of the partition key .
70
12
223,464
def _LowerBoundSearch ( partitions , hash_value ) : for i in xrange ( 0 , len ( partitions ) - 1 ) : if partitions [ i ] . CompareTo ( hash_value ) <= 0 and partitions [ i + 1 ] . CompareTo ( hash_value ) > 0 : return i return len ( partitions ) - 1
Searches the partition in the partition array using hashValue .
71
13
223,465
def _fetch_items_helper_no_retries(self, fetch_function):
    """Fetch the next non-empty page of items; does not retry on failure.

    Keeps requesting pages until a non-empty one arrives or the continuation
    token is exhausted. For change-feed queries the ETag response header is
    used as the continuation token and an empty page terminates iteration.

    Fix: removes a stray no-op ``fetched_items`` expression statement that
    followed the fetch call.
    """
    fetched_items = []
    # Continue paging until a non-empty page is found or results are exhausted
    while self._continuation or not self._has_started:
        if not self._has_started:
            self._has_started = True
        self._options['continuation'] = self._continuation
        (fetched_items, response_headers) = fetch_function(self._options)
        continuation_key = http_constants.HttpHeaders.Continuation
        # Use Etag as continuation token for change feed queries.
        if self._is_change_feed:
            continuation_key = http_constants.HttpHeaders.ETag
        # In change feed queries, the continuation token is always populated.
        # The hasNext() test is whether the response contains any items.
        if not self._is_change_feed or len(fetched_items) > 0:
            self._continuation = response_headers.get(continuation_key)
        else:
            self._continuation = None
        if fetched_items:
            break
    return fetched_items
Fetches more items and doesn't retry on failure
259
12
223,466
def _fetch_next_block(self):
    """Fetches the next block of query results.

    Fetches from the current collection link first; once the current
    collection's results are exhausted, advances to the next collection
    link (if any) and retries until a non-empty block is found or every
    collection has been queried.
    """
    # Fetch next block of results by executing the query against the current document collection
    fetched_items = self._fetch_items_helper_with_retries(self._fetch_function)
    # If there are multiple document collections to query (client-side
    # partitioning), keep looping through each one of them, creating a
    # separate feed query per collection and fetching its items.
    while not fetched_items:
        if self._collection_links and self._current_collection_index < self._collection_links_length:
            path = base.GetPathFromLink(self._collection_links[self._current_collection_index], 'docs')
            collection_id = base.GetResourceIdOrFullNameFromLink(self._collection_links[self._current_collection_index])
            # reset paging state before switching to the next collection
            self._continuation = None
            self._has_started = False
            def fetch_fn(options):
                return self._client.QueryFeed(path, collection_id, self._query, options)
            self._fetch_function = fetch_fn
            fetched_items = self._fetch_items_helper_with_retries(self._fetch_function)
            self._current_collection_index += 1
        else:
            break
    return fetched_items
Fetches the next block of query results. This fetches the next block of results from the current collection link; once the current collection's results are exhausted, it moves on to the next collection link.
294
42
223,467
def Contains(self, other):
    """Check whether ``other`` (a Range or a point value) lies within this range."""
    if other is None:
        raise ValueError("other is None.")
    if not isinstance(other, Range):
        # wrap a point value as a degenerate range and re-check
        return self.Contains(Range(other, other))
    return other.low >= self.low and other.high <= self.high
Checks if the passed parameter is in the range of this object .
67
14
223,468
def Intersect(self, other):
    """Check whether ``other`` (a Range) overlaps this range."""
    if isinstance(other, Range):
        overlap_low = self.low if self.low >= other.low else other.low
        overlap_high = self.high if self.high <= other.high else other.high
        # the ranges overlap exactly when the tightest bounds still nest
        return overlap_low <= overlap_high
Checks if the passed parameter intersects the range of this object .
73
14
223,469
def RegisterPartitionResolver(self, database_link, partition_resolver):
    """Register the partition resolver associated with the database link.

    Raises ValueError when either argument is missing.

    Fix: adds the resolver to the existing map instead of replacing the
    whole map with a one-entry dict, so resolvers registered for other
    databases are preserved (GetPartitionResolver looks entries up by key).
    """
    if not database_link:
        raise ValueError("database_link is None or empty.")
    if partition_resolver is None:
        raise ValueError("partition_resolver is None.")
    self.partition_resolvers[base.TrimBeginningAndEndingSlashes(database_link)] = partition_resolver
Registers the partition resolver associated with the database link
91
11
223,470
def GetPartitionResolver(self, database_link):
    """Return the partition resolver registered for ``database_link``, or None."""
    if not database_link:
        raise ValueError("database_link is None or empty.")
    trimmed_link = base.TrimBeginningAndEndingSlashes(database_link)
    return self.partition_resolvers.get(trimmed_link)
Gets the partition resolver associated with the database link
61
11
223,471
def CreateDatabase(self, database, options=None):
    """Create a database."""
    opts = {} if options is None else options
    CosmosClient.__ValidateResource(database)
    return self.Create(database, '/dbs', 'dbs', None, None, opts)
Creates a database .
58
5
223,472
def ReadDatabase(self, database_link, options=None):
    """Read the database referenced by ``database_link``."""
    opts = {} if options is None else options
    return self.Read(
        base.GetPathFromLink(database_link),
        'dbs',
        base.GetResourceIdOrFullNameFromLink(database_link),
        None,
        opts)
Reads a database .
76
5
223,473
def QueryDatabases(self, query, options=None):
    """Query databases."""
    opts = {} if options is None else options

    def _fetch(fetch_options):
        # read last_response_headers only after the feed call completes
        result = self.__QueryFeed('/dbs', 'dbs', '', lambda r: r['Databases'],
                                  lambda _, b: b, query, fetch_options)
        return result, self.last_response_headers

    return query_iterable.QueryIterable(self, query, opts, _fetch)
Queries databases .
100
4
223,474
def ReadContainers(self, database_link, options=None):
    """Read all collections in a database."""
    return self.QueryContainers(database_link, None, {} if options is None else options)
Reads all collections in a database .
40
8
223,475
def CreateContainer(self, database_link, collection, options=None):
    """Create a collection in a database."""
    opts = {} if options is None else options
    CosmosClient.__ValidateResource(collection)
    return self.Create(
        collection,
        base.GetPathFromLink(database_link, 'colls'),
        'colls',
        base.GetResourceIdOrFullNameFromLink(database_link),
        None,
        opts)
Creates a collection in a database .
95
8
223,476
def ReplaceContainer(self, collection_link, collection, options=None):
    """Replace a collection and return it."""
    opts = {} if options is None else options
    CosmosClient.__ValidateResource(collection)
    return self.Replace(
        collection,
        base.GetPathFromLink(collection_link),
        'colls',
        base.GetResourceIdOrFullNameFromLink(collection_link),
        None,
        opts)
Replaces a collection and returns it.
90
8
223,477
def ReadContainer(self, collection_link, options=None):
    """Read a collection."""
    opts = {} if options is None else options
    return self.Read(
        base.GetPathFromLink(collection_link),
        'colls',
        base.GetResourceIdOrFullNameFromLink(collection_link),
        None,
        opts)
Reads a collection .
76
5
223,478
def UpsertUser(self, database_link, user, options=None):
    """Upsert a user."""
    opts = {} if options is None else options
    db_id, user_path = self._GetDatabaseIdWithPathForUser(database_link, user)
    return self.Upsert(user, user_path, 'users', db_id, None, opts)
Upserts a user .
72
7
223,479
def ReadUser(self, user_link, options=None):
    """Read a user."""
    opts = {} if options is None else options
    return self.Read(
        base.GetPathFromLink(user_link),
        'users',
        base.GetResourceIdOrFullNameFromLink(user_link),
        None,
        opts)
Reads a user .
75
5
223,480
def ReadUsers(self, database_link, options=None):
    """Read all users in a database."""
    return self.QueryUsers(database_link, None, {} if options is None else options)
Reads all users in a database .
38
8
223,481
def QueryUsers(self, database_link, query, options=None):
    """Query users in a database."""
    opts = {} if options is None else options
    users_path = base.GetPathFromLink(database_link, 'users')
    db_id = base.GetResourceIdOrFullNameFromLink(database_link)

    def _fetch(fetch_options):
        # read last_response_headers only after the feed call completes
        result = self.__QueryFeed(users_path, 'users', db_id, lambda r: r['Users'],
                                  lambda _, b: b, query, fetch_options)
        return result, self.last_response_headers

    return query_iterable.QueryIterable(self, query, opts, _fetch)
Queries users in a database .
136
7
223,482
def DeleteDatabase(self, database_link, options=None):
    """Delete a database."""
    opts = {} if options is None else options
    return self.DeleteResource(
        base.GetPathFromLink(database_link),
        'dbs',
        base.GetResourceIdOrFullNameFromLink(database_link),
        None,
        opts)
Deletes a database .
77
5
223,483
def CreatePermission(self, user_link, permission, options=None):
    """Create a permission for a user."""
    opts = {} if options is None else options
    perm_path, owner_id = self._GetUserIdWithPathForPermission(permission, user_link)
    return self.Create(permission, perm_path, 'permissions', owner_id, None, opts)
Creates a permission for a user .
73
8
223,484
def UpsertPermission(self, user_link, permission, options=None):
    """Upsert a permission for a user."""
    opts = {} if options is None else options
    perm_path, owner_id = self._GetUserIdWithPathForPermission(permission, user_link)
    return self.Upsert(permission, perm_path, 'permissions', owner_id, None, opts)
Upserts a permission for a user .
75
10
223,485
def ReadPermission(self, permission_link, options=None):
    """Read a permission."""
    opts = {} if options is None else options
    return self.Read(
        base.GetPathFromLink(permission_link),
        'permissions',
        base.GetResourceIdOrFullNameFromLink(permission_link),
        None,
        opts)
Reads a permission .
77
5
223,486
def ReadPermissions(self, user_link, options=None):
    """Read all permissions for a user."""
    return self.QueryPermissions(user_link, None, {} if options is None else options)
Reads all permissions for a user .
40
8
223,487
def QueryPermissions(self, user_link, query, options=None):
    """Query permissions for a user."""
    opts = {} if options is None else options
    perms_path = base.GetPathFromLink(user_link, 'permissions')
    owner_id = base.GetResourceIdOrFullNameFromLink(user_link)

    def _fetch(fetch_options):
        # read last_response_headers only after the feed call completes
        result = self.__QueryFeed(perms_path, 'permissions', owner_id,
                                  lambda r: r['Permissions'], lambda _, b: b,
                                  query, fetch_options)
        return result, self.last_response_headers

    return query_iterable.QueryIterable(self, query, opts, _fetch)
Queries permissions for a user .
140
7
223,488
def ReplaceUser(self, user_link, user, options=None):
    """Replace a user and return it."""
    opts = {} if options is None else options
    CosmosClient.__ValidateResource(user)
    return self.Replace(
        user,
        base.GetPathFromLink(user_link),
        'users',
        base.GetResourceIdOrFullNameFromLink(user_link),
        None,
        opts)
Replaces a user and returns it.
89
8
223,489
def DeleteUser(self, user_link, options=None):
    """Delete a user."""
    opts = {} if options is None else options
    return self.DeleteResource(
        base.GetPathFromLink(user_link),
        'users',
        base.GetResourceIdOrFullNameFromLink(user_link),
        None,
        opts)
Deletes a user .
76
5
223,490
def ReplacePermission(self, permission_link, permission, options=None):
    """Replace a permission and return it."""
    opts = {} if options is None else options
    CosmosClient.__ValidateResource(permission)
    return self.Replace(
        permission,
        base.GetPathFromLink(permission_link),
        'permissions',
        base.GetResourceIdOrFullNameFromLink(permission_link),
        None,
        opts)
Replaces a permission and returns it.
91
8
223,491
def DeletePermission(self, permission_link, options=None):
    """Delete a permission."""
    opts = {} if options is None else options
    return self.DeleteResource(
        base.GetPathFromLink(permission_link),
        'permissions',
        base.GetResourceIdOrFullNameFromLink(permission_link),
        None,
        opts)
Deletes a permission .
78
5
223,492
def ReadItems(self, collection_link, feed_options=None):
    """Read all documents in a collection."""
    return self.QueryItems(collection_link, None, {} if feed_options is None else feed_options)
Reads all documents in a collection .
46
8
223,493
def QueryItems(self, database_or_Container_link, query, options=None, partition_key=None):
    """Query documents in a collection.

    When given a database link (client-side partitioning), the query is
    fanned out across the partitioned collections via the alternate
    QueryIterable constructor; otherwise a single-collection feed query
    is built.
    """
    link = base.TrimBeginningAndEndingSlashes(database_or_Container_link)
    opts = {} if options is None else options
    if base.IsDatabaseLink(link):
        # Python has no overloaded constructors, so the partitioning variant
        # is exposed as a classmethod factory on QueryIterable.
        return query_iterable.QueryIterable.PartitioningQueryIterable(
            self, query, opts, link, partition_key)
    docs_path = base.GetPathFromLink(link, 'docs')
    collection_id = base.GetResourceIdOrFullNameFromLink(link)

    def _fetch(fetch_options):
        # read last_response_headers only after the feed call completes
        result = self.__QueryFeed(docs_path, 'docs', collection_id,
                                  lambda r: r['Documents'], lambda _, b: b,
                                  query, fetch_options)
        return result, self.last_response_headers

    return query_iterable.QueryIterable(self, query, opts, _fetch, link)
Queries documents in a collection .
285
7
223,494
def QueryItemsChangeFeed(self, collection_link, options=None):
    """Query the documents change feed in a collection."""
    pk_range_id = None if options is None else options.get('partitionKeyRangeId')
    return self._QueryChangeFeed(collection_link, "Documents", options, pk_range_id)
Queries documents change feed in a collection .
86
9
223,495
def _QueryChangeFeed(self, collection_link, resource_type, options=None, partition_key_range_id=None):
    """Query the change feed of a resource in a collection.

    Note: when ``options`` is supplied it is mutated in place (the
    'changeFeed' flag is set), matching the caller-visible behavior.
    """
    opts = {} if options is None else options
    opts['changeFeed'] = True
    # For now, change feed only supports the Documents resource type.
    resource_key_map = {'Documents': 'docs'}
    if resource_type not in resource_key_map:
        raise NotImplementedError(resource_type + " change feed query is not supported.")
    resource_key = resource_key_map[resource_type]
    feed_path = base.GetPathFromLink(collection_link, resource_key)
    collection_id = base.GetResourceIdOrFullNameFromLink(collection_link)

    def _fetch(fetch_options):
        # read last_response_headers only after the feed call completes
        result = self.__QueryFeed(feed_path, resource_key, collection_id,
                                  lambda r: r[resource_type], lambda _, b: b,
                                  None, fetch_options, partition_key_range_id)
        return result, self.last_response_headers

    return query_iterable.QueryIterable(self, None, opts, _fetch, collection_link)
Queries change feed of a resource in a collection .
250
11
223,496
def _ReadPartitionKeyRanges(self, collection_link, feed_options=None):
    """Read the partition key ranges of a collection."""
    return self._QueryPartitionKeyRanges(collection_link, None, {} if feed_options is None else feed_options)
Reads Partition Key Ranges .
56
8
223,497
def CreateItem(self, database_or_Container_link, document, options=None):
    """Create a document in a collection.

    ``database_or_Container_link`` may also be a database link when
    client-side partitioning is in use.
    """
    # A mutable default ({}) would be shared across calls, so the default is
    # None and a fresh dict is created per call.
    if options is None:
        options = {}
    # Inject the partition key only for real collection links; a database
    # link means client-side partitioning handles routing instead.
    if base.IsItemContainerLink(database_or_Container_link):
        options = self._AddPartitionKey(database_or_Container_link, document, options)
    cid, doc, doc_path = self._GetContainerIdWithPathForItem(
        database_or_Container_link, document, options)
    return self.Create(doc, doc_path, 'docs', cid, None, options)
Creates a document in a collection .
270
8
223,498
def UpsertItem(self, database_or_Container_link, document, options=None):
    """Upsert a document in a collection.

    ``database_or_Container_link`` may also be a database link when
    client-side partitioning is in use.
    """
    # A mutable default ({}) would be shared across calls, so the default is
    # None and a fresh dict is created per call.
    if options is None:
        options = {}
    # Inject the partition key only for real collection links; a database
    # link means client-side partitioning handles routing instead.
    if base.IsItemContainerLink(database_or_Container_link):
        options = self._AddPartitionKey(database_or_Container_link, document, options)
    cid, doc, doc_path = self._GetContainerIdWithPathForItem(
        database_or_Container_link, document, options)
    return self.Upsert(doc, doc_path, 'docs', cid, None, options)
Upserts a document in a collection .
272
10
223,499
def ReadItem(self, document_link, options=None):
    """Read a document."""
    opts = {} if options is None else options
    return self.Read(
        base.GetPathFromLink(document_link),
        'docs',
        base.GetResourceIdOrFullNameFromLink(document_link),
        None,
        opts)
Reads a document .
75
5