idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
26,900
def add_adjustment(self, adjustment):
    """Create a new gift card adjustment via the "adjustments" endpoint."""
    response = self.post("adjustments", adjustment.encode())
    decoded = GiftCard.format.decode(response.body)
    return GiftCardAdjustment(decoded)
Create a new Gift Card Adjustment
26,901
def atomic_create_file ( file_path ) : fd , tmp_file_path = tempfile . mkstemp ( prefix = os . path . basename ( file_path ) , dir = os . path . dirname ( file_path ) , suffix = ".metadata_tmp" ) try : with os . fdopen ( fd , "w" ) as out_file : yield out_file os . rename ( tmp_file_path , file_path ) except Exception ...
Open a temporary file for writing; rename it to the final name when done
26,902
def _stream_blob ( self , key , fileobj , progress_callback ) : file_size = None start_range = 0 chunk_size = self . conn . MAX_CHUNK_GET_SIZE end_range = chunk_size - 1 while True : try : blob = self . conn . _get_blob ( self . container_name , key , start_range = start_range , end_range = end_range ) if file_size is ...
Streams contents of given key to given fileobj . Data is read sequentially in chunks without any seeks . This requires duplicating some functionality of the Azure SDK which only allows reading entire blob into memory at once or returning data from random offsets
26,903
def format_key_for_backend(self, key, remove_slash_prefix=False, trailing_slash=False):
    """Return *key* with the configured prefix applied, normalizing slashes.

    With trailing_slash the result always ends in "/"; otherwise trailing
    slashes are stripped.  remove_slash_prefix drops leading slashes from
    the final result.
    """
    full_key = self.prefix + key
    if trailing_slash:
        if not full_key.endswith("/"):
            full_key += "/"
    else:
        full_key = full_key.rstrip("/")
    return full_key.lstrip("/") if remove_slash_prefix else full_key
Add a possible prefix to the key before sending it to the backend
26,904
def format_key_from_backend(self, key):
    """Strip the configured prefix from a backend key.

    Raises StorageError when the key does not carry the expected prefix.
    """
    prefix = self.prefix
    if not prefix:
        return key
    if key.startswith(prefix):
        return key[len(prefix):]
    raise StorageError("Key {!r} does not start with expected prefix {!r}".format(key, prefix))
Strip the configured prefix from a key retrieved from the backend before passing it on to other pghoard code and presenting it to the user .
26,905
def delete_tree(self, key):
    """Delete every key below *key*.

    Lists all keys first, then removes them one by one; storage backends
    may override this with something more efficient.
    """
    self.log.debug("Deleting tree: %r", key)
    entries = self.list_path(key, with_metadata=False, deep=True)
    names = [entry["name"] for entry in entries]
    for name in names:
        self.delete_key(name)
Delete all keys under given root key . Basic implementation works by just listing all available keys and deleting them individually but storage providers can implement more efficient logic .
26,906
def sanitize_metadata(self, metadata, replace_hyphen_with="-"):
    """Stringify metadata keys and values, dropping None values and
    optionally substituting hyphens in keys."""
    result = {}
    for key, value in (metadata or {}).items():
        if value is None:
            continue
        result[str(key).replace("-", replace_hyphen_with)] = str(value)
    return result
Convert non - string metadata values to strings and drop null values
26,907
def parse_timestamp ( ts , * , with_tz = True , assume_local = False ) : parse_result = dateutil . parser . parse ( ts ) dt = parse_result [ 0 ] if isinstance ( parse_result , tuple ) else parse_result if with_tz is False : if not dt . tzinfo : return dt tz = dateutil . tz . tzlocal ( ) if assume_local else datetime . ...
Parse a given timestamp and return a datetime object with or without tzinfo .
26,908
def create_pgpass_file ( connection_string_or_info ) : info = pgutil . get_connection_info ( connection_string_or_info ) if "password" not in info : return pgutil . create_connection_string ( info ) linekey = "{host}:{port}:{dbname}:{user}:" . format ( host = info . get ( "host" , "localhost" ) , port = info . get ( "p...
Look up password from the given object which can be a dict or a string and write a possible password in a pgpass file ; returns a connection_string without a password in it
26,909
def replication_connection_string_and_slot_using_pgpass ( target_node_info ) : connection_info , slot = connection_info_and_slot ( target_node_info ) connection_info [ "dbname" ] = "replication" connection_info [ "replication" ] = "true" connection_string = create_pgpass_file ( connection_info ) return connection_strin...
Like connection_string_and_slot_using_pgpass but returns a connection string for a replication connection .
26,910
def transmit_metrics ( self ) : global _last_stats_transmit_time with _STATS_LOCK : if time . monotonic ( ) - _last_stats_transmit_time < 10.0 : return for site in self . state : for filetype , prop in self . state [ site ] [ "upload" ] . items ( ) : if prop [ "last_success" ] : self . metrics . gauge ( "pghoard.last_u...
Keep metrics updated about how long time ago each filetype was successfully uploaded . Transmits max once per ten seconds regardless of how many threads are running .
26,911
def write(self, data):
    """Encrypt *data* and write it out; return the number of plaintext
    bytes consumed (offset tracks plaintext, not ciphertext)."""
    self._check_not_closed()
    if not data:
        return 0
    self.next_fp.write(self.encryptor.update(data))
    consumed = len(data)
    self.offset += consumed
    return consumed
Encrypt and write the given bytes
26,912
def read(self, size=-1):
    """Return up to *size* decrypted bytes; read everything when size < 0."""
    self._check_not_closed()
    if self.state == "EOF" or size == 0:
        return b""
    if size < 0:
        return self._read_all()
    return self._read_block(size)
Read up to size decrypted bytes
26,913
def tokenize_string ( cls , string , separator ) : results = [ ] token = "" found_escape = False for c in string : if found_escape : if c == separator : token += separator else : token += "\\" + c found_escape = False continue if c == "\\" : found_escape = True elif c == separator : results . append ( token ) token = "...
Split string with given separator unless the separator is escaped with backslash
26,914
def _unpaginate ( self , domain , initial_op , * , on_properties ) : request = initial_op ( domain ) while request is not None : result = self . _retry_on_reset ( request , request . execute ) for on_property in on_properties : items = result . get ( on_property ) if items is not None : yield on_property , items reques...
Iterate through the request pages until all items have been processed
26,915
def get_or_create_bucket ( self , bucket_name ) : start_time = time . time ( ) gs_buckets = self . gs . buckets ( ) try : request = gs_buckets . get ( bucket = bucket_name ) self . _retry_on_reset ( request , request . execute ) self . log . debug ( "Bucket: %r already exists, took: %.3fs" , bucket_name , time . time (...
Look up the bucket if it already exists and try to create the bucket in case it doesn t . Note that we can t just always try to unconditionally create the bucket as Google imposes a strict rate limit on bucket creation operations even if it doesn t result in a new bucket .
26,916
def fetch_timeline_history_files ( self , max_timeline ) : while max_timeline > 1 : self . c . execute ( "TIMELINE_HISTORY {}" . format ( max_timeline ) ) timeline_history = self . c . fetchone ( ) history_filename = timeline_history [ 0 ] history_data = timeline_history [ 1 ] . tobytes ( ) self . log . debug ( "Receiv...
Copy all timeline history files found on the server without checking if we have them or not . The history files are very small so reuploading them should not matter .
26,917
def check_backup_count_and_state ( self , site ) : basebackups = self . get_remote_basebackups_info ( site ) self . log . debug ( "Found %r basebackups" , basebackups ) if basebackups : last_backup_time = basebackups [ - 1 ] [ "metadata" ] [ "start-time" ] else : last_backup_time = None allowed_basebackup_count = self ...
Look up basebackups from the object store prune any extra backups and return the datetime of the latest backup .
26,918
def startup_walk_for_missed_files ( self ) : for site in self . config [ "backup_sites" ] : compressed_xlog_path , _ = self . create_backup_site_paths ( site ) uncompressed_xlog_path = compressed_xlog_path + "_incoming" for filename in os . listdir ( uncompressed_xlog_path ) : full_path = os . path . join ( uncompresse...
Check xlog and xlog_incoming directories for files that receivexlog has received but not yet compressed as well as the files we have compressed but not yet uploaded and process them .
26,919
def write_backup_state_to_json_file ( self ) : start_time = time . time ( ) state_file_path = self . config [ "json_state_file_path" ] self . state [ "walreceivers" ] = { key : { "latest_activity" : value . latest_activity , "running" : value . running , "last_flushed_lsn" : value . last_flushed_lsn } for key , value i...
Periodically write a JSON state file to disk
26,920
def get_connection_info(info):
    """Turn a connection info object into a dict.

    Returns a copy if *info* is already a dict; otherwise parses it as a
    URL-style ("postgres://" / "postgresql://") or traditional libpq
    connection string.
    """
    if isinstance(info, dict):
        return info.copy()
    # idiom: str.startswith accepts a tuple of prefixes
    if info.startswith(("postgres://", "postgresql://")):
        return parse_connection_string_url(info)
    return parse_connection_string_libpq(info)
Turn a connection info object into a dict, or return a copy if it was a dict already. Supports both the traditional libpq format and the new URL format.
26,921
def list_basebackups_http(self, arg):
    """List available basebackups from an HTTP source."""
    restore_source = HTTPRestore(arg.host, arg.port, arg.site)
    self.storage = restore_source
    restore_source.show_basebackup_list(verbose=arg.verbose)
List available basebackups from a HTTP source
26,922
def list_basebackups ( self , arg ) : self . config = config . read_json_config_file ( arg . config , check_commands = False , check_pgdata = False ) site = config . get_site_from_config ( self . config , arg . site ) self . storage = self . _get_object_storage ( site , pgdata = None ) self . storage . show_basebackup_...
List basebackups from an object store
26,923
def get_basebackup ( self , arg ) : if not arg . tablespace_dir : tablespace_mapping = { } else : try : tablespace_mapping = dict ( v . split ( "=" , 1 ) for v in arg . tablespace_dir ) except ValueError : raise RestoreError ( "Invalid tablespace mapping {!r}" . format ( arg . tablespace_dir ) ) self . config = config ...
Download a basebackup from an object store
26,924
def get(self, dismiss=True):
    """Read and return the object this key points to, optionally
    dismissing the underlying source afterwards."""
    try:
        cls = _classof(self._context, self._fClassName)
        return cls.read(self._source, self._cursor.copied(), self._context, self)
    finally:
        if dismiss:
            self._source.dismiss()
Extract the object this key points to .
26,925
def open_group ( store = None , mode = 'a' , cache_attrs = True , synchronizer = None , path = None , chunk_store = None ) : store = _normalize_store_arg ( store ) if chunk_store is not None : chunk_store = _normalize_store_arg ( chunk_store ) path = normalize_storage_path ( path ) if mode in [ 'r' , 'r+' ] : if contai...
Open a group using file - mode - like semantics .
26,926
def name(self):
    """Group name following the h5py convention (always absolute)."""
    path = self._path
    if not path:
        return '/'
    return path if path.startswith('/') else '/' + path
Group name following h5py convention .
26,927
def group_keys(self):
    """Yield the names of this group's members that are themselves groups."""
    for member in sorted(listdir(self._store, self._path)):
        if contains_group(self._store, self._key_prefix + member):
            yield member
Return an iterator over member names for groups only .
26,928
def array_keys(self):
    """Yield the names of this group's members that are arrays."""
    for member in sorted(listdir(self._store, self._path)):
        if contains_array(self._store, self._key_prefix + member):
            yield member
Return an iterator over member names for arrays only .
26,929
def visitvalues(self, func):
    """Depth-first apply *func* to every descendant object; stop and
    return the first non-None result."""
    def _walk(node):
        yield node
        # leaf objects without keys() are treated as having no children
        for child_key in sorted(getattr(node, "keys", lambda: [])()):
            yield from _walk(node[child_key])
    # islice(..., 1, None) skips self: only descendants are visited
    for descendant in islice(_walk(self), 1, None):
        outcome = func(descendant)
        if outcome is not None:
            return outcome
Run func on each object .
26,930
def visit(self, func):
    """Apply *func* to the path (relative to this group) of each descendant."""
    prefix_len = len(self.name)
    def _on_value(obj):
        return func(obj.name[prefix_len:].lstrip("/"))
    return self.visitvalues(_on_value)
Run func on each object s path .
26,931
def tree(self, expand=False, level=None):
    """Return a TreeViewer providing a printable display of the hierarchy."""
    viewer = TreeViewer(self, expand=expand, level=level)
    return viewer
Provide a print - able display of the hierarchy .
26,932
def create_group(self, name, overwrite=False):
    """Create a sub-group named *name* as a synchronized write operation."""
    return self._write_op(self._create_group_nosync, name, overwrite=overwrite)
Create a sub - group .
26,933
def create_groups(self, *names, **kwargs):
    """Create several sub-groups at once; returns them as a tuple."""
    created = []
    for group_name in names:
        created.append(self.create_group(group_name, **kwargs))
    return tuple(created)
Convenience method to create multiple groups in a single call .
26,934
def require_group(self, name, overwrite=False):
    """Return sub-group *name*, creating it if missing (synchronized)."""
    return self._write_op(self._require_group_nosync, name, overwrite=overwrite)
Obtain a sub - group creating one if it doesn t exist .
26,935
def move ( self , source , dest ) : source = self . _item_path ( source ) dest = self . _item_path ( dest ) if not ( contains_array ( self . _store , source ) or contains_group ( self . _store , source ) ) : raise ValueError ( 'The source, "%s", does not exist.' % source ) if contains_array ( self . _store , dest ) or ...
Move contents from one path to another relative to the Group .
26,936
def asdict(self):
    """Return all attributes as a dict, using the cache when enabled."""
    cached = self._cached_asdict
    if self.cache and cached is not None:
        return cached
    attrs = self._get_nosync()
    if self.cache:
        self._cached_asdict = attrs
    return attrs
Retrieve all attributes as a dictionary .
26,937
def update(self, *args, **kwargs):
    """Set several attributes in one synchronized write operation."""
    self._write_op(self._update_nosync, *args, **kwargs)
Update the values of several attributes in a single operation .
26,938
def json_dumps(o):
    """Serialize *o* as deterministic, human-readable ASCII JSON bytes."""
    text = json.dumps(o, indent=4, sort_keys=True, ensure_ascii=True,
                      separators=(',', ': '))
    return text.encode('ascii')
Write JSON in a consistent human - readable way .
26,939
def normalize_shape(shape):
    """Normalize a shape argument to a tuple of ints; reject None."""
    if shape is None:
        raise TypeError('shape is None')
    if isinstance(shape, numbers.Integral):
        return (int(shape),)
    return tuple(int(dim) for dim in shape)
Convenience function to normalize the shape argument .
26,940
def guess_chunks ( shape , typesize ) : ndims = len ( shape ) chunks = np . maximum ( np . array ( shape , dtype = '=f8' ) , 1 ) dset_size = np . product ( chunks ) * typesize target_size = CHUNK_BASE * ( 2 ** np . log10 ( dset_size / ( 1024. * 1024 ) ) ) if target_size > CHUNK_MAX : target_size = CHUNK_MAX elif target...
Guess an appropriate chunk layout for a dataset given its shape and the size of each element in bytes . Will allocate chunks only as large as MAX_SIZE . Chunks are generally close to some power - of - 2 fraction of each axis slightly favoring bigger values for the last index . Undocumented and subject to change without...
26,941
def normalize_chunks ( chunks , shape , typesize ) : if chunks is None or chunks is True : return guess_chunks ( shape , typesize ) if chunks is False : return shape if isinstance ( chunks , numbers . Integral ) : chunks = ( int ( chunks ) , ) if len ( chunks ) > len ( shape ) : raise ValueError ( 'too many dimensions ...
Convenience function to normalize the chunks argument for an array with the given shape .
26,942
def contains_array(store, path=None):
    """Return True if *store* holds array metadata at logical *path*."""
    prefix = _path_to_prefix(normalize_storage_path(path))
    return (prefix + array_meta_key) in store
Return True if the store contains an array at the given logical path .
26,943
def contains_group(store, path=None):
    """Return True if *store* holds group metadata at logical *path*."""
    prefix = _path_to_prefix(normalize_storage_path(path))
    return (prefix + group_meta_key) in store
Return True if the store contains a group at the given logical path .
26,944
def rmdir(store, path=None):
    """Remove all items under *path*, preferring the store's native rmdir
    and falling back to the MutableMapping interface."""
    normalized = normalize_storage_path(path)
    if hasattr(store, 'rmdir'):
        store.rmdir(normalized)
    else:
        _rmdir_from_keys(store, normalized)
Remove all items under the given path . If store provides a rmdir method this will be called otherwise will fall back to implementation via the MutableMapping interface .
26,945
def rename(store, src_path, dst_path):
    """Rename all items under *src_path* to *dst_path*, preferring the
    store's native rename and falling back to key-level moves."""
    src = normalize_storage_path(src_path)
    dst = normalize_storage_path(dst_path)
    if hasattr(store, 'rename'):
        store.rename(src, dst)
    else:
        _rename_from_keys(store, src, dst)
Rename all items under the given path . If store provides a rename method this will be called otherwise will fall back to implementation via the MutableMapping interface .
26,946
def listdir(store, path=None):
    """Directory listing for *path*, preferring the store's native listdir
    and falling back to the MutableMapping interface."""
    normalized = normalize_storage_path(path)
    if hasattr(store, 'listdir'):
        return store.listdir(normalized)
    return _listdir_from_keys(store, normalized)
Obtain a directory listing for the given path . If store provides a listdir method this will be called otherwise will fall back to implementation via the MutableMapping interface .
26,947
def getsize ( store , path = None ) : path = normalize_storage_path ( path ) if hasattr ( store , 'getsize' ) : return store . getsize ( path ) elif isinstance ( store , dict ) : if path in store : v = store [ path ] size = buffer_size ( v ) else : members = listdir ( store , path ) prefix = _path_to_prefix ( path ) si...
Compute size of stored items for a given path . If store provides a getsize method this will be called otherwise will return - 1 .
26,948
def init_array ( store , shape , chunks = True , dtype = None , compressor = 'default' , fill_value = None , order = 'C' , overwrite = False , path = None , chunk_store = None , filters = None , object_codec = None ) : path = normalize_storage_path ( path ) _require_parent_group ( path , store = store , chunk_store = c...
Initialize an array store with the given configuration . Note that this is a low - level function and there should be no need to call this directly from user code .
26,949
def init_group ( store , overwrite = False , path = None , chunk_store = None ) : path = normalize_storage_path ( path ) _require_parent_group ( path , store = store , chunk_store = chunk_store , overwrite = overwrite ) _init_group_metadata ( store = store , overwrite = overwrite , path = path , chunk_store = chunk_sto...
Initialize a group store . Note that this is a low - level function and there should be no need to call this directly from user code .
26,950
def atexit_rmtree(path, isdir=os.path.isdir, rmtree=shutil.rmtree):
    """Remove directory *path* if it exists; intended as an atexit hook.

    The filesystem functions are injectable parameters so they remain
    bound even during interpreter shutdown.
    """
    if not isdir(path):
        return
    rmtree(path)
Ensure directory removal at interpreter exit .
26,951
def atexit_rmglob(path, glob=glob.glob, isdir=os.path.isdir, isfile=os.path.isfile, remove=os.remove, rmtree=shutil.rmtree):
    """Remove every file or directory matching glob pattern *path*.

    Filesystem helpers are injected as defaults so they stay usable when
    called at interpreter exit.
    """
    for match in glob(path):
        if isfile(match):
            remove(match)
        elif isdir(match):
            rmtree(match)
Ensure removal of multiple files at interpreter exit .
26,952
def migrate_1to2 ( store ) : from zarr import meta_v1 meta = meta_v1 . decode_metadata ( store [ 'meta' ] ) del store [ 'meta' ] meta [ 'filters' ] = None compression = meta [ 'compression' ] if compression is None or compression == 'none' : compressor_config = None else : compression_opts = meta [ 'compression_opts' ]...
Migrate array metadata in store from Zarr format version 1 to version 2 .
26,953
def flush(self):
    """Close the zip file so all records hit disk, then reopen it in
    append mode for further writes; no-op when opened read-only."""
    if self.mode == 'r':
        return
    with self.mutex:
        self.zf.close()
        self.zf = zipfile.ZipFile(self.path, mode='a',
                                  compression=self.compression,
                                  allowZip64=self.allowZip64)
Closes the underlying zip file ensuring all records are written then re - opens the file for further modifications .
26,954
def close(self):
    """Close the underlying database file, if it supports closing."""
    if not hasattr(self.db, 'close'):
        return
    with self.write_mutex:
        self.db.close()
Closes the underlying database file .
26,955
def flush(self):
    """Synchronize data to the underlying database file; no-op when the
    database was opened read-only."""
    if self.flag[0] == 'r':
        return
    with self.write_mutex:
        if hasattr(self.db, 'sync'):
            self.db.sync()
        else:
            # No sync support: close and reopen instead, downgrading an
            # 'n' (always create new) flag to 'c' so the reopen does not
            # wipe the file we just wrote.
            reopen_flag = self.flag
            if reopen_flag[0] == 'n':
                reopen_flag = 'c' + reopen_flag[1:]
            self.db.close()
            self.db = self.open(self.path, reopen_flag, self.mode, **self.open_kwargs)
Synchronizes data to the underlying database file .
26,956
def oindex(a, selection):
    """Orthogonal (outer) indexing of *a* with slices and ints."""
    selection = replace_ellipsis(selection, a.shape)
    # integer selections collapse their axis; remember which to squeeze
    drop_axes = tuple(axis for axis, dim_sel in enumerate(selection)
                      if is_integer(dim_sel))
    result = a[ix_(selection, a.shape)]
    return result.squeeze(axis=drop_axes) if drop_axes else result
Implementation of orthogonal indexing with slices and ints .
26,957
def array_metadata_to_n5 ( array_metadata ) : for f , t in zarr_to_n5_keys : array_metadata [ t ] = array_metadata [ f ] del array_metadata [ f ] del array_metadata [ 'zarr_format' ] try : dtype = np . dtype ( array_metadata [ 'dataType' ] ) except TypeError : raise TypeError ( "data type %s not supported by N5" % arra...
Convert array metadata from zarr to N5 format .
26,958
def array_metadata_to_zarr ( array_metadata ) : for t , f in zarr_to_n5_keys : array_metadata [ t ] = array_metadata [ f ] del array_metadata [ f ] array_metadata [ 'zarr_format' ] = ZARR_FORMAT array_metadata [ 'shape' ] = array_metadata [ 'shape' ] [ : : - 1 ] array_metadata [ 'chunks' ] = array_metadata [ 'chunks' ]...
Convert array metadata from N5 to zarr format .
26,959
def name(self):
    """Array name following the h5py convention, or None when unnamed."""
    path = self.path
    if not path:
        return None
    return path if path.startswith('/') else '/' + path
Array name following h5py convention .
26,960
def nbytes_stored(self):
    """Total stored bytes for this array (metadata, attributes, chunks),
    or -1 when an underlying store cannot report its size."""
    meta_bytes = getsize(self._store, self._path)
    if self._chunk_store is None:
        return meta_bytes
    chunk_bytes = getsize(self._chunk_store, self._path)
    if meta_bytes < 0 or chunk_bytes < 0:
        return -1
    return meta_bytes + chunk_bytes
The total number of stored bytes of data for the array . This includes storage required for configuration metadata and user attributes .
26,961
def nchunks_initialized(self):
    """Count chunk-store keys that look like chunk coordinate keys.

    NOTE(review): the pattern uses min(1, ndim), so it only anchors on the
    first coordinate component (any key starting with digits matches).
    """
    pattern = re.compile(r'\.'.join([r'\d+'] * min(1, self.ndim)))
    return sum(1 for key in listdir(self.chunk_store, self._path)
               if pattern.match(key))
The number of chunks that have been initialized with some data .
26,962
def get_basic_selection ( self , selection = Ellipsis , out = None , fields = None ) : if not self . _cache_metadata : self . _load_metadata ( ) check_fields ( fields , self . _dtype ) if self . _shape == ( ) : return self . _get_basic_selection_zd ( selection = selection , out = out , fields = fields ) else : return s...
Retrieve data for an item or region of the array .
26,963
def get_mask_selection(self, selection, out=None, fields=None):
    """Retrieve items selected by a Boolean mask of the same shape as
    the array (True marks a selected item)."""
    if not self._cache_metadata:
        self._load_metadata()
    check_fields(fields, self._dtype)
    mask_indexer = MaskIndexer(selection, self)
    return self._get_selection(indexer=mask_indexer, out=out, fields=fields)
Retrieve a selection of individual items by providing a Boolean array of the same shape as the array against which the selection is being made where True values indicate a selected item .
26,964
def set_basic_selection ( self , selection , value , fields = None ) : if self . _read_only : err_read_only ( ) if not self . _cache_metadata : self . _load_metadata_nosync ( ) if self . _shape == ( ) : return self . _set_basic_selection_zd ( selection , value , fields = fields ) else : return self . _set_basic_selecti...
Modify data for an item or region of the array .
26,965
def set_orthogonal_selection(self, selection, value, fields=None):
    """Write *value* into the region selected per-dimension by *selection*."""
    if self._read_only:
        err_read_only()
    if not self._cache_metadata:
        self._load_metadata_nosync()
    ortho_indexer = OrthogonalIndexer(selection, self)
    self._set_selection(ortho_indexer, value, fields=fields)
Modify data via a selection for each dimension of the array .
26,966
def set_mask_selection(self, selection, value, fields=None):
    """Write *value* into the items selected by a Boolean mask of the same
    shape as the array (True marks a selected item)."""
    if self._read_only:
        err_read_only()
    if not self._cache_metadata:
        self._load_metadata_nosync()
    mask_indexer = MaskIndexer(selection, self)
    self._set_selection(mask_indexer, value, fields=fields)
Modify a selection of individual items by providing a Boolean array of the same shape as the array against which the selection is being made where True values indicate a selected item .
26,967
def _chunk_getitem ( self , chunk_coords , chunk_selection , out , out_selection , drop_axes = None , fields = None ) : assert len ( chunk_coords ) == len ( self . _cdata_shape ) ckey = self . _chunk_key ( chunk_coords ) try : cdata = self . chunk_store [ ckey ] except KeyError : if self . _fill_value is not None : if ...
Obtain part or whole of a chunk .
26,968
def _chunk_setitem ( self , chunk_coords , chunk_selection , value , fields = None ) : if self . _synchronizer is None : lock = nolock else : ckey = self . _chunk_key ( chunk_coords ) lock = self . _synchronizer [ ckey ] with lock : self . _chunk_setitem_nosync ( chunk_coords , chunk_selection , value , fields = fields...
Replace part or whole of a chunk .
26,969
def append(self, data, axis=0):
    """Append *data* along *axis* as a synchronized write operation."""
    return self._write_op(self._append_nosync, data, axis=axis)
Append data to axis .
26,970
def view ( self , shape = None , chunks = None , dtype = None , fill_value = None , filters = None , read_only = None , synchronizer = None ) : store = self . _store chunk_store = self . _chunk_store path = self . _path if read_only is None : read_only = self . _read_only if synchronizer is None : synchronizer = self ....
Return an array sharing the same data .
26,971
def astype(self, dtype):
    """Return a read-only view converting values to *dtype* on the fly."""
    target = np.dtype(dtype)
    view_filters = list(self._filters) if self._filters else []
    view_filters.insert(0, AsType(encode_dtype=self._dtype, decode_dtype=target))
    return self.view(filters=view_filters, dtype=target, read_only=True)
Returns a view that does on the fly type conversion of the underlying data .
26,972
def full(shape, fill_value, **kwargs):
    """Create an array whose uninitialized portions default to *fill_value*."""
    return create(shape=shape, fill_value=fill_value, **kwargs)
Create an array with fill_value being used as the default value for uninitialized portions of the array .
26,973
def array ( data , ** kwargs ) : if not hasattr ( data , 'shape' ) or not hasattr ( data , 'dtype' ) : data = np . asanyarray ( data ) kw_dtype = kwargs . get ( 'dtype' , None ) if kw_dtype is None : kwargs [ 'dtype' ] = data . dtype else : kwargs [ 'dtype' ] = kw_dtype data_shape , data_chunks = _get_shape_chunks ( da...
Create an array filled with data .
26,974
def open_array ( store = None , mode = 'a' , shape = None , chunks = True , dtype = None , compressor = 'default' , fill_value = 0 , order = 'C' , synchronizer = None , filters = None , cache_metadata = True , cache_attrs = True , path = None , object_codec = None , chunk_store = None , ** kwargs ) : clobber = mode == ...
Open an array using file - mode - like semantics .
26,975
def full_like(a, **kwargs):
    """Create a filled array with the same properties as *a*."""
    _like_args(a, kwargs)
    if isinstance(a, Array):
        kwargs.setdefault('fill_value', a.fill_value)
    return full(**kwargs)
Create a filled array like a .
26,976
def open_like(a, path, **kwargs):
    """Open a persistent array at *path* with the same properties as *a*."""
    _like_args(a, kwargs)
    if isinstance(a, Array):
        kwargs.setdefault('fill_value', a.fill_value)
    return open_array(path, **kwargs)
Open a persistent array like a .
26,977
def open ( store = None , mode = 'a' , ** kwargs ) : path = kwargs . get ( 'path' , None ) clobber = mode == 'w' store = normalize_store_arg ( store , clobber = clobber ) path = normalize_storage_path ( path ) if mode in { 'w' , 'w-' , 'x' } : if 'shape' in kwargs : return open_array ( store , mode = mode , ** kwargs )...
Convenience function to open a group or array using file - mode - like semantics .
26,978
def save(store, *args, **kwargs):
    """Save one array (single positional) or several named arrays to *store*.

    Raises ValueError when no array is supplied at all.
    """
    if not args and not kwargs:
        raise ValueError('at least one array must be provided')
    if len(args) == 1 and not kwargs:
        save_array(store, args[0])
    else:
        save_group(store, *args, **kwargs)
Convenience function to save an array or group of arrays to the local file system .
26,979
def load(store):
    """Load an array fully into memory, or wrap a group in a LazyLoader.

    Returns None when the store contains neither an array nor a group.
    """
    store = normalize_store_arg(store)
    if contains_array(store, path=None):
        return Array(store=store, path=None)[...]
    if contains_group(store, path=None):
        return LazyLoader(Group(store=store, path=None))
Load data from an array or group into memory .
26,980
def copy ( source , dest , name = None , shallow = False , without_attrs = False , log = None , if_exists = 'raise' , dry_run = False , ** create_kws ) : _check_dest_is_group ( dest ) with _LogWriter ( log ) as log : n_copied , n_skipped , n_bytes_copied = _copy ( log , source , dest , name = name , root = True , shall...
Copy the source array or group into the dest group .
26,981
def copy_all ( source , dest , shallow = False , without_attrs = False , log = None , if_exists = 'raise' , dry_run = False , ** create_kws ) : _check_dest_is_group ( dest ) n_copied = n_skipped = n_bytes_copied = 0 with _LogWriter ( log ) as log : for k in source . keys ( ) : c , s , b = _copy ( log , source [ k ] , d...
Copy all children of the source group into the dest group .
26,982
def consolidate_metadata ( store , metadata_key = '.zmetadata' ) : store = normalize_store_arg ( store ) def is_zarr_key ( key ) : return ( key . endswith ( '.zarray' ) or key . endswith ( '.zgroup' ) or key . endswith ( '.zattrs' ) ) out = { 'zarr_consolidated_format' : 1 , 'metadata' : { key : json_loads ( store [ ke...
Consolidate all metadata for groups and arrays within the given store into a single resource and put it under the given key .
26,983
def open_consolidated ( store , metadata_key = '.zmetadata' , mode = 'r+' , ** kwargs ) : from . storage import ConsolidatedMetadataStore store = normalize_store_arg ( store ) if mode not in { 'r' , 'r+' } : raise ValueError ( "invalid mode, expected either 'r' or 'r+'; found {!r}" . format ( mode ) ) meta_store = Cons...
Open group using metadata previously consolidated into a single key .
26,984
def volume_list_paged ( request , search_opts = None , marker = None , paginate = False , sort_dir = "desc" ) : has_more_data = False has_prev_data = False volumes = [ ] c_client = _cinderclient_with_generic_groups ( request ) if c_client is None : return volumes , has_more_data , has_prev_data transfers = { t . volume...
List volumes with pagination .
26,985
def extension_supported(request, extension_name):
    """Return True if Cinder reports an extension named *extension_name*.

    Idiom fix: replaces a hand-rolled search loop with any().
    """
    return any(extension.name == extension_name
               for extension in list_extensions(request))
This method will determine if Cinder supports a given extension name .
26,986
def transfer_list(request, detailed=True, search_opts=None):
    """List volume transfers; returns [] when the API call is forbidden."""
    client = cinderclient(request)
    try:
        return [VolumeTransfer(t)
                for t in client.transfers.list(detailed=detailed,
                                               search_opts=search_opts)]
    except cinder_exception.Forbidden as error:
        LOG.error(error)
        return []
List volume transfers .
26,987
def tenant_quota_usages ( request , tenant_id = None , targets = None ) : if not tenant_id : tenant_id = request . user . project_id disabled_quotas = get_disabled_quotas ( request , targets ) usages = QuotaUsage ( ) futurist_utils . call_functions_parallel ( ( _get_tenant_compute_usages , [ request , usages , disabled...
Get our quotas and construct our usage object .
26,988
def add_quota(self, quota):
    """Record *quota* in the usage tracker.

    A limit of None, -1 or infinity means unlimited: both the quota and
    the available amount are set to infinity.
    """
    entry = self.usages[quota.name]
    if quota.limit in (None, -1, float('inf')):
        entry['quota'] = float("inf")
        entry['available'] = float("inf")
    else:
        entry['quota'] = int(quota.limit)
Adds an internal tracking reference for the given quota .
26,989
def tally(self, name, value):
    """Add *value* (treated as 0 when falsy) to the quota's used count
    and refresh its available amount."""
    usage = self.usages[name]
    usage['used'] = usage.get('used', 0) + int(value or 0)
    self.update_available(name)
Adds to the used metric for the given quota .
26,990
def update_available(self, name):
    """Recompute the 'available' metric as max(quota - used, 0); a
    missing quota counts as unlimited."""
    usage = self.usages[name]
    quota_limit = usage.get('quota', float('inf'))
    usage['available'] = max(quota_limit - usage['used'], 0)
Updates the available metric for the given quota .
26,991
def process_non_api_filters(search_opts, non_api_filter_info):
    """Apply every non-API filter swap; return False as soon as one fails.

    Idiom fix: replaces a hand-rolled early-return loop with all(), which
    short-circuits identically.
    """
    return all(
        _swap_filter(resources, search_opts, fake_field, real_field)
        for fake_field, real_field, resources in non_api_filter_info
    )
Process filters by non - API fields
26,992
def validate_port_or_colon_separated_port_range(port_range):
    """Validate a single port number or a single-colon 'low:high' range."""
    if port_range.count(':') > 1:
        raise ValidationError(_("One colon allowed in port range"))
    for port in port_range.split(':'):
        validate_port_range(port)
Accepts a port number or a single - colon separated range .
26,993
def import_submodules ( module ) : submodules = { } for loader , name , ispkg in pkgutil . iter_modules ( module . __path__ , module . __name__ + '.' ) : try : submodule = import_module ( name ) except ImportError as e : logging . warning ( "Error importing %s" , name ) logging . exception ( e ) else : parent , child =...
Import all submodules and make them available in a dict .
26,994
def import_dashboard_config ( modules ) : config = collections . defaultdict ( dict ) for module in modules : for submodule in import_submodules ( module ) . values ( ) : if hasattr ( submodule , 'DASHBOARD' ) : dashboard = submodule . DASHBOARD config [ dashboard ] . update ( submodule . __dict__ ) elif ( hasattr ( su...
Imports configuration from all the modules and merges it .
26,995
def get_xstatic_dirs ( XSTATIC_MODULES , HORIZON_CONFIG ) : STATICFILES_DIRS = [ ] HORIZON_CONFIG . setdefault ( 'xstatic_lib_files' , [ ] ) for module_name , files in XSTATIC_MODULES : module = import_module ( module_name ) if module_name == 'xstatic.pkg.jquery_ui' : if module . VERSION . startswith ( '1.10.' ) : file...
Discover static file configuration of the xstatic modules .
26,996
def upgrade_api(request, client, version):
    """Return a nova client at *version* when the server supports it,
    otherwise the original client unchanged."""
    min_ver, max_ver = api_versions._get_server_version_range(client)
    requested = api_versions.APIVersion(version)
    if min_ver <= requested <= max_ver:
        return _nova.novaclient(request, version)
    return client
Upgrade the nova API to the specified version if possible.
26,997
def add_tenant_to_flavor(request, flavor, tenant):
    """Grant *tenant* access to *flavor*."""
    nova = _nova.novaclient(request)
    return nova.flavor_access.add_tenant_access(flavor=flavor, tenant=tenant)
Add a tenant to the given flavor access list .
26,998
def remove_tenant_from_flavor(request, flavor, tenant):
    """Revoke *tenant*'s access to *flavor*."""
    nova = _nova.novaclient(request)
    return nova.flavor_access.remove_tenant_access(flavor=flavor, tenant=tenant)
Remove a tenant from the given flavor access list .
26,999
def flavor_get_extras(request, flavor_id, raw=False, flavor=None):
    """Get a flavor's extra specs, either as the raw dict or wrapped in
    FlavorExtraSpec objects; the flavor is fetched if not supplied."""
    if flavor is None:
        flavor = _nova.novaclient(request).flavors.get(flavor_id)
    extras = flavor.get_keys()
    if raw:
        return extras
    return [FlavorExtraSpec(flavor_id, key, value)
            for key, value in extras.items()]
Get flavor extra specs .