idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
28,000
def get_meta_options(self, model):
    """Returns a dictionary of Meta options for the audit log model."""
    options = {
        'ordering': ('-action_date',),
        'app_label': model._meta.app_label,
    }
    # Imported lazily; only newer Django versions list this Meta option.
    from django.db.models.options import DEFAULT_NAMES
    if 'default_permissions' in DEFAULT_NAMES:
        options['default_permissions'] = ()
    return options
Returns a dictionary of Meta options for the audit log model .
92
13
28,001
def create_log_entry_model(self, model):
    """Creates a log entry model that will be associated with the model provided."""
    # Start from copies of the tracked model's own fields, then layer on
    # the audit-specific logging fields.
    attrs = self.copy_fields(model)
    attrs.update(self.get_logging_fields(model))
    # Synthesize the inner Meta class from the options dict.
    attrs.update(Meta=type(str('Meta'), (), self.get_meta_options(model)))
    name = str('%sAuditLogEntry' % model._meta.object_name)
    # Dynamically create the Django model class.
    return type(name, (models.Model,), attrs)
Creates a log entry model that will be associated with the model provided .
110
15
28,002
def decode_kempressed(bytestring):
    """Decode fpzip 'kempressed' data.

    subvol not bytestring since numpy conversion is done inside the fpzip
    extension.
    """
    decoded = fpzip.decompress(bytestring, order='F')
    # Invert the kempression transform: swap axes 3 and 2, remove the 2.0 offset.
    return np.swapaxes(decoded, 3, 2) - 2.0
subvol not bytestring since numpy conversion is done inside fpzip extension .
50
18
28,003
def bbox2array(vol, bbox, order='F', readonly=False, lock=None, location=None):
    """Convenience method for creating a shared memory numpy array based on a
    CloudVolume and Bbox.

    c.f. sharedmemory.ndarray for information on the optional lock parameter.
    """
    # Fall back to the volume's shared memory segment when no location given.
    location = location or vol.shared_memory_id
    dims = list(bbox.size3()) + [vol.num_channels]
    return ndarray(
        shape=dims, dtype=vol.dtype, location=location,
        readonly=readonly, lock=lock, order=order
    )
Convenience method for creating a shared memory numpy array based on a CloudVolume and Bbox . c . f . sharedmemory . ndarray for information on the optional lock parameter .
98
39
28,004
def ndarray_fs(shape, dtype, location, lock, readonly=False, order='F', **kwargs):
    """Emulate shared memory using the filesystem.

    Creates (or reuses) a zero-filled file of the exact byte size implied by
    shape/dtype, mmaps it, and wraps the mapping in a numpy ndarray.

    Returns: (mmap object, numpy ndarray view onto it).
    Raises SharedMemoryReadError when readonly and the backing file is
    missing or mis-sized.
    """
    dbytes = np.dtype(dtype).itemsize
    nbytes = Vec(*shape).rectVolume() * dbytes
    directory = mkdir(EMULATED_SHM_DIRECTORY)
    filename = os.path.join(directory, location)
    # The optional lock serializes allocation across processes.
    if lock:
        lock.acquire()
    exists = os.path.exists(filename)
    size = 0 if not exists else os.path.getsize(filename)
    if readonly and not exists:
        raise SharedMemoryReadError(filename + " has not been allocated. Requested " + str(nbytes) + " bytes.")
    elif readonly and size != nbytes:
        raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(filename, size, nbytes))
    if exists:
        if size > nbytes:
            # NOTE(review): opening with 'wb' truncates the file to zero
            # before ftruncate re-extends it, discarding prior contents —
            # confirm that losing the data on the shrink path is intended.
            with open(filename, 'wb') as f:
                os.ftruncate(f.fileno(), nbytes)
        elif size < nbytes:
            # too small? just remake it below
            # if we were being more efficient
            # we could just append zeros
            os.unlink(filename)
    exists = os.path.exists(filename)
    if not exists:
        # Zero-fill the file in ~10 MiB-of-elements chunks.
        blocksize = 1024 * 1024 * 10 * dbytes
        steps = int(math.ceil(float(nbytes) / float(blocksize)))
        total = 0
        with open(filename, 'wb') as f:
            for i in range(0, steps):
                write_bytes = min(blocksize, nbytes - total)
                f.write(b'\x00' * write_bytes)
                total += blocksize
    if lock:
        lock.release()
    with open(filename, 'r+b') as f:
        array_like = mmap.mmap(f.fileno(), 0)  # map entire file
    renderbuffer = np.ndarray(buffer=array_like, dtype=dtype, shape=shape, order=order, **kwargs)
    renderbuffer.setflags(write=(not readonly))
    return array_like, renderbuffer
Emulate shared memory using the filesystem .
489
8
28,005
def cutout(vol, requested_bbox, steps, channel_slice=slice(None), parallel=1,
           shared_memory_location=None, output_to_shared_memory=False):
    """Cutout a requested bounding box from storage and return it as a numpy array.

    steps: striding per axis applied after download (Vec-like with x/y/z).
    channel_slice: slice applied to the channel axis.
    """
    global fs_lock
    # Expand the request to chunk boundaries so whole chunks can be fetched.
    cloudpath_bbox = requested_bbox.expand_to_chunk_size(vol.underlying, offset=vol.voxel_offset)
    cloudpath_bbox = Bbox.clamp(cloudpath_bbox, vol.bounds)
    cloudpaths = list(chunknames(cloudpath_bbox, vol.bounds, vol.key, vol.underlying))
    shape = list(requested_bbox.size3()) + [vol.num_channels]
    handle = None
    if parallel == 1:
        if output_to_shared_memory:
            array_like, renderbuffer = shm.bbox2array(vol, requested_bbox, location=shared_memory_location, lock=fs_lock)
            shm.track_mmap(array_like)
        else:
            renderbuffer = np.zeros(shape=shape, dtype=vol.dtype, order='F')
        # Each downloaded chunk is painted into the render buffer in place.
        def process(img3d, bbox):
            shade(renderbuffer, requested_bbox, img3d, bbox)
        download_multiple(vol, cloudpaths, fn=process)
    else:
        handle, renderbuffer = multi_process_cutout(vol, requested_bbox, cloudpaths, parallel, shared_memory_location, output_to_shared_memory)
    # Downsample by striding and select channels.
    renderbuffer = renderbuffer[::steps.x, ::steps.y, ::steps.z, channel_slice]
    return VolumeCutout.from_volume(vol, renderbuffer, requested_bbox, handle=handle)
Cutout a requested bounding box from storage and return it as a numpy array .
382
18
28,006
def decode(vol, filename, content):
    """Decode content according to settings in a cloudvolume instance.

    Raises EmptyVolumeException for missing content unless vol.fill_missing.
    """
    bbox = Bbox.from_filename(filename)
    content_len = len(content) if content is not None else 0
    if not content:
        if vol.fill_missing:
            # Empty content signals the decoder to produce a blank chunk.
            content = ''
        else:
            raise EmptyVolumeException(filename)
    shape = list(bbox.size3()) + [vol.num_channels]
    try:
        return chunks.decode(
            content, encoding=vol.encoding, shape=shape, dtype=vol.dtype,
            block_size=vol.compressed_segmentation_block_size,
        )
    except Exception as error:
        # Log which file failed before propagating.
        print(red('File Read Error: {} bytes, {}, {}, errors: {}'.format(content_len, bbox, filename, error)))
        raise
Decode content according to settings in a cloudvolume instance .
167
12
28,007
def shade(renderbuffer, bufferbbox, img3d, bbox):
    """Shade a renderbuffer with a downloaded chunk. The buffer will only be
    painted in the overlapping region of the content.

    bufferbbox: extent of renderbuffer in global coordinates.
    bbox: extent of img3d in global coordinates.
    """
    if not Bbox.intersects(bufferbbox, bbox):
        return
    # Overlap region in global coordinates.
    spt = max2(bbox.minpt, bufferbbox.minpt)
    ept = min2(bbox.maxpt, bufferbbox.maxpt)
    ZERO3 = Vec(0, 0, 0)
    # Source indices within img3d corresponding to the overlap.
    istart = max2(spt - bbox.minpt, ZERO3)
    iend = min2(ept - bbox.maxpt, ZERO3) + img3d.shape[:3]
    # Destination box within the renderbuffer.
    rbox = Bbox(spt, ept) - bufferbbox.minpt
    if len(img3d.shape) == 3:
        # Promote to 4D so the channel axis always exists.
        img3d = img3d[:, :, :, np.newaxis]
    renderbuffer[rbox.to_slices()] = img3d[istart.x:iend.x, istart.y:iend.y, istart.z:iend.z, :]
Shade a renderbuffer with a downloaded chunk . The buffer will only be painted in the overlapping region of the content .
238
24
28,008
def cdn_cache_control(val):
    """Translate cdn_cache into a Cache-Control HTTP header.

    Args:
      val: None or True => default 1 hour caching; False or 0 => 'no-cache';
           a non-negative int => max-age in seconds; a str => used verbatim.

    Returns:
      str value for a Cache-Control header.

    Raises:
      ValueError: on negative integers.
      NotImplementedError: on unsupported types.
    """
    if val is None:
        return 'max-age=3600, s-max-age=3600'
    elif type(val) is str:
        return val
    elif type(val) is bool:
        # bool must be tested before int since bool is an int subclass.
        if val:
            return 'max-age=3600, s-max-age=3600'
        else:
            return 'no-cache'
    elif type(val) is int:
        if val < 0:
            raise ValueError('cdn_cache must be a positive integer, boolean, or string. Got: ' + str(val))
        if val == 0:
            return 'no-cache'
        else:
            return 'max-age={}, s-max-age={}'.format(val, val)
    else:
        # BUG FIX: `type(val) + '...'` raised TypeError (can't concatenate a
        # type with str) instead of the intended NotImplementedError.
        raise NotImplementedError(str(type(val)) + ' is not a supported cache_control setting.')
Translate cdn_cache into a Cache - Control HTTP header .
185
14
28,009
def upload_image(vol, img, offset, parallel=1, manual_shared_memory_id=None,
                 manual_shared_memory_bbox=None, manual_shared_memory_order='F'):
    """Upload img to vol with offset. This is the primary entry point for uploads.

    Grid-aligned writes are uploaded directly; non-aligned writes (if enabled
    on the volume) upload the aligned core, then download, paint, and
    re-upload the surrounding shell chunks.
    """
    global NON_ALIGNED_WRITE
    if not np.issubdtype(img.dtype, np.dtype(vol.dtype).type):
        raise ValueError('The uploaded image data type must match the volume data type. volume: {}, image: {}'.format(vol.dtype, img.dtype))
    (is_aligned, bounds, expanded) = check_grid_aligned(vol, img, offset)
    if is_aligned:
        upload_aligned(vol, img, offset, parallel=parallel,
                       manual_shared_memory_id=manual_shared_memory_id,
                       manual_shared_memory_bbox=manual_shared_memory_bbox,
                       manual_shared_memory_order=manual_shared_memory_order)
        return
    elif vol.non_aligned_writes == False:
        msg = NON_ALIGNED_WRITE.format(mip=vol.mip, chunk_size=vol.chunk_size, offset=vol.voxel_offset, got=bounds, check=expanded)
        raise AlignmentError(msg)
    # Upload the aligned core
    retracted = bounds.shrink_to_chunk_size(vol.underlying, vol.voxel_offset)
    core_bbox = retracted.clone() - bounds.minpt
    if not core_bbox.subvoxel():
        core_img = img[core_bbox.to_slices()]
        upload_aligned(vol, core_img, retracted.minpt, parallel=parallel,
                       manual_shared_memory_id=manual_shared_memory_id,
                       manual_shared_memory_bbox=manual_shared_memory_bbox,
                       manual_shared_memory_order=manual_shared_memory_order)
    # Download the shell, paint, and upload
    all_chunks = set(chunknames(expanded, vol.bounds, vol.key, vol.underlying))
    core_chunks = set(chunknames(retracted, vol.bounds, vol.key, vol.underlying))
    shell_chunks = all_chunks.difference(core_chunks)
    def shade_and_upload(img3d, bbox):
        # decode is returning non-writable chunk
        # we're throwing them away so safe to write
        img3d.setflags(write=1)
        shade(img3d, bbox, img, bounds)
        single_process_upload(vol, img3d, ((Vec(0, 0, 0), Vec(*img3d.shape[:3]), bbox.minpt, bbox.maxpt),), n_threads=0)
    download_multiple(vol, shell_chunks, fn=shade_and_upload)
Upload img to vol with offset . This is the primary entry point for uploads .
645
17
28,010
def decompress(content, encoding, filename='N/A'):
    """Decompress file content.

    encoding: None/'' (no-op) or 'gzip'.
    filename: only used to annotate error output.
    Raises NotImplementedError for unsupported encodings.
    """
    try:
        encoding = (encoding or '').lower()
        if encoding == '':
            return content
        elif encoding == 'gzip':
            return gunzip(content)
    except DecompressionError as err:
        # Identify the offending file before re-raising.
        print("Filename: " + str(filename))
        raise
    # Reached only for unrecognized encodings.
    raise NotImplementedError(str(encoding) + ' is not currently supported. Supported Options: None, gzip')
Decompress file content .
100
6
28,011
def compress(content, method='gzip'):
    """Compresses file content.

    method: None/'' (no-op), 'gzip', or True (legacy alias for 'gzip').
    """
    # Historical callers passed method=True to mean gzip.
    if method == True:
        method = 'gzip'
    normalized = (method or '').lower()
    if normalized == '':
        return content
    if normalized == 'gzip':
        return gzip_compress(content)
    raise NotImplementedError(str(normalized) + ' is not currently supported. Supported Options: None, gzip')
Compresses file content .
90
5
28,012
def gunzip(content):
    """Decompress gzip content after checking the two-byte gzip magic number.

    Raises DecompressionError when the magic number does not match.
    """
    gzip_magic_numbers = [0x1f, 0x8b]
    first_two_bytes = [byte for byte in bytearray(content)[:2]]
    # ROBUSTNESS FIX: content shorter than 2 bytes previously crashed with
    # IndexError while formatting the error message.
    if len(first_two_bytes) < 2:
        raise DecompressionError('File of {} bytes is too short to be in gzip format.'.format(len(content)))
    if first_two_bytes != gzip_magic_numbers:
        # BUG FIX: misplaced parentheses passed only two of the four
        # arguments to .format(), so raising this error itself raised
        # IndexError instead of DecompressionError.
        raise DecompressionError(
            'File is not in gzip format. Magic numbers {}, {} did not match {}, {}.'.format(
                hex(first_two_bytes[0]), hex(first_two_bytes[1]),
                hex(gzip_magic_numbers[0]), hex(gzip_magic_numbers[1])
            )
        )
    stringio = BytesIO(content)
    with gzip.GzipFile(mode='rb', fileobj=stringio) as gfile:
        return gfile.read()
Decompression is applied if the first two bytes match the gzip magic numbers . There is one chance in 65536 that a file that is not gzipped will be ungzipped .
187
40
28,013
def flush(self, preserve=None):
    """Delete the cache for this dataset. Optionally preserve a region.
    Helpful when working with overlapping volumes.

    preserve: mip 0 region (slices) to keep on disk; None deletes everything.
    """
    if not os.path.exists(self.path):
        return
    if preserve is None:
        # Nothing to keep: remove the whole cache directory.
        shutil.rmtree(self.path)
        return
    for mip in self.vol.available_mips:
        # NOTE(review): slices_from_global_coords converts to vol.mip, not
        # the loop's mip — confirm the preserve box is correct at other mips.
        preserve_mip = self.vol.slices_from_global_coords(preserve)
        preserve_mip = Bbox.from_slices(preserve_mip)
        mip_path = os.path.join(self.path, self.vol.mip_key(mip))
        if not os.path.exists(mip_path):
            continue
        for filename in os.listdir(mip_path):
            # Cached chunk filenames encode their bounding boxes.
            bbox = Bbox.from_filename(filename)
            if not Bbox.intersects(preserve_mip, bbox):
                os.remove(os.path.join(mip_path, filename))
Delete the cache for this dataset . Optionally preserve a region . Helpful when working with overlapping volumes .
197
21
28,014
def flush_region(self, region, mips=None):
    """Delete a cache region at one or more mip levels bounded by a Bbox
    for this dataset. Bbox coordinates should be specified in mip 0
    coordinates.

    region: Bbox or list/tuple of slices in mip 0 coordinates.
    mips: int or iterable of mip levels; defaults to the volume's mip.
    """
    if not os.path.exists(self.path):
        return
    if type(region) in (list, tuple):
        region = generate_slices(region, self.vol.bounds.minpt, self.vol.bounds.maxpt, bounded=False)
        region = Bbox.from_slices(region)
    mips = self.vol.mip if mips == None else mips
    if type(mips) == int:
        mips = (mips,)
    for mip in mips:
        mip_path = os.path.join(self.path, self.vol.mip_key(mip))
        if not os.path.exists(mip_path):
            continue
        # Translate the mip 0 request into this mip's coordinate system
        # because cached filenames encode boxes in mip coordinates.
        region_mip = self.vol.slices_from_global_coords(region)
        region_mip = Bbox.from_slices(region_mip)
        for filename in os.listdir(mip_path):
            bbox = Bbox.from_filename(filename)
            # BUG FIX: previously tested against `region` (mip 0 coords),
            # leaving region_mip unused and removing the wrong files at
            # higher mips; `flush` correctly compares in mip coordinates.
            if not Bbox.intersects(region_mip, bbox):
                os.remove(os.path.join(mip_path, filename))
Delete a cache region at one or more mip levels bounded by a Bbox for this dataset . Bbox coordinates should be specified in mip 0 coordinates .
267
32
28,015
def save_images(self, directory=None, axis='z', channel=None, global_norm=True, image_format='PNG'):
    """See cloudvolume.lib.save_images for more information."""
    if directory is None:
        # Default destination encodes dataset, layer, mip, and bounds.
        directory = os.path.join(
            './saved_images', self.dataset_name, self.layer,
            str(self.mip), self.bounds.to_filename()
        )
    return save_images(self, directory, axis, channel, global_norm, image_format)
See cloudvolume . lib . save_images for more information .
107
13
28,016
def from_path(kls, vertices):
    """Given an Nx3 array of vertices that constitute a single path,
    generate a skeleton with appropriate edges."""
    if vertices.shape[0] == 0:
        return PrecomputedSkeleton()
    skel = PrecomputedSkeleton(vertices)
    n = skel.vertices.shape[0]
    # Chain consecutive vertices: (0,1), (1,2), ..., (n-2, n-1).
    chain = np.zeros(shape=(n - 1, 2), dtype=np.uint32)
    chain[:, 0] = np.arange(n - 1)
    chain[:, 1] = np.arange(1, n)
    skel.edges = chain
    return skel
Given an Nx3 array of vertices that constitute a single path generate a skeleton with appropriate edges .
136
21
28,017
def simple_merge(kls, skeletons):
    """Simple concatenation of skeletons into one object without adding
    edges between them."""
    if len(skeletons) == 0:
        return PrecomputedSkeleton()
    # Accept a bare vertex array as a single skeleton.
    if type(skeletons[0]) is np.ndarray:
        skeletons = [skeletons]
    # Shift each skeleton's edge indices by the number of vertices that
    # precede it in the concatenated vertex array.
    offset = 0
    shifted_edges = []
    for skel in skeletons:
        shifted_edges.append(skel.edges + offset)
        offset += skel.vertices.shape[0]
    return PrecomputedSkeleton(
        vertices=np.concatenate([skel.vertices for skel in skeletons], axis=0),
        edges=np.concatenate(shifted_edges, axis=0),
        radii=np.concatenate([skel.radii for skel in skeletons], axis=0),
        vertex_types=np.concatenate([skel.vertex_types for skel in skeletons], axis=0),
        segid=skeletons[0].id,
    )
Simple concatenation of skeletons into one object without adding edges between them .
207
15
28,018
def decode(kls, skelbuf, segid=None):
    """Convert a buffer into a PrecomputedSkeleton object.

    Layout (little endian): uint32 num_vertices, uint32 num_edges, then
    float32 vertex triples, uint32 edge pairs, and optionally one float32
    radius and one uint8 SWC vertex type per vertex.

    Raises SkeletonDecodeError on truncated buffers.
    """
    if len(skelbuf) < 8:
        raise SkeletonDecodeError("{} bytes is fewer than needed to specify the number of verices and edges.".format(len(skelbuf)))
    num_vertices, num_edges = struct.unpack('<II', skelbuf[:8])
    min_format_length = 8 + 12 * num_vertices + 8 * num_edges
    if len(skelbuf) < min_format_length:
        # BUG FIX: the message referenced undefined `format_length`, so this
        # branch raised NameError instead of SkeletonDecodeError.
        raise SkeletonDecodeError("The input skeleton was {} bytes but the format requires {} bytes.".format(len(skelbuf), min_format_length))
    vstart = 2 * 4  # two uint32s in
    vend = vstart + num_vertices * 3 * 4  # float32s
    vertbuf = skelbuf[vstart:vend]
    estart = vend
    eend = estart + num_edges * 4 * 2  # 2x uint32s
    edgebuf = skelbuf[estart:eend]
    vertices = np.frombuffer(vertbuf, dtype='<f4').reshape((num_vertices, 3))
    edges = np.frombuffer(edgebuf, dtype='<u4').reshape((num_edges, 2))
    if len(skelbuf) == min_format_length:
        return PrecomputedSkeleton(vertices, edges, segid=segid)
    radii_format_length = min_format_length + num_vertices * 4
    if len(skelbuf) < radii_format_length:
        raise SkeletonDecodeError(
            "Input buffer did not have enough float32 radii to correspond to each vertex. # vertices: {}, # radii: {}".format(
                num_vertices, (radii_format_length - min_format_length) / 4
            )
        )
    rstart = eend
    rend = rstart + num_vertices * 4  # 4 bytes np.float32
    radiibuf = skelbuf[rstart:rend]
    radii = np.frombuffer(radiibuf, dtype=np.float32)
    if len(skelbuf) == radii_format_length:
        return PrecomputedSkeleton(vertices, edges, radii, segid=segid)
    type_format_length = radii_format_length + num_vertices * 1
    if len(skelbuf) < type_format_length:
        raise SkeletonDecodeError(
            "Input buffer did not have enough uint8 SWC vertex types to correspond to each vertex. # vertices: {}, # types: {}".format(
                num_vertices, (type_format_length - radii_format_length)
            )
        )
    tstart = rend
    tend = tstart + num_vertices
    typebuf = skelbuf[tstart:tend]
    vertex_types = np.frombuffer(typebuf, dtype=np.uint8)
    return PrecomputedSkeleton(vertices, edges, radii, vertex_types, segid=segid)
Convert a buffer into a PrecomputedSkeleton object .
694
13
28,019
def equivalent(kls, first, second):
    """Tests that two skeletons are the same in form, not merely that their
    array contents are exactly the same. This test can be made more
    sophisticated."""
    if first.empty() and second.empty():
        return True
    elif first.vertices.shape[0] != second.vertices.shape[0]:
        return False
    elif first.edges.shape[0] != second.edges.shape[0]:
        return False
    EPSILON = 1e-7
    # Compare vertex sets independent of ordering via np.unique's sort.
    vertex1, inv1 = np.unique(first.vertices, axis=0, return_inverse=True)
    vertex2, inv2 = np.unique(second.vertices, axis=0, return_inverse=True)
    vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)
    if not vertex_match:
        return False
    # NOTE(review): remapping pairs inv1[i] -> inv2[i], i.e. it assumes the
    # i-th vertex of `first` corresponds to the i-th vertex of `second`
    # after dedup — confirm that assumption for skeletons with reordered
    # vertices.
    remapping = {}
    for i in range(len(inv1)):
        remapping[inv1[i]] = inv2[i]
    remap = np.vectorize(lambda idx: remapping[idx])
    # Canonicalize both edge lists (dedup, sort endpoints, sort rows).
    edges1 = np.sort(np.unique(first.edges, axis=0), axis=1)
    edges1 = edges1[np.lexsort(edges1[:, ::-1].T)]
    edges2 = remap(second.edges)
    edges2 = np.sort(np.unique(edges2, axis=0), axis=1)
    edges2 = edges2[np.lexsort(edges2[:, ::-1].T)]
    edges_match = np.all(edges1 == edges2)
    if not edges_match:
        return False
    # Per-vertex attributes must agree for coordinate-matched vertices.
    second_verts = {}
    for i, vert in enumerate(second.vertices):
        second_verts[tuple(vert)] = i
    for i in range(len(first.radii)):
        i2 = second_verts[tuple(first.vertices[i])]
        if first.radii[i] != second.radii[i2]:
            return False
        if first.vertex_types[i] != second.vertex_types[i2]:
            return False
    return True
Tests that two skeletons are the same in form not merely that their array contents are exactly the same . This test can be made more sophisticated .
450
29
28,020
def crop(self, bbox):
    """Crop away all vertices and edges that lie outside of the given bbox.
    The edge counts as inside.

    Returns a new, consolidated PrecomputedSkeleton; self is not modified.
    """
    skeleton = self.clone()
    bbox = Bbox.create(bbox)
    if skeleton.empty():
        return skeleton
    # BUG FIX: np.bool was deprecated and removed in NumPy 1.24+; the
    # builtin bool is the documented equivalent dtype.
    nodes_valid_mask = np.array([bbox.contains(vtx) for vtx in skeleton.vertices], dtype=bool)
    nodes_valid_idx = np.where(nodes_valid_mask)[0]
    # Set invalid vertices to be duplicates
    # so they'll be removed during consolidation
    if nodes_valid_idx.shape[0] == 0:
        return PrecomputedSkeleton()
    first_node = nodes_valid_idx[0]
    skeleton.vertices[~nodes_valid_mask] = skeleton.vertices[first_node]
    # Keep only edges whose both endpoints are inside the bbox.
    edges_valid_mask = np.isin(skeleton.edges, nodes_valid_idx)
    edges_valid_idx = edges_valid_mask[:, 0] * edges_valid_mask[:, 1]
    skeleton.edges = skeleton.edges[edges_valid_idx, :]
    return skeleton.consolidate()
Crop away all vertices and edges that lie outside of the given bbox . The edge counts as inside .
240
23
28,021
def consolidate(self):
    """Remove duplicate vertices and edges from this skeleton without side
    effects (returns a new PrecomputedSkeleton)."""
    nodes = self.vertices
    edges = self.edges
    radii = self.radii
    vertex_types = self.vertex_types
    if self.empty():
        return PrecomputedSkeleton()
    # idx_representative maps each original vertex index to the index of its
    # representative in the deduplicated vertex array; uniq_idx picks one
    # original row per unique vertex (used to carry radii/types over).
    eff_nodes, uniq_idx, idx_representative = np.unique(nodes, axis=0, return_index=True, return_inverse=True)
    edge_vector_map = np.vectorize(lambda x: idx_representative[x])
    eff_edges = edge_vector_map(edges)
    eff_edges = np.sort(eff_edges, axis=1)  # sort each edge [2,1] => [1,2]
    eff_edges = eff_edges[np.lexsort(eff_edges[:, ::-1].T)]  # Sort rows
    eff_edges = np.unique(eff_edges, axis=0)
    eff_edges = eff_edges[eff_edges[:, 0] != eff_edges[:, 1]]  # remove trivial loops
    radii_vector_map = np.vectorize(lambda idx: radii[idx])
    eff_radii = radii_vector_map(uniq_idx)
    vertex_type_map = np.vectorize(lambda idx: vertex_types[idx])
    eff_vtype = vertex_type_map(uniq_idx)
    return PrecomputedSkeleton(eff_nodes, eff_edges, eff_radii, eff_vtype, segid=self.id)
Remove duplicate vertices and edges from this skeleton without side effects .
359
13
28,022
def downsample(self, factor):
    """Compute a downsampled version of the skeleton by striding while
    preserving endpoints.

    factor: positive integer stride applied along each interjoint path.
    Returns a new PrecomputedSkeleton with radii and vertex types carried
    over from the matching original vertices.
    """
    if int(factor) != factor or factor < 1:
        # BUG FIX: the message template was passed as extra ValueError args
        # instead of being formatted, so callers saw the raw "{}" template.
        raise ValueError(
            "Argument `factor` must be a positive integer greater than or equal to 1. Got: <{}>({})".format(type(factor), factor)
        )
    paths = self.interjoint_paths()
    for i, path in enumerate(paths):
        paths[i] = np.concatenate(
            (path[0::factor, :], path[-1:, :])  # preserve endpoints
        )
    ds_skel = PrecomputedSkeleton.simple_merge(
        [PrecomputedSkeleton.from_path(path) for path in paths]
    ).consolidate()
    ds_skel.id = self.id
    # TODO: I'm sure this could be sped up if need be.
    # Carry radii and vertex types over by matching vertex coordinates.
    index = {}
    for i, vert in enumerate(self.vertices):
        vert = tuple(vert)
        index[vert] = i
    for i, vert in enumerate(ds_skel.vertices):
        vert = tuple(vert)
        ds_skel.radii[i] = self.radii[index[vert]]
        ds_skel.vertex_types[i] = self.vertex_types[index[vert]]
    return ds_skel
Compute a downsampled version of the skeleton by striding while preserving endpoints .
292
18
28,023
def _single_tree_paths(self, tree):
    """Get all traversal paths from a single tree."""
    skel = tree.consolidate()
    # Build an undirected adjacency list from the edge array.
    tree = defaultdict(list)
    for edge in skel.edges:
        svert = edge[0]
        evert = edge[1]
        tree[svert].append(evert)
        tree[evert].append(svert)

    def dfs(path, visited):
        # Despite the name, stack.pop(0) consumes FIFO, so frontier states
        # expand breadth-first; each leaf still yields one root-to-leaf path.
        paths = []
        stack = [(path, visited)]
        while stack:
            path, visited = stack.pop(0)
            vertex = path[-1]
            children = tree[vertex]
            visited[vertex] = True
            children = [child for child in children if not visited[child]]
            if len(children) == 0:
                paths.append(path)
            for child in children:
                # Each branch gets its own copy of the visited map.
                stack.append((path + [child], copy.deepcopy(visited)))
        return paths

    # First pass finds the most distant leaf from an arbitrary start; it is
    # then used as the root for the second, final traversal.
    root = skel.edges[0, 0]
    paths = dfs([root], defaultdict(bool))
    root = np.argmax([len(_) for _ in paths])
    root = paths[root][-1]
    paths = dfs([root], defaultdict(bool))
    return [np.flip(skel.vertices[path], axis=0) for path in paths]
Get all traversal paths from a single tree .
276
10
28,024
def paths(self):
    """Assuming the skeleton is structured as a single tree, return a list
    of all traversal paths across all components. For each component, start
    from the first vertex, find the most distant vertex by hops, and set
    that as the root. Then use depth first traversal to produce paths."""
    all_paths = []
    for component in self.components():
        all_paths.extend(self._single_tree_paths(component))
    return all_paths
Assuming the skeleton is structured as a single tree return a list of all traversal paths across all components . For each component start from the first vertex find the most distant vertex by hops and set that as the root . Then use depth first traversal to produce paths .
35
53
28,025
def interjoint_paths(self):
    """Returns paths between the adjacent critical points in the skeleton,
    where a critical point is the set of terminal and branch points."""
    result = []
    for component in self.components():
        result += self._single_tree_interjoint_paths(component)
    return result
Returns paths between the adjacent critical points in the skeleton where a critical point is the set of terminal and branch points .
54
23
28,026
def components(self):
    """Extract connected components from graph. Useful for ensuring that
    you're working with a single tree.

    Returns a list of PrecomputedSkeletons, one per connected component.
    """
    skel, forest = self._compute_components()
    if len(forest) == 0:
        return []
    elif len(forest) == 1:
        return [skel]
    # Map each vertex coordinate back to its index in the parent skeleton.
    orig_verts = {tuple(coord): i for i, coord in enumerate(skel.vertices)}
    skeletons = []
    for edge_list in forest:
        edge_list = np.array(edge_list, dtype=np.uint32)
        edge_list = np.unique(edge_list, axis=0)
        vert_idx = np.unique(edge_list.flatten())
        vert_list = skel.vertices[vert_idx]
        radii = skel.radii[vert_idx]
        vtypes = skel.vertex_types[vert_idx]
        # Renumber edge endpoints into the component's local vertex indexing.
        new_verts = {orig_verts[tuple(coord)]: i for i, coord in enumerate(vert_list)}
        edge_vector_map = np.vectorize(lambda x: new_verts[x])
        edge_list = edge_vector_map(edge_list)
        skeletons.append(PrecomputedSkeleton(vert_list, edge_list, radii, vtypes, skel.id))
    return skeletons
Extract connected components from graph . Useful for ensuring that you're working with a single tree .
277
19
28,027
def get(self, segids):
    """Retrieve one or more skeletons from the data layer.

    segids: a single int/float segid, or an iterable of segids.
    Returns a single skeleton for scalar input, otherwise a list.
    Raises SkeletonDecodeError for missing or undecodable files.
    """
    list_return = True
    if type(segids) in (int, float):
        list_return = False
        segids = [int(segids)]
    paths = [os.path.join(self.path, str(segid)) for segid in segids]
    # SimpleStorage avoids thread overhead for a single fetch.
    StorageClass = Storage if len(segids) > 1 else SimpleStorage
    with StorageClass(self.vol.layer_cloudpath, progress=self.vol.progress) as stor:
        results = stor.get_files(paths)
    for res in results:
        if res['error'] is not None:
            raise res['error']
    missing = [res['filename'] for res in results if res['content'] is None]
    if len(missing):
        raise SkeletonDecodeError("File(s) do not exist: {}".format(", ".join(missing)))
    skeletons = []
    for res in results:
        segid = int(os.path.basename(res['filename']))
        try:
            skel = PrecomputedSkeleton.decode(res['content'], segid=segid)
        except Exception as err:
            # BUG FIX: Python 3 exceptions have no `.message` attribute, so
            # the old code raised AttributeError and hid the decode error.
            raise SkeletonDecodeError("segid " + str(segid) + ": " + str(err))
        skeletons.append(skel)
    if list_return:
        return skeletons
    return skeletons[0]
Retrieve one or more skeletons from the data layer .
319
11
28,028
def put(self, fn):
    """Enqueue a task function for processing."""
    # Track how many tasks have been submitted so `wait` can show progress.
    self._inserted += 1
    # Block until the queue has room rather than raising queue.Full.
    self._queue.put(fn, block=True)
    return self
Enqueue a task function for processing .
30
8
28,029
def start_threads(self, n_threads):
    """Terminate existing threads and create a new set if the thread number
    doesn't match the desired number."""
    if n_threads == len(self._threads):
        return self
    # Terminate all previous tasks with the existing
    # event object, then create a new one for the next
    # generation of threads. The old object will hang
    # around in memory until the threads actually terminate
    # after another iteration.
    self._terminate.set()
    self._terminate = threading.Event()
    threads = []
    for _ in range(n_threads):
        worker = threading.Thread(target=self._consume_queue, args=(self._terminate,))
        worker.daemon = True  # don't block interpreter exit
        worker.start()
        threads.append(worker)
    self._threads = tuple(threads)
    return self
Terminate existing threads and create a new set if the thread number doesn't match the desired number .
168
20
28,030
def kill_threads(self):
    """Kill all threads."""
    # Signal the workers to exit, then poll until they have all died.
    self._terminate.set()
    while self.are_threads_alive():
        time.sleep(0.001)
    self._threads = tuple()
    return self
Kill all threads .
49
4
28,031
def wait(self, progress=None):
    """Allow background threads to process until the task queue is empty.
    If there are no threads, in theory the queue should always be empty as
    processing happens immediately on the main thread.

    progress: falsy disables the progress bar; a str also sets its label.
    """
    if not len(self._threads):
        return self
    desc = None
    if type(progress) is str:
        desc = progress
    last = self._inserted
    with tqdm(total=self._inserted, disable=(not progress), desc=desc) as pbar:
        # Allow queue to consume, but check up on
        # progress and errors every tenth of a second
        while not self._queue.empty():
            size = self._queue.qsize()
            delta = last - size
            if delta != 0:  # We should crash on negative numbers
                pbar.update(delta)
                last = size
            self._check_errors()
            time.sleep(0.1)
        # Wait until all tasks in the queue are
        # fully processed. queue.task_done must be
        # called for each task.
        self._queue.join()
        self._check_errors()
        # Account for tasks completed between the last poll and join().
        final = self._inserted - last
        if final:
            pbar.update(final)
    if self._queue.empty():
        self._inserted = 0
    return self
Allow background threads to process until the task queue is empty . If there are no threads in theory the queue should always be empty as processing happens immediately on the main thread .
236
34
28,032
def _radix_sort ( L , i = 0 ) : if len ( L ) <= 1 : return L done_bucket = [ ] buckets = [ [ ] for x in range ( 255 ) ] for s in L : if i >= len ( s ) : done_bucket . append ( s ) else : buckets [ ord ( s [ i ] ) ] . append ( s ) buckets = [ _radix_sort ( b , i + 1 ) for b in buckets ] return done_bucket + [ b for blist in buckets for b in blist ]
Most significant char radix sort
122
6
28,033
def files_exist(self, file_paths):
    """Threaded exists for all file paths."""
    results = {}

    # Each thunk merges one batch's existence report into the shared dict.
    def exist_thunk(paths, interface):
        results.update(interface.files_exist(paths))

    if len(self._threads):
        for block in scatter(file_paths, len(self._threads)):
            self.put(partial(exist_thunk, block))
    else:
        exist_thunk(file_paths, self._interface)

    desc = 'Existence Testing' if self.progress else None
    self.wait(desc)
    return results
Threaded exists for all file paths .
123
8
28,034
def get_files(self, file_paths):
    """Returns a list of files faster by using threads.

    Each result is a dict: {filename, content (bytes or None), error
    (Exception or None)}.
    """
    results = []

    def get_file_thunk(path, interface):
        result = error = None
        try:
            result = interface.get_file(path)
        except Exception as err:
            error = err
            # important to print immediately because
            # errors are collected at the end
            print(err)
        # BUG FIX: on error `result` is None and the old unconditional
        # `content, encoding = result` raised TypeError, masking the
        # original error instead of recording it in the result dict.
        if error is None:
            content, encoding = result
            content = compression.decompress(content, encoding)
        else:
            content = None
        results.append({
            "filename": path,
            "content": content,
            "error": error,
        })

    for path in file_paths:
        if len(self._threads):
            self.put(partial(get_file_thunk, path))
        else:
            get_file_thunk(path, self._interface)
    desc = 'Downloading' if self.progress else None
    self.wait(desc)
    return results
returns a list of files faster by using threads
183
10
28,035
def get_file(self, file_path):
    """There are many types of exceptions which can get raised from this
    method. We want to make sure we only return None when the file doesn't
    exist.

    Returns: (body bytes, content encoding str) or (None, None) when the
    S3 key is absent. Any other client error propagates.
    """
    try:
        resp = self._conn.get_object(
            Bucket=self._path.bucket,
            Key=self.get_path_to_file(file_path),
        )
        encoding = ''
        if 'ContentEncoding' in resp:
            encoding = resp['ContentEncoding']
        return resp['Body'].read(), encoding
    except botocore.exceptions.ClientError as err:
        # Only a missing key maps to (None, None); everything else is real.
        if err.response['Error']['Code'] == 'NoSuchKey':
            return None, None
        else:
            raise
There are many types of exceptions which can get raised from this method . We want to make sure we only return None when the file doesn't exist .
127
31
28,036
def init_submodules(self, cache):
    """Initialize the cache, mesh, and skeleton service submodules.

    cache = path or bool
    """
    # weakref proxies avoid reference cycles between the volume and its
    # services.
    self.cache = CacheService(cache, weakref.proxy(self))
    self.mesh = PrecomputedMeshService(weakref.proxy(self))
    self.skeleton = PrecomputedSkeletonService(weakref.proxy(self))
cache = path or bool
65
5
28,037
def create_new_info(cls, num_channels, layer_type, data_type, encoding,
                    resolution, voxel_offset, volume_size,
                    mesh=None, skeletons=None, chunk_size=(64, 64, 64),
                    compressed_segmentation_block_size=(8, 8, 8),
                    max_mip=0, factor=Vec(2, 2, 1)):
    """Used for creating new neuroglancer info files.

    resolution/voxel_offset/volume_size: 3-sequences for the mip 0 scale.
    max_mip: number of additional downsampled scales to generate.
    factor: per-mip downsample factor (Vec or 3-sequence).
    mesh/skeletons: truthy enables the directory; a str overrides its name.
    Returns the info dict.
    """
    if not isinstance(factor, Vec):
        factor = Vec(*factor)
    if not isinstance(data_type, str):
        data_type = np.dtype(data_type).name
    info = {
        "num_channels": int(num_channels),
        "type": layer_type,
        "data_type": data_type,
        "scales": [{
            "encoding": encoding,
            "chunk_sizes": [chunk_size],
            "key": "_".join(map(str, resolution)),
            "resolution": list(map(int, resolution)),
            "voxel_offset": list(map(int, voxel_offset)),
            "size": list(map(int, volume_size)),
        }],
    }
    fullres = info['scales'][0]
    factor_in_mip = factor.clone()
    # add mip levels
    for _ in range(max_mip):
        new_resolution = list(map(int, Vec(*fullres['resolution']) * factor_in_mip))
        newscale = {
            u"encoding": encoding,
            u"chunk_sizes": [list(map(int, chunk_size))],
            u"key": "_".join(map(str, new_resolution)),
            u"resolution": new_resolution,
            # Offsets shrink conservatively (floor); sizes grow (ceil) so the
            # downsampled scale still covers the full volume.
            u"voxel_offset": downscale(fullres['voxel_offset'], factor_in_mip, np.floor),
            u"size": downscale(fullres['size'], factor_in_mip, np.ceil),
        }
        info['scales'].append(newscale)
        factor_in_mip *= factor
    if encoding == 'compressed_segmentation':
        info['scales'][0]['compressed_segmentation_block_size'] = list(map(int, compressed_segmentation_block_size))
    if mesh:
        info['mesh'] = 'mesh' if not isinstance(mesh, string_types) else mesh
    if skeletons:
        info['skeletons'] = 'skeletons' if not isinstance(skeletons, string_types) else skeletons
    return info
Used for creating new neuroglancer info files .
614
10
28,038
def bbox_to_mip ( self , bbox , mip , to_mip ) : if not type ( bbox ) is Bbox : bbox = lib . generate_slices ( bbox , self . mip_bounds ( mip ) . minpt , self . mip_bounds ( mip ) . maxpt , bounded = False ) bbox = Bbox . from_slices ( bbox ) def one_level ( bbox , mip , to_mip ) : original_dtype = bbox . dtype # setting type required for Python2 downsample_ratio = self . mip_resolution ( mip ) . astype ( np . float32 ) / self . mip_resolution ( to_mip ) . astype ( np . float32 ) bbox = bbox . astype ( np . float64 ) bbox *= downsample_ratio bbox . minpt = np . floor ( bbox . minpt ) bbox . maxpt = np . ceil ( bbox . maxpt ) bbox = bbox . astype ( original_dtype ) return bbox delta = 1 if to_mip >= mip else - 1 while ( mip != to_mip ) : bbox = one_level ( bbox , mip , mip + delta ) mip += delta return bbox
Convert bbox or slices from one mip level to another .
301
14
28,039
def slices_to_global_coords ( self , slices ) : bbox = self . bbox_to_mip ( slices , self . mip , 0 ) return bbox . to_slices ( )
Used to convert from a higher mip level into mip 0 resolution .
48
15
28,040
def slices_from_global_coords ( self , slices ) : bbox = self . bbox_to_mip ( slices , 0 , self . mip ) return bbox . to_slices ( )
Used for converting from mip 0 coordinates to upper mip level coordinates . This is mainly useful for debugging since the neuroglancer client displays the mip 0 coordinates for your cursor .
48
37
28,041
def __realized_bbox ( self , requested_bbox ) : realized_bbox = requested_bbox . expand_to_chunk_size ( self . underlying , offset = self . voxel_offset ) return Bbox . clamp ( realized_bbox , self . bounds )
The requested bbox might not be aligned to the underlying chunk grid or even outside the bounds of the dataset . Convert the request into a bbox representing something that can be actually downloaded .
64
37
28,042
def exists ( self , bbox_or_slices ) : if type ( bbox_or_slices ) is Bbox : requested_bbox = bbox_or_slices else : ( requested_bbox , _ , _ ) = self . __interpret_slices ( bbox_or_slices ) realized_bbox = self . __realized_bbox ( requested_bbox ) cloudpaths = txrx . chunknames ( realized_bbox , self . bounds , self . key , self . underlying ) cloudpaths = list ( cloudpaths ) with Storage ( self . layer_cloudpath , progress = self . progress ) as storage : existence_report = storage . files_exist ( cloudpaths ) return existence_report
Produce a summary of whether all the requested chunks exist .
168
12
28,043
def delete ( self , bbox_or_slices ) : if type ( bbox_or_slices ) is Bbox : requested_bbox = bbox_or_slices else : ( requested_bbox , _ , _ ) = self . __interpret_slices ( bbox_or_slices ) realized_bbox = self . __realized_bbox ( requested_bbox ) if requested_bbox != realized_bbox : raise exceptions . AlignmentError ( "Unable to delete non-chunk aligned bounding boxes. Requested: {}, Realized: {}" . format ( requested_bbox , realized_bbox ) ) cloudpaths = txrx . chunknames ( realized_bbox , self . bounds , self . key , self . underlying ) cloudpaths = list ( cloudpaths ) with Storage ( self . layer_cloudpath , progress = self . progress ) as storage : storage . delete_files ( cloudpaths ) if self . cache . enabled : with Storage ( 'file://' + self . cache . path , progress = self . progress ) as storage : storage . delete_files ( cloudpaths )
Delete the files within the bounding box .
256
9
28,044
def transfer_to ( self , cloudpath , bbox , block_size = None , compress = True ) : if type ( bbox ) is Bbox : requested_bbox = bbox else : ( requested_bbox , _ , _ ) = self . __interpret_slices ( bbox ) realized_bbox = self . __realized_bbox ( requested_bbox ) if requested_bbox != realized_bbox : raise exceptions . AlignmentError ( "Unable to transfer non-chunk aligned bounding boxes. Requested: {}, Realized: {}" . format ( requested_bbox , realized_bbox ) ) default_block_size_MB = 50 # MB chunk_MB = self . underlying . rectVolume ( ) * np . dtype ( self . dtype ) . itemsize * self . num_channels if self . layer_type == 'image' : # kind of an average guess for some EM datasets, have seen up to 1.9x and as low as 1.1 # affinites are also images, but have very different compression ratios. e.g. 3x for kempressed chunk_MB /= 1.3 else : # segmentation chunk_MB /= 100.0 # compression ratios between 80 and 800.... chunk_MB /= 1024.0 * 1024.0 if block_size : step = block_size else : step = int ( default_block_size_MB // chunk_MB ) + 1 try : destvol = CloudVolume ( cloudpath , mip = self . mip ) except exceptions . InfoUnavailableError : destvol = CloudVolume ( cloudpath , mip = self . mip , info = self . info , provenance = self . provenance . serialize ( ) ) destvol . commit_info ( ) destvol . commit_provenance ( ) except exceptions . ScaleUnavailableError : destvol = CloudVolume ( cloudpath ) for i in range ( len ( destvol . scales ) + 1 , len ( self . scales ) ) : destvol . scales . append ( self . scales [ i ] ) destvol . commit_info ( ) destvol . commit_provenance ( ) num_blocks = np . ceil ( self . bounds . volume ( ) / self . underlying . rectVolume ( ) ) / step num_blocks = int ( np . ceil ( num_blocks ) ) cloudpaths = txrx . chunknames ( realized_bbox , self . bounds , self . key , self . underlying ) pbar = tqdm ( desc = 'Transferring Blocks of {} Chunks' . format ( step ) , unit = 'blocks' , disable = ( not self . 
progress ) , total = num_blocks , ) with pbar : with Storage ( self . layer_cloudpath ) as src_stor : with Storage ( cloudpath ) as dest_stor : for _ in range ( num_blocks , 0 , - 1 ) : srcpaths = list ( itertools . islice ( cloudpaths , step ) ) files = src_stor . get_files ( srcpaths ) files = [ ( f [ 'filename' ] , f [ 'content' ] ) for f in files ] dest_stor . put_files ( files = files , compress = compress , content_type = txrx . content_type ( destvol ) , ) pbar . update ( )
Transfer files from one storage location to another bypassing volume painting . This enables using a single CloudVolume instance to transfer big volumes . In some cases gsutil or aws s3 cli tools may be more appropriate . This method is provided for convenience . It may be optimized for better performance over time as demand requires .
731
65
28,045
def download_point ( self , pt , size = 256 , mip = None ) : if isinstance ( size , int ) : size = Vec ( size , size , size ) else : size = Vec ( * size ) if mip is None : mip = self . mip size2 = size // 2 pt = self . point_to_mip ( pt , mip = 0 , to_mip = mip ) bbox = Bbox ( pt - size2 , pt + size2 ) saved_mip = self . mip self . mip = mip img = self [ bbox ] self . mip = saved_mip return img
Download to the right of point given in mip 0 coords . Useful for quickly visualizing a neuroglancer coordinate at an arbitrary mip level .
142
32
28,046
def download_to_shared_memory ( self , slices , location = None ) : if self . path . protocol == 'boss' : raise NotImplementedError ( 'BOSS protocol does not support shared memory download.' ) if type ( slices ) == Bbox : slices = slices . to_slices ( ) ( requested_bbox , steps , channel_slice ) = self . __interpret_slices ( slices ) if self . autocrop : requested_bbox = Bbox . intersection ( requested_bbox , self . bounds ) location = location or self . shared_memory_id return txrx . cutout ( self , requested_bbox , steps , channel_slice , parallel = self . parallel , shared_memory_location = location , output_to_shared_memory = True )
Download images to a shared memory array .
173
8
28,047
def get ( self , segids , remove_duplicate_vertices = True , fuse = True , chunk_size = None ) : segids = toiter ( segids ) dne = self . _check_missing_manifests ( segids ) if dne : missing = ', ' . join ( [ str ( segid ) for segid in dne ] ) raise ValueError ( red ( 'Segment ID(s) {} are missing corresponding mesh manifests.\nAborted.' . format ( missing ) ) ) fragments = self . _get_manifests ( segids ) fragments = fragments . values ( ) fragments = list ( itertools . chain . from_iterable ( fragments ) ) # flatten fragments = self . _get_mesh_fragments ( fragments ) fragments = sorted ( fragments , key = lambda frag : frag [ 'filename' ] ) # make decoding deterministic # decode all the fragments meshdata = defaultdict ( list ) for frag in tqdm ( fragments , disable = ( not self . vol . progress ) , desc = "Decoding Mesh Buffer" ) : segid = filename_to_segid ( frag [ 'filename' ] ) mesh = decode_mesh_buffer ( frag [ 'filename' ] , frag [ 'content' ] ) meshdata [ segid ] . append ( mesh ) def produce_output ( mdata ) : vertexct = np . zeros ( len ( mdata ) + 1 , np . uint32 ) vertexct [ 1 : ] = np . cumsum ( [ x [ 'num_vertices' ] for x in mdata ] ) vertices = np . concatenate ( [ x [ 'vertices' ] for x in mdata ] ) faces = np . concatenate ( [ mesh [ 'faces' ] + vertexct [ i ] for i , mesh in enumerate ( mdata ) ] ) if remove_duplicate_vertices : if chunk_size : vertices , faces = remove_duplicate_vertices_cross_chunks ( vertices , faces , chunk_size ) else : vertices , faces = np . unique ( vertices [ faces ] , return_inverse = True , axis = 0 ) faces = faces . astype ( np . uint32 ) return { 'num_vertices' : len ( vertices ) , 'vertices' : vertices , 'faces' : faces , } if fuse : meshdata = [ ( segid , mdata ) for segid , mdata in six . iteritems ( meshdata ) ] meshdata = sorted ( meshdata , key = lambda sm : sm [ 0 ] ) meshdata = [ mdata for segid , mdata in meshdata ] meshdata = list ( itertools . chain . 
from_iterable ( meshdata ) ) # flatten return produce_output ( meshdata ) else : return { segid : produce_output ( mdata ) for segid , mdata in six . iteritems ( meshdata ) }
Merge fragments derived from these segids into a single vertex and face list .
659
17
28,048
def _check_missing_manifests ( self , segids ) : manifest_paths = [ self . _manifest_path ( segid ) for segid in segids ] with Storage ( self . vol . layer_cloudpath , progress = self . vol . progress ) as stor : exists = stor . files_exist ( manifest_paths ) dne = [ ] for path , there in exists . items ( ) : if not there : ( segid , ) = re . search ( r'(\d+):0$' , path ) . groups ( ) dne . append ( segid ) return dne
Check if there are any missing mesh manifests prior to downloading .
141
12
28,049
def save ( self , segids , filepath = None , file_format = 'ply' ) : if type ( segids ) != list : segids = [ segids ] meshdata = self . get ( segids ) if not filepath : filepath = str ( segids [ 0 ] ) + "." + file_format if len ( segids ) > 1 : filepath = "{}_{}.{}" . format ( segids [ 0 ] , segids [ - 1 ] , file_format ) if file_format == 'obj' : objdata = mesh_to_obj ( meshdata , progress = self . vol . progress ) objdata = '\n' . join ( objdata ) + '\n' data = objdata . encode ( 'utf8' ) elif file_format == 'ply' : data = mesh_to_ply ( meshdata ) else : raise NotImplementedError ( 'Only .obj and .ply is currently supported.' ) with open ( filepath , 'wb' ) as f : f . write ( data )
Save one or more segids into a common mesh format as a single file .
238
17
28,050
def pad_block ( block , block_size ) : unique_vals , unique_counts = np . unique ( block , return_counts = True ) most_frequent_value = unique_vals [ np . argmax ( unique_counts ) ] return np . pad ( block , tuple ( ( 0 , desired_size - actual_size ) for desired_size , actual_size in zip ( block_size , block . shape ) ) , mode = "constant" , constant_values = most_frequent_value )
Pad a block to block_size with its most frequent value
115
12
28,051
def find_closest_divisor ( to_divide , closest_to ) : def find_closest ( td , ct ) : min_distance = td best = td for divisor in divisors ( td ) : if abs ( divisor - ct ) < min_distance : min_distance = abs ( divisor - ct ) best = divisor return best return [ find_closest ( td , ct ) for td , ct in zip ( to_divide , closest_to ) ]
This is used to find the right chunk size for importing a neuroglancer dataset that has a chunk import size that is not evenly divisible by 64 64 64 .
120
33
28,052
def divisors ( n ) : for i in range ( 1 , int ( math . sqrt ( n ) + 1 ) ) : if n % i == 0 : yield i if i * i != n : yield n / i
Generate the divisors of n
49
8
28,053
def expand_to_chunk_size ( self , chunk_size , offset = Vec ( 0 , 0 , 0 , dtype = int ) ) : chunk_size = np . array ( chunk_size , dtype = np . float32 ) result = self . clone ( ) result = result - offset result . minpt = np . floor ( result . minpt / chunk_size ) * chunk_size result . maxpt = np . ceil ( result . maxpt / chunk_size ) * chunk_size return ( result + offset ) . astype ( self . dtype )
Align a potentially non - axis aligned bbox to the grid by growing it to the nearest grid lines .
125
22
28,054
def round_to_chunk_size ( self , chunk_size , offset = Vec ( 0 , 0 , 0 , dtype = int ) ) : chunk_size = np . array ( chunk_size , dtype = np . float32 ) result = self . clone ( ) result = result - offset result . minpt = np . round ( result . minpt / chunk_size ) * chunk_size result . maxpt = np . round ( result . maxpt / chunk_size ) * chunk_size return ( result + offset ) . astype ( self . dtype )
Align a potentially non - axis aligned bbox to the grid by rounding it to the nearest grid lines .
124
22
28,055
def contains ( self , point ) : return ( point [ 0 ] >= self . minpt [ 0 ] and point [ 1 ] >= self . minpt [ 1 ] and point [ 2 ] >= self . minpt [ 2 ] and point [ 0 ] <= self . maxpt [ 0 ] and point [ 1 ] <= self . maxpt [ 1 ] and point [ 2 ] <= self . maxpt [ 2 ] )
Tests if a point is on or within a bounding box .
88
13
28,056
def display ( self , display ) : if display is None : raise ValueError ( "Invalid value for `display`, must not be `None`" ) # noqa: E501 allowed_values = [ "BANNER" , "TOASTER" ] # noqa: E501 if display not in allowed_values : raise ValueError ( "Invalid value for `display` ({0}), must be one of {1}" # noqa: E501 . format ( display , allowed_values ) ) self . _display = display
Sets the display of this Message .
113
8
28,057
def scope ( self , scope ) : if scope is None : raise ValueError ( "Invalid value for `scope`, must not be `None`" ) # noqa: E501 allowed_values = [ "CLUSTER" , "CUSTOMER" , "USER" ] # noqa: E501 if scope not in allowed_values : raise ValueError ( "Invalid value for `scope` ({0}), must be one of {1}" # noqa: E501 . format ( scope , allowed_values ) ) self . _scope = scope
Sets the scope of this Message .
118
8
28,058
def severity ( self , severity ) : if severity is None : raise ValueError ( "Invalid value for `severity`, must not be `None`" ) # noqa: E501 allowed_values = [ "MARKETING" , "INFO" , "WARN" , "SEVERE" ] # noqa: E501 if severity not in allowed_values : raise ValueError ( "Invalid value for `severity` ({0}), must be one of {1}" # noqa: E501 . format ( severity , allowed_values ) ) self . _severity = severity
Sets the severity of this Message .
125
8
28,059
def facet_query_matching_method ( self , facet_query_matching_method ) : allowed_values = [ "CONTAINS" , "STARTSWITH" , "EXACT" , "TAGPATH" ] # noqa: E501 if facet_query_matching_method not in allowed_values : raise ValueError ( "Invalid value for `facet_query_matching_method` ({0}), must be one of {1}" # noqa: E501 . format ( facet_query_matching_method , allowed_values ) ) self . _facet_query_matching_method = facet_query_matching_method
Sets the facet_query_matching_method of this FacetSearchRequestContainer .
145
19
28,060
def running_state ( self , running_state ) : allowed_values = [ "ONGOING" , "PENDING" , "ENDED" ] # noqa: E501 if running_state not in allowed_values : raise ValueError ( "Invalid value for `running_state` ({0}), must be one of {1}" # noqa: E501 . format ( running_state , allowed_values ) ) self . _running_state = running_state
Sets the running_state of this MaintenanceWindow .
101
11
28,061
def dynamic_field_type ( self , dynamic_field_type ) : allowed_values = [ "SOURCE" , "SOURCE_TAG" , "METRIC_NAME" , "TAG_KEY" , "MATCHING_SOURCE_TAG" ] # noqa: E501 if dynamic_field_type not in allowed_values : raise ValueError ( "Invalid value for `dynamic_field_type` ({0}), must be one of {1}" # noqa: E501 . format ( dynamic_field_type , allowed_values ) ) self . _dynamic_field_type = dynamic_field_type
Sets the dynamic_field_type of this DashboardParameterValue .
134
15
28,062
def parameter_type ( self , parameter_type ) : allowed_values = [ "SIMPLE" , "LIST" , "DYNAMIC" ] # noqa: E501 if parameter_type not in allowed_values : raise ValueError ( "Invalid value for `parameter_type` ({0}), must be one of {1}" # noqa: E501 . format ( parameter_type , allowed_values ) ) self . _parameter_type = parameter_type
Sets the parameter_type of this DashboardParameterValue .
103
13
28,063
def fixed_legend_filter_field ( self , fixed_legend_filter_field ) : allowed_values = [ "CURRENT" , "MEAN" , "MEDIAN" , "SUM" , "MIN" , "MAX" , "COUNT" ] # noqa: E501 if fixed_legend_filter_field not in allowed_values : raise ValueError ( "Invalid value for `fixed_legend_filter_field` ({0}), must be one of {1}" # noqa: E501 . format ( fixed_legend_filter_field , allowed_values ) ) self . _fixed_legend_filter_field = fixed_legend_filter_field
Sets the fixed_legend_filter_field of this ChartSettings .
153
16
28,064
def fixed_legend_filter_sort ( self , fixed_legend_filter_sort ) : allowed_values = [ "TOP" , "BOTTOM" ] # noqa: E501 if fixed_legend_filter_sort not in allowed_values : raise ValueError ( "Invalid value for `fixed_legend_filter_sort` ({0}), must be one of {1}" # noqa: E501 . format ( fixed_legend_filter_sort , allowed_values ) ) self . _fixed_legend_filter_sort = fixed_legend_filter_sort
Sets the fixed_legend_filter_sort of this ChartSettings .
130
16
28,065
def fixed_legend_position ( self , fixed_legend_position ) : allowed_values = [ "RIGHT" , "TOP" , "LEFT" , "BOTTOM" ] # noqa: E501 if fixed_legend_position not in allowed_values : raise ValueError ( "Invalid value for `fixed_legend_position` ({0}), must be one of {1}" # noqa: E501 . format ( fixed_legend_position , allowed_values ) ) self . _fixed_legend_position = fixed_legend_position
Sets the fixed_legend_position of this ChartSettings .
126
14
28,066
def line_type ( self , line_type ) : allowed_values = [ "linear" , "step-before" , "step-after" , "basis" , "cardinal" , "monotone" ] # noqa: E501 if line_type not in allowed_values : raise ValueError ( "Invalid value for `line_type` ({0}), must be one of {1}" # noqa: E501 . format ( line_type , allowed_values ) ) self . _line_type = line_type
Sets the line_type of this ChartSettings .
117
11
28,067
def sparkline_display_horizontal_position ( self , sparkline_display_horizontal_position ) : allowed_values = [ "MIDDLE" , "LEFT" , "RIGHT" ] # noqa: E501 if sparkline_display_horizontal_position not in allowed_values : raise ValueError ( "Invalid value for `sparkline_display_horizontal_position` ({0}), must be one of {1}" # noqa: E501 . format ( sparkline_display_horizontal_position , allowed_values ) ) self . _sparkline_display_horizontal_position = sparkline_display_horizontal_position
Sets the sparkline_display_horizontal_position of this ChartSettings .
146
17
28,068
def sparkline_display_value_type ( self , sparkline_display_value_type ) : allowed_values = [ "VALUE" , "LABEL" ] # noqa: E501 if sparkline_display_value_type not in allowed_values : raise ValueError ( "Invalid value for `sparkline_display_value_type` ({0}), must be one of {1}" # noqa: E501 . format ( sparkline_display_value_type , allowed_values ) ) self . _sparkline_display_value_type = sparkline_display_value_type
Sets the sparkline_display_value_type of this ChartSettings .
131
16
28,069
def sparkline_size ( self , sparkline_size ) : allowed_values = [ "BACKGROUND" , "BOTTOM" , "NONE" ] # noqa: E501 if sparkline_size not in allowed_values : raise ValueError ( "Invalid value for `sparkline_size` ({0}), must be one of {1}" # noqa: E501 . format ( sparkline_size , allowed_values ) ) self . _sparkline_size = sparkline_size
Sets the sparkline_size of this ChartSettings .
110
12
28,070
def sparkline_value_color_map_apply_to ( self , sparkline_value_color_map_apply_to ) : allowed_values = [ "TEXT" , "BACKGROUND" ] # noqa: E501 if sparkline_value_color_map_apply_to not in allowed_values : raise ValueError ( "Invalid value for `sparkline_value_color_map_apply_to` ({0}), must be one of {1}" # noqa: E501 . format ( sparkline_value_color_map_apply_to , allowed_values ) ) self . _sparkline_value_color_map_apply_to = sparkline_value_color_map_apply_to
Sets the sparkline_value_color_map_apply_to of this ChartSettings .
159
20
28,071
def stack_type ( self , stack_type ) : allowed_values = [ "zero" , "expand" , "wiggle" , "silhouette" ] # noqa: E501 if stack_type not in allowed_values : raise ValueError ( "Invalid value for `stack_type` ({0}), must be one of {1}" # noqa: E501 . format ( stack_type , allowed_values ) ) self . _stack_type = stack_type
Sets the stack_type of this ChartSettings .
105
11
28,072
def tag_mode ( self , tag_mode ) : allowed_values = [ "all" , "top" , "custom" ] # noqa: E501 if tag_mode not in allowed_values : raise ValueError ( "Invalid value for `tag_mode` ({0}), must be one of {1}" # noqa: E501 . format ( tag_mode , allowed_values ) ) self . _tag_mode = tag_mode
Sets the tag_mode of this ChartSettings .
97
11
28,073
def windowing ( self , windowing ) : allowed_values = [ "full" , "last" ] # noqa: E501 if windowing not in allowed_values : raise ValueError ( "Invalid value for `windowing` ({0}), must be one of {1}" # noqa: E501 . format ( windowing , allowed_values ) ) self . _windowing = windowing
Sets the windowing of this ChartSettings .
86
10
28,074
def result ( self , result ) : if result is None : raise ValueError ( "Invalid value for `result`, must not be `None`" ) # noqa: E501 allowed_values = [ "OK" , "ERROR" ] # noqa: E501 if result not in allowed_values : raise ValueError ( "Invalid value for `result` ({0}), must be one of {1}" # noqa: E501 . format ( result , allowed_values ) ) self . _result = result
Sets the result of this ResponseStatus .
109
9
28,075
def matching_method ( self , matching_method ) : allowed_values = [ "CONTAINS" , "STARTSWITH" , "EXACT" , "TAGPATH" ] # noqa: E501 if matching_method not in allowed_values : raise ValueError ( "Invalid value for `matching_method` ({0}), must be one of {1}" # noqa: E501 . format ( matching_method , allowed_values ) ) self . _matching_method = matching_method
Sets the matching_method of this SearchQuery .
110
11
28,076
def alert_type ( self , alert_type ) : allowed_values = [ "CLASSIC" , "THRESHOLD" ] # noqa: E501 if alert_type not in allowed_values : raise ValueError ( "Invalid value for `alert_type` ({0}), must be one of {1}" # noqa: E501 . format ( alert_type , allowed_values ) ) self . _alert_type = alert_type
Sets the alert_type of this Alert .
97
10
28,077
def severity_list ( self , severity_list ) : allowed_values = [ "INFO" , "SMOKE" , "WARN" , "SEVERE" ] # noqa: E501 if not set ( severity_list ) . issubset ( set ( allowed_values ) ) : raise ValueError ( "Invalid values for `severity_list` [{0}], must be a subset of [{1}]" # noqa: E501 . format ( ", " . join ( map ( str , set ( severity_list ) - set ( allowed_values ) ) ) , # noqa: E501 ", " . join ( map ( str , allowed_values ) ) ) ) self . _severity_list = severity_list
Sets the severity_list of this Alert .
160
10
28,078
def category_filter ( self , category_filter ) : allowed_values = [ "ADMINISTRATIVE" , "SERVICEHEALTH" , "ALERT" , "AUTOSCALE" , "SECURITY" ] # noqa: E501 if not set ( category_filter ) . issubset ( set ( allowed_values ) ) : raise ValueError ( "Invalid values for `category_filter` [{0}], must be a subset of [{1}]" # noqa: E501 . format ( ", " . join ( map ( str , set ( category_filter ) - set ( allowed_values ) ) ) , # noqa: E501 ", " . join ( map ( str , allowed_values ) ) ) ) self . _category_filter = category_filter
Sets the category_filter of this AzureActivityLogConfiguration .
171
13
28,079
def entity_type ( self , entity_type ) : if entity_type is None : raise ValueError ( "Invalid value for `entity_type`, must not be `None`" ) # noqa: E501 allowed_values = [ "DASHBOARD" , "ALERT" , "MAINTENANCE_WINDOW" , "NOTIFICANT" , "EVENT" , "SOURCE" , "EXTERNAL_LINK" , "AGENT" , "CLOUD_INTEGRATION" , "APPLICATION" , "REGISTERED_QUERY" , "USER" , "USER_GROUP" ] # noqa: E501 if entity_type not in allowed_values : raise ValueError ( "Invalid value for `entity_type` ({0}), must be one of {1}" # noqa: E501 . format ( entity_type , allowed_values ) ) self . _entity_type = entity_type
Sets the entity_type of this SavedSearch .
206
12
28,080
def summarization ( self , summarization ) : allowed_values = [ "MEAN" , "MEDIAN" , "MIN" , "MAX" , "SUM" , "COUNT" , "LAST" , "FIRST" ] # noqa: E501 if summarization not in allowed_values : raise ValueError ( "Invalid value for `summarization` ({0}), must be one of {1}" # noqa: E501 . format ( summarization , allowed_values ) ) self . _summarization = summarization
Sets the summarization of this Chart .
118
9
28,081
def categories_to_fetch ( self , categories_to_fetch ) : allowed_values = [ "APPENGINE" , "BIGQUERY" , "BIGTABLE" , "CLOUDFUNCTIONS" , "CLOUDIOT" , "CLOUDSQL" , "CLOUDTASKS" , "COMPUTE" , "CONTAINER" , "DATAFLOW" , "DATAPROC" , "DATASTORE" , "FIREBASEDATABASE" , "FIREBASEHOSTING" , "INTERCONNECT" , "LOADBALANCING" , "LOGGING" , "ML" , "MONITORING" , "PUBSUB" , "REDIS" , "ROUTER" , "SERVICERUNTIME" , "SPANNER" , "STORAGE" , "TPU" , "VPN" ] # noqa: E501 if not set ( categories_to_fetch ) . issubset ( set ( allowed_values ) ) : raise ValueError ( "Invalid values for `categories_to_fetch` [{0}], must be a subset of [{1}]" # noqa: E501 . format ( ", " . join ( map ( str , set ( categories_to_fetch ) - set ( allowed_values ) ) ) , # noqa: E501 ", " . join ( map ( str , allowed_values ) ) ) ) self . _categories_to_fetch = categories_to_fetch
Sets the categories_to_fetch of this GCPConfiguration .
342
15
28,082
def alert_statuses ( self , alert_statuses ) : if alert_statuses is None : raise ValueError ( "Invalid value for `alert_statuses`, must not be `None`" ) # noqa: E501 allowed_values = [ "VISIBLE" , "HIDDEN" , "NOT_LOADED" ] # noqa: E501 if not set ( alert_statuses . keys ( ) ) . issubset ( set ( allowed_values ) ) : raise ValueError ( "Invalid keys in `alert_statuses` [{0}], must be a subset of [{1}]" # noqa: E501 . format ( ", " . join ( map ( str , set ( alert_statuses . keys ( ) ) - set ( allowed_values ) ) ) , # noqa: E501 ", " . join ( map ( str , allowed_values ) ) ) ) self . _alert_statuses = alert_statuses
Sets the alert_statuses of this IntegrationStatus .
207
12
28,083
def content_status ( self , content_status ) : if content_status is None : raise ValueError ( "Invalid value for `content_status`, must not be `None`" ) # noqa: E501 allowed_values = [ "INVALID" , "NOT_LOADED" , "HIDDEN" , "VISIBLE" ] # noqa: E501 if content_status not in allowed_values : raise ValueError ( "Invalid value for `content_status` ({0}), must be one of {1}" # noqa: E501 . format ( content_status , allowed_values ) ) self . _content_status = content_status
Sets the content_status of this IntegrationStatus .
143
11
28,084
def install_status ( self , install_status ) : if install_status is None : raise ValueError ( "Invalid value for `install_status`, must not be `None`" ) # noqa: E501 allowed_values = [ "UNDECIDED" , "UNINSTALLED" , "INSTALLED" ] # noqa: E501 if install_status not in allowed_values : raise ValueError ( "Invalid value for `install_status` ({0}), must be one of {1}" # noqa: E501 . format ( install_status , allowed_values ) ) self . _install_status = install_status
Sets the install_status of this IntegrationStatus .
139
11
28,085
def event_filter_type ( self , event_filter_type ) : allowed_values = [ "BYCHART" , "AUTOMATIC" , "ALL" , "NONE" , "BYDASHBOARD" , "BYCHARTANDDASHBOARD" ] # noqa: E501 if event_filter_type not in allowed_values : raise ValueError ( "Invalid value for `event_filter_type` ({0}), must be one of {1}" # noqa: E501 . format ( event_filter_type , allowed_values ) ) self . _event_filter_type = event_filter_type
Sets the event_filter_type of this Dashboard .
140
13
28,086
def scatter_plot_source ( self , scatter_plot_source ) : allowed_values = [ "X" , "Y" ] # noqa: E501 if scatter_plot_source not in allowed_values : raise ValueError ( "Invalid value for `scatter_plot_source` ({0}), must be one of {1}" # noqa: E501 . format ( scatter_plot_source , allowed_values ) ) self . _scatter_plot_source = scatter_plot_source
Sets the scatter_plot_source of this ChartSourceQuery .
109
14
28,087
def content_type ( self , content_type ) : allowed_values = [ "application/json" , "text/html" , "text/plain" , "application/x-www-form-urlencoded" , "" ] # noqa: E501 if content_type not in allowed_values : raise ValueError ( "Invalid value for `content_type` ({0}), must be one of {1}" # noqa: E501 . format ( content_type , allowed_values ) ) self . _content_type = content_type
Sets the content_type of this Notificant .
119
11
28,088
def method ( self , method ) : if method is None : raise ValueError ( "Invalid value for `method`, must not be `None`" ) # noqa: E501 allowed_values = [ "WEBHOOK" , "EMAIL" , "PAGERDUTY" ] # noqa: E501 if method not in allowed_values : raise ValueError ( "Invalid value for `method` ({0}), must be one of {1}" # noqa: E501 . format ( method , allowed_values ) ) self . _method = method
Sets the method of this Notificant .
121
9
28,089
def triggers ( self , triggers ) : if triggers is None : raise ValueError ( "Invalid value for `triggers`, must not be `None`" ) # noqa: E501 allowed_values = [ "ALERT_OPENED" , "ALERT_UPDATED" , "ALERT_RESOLVED" , "ALERT_MAINTENANCE" , "ALERT_SNOOZED" , "ALERT_INVALID" , "ALERT_NO_LONGER_INVALID" , "ALERT_TESTING" , "ALERT_RETRIGGERED" , "ALERT_NO_DATA" , "ALERT_NO_DATA_RESOLVED" , "ALERT_NO_DATA_MAINTENANCE" , "ALERT_SERIES_SEVERITY_UPDATE" , "ALERT_SEVERITY_UPDATE" ] # noqa: E501 if not set ( triggers ) . issubset ( set ( allowed_values ) ) : raise ValueError ( "Invalid values for `triggers` [{0}], must be a subset of [{1}]" # noqa: E501 . format ( ", " . join ( map ( str , set ( triggers ) - set ( allowed_values ) ) ) , # noqa: E501 ", " . join ( map ( str , allowed_values ) ) ) ) self . _triggers = triggers
Sets the triggers of this Notificant .
313
9
28,090
def creator_type ( self , creator_type ) : allowed_values = [ "USER" , "ALERT" , "SYSTEM" ] # noqa: E501 if not set ( creator_type ) . issubset ( set ( allowed_values ) ) : raise ValueError ( "Invalid values for `creator_type` [{0}], must be a subset of [{1}]" # noqa: E501 . format ( ", " . join ( map ( str , set ( creator_type ) - set ( allowed_values ) ) ) , # noqa: E501 ", " . join ( map ( str , allowed_values ) ) ) ) self . _creator_type = creator_type
Sets the creator_type of this Event .
152
10
28,091
def _integerValue_to_int ( value_str ) : m = BINARY_VALUE . match ( value_str ) if m : value = int ( m . group ( 1 ) , 2 ) elif OCTAL_VALUE . match ( value_str ) : value = int ( value_str , 8 ) elif DECIMAL_VALUE . match ( value_str ) : value = int ( value_str ) elif HEX_VALUE . match ( value_str ) : value = int ( value_str , 16 ) else : value = None return value
Convert a value string that conforms to DSP0004 integerValue into the corresponding integer and return it . The returned value has Python type int or in Python 2 type long if needed .
122
39
28,092
def _realValue_to_float ( value_str ) : if REAL_VALUE . match ( value_str ) : value = float ( value_str ) else : value = None return value
Convert a value string that conforms to DSP0004 realValue into the corresponding float and return it .
41
23
28,093
def parse_cmdline ( argparser_ ) : opts = argparser_ . parse_args ( ) if not opts . server : argparser_ . error ( 'No WBEM server specified' ) return None return opts
Parse the command line . This tests for any required args
49
12
28,094
def _statuscode2name ( status_code ) : try : s = _STATUSCODE2NAME [ status_code ] except KeyError : s = _format ( "Invalid status code {0}" , status_code ) return s
Return the symbolic name for a CIM status code .
51
11
28,095
def _statuscode2string(status_code):
    """Return a short message for a CIM status code, or a message
    noting that the code is invalid when it is not a known code."""
    if status_code in _STATUSCODE2STRING:
        return _STATUSCODE2STRING[status_code]
    return _format("Invalid status code {0}", status_code)
Return a short message for a CIM status code .
52
11
28,096
def build_mock_repository(conn_, file_path_list, verbose):
    """Build the mock repository from the file_path list and fake
    connection instance. Both MOF files and python files may be used
    to build the repository.

    :param conn_: FakedWBEMConnection whose repository is populated.
    :param file_path_list: paths of '.mof' files (compiled into the
        repository) and '.py' files (executed with CONN and VERBOSE
        set in their global namespace).
    :param verbose: if True, display the repository after building.
    :raises ValueError: if a file does not exist, has a suffix other
        than '.mof' or '.py', or a python script raises an exception.
    """
    for file_path in file_path_list:
        ext = _os.path.splitext(file_path)[1]
        if not _os.path.exists(file_path):
            raise ValueError('File name %s does not exist' % file_path)
        if ext == '.mof':
            conn_.compile_mof_file(file_path)
        elif ext == '.py':
            try:
                with open(file_path) as fp:
                    # the exec includes CONN and VERBOSE
                    globalparams = {'CONN': conn_, 'VERBOSE': verbose}
                    # pylint: disable=exec-used
                    exec(fp.read(), globalparams, None)
            except Exception as ex:
                exc_type, exc_value, exc_traceback = _sys.exc_info()
                tb = repr(traceback.format_exception(exc_type, exc_value,
                                                     exc_traceback))
                # BUG FIX: the original referenced the undefined name
                # `conn` (instead of the parameter `conn_`), turning any
                # script failure into a NameError instead of the
                # intended ValueError.
                raise ValueError(
                    'Exception failure of "--mock-server" python script %r '
                    'with conn %r Exception: %r\nTraceback\n%s' %
                    (file_path, conn_, ex, tb))
        else:
            raise ValueError('Invalid suffix %s on "--mock-server" '
                             'global parameter %s. Must be "py" or "mof".' %
                             (ext, file_path))
    if verbose:
        conn_.display_repository()
Build the mock repository from the file_path list and fake connection instance . This allows both mof files and python files to be used to build the repository .
348
32
28,097
def _remote_connection(server, opts, argparser_):
    """Initiate a connection via PyWBEM, either a faked (mock) one or a
    real remote connection, using the command-line options (user name,
    password, namespace, certificates, timeout, ...).

    :param server: server address/URL argument from the command line.
    :param opts: parsed options namespace.
    :param argparser_: parser used to report option errors (it exits).
    :return: the connection object, also stored in the global CONN.
    """
    global CONN  # pylint: disable=global-statement

    if opts.timeout is not None and not 0 <= opts.timeout <= 300:
        argparser_.error('timeout option(%s) out of range' % opts.timeout)

    # mock only uses the namespace, timeout and statistics options from
    # the original set of options. It ignores the url.
    if opts.mock_server:
        CONN = FakedWBEMConnection(
            default_namespace=opts.namespace,
            timeout=opts.timeout,
            stats_enabled=opts.statistics)
        try:
            build_mock_repository(CONN, opts.mock_server, opts.verbose)
        except ValueError as ve:
            argparser_.error('Build Repository failed: %s' % ve)
        return CONN

    if server[0] == '/':
        url = server
    elif re.match(r"^https{0,1}://", server) is not None:
        url = server
    elif re.match(r"^[a-zA-Z0-9]+://", server) is not None:
        # Any other scheme than http/https is rejected.
        argparser_.error('Invalid scheme on server argument.'
                         ' Use "http" or "https"')
    else:
        # No scheme given: default to https.
        url = '%s://%s' % ('https', server)

    if opts.key_file is not None and opts.cert_file is None:
        argparser_.error('keyfile option requires certfile option')

    # Prompt for a password when a user was given without one.
    if opts.user is not None and opts.password is None:
        opts.password = _getpass.getpass('Enter password for %s: ' %
                                         opts.user)

    creds = None
    if opts.user is not None or opts.password is not None:
        creds = (opts.user, opts.password)

    # if client cert and key provided, create dictionary for
    # wbem connection
    x509_dict = None
    if opts.cert_file is not None:
        x509_dict = {"cert_file": opts.cert_file}
        if opts.key_file is not None:
            x509_dict['key_file'] = opts.key_file

    CONN = WBEMConnection(url, creds, default_namespace=opts.namespace,
                          no_verification=opts.no_verify_cert,
                          x509=x509_dict, ca_certs=opts.ca_certs,
                          timeout=opts.timeout,
                          stats_enabled=opts.statistics)
    CONN.debug = True
    return CONN
Initiate a remote connection via PyWBEM . Arguments for the request are part of the command line arguments and include user name password namespace etc .
586
31
28,098
def _get_connection_info():
    """Return a wrapped one-paragraph string describing the global CONN
    (url, credentials, certificates, namespace, timeout, stats, log)."""
    parts = ['Connection: %s,' % CONN.url]
    if CONN.creds is not None:
        parts.append(' userid=%s,' % CONN.creds[0])
    else:
        parts.append(' no creds,')
    parts.append(' cacerts=%s,' % ('sys-default' if CONN.ca_certs is None
                                   else CONN.ca_certs))
    parts.append(' verifycert=%s,' % ('off' if CONN.no_verification
                                      else 'on'))
    parts.append(' default-namespace=%s' % CONN.default_namespace)
    if CONN.x509 is not None:
        parts.append(', client-cert=%s' % CONN.x509['cert_file'])
        # 'key_file' is optional in the x509 dict; report "none" when
        # it is absent.
        parts.append(":%s" % CONN.x509.get('key_file', "none"))
    if CONN.timeout is not None:
        parts.append(', timeout=%s' % CONN.timeout)
    # pylint: disable=protected-access
    parts.append(' stats=%s, ' % ('on' if CONN._statistics else 'off'))
    parts.append('log=%s' % ('on' if CONN._operation_recorders else 'off'))
    if isinstance(CONN, FakedWBEMConnection):
        parts.append(', mock-server')
    return fill(''.join(parts), 78, subsequent_indent='    ')
Return a string with the connection info .
333
8
28,099
def _get_banner():
    """Return the banner message shown when the interactive console
    starts, including connection info and an exit hint."""
    # Give hint about exiting. Most people exit with 'quit()' which will
    # not return from the interact() method, and thus will not write
    # the history.
    if _sys.platform == 'win32':
        exit_hint = '\nEnter Ctrl-Z or quit() or exit() to exit'
    else:
        exit_hint = '\nPress Ctrl-D or enter quit() or exit() to exit'
    return ''.join([
        '\nPython %s' % _sys.version,
        '\n\nWbemcli interactive shell',
        '\n%s' % _get_connection_info(),
        exit_hint,
        '\nEnter h() for help',
    ])
Return a banner message for the interactive console .
154
9