idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
19,700
def net(self):
    """Return a ``NetData`` view of this container's inspect response.

    Raises NotAvailableAnymore when the container no longer exists.
    """
    try:
        inspection = self.inspect(cached=True)
        return NetData(inspection.response)
    except docker.errors.NotFound:
        raise NotAvailableAnymore()
Get the active port mappings of a container.
19,701
def top(self):
    """List processes running inside this container.

    Returns a list of dicts keyed by the docker "Titles" columns;
    an empty list on API errors.
    """
    ps_args = "-eo pid,ppid,wchan,args"
    try:
        response = self.d.top(self.container_id, ps_args=ps_args)
    except docker.errors.APIError as ex:
        logger.warning("error getting processes: %r", ex)
        return []
    logger.debug(json.dumps(response, indent=2))
    titles = response["Titles"]
    processes = response["Processes"] or []
    return [dict(zip(titles, proc)) for proc in processes]
list of processes in a running container
19,702
def filter(self, containers=True, images=True, stopped=True, cached=False, sort_by_created=True):
    """Return (content, containers_operation, images_operation).

    `content` combines containers and/or images, optionally sorted newest
    first by their natural sort value.
    """
    content = []
    containers_o = None
    images_o = None
    # NOTE: the containers listing is also consulted when stopped is False,
    # mirroring the backend's filtering contract.
    if containers or not stopped:
        containers_o = self.get_containers(cached=cached, stopped=stopped)
        content.extend(containers_o.response)
    if images:
        images_o = self.get_images(cached=cached)
        content.extend(images_o.response)
    if sort_by_created:
        content.sort(key=attrgetter("natural_sort_value"), reverse=True)
    return content, containers_o, images_o
since django is so awesome, let's use their ORM API
19,703
def column_widths(self, size, focus=False):
    """Return a list of column widths.

    Each entry of ``self.contents`` is a ``(widget, (sizing, width, box))``
    tuple; the width component is collected for every column.  The screen
    width (``size[0]``) and the computed widths are cached on the instance.
    The ``focus`` flag is accepted for interface compatibility and unused.
    """
    maxcol = size[0]
    self._cache_maxcol = maxcol
    # Unpack each options tuple directly; no index is needed.
    widths = [width for _widget, (_sizing, width, _box) in self.contents]
    self._cache_column_widths = widths
    return widths
Return a list of column widths .
19,704
def query(self, query_string=""):
    """Query the backend and display results; also apply textual filters.

    query_string is a space/comma separated list of `key=value` terms
    (type/state filters); anything that is not `key=value` is handed to the
    parent class's text filter.
    """
    def query_notify(operation):
        # Surface backend operation messages in the UI, if any.
        w = get_operation_notify_widget(operation, display_always=False)
        if w:
            self.ui.notify_widget(w)
    if query_string is not None:
        self.filter_query = query_string.strip()
    # Mutable backend query spec, toggled by the callbacks below.
    backend_query = {
        "cached": False,
        "containers": True,
        "images": True,
    }
    def containers():
        backend_query["containers"] = True
        backend_query["images"] = not backend_query["images"]
        backend_query["cached"] = True
    def images():
        backend_query["containers"] = not backend_query["containers"]
        backend_query["images"] = True
        backend_query["cached"] = True
    def running():
        backend_query["stopped"] = False
        backend_query["cached"] = True
        backend_query["images"] = False
    # NOTE(review): "images" appears twice in the second entry's values;
    # "image" was probably intended as the singular form — confirm.
    query_conf = [
        {"query_keys": ["t", "type"], "query_values": ["c", "container", "containers"], "callback": containers},
        {"query_keys": ["t", "type"], "query_values": ["i", "images", "images"], "callback": images},
        {"query_keys": ["s", "state"], "query_values": ["r", "running"], "callback": running},
    ]
    query_list = re.split(r"[\s,]", self.filter_query)
    unprocessed = []
    for query_str in query_list:
        if not query_str:
            continue
        try:
            query_key, query_value = query_str.split("=", 1)
        except ValueError:
            # Not key=value: defer to the parent text filter below.
            unprocessed.append(query_str)
        else:
            logger.debug("looking up query key %r and query value %r", query_key, query_value)
            for c in query_conf:
                if query_key in c["query_keys"] and query_value in c["query_values"]:
                    c["callback"]()
                    break
            else:
                raise NotifyError("Invalid query string: %r", query_str)
    widgets = []
    logger.debug("doing query %s", backend_query)
    query, c_op, i_op = self.d.filter(**backend_query)
    for o in query:
        try:
            line = MainLineWidget(o)
        except NotAvailableAnymore:
            # Object vanished between listing and rendering; skip it.
            continue
        widgets.append(line)
    if unprocessed:
        new_query = " ".join(unprocessed)
        logger.debug("doing parent query for unprocessed string: %r", new_query)
        super().filter(new_query, widgets_to_filter=widgets)
    else:
        self.set_body(widgets)
        self.ro_content = widgets
    query_notify(i_op)
    query_notify(c_op)
Query the backend and display the results; also apply any filters.
19,705
def get_command(self, command_input, docker_object=None, buffer=None, size=None):
    """Return the command instance matching `command_input`.

    Raises NoSuchCommand when the first token is not a known command.
    Returns None on empty input.
    """
    logger.debug("get command for command input %r", command_input)
    if not command_input:
        return
    if command_input[0] in ["/"]:
        # Single-character command: everything after it is the arguments.
        command_name = command_input[0]
        unparsed_command_args = shlex.split(command_input[1:])
    else:
        tokens = shlex.split(command_input)
        command_name = tokens[0]
        unparsed_command_args = tokens[1:]
    try:
        CommandClass = commands_mapping[command_name]
    except KeyError:
        logger.info("no such command: %r", command_name)
        raise NoSuchCommand("There is no such command: %s" % command_name)
    cmd = CommandClass(ui=self.ui, docker_backend=self.docker_backend,
                       docker_object=docker_object, buffer=buffer, size=size)
    cmd.process_args(unparsed_command_args)
    return cmd
return command instance which is the actual command to be executed
19,706
def read(self):
    """Read the pin value as an integer, rewinding the descriptor after."""
    raw = self._fd.read()
    self._fd.seek(0)
    return int(raw)
Read pin value
19,707
def getVersion():
    """Print the epochs code version."""
    parts = (CDFepoch.version, CDFepoch.release, CDFepoch.increment)
    print('epochs version:', '.'.join(str(p) for p in parts))
Shows the code version .
19,708
def getLeapSecondLastUpdated():
    """Print the latest date a leap second was added to the leap second table."""
    last = CDFepoch.LTS[-1]
    print('Leap second last updated:', str(last[0]) + '-' + str(last[1]) + '-' + str(last[2]))
Shows the latest date a leap second was added to the leap second table .
19,709
def close(self):
    """Close the CDF writer: finalize the EOF field, optional checksum,
    and, when compression is requested, rewrite the file as a compressed CDF.
    """
    if self.compressed_file is None:
        # Uncompressed output: just patch the GDR's EOF field (+36) and
        # append the MD5 checksum when enabled.
        with self.path.open('rb+') as f:
            f.seek(0, 2)
            eof = f.tell()
            self._update_offset_value(f, self.gdr_head + 36, 8, eof)
            if self.checksum:
                f.write(self._md5_compute(f))
        return
    # Compressed output: finalize the uncompressed file, then write the
    # compressed container (magic numbers + CCR) and swap it into place.
    with self.path.open('rb+') as f:
        f.seek(0, 2)
        eof = f.tell()
        self._update_offset_value(f, self.gdr_head + 36, 8, eof)
        with self.compressed_file.open('wb+') as g:
            g.write(bytearray.fromhex(CDF.V3magicNUMBER_1))
            g.write(bytearray.fromhex(CDF.V3magicNUMBER_2c))
            self._write_ccr(f, g, self.compression)
            if self.checksum:
                g.seek(0, 2)
                g.write(self._md5_compute(g))
    self.path.unlink()
    self.compressed_file.rename(self.path)
Closes the CDF Class .
19,710
def _write_var_data_sparse(self, f, zVar, var, dataType, numElems, recVary, oneblock):
    """Write a VVR and register it in a VXR for one block of sparse records.

    oneblock is (first_record, last_record, data).  Returns the last
    record number written.
    """
    rec_start = oneblock[0]
    rec_end = oneblock[1]
    indata = oneblock[2]
    numValues = self._num_values(zVar, var)
    _, data = self._convert_data(dataType, numElems, numValues, indata)
    # The VDR offset lives at index 1 of the per-variable info tuple.
    if zVar:
        vdr_offset = self.zvarsinfo[var][1]
    else:
        vdr_offset = self.rvarsinfo[var][1]
    # Write the data block first; `offset` is where the VVR landed.
    offset = self._write_vvr(f, data)
    # VDR field at +28 holds the head-VXR pointer.
    f.seek(vdr_offset + 28, 0)
    vxrOne = int.from_bytes(f.read(8), 'big', signed=True)
    foundSpot = 0
    usedEntries = 0
    currentVXR = 0
    # Walk the VXR chain looking for a VXR with a free entry slot.
    while foundSpot == 0 and vxrOne > 0:
        f.seek(vxrOne, 0)
        currentVXR = f.tell()
        f.seek(vxrOne + 12, 0)
        vxrNext = int.from_bytes(f.read(8), 'big', signed=True)
        nEntries = int.from_bytes(f.read(4), 'big', signed=True)
        usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
        if (usedEntries == nEntries):
            vxrOne = vxrNext
        else:
            foundSpot = 1
    if (vxrOne == 0 or vxrOne == -1):
        # No VXR with a free slot: create one and link it into the chain.
        currentVXR = self._create_vxr(f, rec_start, rec_end, vdr_offset, currentVXR, offset)
    else:
        self._use_vxrentry(f, currentVXR, rec_start, rec_end, offset)
    # Bump the VDR's max-record field (+24) if this block extends it.
    f.seek(vdr_offset + 24, 0)
    recNumc = int.from_bytes(f.read(4), 'big', signed=True)
    if (rec_end > recNumc):
        self._update_offset_value(f, vdr_offset + 24, 4, rec_end)
    return rec_end
Writes a VVR and a VXR for this block of sparse data
19,711
def _create_vxr(self, f, recStart, recEnd, currentVDR, priorVXR, vvrOffset):
    """Create a new VXR with one used entry and link it into the VXR chain.

    Returns the byte offset of the new VXR.
    """
    vxroffset = self._write_vxr(f)
    self._use_vxrentry(f, vxroffset, recStart, recEnd, vvrOffset)
    if (priorVXR == 0):
        # First VXR for this variable: set the VDR's head pointer (+28).
        self._update_offset_value(f, currentVDR + 28, 8, vxroffset)
    else:
        # Chain after the previous VXR (+12 is its "next" pointer).
        self._update_offset_value(f, priorVXR + 12, 8, vxroffset)
    # Either way the new VXR becomes the VDR's tail (+36).
    self._update_offset_value(f, currentVDR + 36, 8, vxroffset)
    return vxroffset
Create a VXR AND use a VXR
19,712
def _use_vxrentry(self, f, VXRoffset, recStart, recEnd, offset):
    """Record one VVR pointer in the next free slot of the VXR at VXRoffset.

    Returns the updated used-entry count.
    """
    # VXR layout: +20 total entries, +24 used entries, followed by three
    # parallel arrays: First[n] (int4), Last[n] (int4), Offset[n] (int8).
    f.seek(VXRoffset + 20)
    numEntries = int.from_bytes(f.read(4), 'big', signed=True)
    usedEntries = int.from_bytes(f.read(4), 'big', signed=True)
    # Fill slot `usedEntries` of each array.
    self._update_offset_value(f, VXRoffset + 28 + 4 * usedEntries, 4, recStart)
    self._update_offset_value(f, VXRoffset + 28 + 4 * numEntries + 4 * usedEntries, 4, recEnd)
    self._update_offset_value(f, VXRoffset + 28 + 2 * 4 * numEntries + 8 * usedEntries, 8, offset)
    usedEntries += 1
    self._update_offset_value(f, VXRoffset + 24, 4, usedEntries)
    return usedEntries
Adds a VVR pointer to a VXR
19,713
def _add_vxr_levels_r(self, f, vxrhead, numVXRs):
    """Build one level of parent VXRs over an existing chain (recursive).

    Groups the numVXRs VXRs starting at vxrhead under new parent VXRs of
    NUM_VXRlvl_ENTRIES entries each, recursing until one head remains.
    Returns (head, tail) of the top level.
    """
    newNumVXRs = int(numVXRs / CDF.NUM_VXRlvl_ENTRIES)
    remaining = int(numVXRs % CDF.NUM_VXRlvl_ENTRIES)
    vxroff = vxrhead
    prevxroff = -1
    if (remaining != 0):
        newNumVXRs += 1
    # NOTE(review): CDF.level is class-level state mutated here — confirm
    # callers reset it between variables.
    CDF.level += 1
    for x in range(0, newNumVXRs):
        newvxroff = self._write_vxr(f, numEntries=CDF.NUM_VXRlvl_ENTRIES)
        if (x > 0):
            # Chain this parent after the previous one (+12 = next pointer).
            self._update_offset_value(f, prevxroff + 12, 8, newvxroff)
        else:
            newvxrhead = newvxroff
        prevxroff = newvxroff
        # The last parent may hold fewer than a full complement of children.
        if (x == (newNumVXRs - 1)):
            if (remaining == 0):
                endEntry = CDF.NUM_VXRlvl_ENTRIES
            else:
                endEntry = remaining
        else:
            endEntry = CDF.NUM_VXRlvl_ENTRIES
        # Register `endEntry` children in this parent, walking the chain.
        for _ in range(0, endEntry):
            recFirst, recLast = self._get_recrange(f, vxroff)
            self._use_vxrentry(f, newvxroff, recFirst, recLast, vxroff)
            vxroff = self._read_offset_value(f, vxroff + 12, 8)
    # Zero the children's next pointers now that parents reference them.
    vxroff = vxrhead
    for x in range(0, numVXRs):
        nvxroff = self._read_offset_value(f, vxroff + 12, 8)
        self._update_offset_value(f, vxroff + 12, 8, 0)
        vxroff = nvxroff
    if (newNumVXRs > CDF.NUM_VXRlvl_ENTRIES):
        # Still too many at this level: add another level above.
        return self._add_vxr_levels_r(f, newvxrhead, newNumVXRs)
    else:
        return newvxrhead, newvxroff
Build a new level of VXRs ... make VXRs more tree - like
19,714
def _update_vdr_vxrheadtail ( self , f , vdr_offset , VXRoffset ) : self . _update_offset_value ( f , vdr_offset + 28 , 8 , VXRoffset ) self . _update_offset_value ( f , vdr_offset + 36 , 8 , VXRoffset )
This sets a VXR to be the first and last VXR in the VDR
19,715
def _get_recrange ( self , f , VXRoffset ) : f . seek ( VXRoffset + 20 ) numEntries = int . from_bytes ( f . read ( 4 ) , 'big' , signed = True ) usedEntries = int . from_bytes ( f . read ( 4 ) , 'big' , signed = True ) firstRec = int . from_bytes ( f . read ( 4 ) , 'big' , signed = True ) f . seek ( VXRoffset + 28 + ( 4 * numEntries + 4 * ( usedEntries - 1 ) ) ) lastRec = int . from_bytes ( f . read ( 4 ) , 'big' , signed = True ) return firstRec , lastRec
Finds the first and last record numbers pointed by the VXR Assumes the VXRs are in order
19,716
def _datatype_size ( datatype , numElms ) : sizes = { 1 : 1 , 2 : 2 , 4 : 4 , 8 : 8 , 11 : 1 , 12 : 2 , 14 : 4 , 21 : 4 , 22 : 8 , 31 : 8 , 32 : 16 , 33 : 8 , 41 : 1 , 44 : 4 , 45 : 8 , 51 : 1 , 52 : 1 } try : if ( isinstance ( datatype , int ) ) : if ( datatype == 51 or datatype == 52 ) : return numElms else : return sizes [ datatype ] else : datatype = datatype . upper ( ) if ( datatype == 'CDF_INT1' or datatype == 'CDF_UINT1' or datatype == 'CDF_BYTE' ) : return 1 elif ( datatype == 'CDF_INT2' or datatype == 'CDF_UINT2' ) : return 2 elif ( datatype == 'CDF_INT4' or datatype == 'CDF_UINT4' ) : return 4 elif ( datatype == 'CDF_INT8' or datatype == 'CDF_TIME_TT2000' ) : return 8 elif ( datatype == 'CDF_REAL4' or datatype == 'CDF_FLOAT' ) : return 4 elif ( datatype == 'CDF_REAL8' or datatype == 'CDF_DOUBLE' or datatype == 'CDF_EPOCH' ) : return 8 elif ( datatype == 'CDF_EPOCH16' ) : return 16 elif ( datatype == 'CDF_CHAR' or datatype == 'CDF_UCHAR' ) : return numElms else : return - 1 except Exception : return - 1
Gets datatype size
19,717
def _write_adr(self, f, gORv, name):
    """Write an ADR (attribute descriptor record) at the end of the file.

    gORv True selects global scope (1), otherwise variable scope (2).
    Returns (attribute number, byte offset of the ADR).
    """
    f.seek(0, 2)
    byte_loc = f.tell()
    block_size = CDF.ADR_BASE_SIZE64
    section_type = CDF.ADR_
    nextADR = 0
    headAgrEDR = 0
    if (gORv == True):
        scope = 1
    else:
        scope = 2
    # The new attribute's number is the current count of attributes.
    num = len(self.attrs)
    ngrEntries = 0
    maxgrEntry = -1
    rfuA = 0
    headAzEDR = 0
    nzEntries = 0
    maxzEntry = -1
    rfuE = -1
    adr = bytearray(block_size)
    # Fixed-layout big-endian fields.
    adr[0:8] = struct.pack('>q', block_size)
    adr[8:12] = struct.pack('>i', section_type)
    adr[12:20] = struct.pack('>q', nextADR)
    adr[20:28] = struct.pack('>q', headAgrEDR)
    adr[28:32] = struct.pack('>i', scope)
    adr[32:36] = struct.pack('>i', num)
    adr[36:40] = struct.pack('>i', ngrEntries)
    adr[40:44] = struct.pack('>i', maxgrEntry)
    adr[44:48] = struct.pack('>i', rfuA)
    adr[48:56] = struct.pack('>q', headAzEDR)
    adr[56:60] = struct.pack('>i', nzEntries)
    adr[60:64] = struct.pack('>i', maxzEntry)
    adr[64:68] = struct.pack('>i', rfuE)
    # Name is a fixed 256-byte NUL-padded field.
    tofill = 256 - len(name)
    adr[68:324] = (name + '\0' * tofill).encode()
    f.write(adr)
    # Remember (name, scope, offset) for this attribute number.
    info = []
    info.append(name)
    info.append(scope)
    info.append(byte_loc)
    self.attrsinfo[num] = info
    if (scope == 1):
        self.gattrs.append(name)
    else:
        self.vattrs.append(name)
    self.attrs.append(name)
    if (num > 0):
        # Link the previous ADR's next pointer (+12) to this one.
        self._update_offset_value(f, self.attrsinfo[num - 1][2] + 12, 8, byte_loc)
    else:
        # First attribute: set the GDR's ADR-head pointer (+28).
        self._update_offset_value(f, self.gdr_head + 28, 8, byte_loc)
    # Bump the GDR attribute count (+48).
    self._update_offset_value(f, self.gdr_head + 48, 4, num + 1)
    return num, byte_loc
Writes an ADR to the end of the file.
19,718
def _write_aedr(self, f, gORz, attrNum, entryNum, value, pdataType, pnumElems, zVar):
    """Write an AEDR (attribute entry descriptor record) at end of file.

    gORz True selects a g-entry record type; otherwise Agr/Az is picked by
    zVar.  When pdataType / pnumElems are None they are inferred from
    `value`.  Returns the byte offset of the new AEDR.
    """
    f.seek(0, 2)
    byte_loc = f.tell()
    if (gORz == True or zVar != True):
        section_type = CDF.AgrEDR_
    else:
        section_type = CDF.AzEDR_
    nextAEDR = 0
    if pdataType is None:
        # Infer a CDF data type from the (first) python value.
        if isinstance(value, (list, tuple)):
            avalue = value[0]
        else:
            avalue = value
        if (isinstance(avalue, int)):
            pdataType = CDF.CDF_INT8
        elif (isinstance(avalue, float)):
            pdataType = CDF.CDF_FLOAT
        elif (isinstance(avalue, complex)):
            pdataType = CDF.CDF_EPOCH16
        else:
            pdataType = CDF.CDF_INT1
    if pnumElems is None:
        if isinstance(value, str):
            # Strings become CDF_CHAR with one element per character.
            pdataType = CDF.CDF_CHAR
            pnumElems = len(value)
        else:
            if isinstance(value, (list, tuple)):
                pnumElems = len(value)
            else:
                pnumElems = 1
    dataType = pdataType
    numElems = pnumElems
    rfuB = 0
    rfuC = 0
    rfuD = -1
    rfuE = -1
    if gORz:
        numStrings = 0
    else:
        if (isinstance(value, str)):
            # NOTE(review): counts the literal sequence backslash-N-space as
            # the multi-string delimiter — confirm this matches the CDF spec.
            numStrings = value.count('\\N ') + 1
        else:
            numStrings = 0
    recs, cdata = self._convert_data(dataType, numElems, 1, value)
    if (dataType == 51):
        # CDF_CHAR: the element count is the encoded byte length.
        numElems = len(cdata)
    block_size = len(cdata) + 56
    aedr = bytearray(block_size)
    aedr[0:8] = struct.pack('>q', block_size)
    aedr[8:12] = struct.pack('>i', section_type)
    aedr[12:20] = struct.pack('>q', nextAEDR)
    aedr[20:24] = struct.pack('>i', attrNum)
    aedr[24:28] = struct.pack('>i', dataType)
    aedr[28:32] = struct.pack('>i', entryNum)
    aedr[32:36] = struct.pack('>i', numElems)
    aedr[36:40] = struct.pack('>i', numStrings)
    aedr[40:44] = struct.pack('>i', rfuB)
    aedr[44:48] = struct.pack('>i', rfuC)
    aedr[48:52] = struct.pack('>i', rfuD)
    aedr[52:56] = struct.pack('>i', rfuE)
    aedr[56:block_size] = cdata
    f.write(aedr)
    return byte_loc
Writes an aedr into the end of the file .
19,719
def _write_vxr(self, f, numEntries=None):
    """Append an empty VXR (variable index record) to f.

    Entries default to CDF.NUM_VXR_ENTRIES; the First/Last/Offset arrays
    are initialized to -1 and filled in later.  Returns the byte offset.
    """
    f.seek(0, 2)
    byte_loc = f.tell()
    section_type = CDF.VXR_
    nextVXR = 0
    if (numEntries == None):
        nEntries = CDF.NUM_VXR_ENTRIES
    else:
        nEntries = int(numEntries)
    # Each entry costs 4 (First) + 4 (Last) + 8 (Offset) bytes.
    block_size = CDF.VXR_BASE_SIZE64 + (4 + 4 + 8) * nEntries
    nUsedEntries = 0
    firsts = [-1] * nEntries
    lasts = [-1] * nEntries
    offsets = [-1] * nEntries
    vxr = bytearray(block_size)
    vxr[0:8] = struct.pack('>q', block_size)
    vxr[8:12] = struct.pack('>i', section_type)
    vxr[12:20] = struct.pack('>q', nextVXR)
    vxr[20:24] = struct.pack('>i', nEntries)
    vxr[24:28] = struct.pack('>i', nUsedEntries)
    # Three parallel arrays: First (int4), Last (int4), Offset (int8).
    estart = 28 + 4 * nEntries
    vxr[28:estart] = struct.pack('>%si' % nEntries, *firsts)
    eend = estart + 4 * nEntries
    vxr[estart:eend] = struct.pack('>%si' % nEntries, *lasts)
    vxr[eend:block_size] = struct.pack('>%sq' % nEntries, *offsets)
    f.write(vxr)
    return byte_loc
Creates a VXR at the end of the file . Returns byte location of the VXR The First Last and Offset fields will need to be filled in later
19,720
def _write_vvr(self, f, data):
    """Append a VVR holding `data` to file f; return its byte offset."""
    f.seek(0, 2)
    byte_loc = f.tell()
    # Header: record size (int8) + record type (int4), then the raw bytes.
    header = bytearray(12)
    header[0:8] = struct.pack('>q', CDF.VVR_BASE_SIZE64 + len(data))
    header[8:12] = struct.pack('>i', CDF.VVR_)
    f.write(header)
    f.write(data)
    return byte_loc
Writes a vvr to the end of file f with the byte stream data .
19,721
def _write_cpr(self, f, cType, parameter) -> int:
    """Append a CPR (compression parameters record) to f; return its offset."""
    f.seek(0, 2)
    byte_loc = f.tell()
    block_size = CDF.CPR_BASE_SIZE64 + 4
    cpr = bytearray(block_size)
    # Fields: size, type, cType, rfuA, pCount, parameter — all big-endian.
    cpr[0:28] = struct.pack('>qiiiii', block_size, CDF.CPR_, cType, 0, 1, parameter)
    f.write(cpr)
    return byte_loc
Write compression info to the end of the file in a CPR .
19,722
def _write_cvvr(self, f, data):
    """Append a CVVR (compressed variable values record) to f; return its offset."""
    f.seek(0, 2)
    byte_loc = f.tell()
    cSize = len(data)
    # 24-byte header: size, type, rfuA, compressed size — then the payload.
    header = struct.pack('>qiiq', CDF.CVVR_BASE_SIZE64 + cSize, CDF.CVVR_, 0, cSize)
    f.write(header)
    f.write(data)
    return byte_loc
Write compressed data variable to the end of the file in a CVVR
19,723
def _write_ccr(self, f, g, level: int):
    """Write a CCR (compressed CDF record) into g from the body of f.

    Only gzip compression is handled; `level` is the gzip level.
    """
    # Skip f's 8-byte magic; everything after it is the compressible body.
    f.seek(8)
    data = f.read()
    uSize = len(data)
    section_type = CDF.CCR_
    rfuA = 0
    cData = gzip.compress(data, level)
    block_size = CDF.CCR_BASE_SIZE64 + len(cData)
    cprOffset = 0
    ccr1 = bytearray(32)
    # Header: size, type, CPR offset (patched below), uncompressed size, rfuA.
    ccr1[0:8] = struct.pack('>q', block_size)
    ccr1[8:12] = struct.pack('>i', section_type)
    ccr1[12:20] = struct.pack('>q', cprOffset)
    ccr1[20:28] = struct.pack('>q', uSize)
    ccr1[28:32] = struct.pack('>i', rfuA)
    g.seek(0, 2)
    g.write(ccr1)
    g.write(cData)
    # Write the CPR after the CCR, then patch the CCR's pointer to it.
    # Absolute offset 20 = 8-byte magic + 12-byte header prefix — assumes
    # g already starts with the two 4-byte magic numbers (written by close()).
    cprOffset = self._write_cpr(g, CDF.GZIP_COMPRESSION, level)
    self._update_offset_value(g, 20, 8, cprOffset)
Write a CCR to file g from file f with level level . Currently only handles gzip compression .
19,724
def _convert_type ( data_type ) : if data_type in ( 1 , 41 ) : dt_string = 'b' elif data_type == 2 : dt_string = 'h' elif data_type == 4 : dt_string = 'i' elif data_type in ( 8 , 33 ) : dt_string = 'q' elif data_type == 11 : dt_string = 'B' elif data_type == 12 : dt_string = 'H' elif data_type == 14 : dt_string = 'I' elif data_type in ( 21 , 44 ) : dt_string = 'f' elif data_type in ( 22 , 45 , 31 ) : dt_string = 'd' elif data_type == 32 : dt_string = 'd' elif data_type in ( 51 , 52 ) : dt_string = 's' else : dt_string = '' return dt_string
Converts CDF data types into python types
19,725
def _convert_nptype ( data_type , data ) : if data_type in ( 1 , 41 ) : return np . int8 ( data ) . tobytes ( ) elif data_type == 2 : return np . int16 ( data ) . tobytes ( ) elif data_type == 4 : return np . int32 ( data ) . tobytes ( ) elif ( data_type == 8 ) or ( data_type == 33 ) : return np . int64 ( data ) . tobytes ( ) elif data_type == 11 : return np . uint8 ( data ) . tobytes ( ) elif data_type == 12 : return np . uint16 ( data ) . tobytes ( ) elif data_type == 14 : return np . uint32 ( data ) . tobytes ( ) elif ( data_type == 21 ) or ( data_type == 44 ) : return np . float32 ( data ) . tobytes ( ) elif ( data_type == 22 ) or ( data_type == 45 ) or ( data_type == 31 ) : return np . float64 ( data ) . tobytes ( ) elif ( data_type == 32 ) : return np . complex128 ( data ) . tobytes ( ) else : return data
Converts data of CDF type data_type into a numpy array
19,726
def _default_pad ( self , data_type , numElems ) : order = self . _convert_option ( ) if ( data_type == 1 ) or ( data_type == 41 ) : pad_value = struct . pack ( order + 'b' , - 127 ) elif data_type == 2 : pad_value = struct . pack ( order + 'h' , - 32767 ) elif data_type == 4 : pad_value = struct . pack ( order + 'i' , - 2147483647 ) elif ( data_type == 8 ) or ( data_type == 33 ) : pad_value = struct . pack ( order + 'q' , - 9223372036854775807 ) elif data_type == 11 : pad_value = struct . pack ( order + 'B' , 254 ) elif data_type == 12 : pad_value = struct . pack ( order + 'H' , 65534 ) elif data_type == 14 : pad_value = struct . pack ( order + 'I' , 4294967294 ) elif ( data_type == 21 ) or ( data_type == 44 ) : pad_value = struct . pack ( order + 'f' , - 1.0E30 ) elif ( data_type == 22 ) or ( data_type == 45 ) : pad_value = struct . pack ( order + 'd' , - 1.0E30 ) elif ( data_type == 31 ) : pad_value = struct . pack ( order + 'd' , 0.0 ) elif ( data_type == 32 ) : pad_value = struct . pack ( order + '2d' , * [ 0.0 , 0.0 ] ) elif ( data_type == 51 ) or ( data_type == 52 ) : tmpPad = str ( ' ' * numElems ) . encode ( ) form = str ( numElems ) pad_value = struct . pack ( form + 'b' , * tmpPad ) return pad_value
Determines the default pad data for a data_type
19,727
def _num_values ( self , zVar , varNum ) : values = 1 if ( zVar == True ) : numDims = self . zvarsinfo [ varNum ] [ 2 ] dimSizes = self . zvarsinfo [ varNum ] [ 3 ] dimVary = self . zvarsinfo [ varNum ] [ 4 ] else : numDims = self . rvarsinfo [ varNum ] [ 2 ] dimSizes = self . rvarsinfo [ varNum ] [ 3 ] dimVary = self . rvarsinfo [ varNum ] [ 4 ] if ( numDims < 1 ) : return values else : for x in range ( 0 , numDims ) : if ( zVar == True ) : values = values * dimSizes [ x ] else : if ( dimVary [ x ] != 0 ) : values = values * dimSizes [ x ] return values
Determines the number of values in a record . Set zVar = True if this is a zvariable .
19,728
def _read_offset_value ( self , f , offset , size ) : f . seek ( offset , 0 ) if ( size == 8 ) : return int . from_bytes ( f . read ( 8 ) , 'big' , signed = True ) else : return int . from_bytes ( f . read ( 4 ) , 'big' , signed = True )
Reads an integer value from file f at location offset .
19,729
def _update_offset_value ( self , f , offset , size , value ) : f . seek ( offset , 0 ) if ( size == 8 ) : f . write ( struct . pack ( '>q' , value ) ) else : f . write ( struct . pack ( '>i' , value ) )
Writes value into location offset in file f .
19,730
def _update_aedr_link(self, f, attrNum, zVar, varNum, offset):
    """Insert the AEDR at `offset` into attribute attrNum's entry chain.

    The chain is kept ordered by entry (variable) number; the ADR's entry
    count and max-entry fields are updated accordingly.
    """
    adr_offset = self.attrsinfo[attrNum][2]
    # z-entries and g/r-entries use different ADR fields:
    #   z: head +48, count +56, max +60;  g/r: head +20, count +36, max +40.
    if zVar:
        f.seek(adr_offset + 56, 0)
        entries = int.from_bytes(f.read(4), 'big', signed=True)
        maxEntry = int.from_bytes(f.read(4), 'big', signed=True)
    else:
        f.seek(adr_offset + 36, 0)
        entries = int.from_bytes(f.read(4), 'big', signed=True)
        maxEntry = int.from_bytes(f.read(4), 'big', signed=True)
    if (entries == 0):
        # First entry: point the head at it and initialize count/max.
        if zVar:
            self._update_offset_value(f, adr_offset + 48, 8, offset)
            self._update_offset_value(f, adr_offset + 56, 4, 1)
            self._update_offset_value(f, adr_offset + 60, 4, varNum)
        else:
            self._update_offset_value(f, adr_offset + 20, 8, offset)
            self._update_offset_value(f, adr_offset + 36, 4, 1)
            self._update_offset_value(f, adr_offset + 40, 4, varNum)
    else:
        if zVar:
            f.seek(adr_offset + 48, 0)
            head = int.from_bytes(f.read(8), 'big', signed=True)
        else:
            f.seek(adr_offset + 20, 0)
            head = int.from_bytes(f.read(8), 'big', signed=True)
        aedr = head
        previous_aedr = head
        done = False
        # Walk the chain until an entry with a larger number is found, then
        # splice the new AEDR in before it (+12 is the next-AEDR pointer,
        # +28 the entry number).
        # NOTE(review): when the very first entry already has a larger
        # number, this links the new AEDR after the head rather than
        # updating the ADR's head pointer — confirm intended.
        for _ in range(0, entries):
            f.seek(aedr + 28, 0)
            num = int.from_bytes(f.read(4), 'big', signed=True)
            if (num > varNum):
                self._update_offset_value(f, previous_aedr + 12, 8, offset)
                self._update_offset_value(f, offset + 12, 8, aedr)
                done = True
                break
            else:
                f.seek(aedr + 12, 0)
                previous_aedr = aedr
                aedr = int.from_bytes(f.read(8), 'big', signed=True)
        if not done:
            # Largest entry number so far: append at the tail.
            self._update_offset_value(f, previous_aedr + 12, 8, offset)
        if zVar:
            self._update_offset_value(f, adr_offset + 56, 4, entries + 1)
            if (maxEntry < varNum):
                self._update_offset_value(f, adr_offset + 60, 4, varNum)
        else:
            self._update_offset_value(f, adr_offset + 36, 4, entries + 1)
            if (maxEntry < varNum):
                self._update_offset_value(f, adr_offset + 40, 4, varNum)
Updates variable aedr links
19,731
def _md5_compute ( self , f ) : md5 = hashlib . md5 ( ) block_size = 16384 f . seek ( 0 , 2 ) remaining = f . tell ( ) f . seek ( 0 ) while ( remaining > block_size ) : data = f . read ( block_size ) remaining = remaining - block_size md5 . update ( data ) if remaining > 0 : data = f . read ( remaining ) md5 . update ( data ) return md5 . digest ( )
Computes the checksum of the file
19,732
def cdf_info(self):
    """Return a dictionary of basic CDF file information."""
    rvars, zvars = self._get_varnames()
    info = {
        'CDF': self.file,
        'Version': self._version,
        'Encoding': self._encoding,
        'Majority': self._majority,
        'rVariables': rvars,
        'zVariables': zvars,
        'Attributes': self._get_attnames(),
        'Copyright': self._copyright,
        'Checksum': self._md5,
        'Num_rdim': self._num_rdim,
        'rDim_sizes': self._rdim_sizes,
        'Compressed': self._compressed,
    }
    # The leap-second field only exists in version 3+ files.
    if (self.cdfversion > 2):
        info['LeapSecondUpdated'] = self._leap_second_updated
    return info
Returns a dictionary that shows the basic CDF information .
19,733
def varinq(self, variable):
    """Return a dictionary of basic information about one variable.

    Raises KeyError when the variable does not exist.
    """
    vdr_info = self.varget(variable=variable, inq=True)
    if vdr_info is None:
        raise KeyError("Variable {} not found.".format(variable))
    var = {
        'Variable': vdr_info['name'],
        'Num': vdr_info['variable_number'],
        'Var_Type': CDF._variable_token(vdr_info['section_type']),
        'Data_Type': vdr_info['data_type'],
        'Data_Type_Description': CDF._datatype_token(vdr_info['data_type']),
        'Num_Elements': vdr_info['num_elements'],
        'Num_Dims': vdr_info['num_dims'],
        'Dim_Sizes': vdr_info['dim_sizes'],
        'Sparse': CDF._sparse_token(vdr_info['sparse']),
        'Last_Rec': vdr_info['max_records'],
        'Rec_Vary': vdr_info['record_vary'],
        'Dim_Vary': vdr_info['dim_vary'],
    }
    # Optional fields, present only for some variables.
    if 'pad' in vdr_info:
        var['Pad'] = vdr_info['pad']
    var['Compress'] = vdr_info['compression_level']
    if 'blocking_factor' in vdr_info:
        var['Block_Factor'] = vdr_info['blocking_factor']
    return var
Returns a dictionary that shows the basic variable information .
19,734
def attinq(self, attribute=None):
    """Get attribute information by name (str) or number (int).

    With no/invalid argument, prints all attributes and returns the listing.
    """
    position = self._first_adr
    if isinstance(attribute, str):
        # Linear scan of the ADR chain by case-insensitive name.
        for _ in range(0, self._num_att):
            name, next_adr = self._read_adr_fast(position)
            if name.strip().lower() == attribute.strip().lower():
                return self._read_adr(position)
            position = next_adr
        raise KeyError('No attribute {}'.format(attribute))
    elif isinstance(attribute, int):
        # NOTE(review): the upper bound checks _num_zvariable, not
        # _num_att — looks like a copy-paste slip; confirm against the
        # other numbered lookups before relying on it.
        if (attribute < 0 or attribute > self._num_zvariable):
            raise KeyError('No attribute {}'.format(attribute))
        # Walk `attribute` links forward from the first ADR.
        for _ in range(0, attribute):
            name, next_adr = self._read_adr_fast(position)
            position = next_adr
        return self._read_adr(position)
    else:
        print('Please set attribute keyword equal to the name or ',
              'number of an attribute')
        attrs = self._get_attnames()
        print(attrs)
        for x in range(0, self._num_att):
            name = list(attrs[x].keys())[0]
            print('NAME: ' + name + ', NUMBER: ' + str(x) + ', SCOPE: ' + attrs[x][name])
        return attrs
Get attribute information .
19,735
def epochrange(self, epoch=None, starttime=None, endtime=None):
    """Get the record range of `epoch` between starttime and endtime."""
    return self.varget(
        variable=epoch,
        starttime=starttime,
        endtime=endtime,
        record_range_only=True,
    )
Get epoch range .
19,736
def globalattsget(self, expand=False, to_np=True):
    """Get all global attributes as a dict keyed by attribute name.

    With expand=False values are the raw entries (a single entry is
    unwrapped); with expand truthy each entry is paired with its data-type
    token and keyed by entry number.
    """
    byte_loc = self._first_adr
    return_dict = {}
    for _ in range(0, self._num_att):
        adr_info = self._read_adr(byte_loc)
        # Scope 1 = global; skip variable-scoped attributes.
        if (adr_info['scope'] != 1):
            byte_loc = adr_info['next_adr_location']
            continue
        if (adr_info['num_gr_entry'] == 0):
            if (expand is not False):
                return_dict[adr_info['name']] = None
            byte_loc = adr_info['next_adr_location']
            continue
        if (expand is False):
            entries = []
        else:
            entries = {}
        aedr_byte_loc = adr_info['first_gr_entry']
        for _ in range(0, adr_info['num_gr_entry']):
            if (self.cdfversion == 3):
                aedr_info = self._read_aedr(aedr_byte_loc, to_np=to_np)
            else:
                aedr_info = self._read_aedr2(aedr_byte_loc, to_np=to_np)
            entryData = aedr_info['entry']
            if (expand is False):
                entries.append(entryData)
            else:
                # Expanded form: [value, data-type token], with epoch types
                # (31/32/33) rendered via CDFepoch.encode.
                entryWithType = []
                if (isinstance(entryData, str)):
                    entryWithType.append(entryData)
                else:
                    dataType = aedr_info['data_type']
                    if (len(entryData.tolist()) == 1):
                        if (dataType != 31 and dataType != 32 and dataType != 33):
                            entryWithType.append(entryData.tolist()[0])
                        else:
                            if (dataType != 33):
                                entryWithType.append(epoch.CDFepoch.encode(entryData.tolist()[0], iso_8601=False))
                            else:
                                entryWithType.append(epoch.CDFepoch.encode(entryData.tolist()[0]))
                    else:
                        if (dataType != 31 and dataType != 32 and dataType != 33):
                            entryWithType.append(entryData.tolist())
                        else:
                            if (dataType != 33):
                                entryWithType.append(epoch.CDFepoch.encode(entryData.tolist(), iso_8601=False))
                            else:
                                entryWithType.append(epoch.CDFepoch.encode(entryData.tolist()))
                entryWithType.append(CDF._datatype_token(aedr_info['data_type']))
                entries[aedr_info['entry_num']] = entryWithType
            aedr_byte_loc = aedr_info['next_aedr']
        if (len(entries) != 0):
            if (expand is False):
                # Unwrap single-entry attributes to the bare value.
                if (len(entries) == 1):
                    return_dict[adr_info['name']] = entries[0]
                else:
                    return_dict[adr_info['name']] = entries
            else:
                return_dict[adr_info['name']] = entries
        byte_loc = adr_info['next_adr_location']
    return return_dict
Gets all global attributes .
19,737
def varattsget ( self , variable = None , expand = False , to_np = True ) : if ( isinstance ( variable , int ) and self . _num_zvariable > 0 and self . _num_rvariable > 0 ) : print ( 'This CDF has both r and z variables. Use variable name' ) return None if isinstance ( variable , str ) : position = self . _first_zvariable num_variables = self . _num_zvariable for zVar in [ 1 , 0 ] : for _ in range ( 0 , num_variables ) : if ( self . cdfversion == 3 ) : name , vdr_next = self . _read_vdr_fast ( position ) else : name , vdr_next = self . _read_vdr_fast2 ( position ) if name . strip ( ) . lower ( ) == variable . strip ( ) . lower ( ) : if ( self . cdfversion == 3 ) : vdr_info = self . _read_vdr ( position ) else : vdr_info = self . _read_vdr2 ( position ) return self . _read_varatts ( vdr_info [ 'variable_number' ] , zVar , expand , to_np = to_np ) position = vdr_next position = self . _first_rvariable num_variables = self . _num_rvariable print ( 'No variable by this name:' , variable ) return None elif isinstance ( variable , int ) : if self . _num_zvariable > 0 : num_variable = self . _num_zvariable zVar = True else : num_variable = self . _num_rvariable zVar = False if ( variable < 0 or variable >= num_variable ) : print ( 'No variable by this number:' , variable ) return None return self . _read_varatts ( variable , zVar , expand , to_np = to_np ) else : print ( 'Please set variable keyword equal to the name or ' , 'number of an variable' ) rvars , zvars = self . _get_varnames ( ) print ( "RVARIABLES: " ) for x in rvars : print ( "NAME: " + str ( x ) ) print ( "ZVARIABLES: " ) for x in zvars : print ( "NAME: " + str ( x ) ) return
Gets all variable attributes .
19,738
def _uncompress_file ( self , path ) : with self . file . open ( 'rb' ) as f : if ( self . cdfversion == 3 ) : data_start , data_size , cType , _ = self . _read_ccr ( 8 ) else : data_start , data_size , cType , _ = self . _read_ccr2 ( 8 ) if cType != 5 : return f . seek ( data_start ) decompressed_data = gzip . decompress ( f . read ( data_size ) ) newpath = pathlib . Path ( tempfile . NamedTemporaryFile ( suffix = '.cdf' ) . name ) with newpath . open ( 'wb' ) as g : g . write ( bytearray . fromhex ( 'cdf30001' ) ) g . write ( bytearray . fromhex ( '0000ffff' ) ) g . write ( decompressed_data ) return newpath
Writes the current file into a file in the temporary directory .
19,739
def _convert_option ( self ) : if sys . byteorder == 'little' and self . _endian ( ) == 'big-endian' : order = '>' elif sys . byteorder == 'big' and self . _endian ( ) == 'little-endian' : order = '<' else : order = '=' return order
Determines how to convert CDF byte ordering to the system byte ordering .
19,740
def _num_values ( self , vdr_dict ) : values = 1 for x in range ( 0 , vdr_dict [ 'num_dims' ] ) : if ( vdr_dict [ 'dim_vary' ] [ x ] != 0 ) : values = values * vdr_dict [ 'dim_sizes' ] [ x ] return values
Returns the number of values in a record using a given VDR dictionary . Multiplies the dimension sizes of each dimension if it is varying .
19,741
def _convert_type ( self , data_type ) : if ( data_type == 1 ) or ( data_type == 41 ) : dt_string = 'b' elif data_type == 2 : dt_string = 'h' elif data_type == 4 : dt_string = 'i' elif ( data_type == 8 ) or ( data_type == 33 ) : dt_string = 'q' elif data_type == 11 : dt_string = 'B' elif data_type == 12 : dt_string = 'H' elif data_type == 14 : dt_string = 'I' elif ( data_type == 21 ) or ( data_type == 44 ) : dt_string = 'f' elif ( data_type == 22 ) or ( data_type == 45 ) or ( data_type == 31 ) : dt_string = 'd' elif ( data_type == 32 ) : dt_string = 'd' elif ( data_type == 51 ) or ( data_type == 52 ) : dt_string = 's' return dt_string
CDF data types to python struct data types
19,742
def _default_pad ( self , data_type , num_elms ) : order = self . _convert_option ( ) if ( data_type == 51 or data_type == 52 ) : return str ( ' ' * num_elms ) if ( data_type == 1 ) or ( data_type == 41 ) : pad_value = struct . pack ( order + 'b' , - 127 ) dt_string = 'i1' elif data_type == 2 : pad_value = struct . pack ( order + 'h' , - 32767 ) dt_string = 'i2' elif data_type == 4 : pad_value = struct . pack ( order + 'i' , - 2147483647 ) dt_string = 'i4' elif ( data_type == 8 ) or ( data_type == 33 ) : pad_value = struct . pack ( order + 'q' , - 9223372036854775807 ) dt_string = 'i8' elif data_type == 11 : pad_value = struct . pack ( order + 'B' , 254 ) dt_string = 'u1' elif data_type == 12 : pad_value = struct . pack ( order + 'H' , 65534 ) dt_string = 'u2' elif data_type == 14 : pad_value = struct . pack ( order + 'I' , 4294967294 ) dt_string = 'u4' elif ( data_type == 21 ) or ( data_type == 44 ) : pad_value = struct . pack ( order + 'f' , - 1.0E30 ) dt_string = 'f' elif ( data_type == 22 ) or ( data_type == 45 ) or ( data_type == 31 ) : pad_value = struct . pack ( order + 'd' , - 1.0E30 ) dt_string = 'd' else : pad_value = struct . pack ( order + '2d' , * [ - 1.0E30 , - 1.0E30 ] ) dt_string = 'c16' dt = np . dtype ( dt_string ) ret = np . frombuffer ( pad_value , dtype = dt , count = 1 ) ret . setflags ( 'WRITEABLE' ) return ret
The default pad values by CDF data type
19,743
def _convert_np_data ( data , data_type , num_elems ) : if ( data_type == 51 or data_type == 52 ) : if ( data == '' ) : return ( '\x00' * num_elems ) . encode ( ) else : return data . ljust ( num_elems , '\x00' ) . encode ( 'utf-8' ) elif ( data_type == 32 ) : data_stream = data . real . tobytes ( ) data_stream += data . imag . tobytes ( ) return data_stream else : return data . tobytes ( )
Converts a single np data into byte stream .
19,744
def _read_vvr_block ( self , offset ) : with self . file . open ( 'rb' ) as f : f . seek ( offset , 0 ) block_size = int . from_bytes ( f . read ( 8 ) , 'big' ) block = f . read ( block_size - 8 ) section_type = int . from_bytes ( block [ 0 : 4 ] , 'big' ) if section_type == 13 : compressed_size = int . from_bytes ( block [ 12 : 16 ] , 'big' ) return gzip . decompress ( block [ 16 : 16 + compressed_size ] ) elif section_type == 7 : return block [ 4 : ]
Returns a VVR or decompressed CVVR block
19,745
def _find_block ( starts , ends , cur_block , rec_num ) : total = len ( starts ) if ( cur_block == - 1 ) : cur_block = 0 for x in range ( cur_block , total ) : if ( starts [ x ] <= rec_num and ends [ x ] >= rec_num ) : return x , x if ( starts [ x ] > rec_num ) : break return - 1 , x - 1
Finds the block that rec_num is in, if it is found. Otherwise it returns -1. It also returns the block that has the physical data either at or preceding rec_num; this may be -1 if no preceding block exists.
19,746
def _convert_data ( self , data , data_type , num_recs , num_values , num_elems ) : if ( data_type == 51 or data_type == 52 ) : return [ data [ i : i + num_elems ] . decode ( 'utf-8' ) for i in range ( 0 , num_recs * num_values * num_elems , num_elems ) ] else : tofrom = self . _convert_option ( ) dt_string = self . _convert_type ( data_type ) form = tofrom + str ( num_recs * num_values * num_elems ) + dt_string value_len = CDF . _type_size ( data_type , num_elems ) return list ( struct . unpack_from ( form , data [ 0 : num_recs * num_values * value_len ] ) )
Converts data to the appropriate type using the struct . unpack method rather than using numpy .
19,747
def getVersion ( ) : print ( 'CDFread version:' , str ( CDF . version ) + '.' + str ( CDF . release ) + '.' + str ( CDF . increment ) ) print ( 'Date: 2018/01/11' )
Shows the code version and last modified date .
19,748
def get_access_token ( self ) : if ( self . token is None ) or ( datetime . utcnow ( ) > self . reuse_token_until ) : headers = { 'Ocp-Apim-Subscription-Key' : self . client_secret } response = requests . post ( self . base_url , headers = headers ) response . raise_for_status ( ) self . token = response . content self . reuse_token_until = datetime . utcnow ( ) + timedelta ( minutes = 5 ) return self . token . decode ( 'utf-8' )
Returns an access token for the specified subscription .
19,749
def main ( from_lang , to_lang , provider , secret_access_key , output_only , text ) : text = ' ' . join ( text ) kwargs = dict ( from_lang = from_lang , to_lang = to_lang , provider = provider ) if provider != DEFAULT_PROVIDER : kwargs [ 'secret_access_key' ] = secret_access_key translator = Translator ( ** kwargs ) translation = translator . translate ( text ) if sys . version_info . major == 2 : translation = translation . encode ( locale . getpreferredencoding ( ) ) if output_only : click . echo ( translation ) return translation click . echo ( '\nTranslation: {}' . format ( translation ) ) click . echo ( '-' * 25 ) click . echo ( 'Translated by: {}' . format ( translator . provider . name ) ) return translation
Python command line tool to make on line translations
19,750
def get_organization_id ( server_config , label ) : response = requests . get ( server_config [ 'url' ] + '/katello/api/v2/organizations' , data = json . dumps ( { 'search' : 'label={}' . format ( label ) } ) , auth = server_config [ 'auth' ] , headers = { 'content-type' : 'application/json' } , verify = server_config [ 'verify' ] , ) response . raise_for_status ( ) decoded = response . json ( ) if decoded [ 'subtotal' ] != 1 : print ( 'Expected to find one organization, but instead found {0}. Search ' 'results: {1}' . format ( decoded [ 'subtotal' ] , decoded [ 'results' ] ) ) exit ( 1 ) return decoded [ 'results' ] [ 0 ] [ 'id' ]
Return the ID of the organization with label label .
19,751
def _make_entity_from_id ( entity_cls , entity_obj_or_id , server_config ) : if isinstance ( entity_obj_or_id , entity_cls ) : return entity_obj_or_id return entity_cls ( server_config , id = entity_obj_or_id )
Given an entity object or an ID return an entity object .
19,752
def _get_entity_id ( field_name , attrs ) : field_name_id = field_name + '_id' if field_name in attrs : if attrs [ field_name ] is None : return None elif 'id' in attrs [ field_name ] : return attrs [ field_name ] [ 'id' ] if field_name_id in attrs : return attrs [ field_name_id ] else : raise MissingValueError ( 'Cannot find a value for the "{0}" field. Searched for keys named ' '{1}, but available keys are {2}.' . format ( field_name , ( field_name , field_name_id ) , attrs . keys ( ) ) )
Find the ID for a one to one relationship .
19,753
def _get_entity_ids ( field_name , attrs ) : field_name_ids = field_name + '_ids' plural_field_name = pluralize ( field_name ) if field_name_ids in attrs : return attrs [ field_name_ids ] elif field_name in attrs : return [ entity [ 'id' ] for entity in attrs [ field_name ] ] elif plural_field_name in attrs : return [ entity [ 'id' ] for entity in attrs [ plural_field_name ] ] else : raise MissingValueError ( 'Cannot find a value for the "{0}" field. Searched for keys named ' '{1}, but available keys are {2}.' . format ( field_name , ( field_name_ids , field_name , plural_field_name ) , attrs . keys ( ) ) )
Find the IDs for a one to many relationship .
19,754
def to_json_serializable ( obj ) : if isinstance ( obj , Entity ) : return obj . to_json_dict ( ) if isinstance ( obj , dict ) : return { k : to_json_serializable ( v ) for k , v in obj . items ( ) } elif isinstance ( obj , ( list , tuple ) ) : return [ to_json_serializable ( v ) for v in obj ] elif isinstance ( obj , datetime ) : return obj . strftime ( '%Y-%m-%d %H:%M:%S' ) elif isinstance ( obj , date ) : return obj . strftime ( '%Y-%m-%d' ) return obj
Transforms obj into a json serializable object .
19,755
def path ( self , which = None ) : base = urljoin ( self . _server_config . url + '/' , self . _meta [ 'api_path' ] ) if which == 'base' or ( which is None and not hasattr ( self , 'id' ) ) : return base elif ( which == 'self' or which is None ) and hasattr ( self , 'id' ) : return urljoin ( base + '/' , str ( self . id ) ) raise NoSuchPathError
Return the path to the current entity .
19,756
def get_values ( self ) : attrs = vars ( self ) . copy ( ) attrs . pop ( '_server_config' ) attrs . pop ( '_fields' ) attrs . pop ( '_meta' ) if '_path_fields' in attrs : attrs . pop ( '_path_fields' ) return attrs
Return a copy of field values on the current object .
19,757
def to_json_dict ( self , filter_fcn = None ) : fields , values = self . get_fields ( ) , self . get_values ( ) filtered_fields = fields . items ( ) if filter_fcn is not None : filtered_fields = ( tpl for tpl in filtered_fields if filter_fcn ( tpl [ 0 ] , tpl [ 1 ] ) ) json_dct = { } for field_name , field in filtered_fields : if field_name in values : value = values [ field_name ] if value is None : json_dct [ field_name ] = None elif isinstance ( field , OneToOneField ) : json_dct [ field_name ] = value . to_json_dict ( ) elif isinstance ( field , OneToManyField ) : json_dct [ field_name ] = [ entity . to_json_dict ( ) for entity in value ] else : json_dct [ field_name ] = to_json_serializable ( value ) return json_dct
Create a dict with Entity properties for JSON encoding. It can be overridden by subclasses when standard serialization doesn't work. By default it calls to_json_dict on each OneToOneField value and builds a list by calling the same method on every entity in a OneToManyField.
19,758
def compare ( self , other , filter_fcn = None ) : if not isinstance ( other , type ( self ) ) : return False if filter_fcn is None : def filter_unique ( _ , field ) : return not field . unique filter_fcn = filter_unique return self . to_json_dict ( filter_fcn ) == other . to_json_dict ( filter_fcn )
Returns True if properties can be compared in terms of eq . Entity s Fields can be filtered accordingly to filter_fcn . This callable receives field s name as first parameter and field itself as second parameter . It must return True if field s value should be included on comparison and False otherwise . If not provided field s marked as unique will not be compared by default . id and name are examples of unique fields commonly ignored . Check Entities fields for fields marked with unique = True
19,759
def create_missing ( self ) : for field_name , field in self . get_fields ( ) . items ( ) : if field . required and not hasattr ( self , field_name ) : if hasattr ( field , 'default' ) : value = field . default elif hasattr ( field , 'choices' ) : value = gen_choice ( field . choices ) elif isinstance ( field , OneToOneField ) : value = field . gen_value ( ) ( self . _server_config ) . create ( True ) elif isinstance ( field , OneToManyField ) : value = [ field . gen_value ( ) ( self . _server_config ) . create ( True ) ] else : value = field . gen_value ( ) setattr ( self , field_name , value )
Automagically populate all required instance attributes .
19,760
def update_payload ( self , fields = None ) : values = self . get_values ( ) if fields is not None : values = { field : values [ field ] for field in fields } return _payload ( self . get_fields ( ) , values )
Create a payload of values that can be sent to the server .
19,761
def search_payload ( self , fields = None , query = None ) : if fields is None : fields = set ( self . get_values ( ) . keys ( ) ) if query is None : query = { } payload = { } fields_dict = self . get_fields ( ) for field in fields : value = getattr ( self , field ) if isinstance ( fields_dict [ field ] , OneToOneField ) : payload [ field + '_id' ] = value . id elif isinstance ( fields_dict [ field ] , OneToManyField ) : payload [ field + '_ids' ] = [ entity . id for entity in value ] else : payload [ field ] = value payload . update ( query ) return payload
Create a search query .
19,762
def search_normalize ( self , results ) : fields = self . get_fields ( ) normalized = [ ] for result in results : attrs = { } for field_name , field in fields . items ( ) : if isinstance ( field , OneToOneField ) : try : attrs [ field_name ] = _get_entity_id ( field_name , result ) except MissingValueError : pass elif isinstance ( field , OneToManyField ) : try : attrs [ field_name ] = _get_entity_ids ( field_name , result ) except MissingValueError : pass else : try : attrs [ field_name ] = result [ field_name ] except KeyError : pass normalized . append ( attrs ) return normalized
Normalize search results so they can be used to create new entities .
19,763
def search_filter ( entities , filters ) : if len ( entities ) == 0 : return entities fields = entities [ 0 ] . get_fields ( ) if not set ( filters ) . issubset ( fields ) : raise NoSuchFieldError ( 'Valid filters are {0}, but received {1} instead.' . format ( fields . keys ( ) , filters . keys ( ) ) ) for field_name in filters : if isinstance ( fields [ field_name ] , ( OneToOneField , OneToManyField ) ) : raise NotImplementedError ( 'Search results cannot (yet?) be locally filtered by ' '`OneToOneField`s and `OneToManyField`s. {0} is a {1}.' . format ( field_name , type ( fields [ field_name ] ) . __name__ ) ) filtered = [ entity . read ( ) for entity in entities ] for field_name , field_value in filters . items ( ) : filtered = [ entity for entity in filtered if getattr ( entity , field_name ) == field_value ] return filtered
Read all entities and locally filter them .
19,764
def _get_config_file_path ( xdg_config_dir , xdg_config_file ) : for config_dir in BaseDirectory . load_config_paths ( xdg_config_dir ) : path = join ( config_dir , xdg_config_file ) if isfile ( path ) : return path raise ConfigFileError ( 'No configuration files could be located after searching for a file ' 'named "{0}" in the standard XDG configuration paths, such as ' '"~/.config/{1}/".' . format ( xdg_config_file , xdg_config_dir ) )
Search XDG_CONFIG_DIRS for a config file and return the first found .
19,765
def delete ( cls , label = 'default' , path = None ) : if path is None : path = _get_config_file_path ( cls . _xdg_config_dir , cls . _xdg_config_file ) cls . _file_lock . acquire ( ) try : with open ( path ) as config_file : config = json . load ( config_file ) del config [ label ] with open ( path , 'w' ) as config_file : json . dump ( config , config_file ) finally : cls . _file_lock . release ( )
Delete a server configuration .
19,766
def get_labels ( cls , path = None ) : if path is None : path = _get_config_file_path ( cls . _xdg_config_dir , cls . _xdg_config_file ) with open ( path ) as config_file : return tuple ( json . load ( config_file ) . keys ( ) )
Get all server configuration labels .
19,767
def save ( self , label = 'default' , path = None ) : cfg = vars ( self ) if 'version' in cfg : cfg [ 'version' ] = str ( cfg [ 'version' ] ) if path is None : path = join ( BaseDirectory . save_config_path ( self . _xdg_config_dir ) , self . _xdg_config_file ) self . _file_lock . acquire ( ) try : try : with open ( path ) as config_file : config = json . load ( config_file ) except IOError : config = { } config [ label ] = cfg with open ( path , 'w' ) as config_file : json . dump ( config , config_file ) finally : self . _file_lock . release ( )
Save the current connection configuration to a file .
19,768
def _log_request ( method , url , kwargs , data = None , params = None ) : logger . debug ( 'Making HTTP %s request to %s with %s, %s and %s.' , method , url , 'options {0}' . format ( kwargs ) if len ( kwargs ) > 0 else 'no options' , 'params {0}' . format ( params ) if params else 'no params' , 'data {0}' . format ( data ) if data is not None else 'no data' , )
Log out information about the arguments given .
19,769
def _log_response ( response ) : message = u'Received HTTP {0} response: {1}' . format ( response . status_code , response . text ) if response . status_code >= 400 : logger . warning ( message ) else : logger . debug ( message )
Log out information about a Request object .
19,770
def request ( method , url , ** kwargs ) : _set_content_type ( kwargs ) if _content_type_is_json ( kwargs ) and kwargs . get ( 'data' ) is not None : kwargs [ 'data' ] = dumps ( kwargs [ 'data' ] ) _log_request ( method , url , kwargs ) response = requests . request ( method , url , ** kwargs ) _log_response ( response ) return response
A wrapper for requests . request .
19,771
def head ( url , ** kwargs ) : _set_content_type ( kwargs ) if _content_type_is_json ( kwargs ) and kwargs . get ( 'data' ) is not None : kwargs [ 'data' ] = dumps ( kwargs [ 'data' ] ) _log_request ( 'HEAD' , url , kwargs ) response = requests . head ( url , ** kwargs ) _log_response ( response ) return response
A wrapper for requests . head .
19,772
def post ( url , data = None , json = None , ** kwargs ) : _set_content_type ( kwargs ) if _content_type_is_json ( kwargs ) and data is not None : data = dumps ( data ) _log_request ( 'POST' , url , kwargs , data ) response = requests . post ( url , data , json , ** kwargs ) _log_response ( response ) return response
A wrapper for requests . post .
19,773
def put ( url , data = None , ** kwargs ) : _set_content_type ( kwargs ) if _content_type_is_json ( kwargs ) and data is not None : data = dumps ( data ) _log_request ( 'PUT' , url , kwargs , data ) response = requests . put ( url , data , ** kwargs ) _log_response ( response ) return response
A wrapper for requests . put . Sends a PUT request .
19,774
def _handle_response ( response , server_config , synchronous = False , timeout = None ) : response . raise_for_status ( ) if synchronous is True and response . status_code == ACCEPTED : return ForemanTask ( server_config , id = response . json ( ) [ 'id' ] ) . poll ( timeout = timeout ) if response . status_code == NO_CONTENT : return if 'application/json' in response . headers . get ( 'content-type' , '' ) . lower ( ) : return response . json ( ) elif isinstance ( response . content , bytes ) : return response . content . decode ( 'utf-8' ) else : return response . content
Handle a server s response in a typical fashion .
19,775
def create_missing ( self ) : super ( AuthSourceLDAP , self ) . create_missing ( ) if getattr ( self , 'onthefly_register' , False ) is True : for field in ( 'account_password' , 'attr_firstname' , 'attr_lastname' , 'attr_login' , 'attr_mail' ) : if not hasattr ( self , field ) : setattr ( self , field , self . _fields [ field ] . gen_value ( ) )
Possibly set several extra instance attributes .
19,776
def read ( self , entity = None , attrs = None , ignore = None , params = None ) : if attrs is None : attrs = self . update_json ( [ ] ) if ignore is None : ignore = set ( ) ignore . add ( 'account_password' ) return super ( AuthSourceLDAP , self ) . read ( entity , attrs , ignore , params )
Do not read the account_password attribute . Work around a bug .
19,777
def read ( self , entity = None , attrs = None , ignore = None , params = None ) : if attrs is None : attrs = self . read_json ( ) attrs [ 'search_' ] = attrs . pop ( 'search' ) attr = 'max_count' if ignore is None : ignore = set ( ) if attr not in ignore : attrs [ attr ] = DiscoveryRule ( self . _server_config , id = attrs [ 'id' ] , ) . update_json ( [ ] ) [ attr ] return super ( DiscoveryRule , self ) . read ( entity , attrs , ignore , params )
Work around a bug . Rename search to search_ .
19,778
def read ( self , entity = None , attrs = None , ignore = None , params = None ) : if entity is None : entity = type ( self ) ( self . _server_config , usergroup = self . usergroup , ) if ignore is None : ignore = set ( ) ignore . add ( 'usergroup' ) if attrs is None : attrs = self . read_json ( ) attrs [ 'auth_source' ] = attrs . pop ( 'auth_source_ldap' ) return super ( ExternalUserGroup , self ) . read ( entity , attrs , ignore , params )
Ignore usergroup from read and alter auth_source_ldap with auth_source
19,779
def run ( self , synchronous = True , ** kwargs ) : kwargs = kwargs . copy ( ) kwargs . update ( self . _server_config . get_client_kwargs ( ) ) if 'data' in kwargs : if 'job_template_id' not in kwargs [ 'data' ] and 'feature' not in kwargs [ 'data' ] : raise KeyError ( 'Provide either job_template_id or feature value' ) if 'search_query' not in kwargs [ 'data' ] and 'bookmark_id' not in kwargs [ 'data' ] : raise KeyError ( 'Provide either search_query or bookmark_id value' ) for param_name in [ 'targeting_type' , 'inputs' ] : if param_name not in kwargs [ 'data' ] : raise KeyError ( 'Provide {} value' . format ( param_name ) ) kwargs [ 'data' ] = { u'job_invocation' : kwargs [ 'data' ] } response = client . post ( self . path ( 'base' ) , ** kwargs ) response . raise_for_status ( ) if synchronous is True : return ForemanTask ( server_config = self . _server_config , id = response . json ( ) [ 'task' ] [ 'id' ] ) . poll ( ) return response . json ( )
Helper to run existing job template
19,780
def read ( self , entity = None , attrs = None , ignore = None , params = None ) : if attrs is None : attrs = self . read_json ( params = params ) if ignore is None : ignore = set ( ) ignore . add ( 'template_inputs' ) entity = super ( JobTemplate , self ) . read ( entity = entity , attrs = attrs , ignore = ignore , params = params ) referenced_entities = [ TemplateInput ( entity . _server_config , id = entity_id , template = JobTemplate ( entity . _server_config , id = entity . id ) ) for entity_id in _get_entity_ids ( 'template_inputs' , attrs ) ] setattr ( entity , 'template_inputs' , referenced_entities ) return entity
Ignore the template inputs when initially reading the job template . Look up each TemplateInput entity separately and afterwords add them to the JobTemplate entity .
19,781
def upload ( self , filepath , filename = None ) : if not filename : filename = os . path . basename ( filepath ) content_upload = self . create ( ) try : offset = 0 content_chunk_size = 2 * 1024 * 1024 with open ( filepath , 'rb' ) as contentfile : chunk = contentfile . read ( content_chunk_size ) while len ( chunk ) > 0 : data = { 'offset' : offset , 'content' : chunk } content_upload . update ( data ) offset += len ( chunk ) chunk = contentfile . read ( content_chunk_size ) size = 0 checksum = hashlib . sha256 ( ) with open ( filepath , 'rb' ) as contentfile : contents = contentfile . read ( ) size = len ( contents ) checksum . update ( contents ) uploads = [ { 'id' : content_upload . upload_id , 'name' : filename , 'size' : size , 'checksum' : checksum . hexdigest ( ) } ] json = self . repository . import_uploads ( uploads ) finally : content_upload . delete ( ) return json
Upload content .
19,782
def read ( self , entity = None , attrs = None , ignore = None , params = None ) : if entity is None : entity = type ( self ) ( self . _server_config , content_view_filter = self . content_view_filter , ) if attrs is None : attrs = self . read_json ( ) if ignore is None : ignore = set ( ) ignore . add ( 'content_view_filter' ) ignore . update ( [ field_name for field_name in entity . get_fields ( ) . keys ( ) if field_name not in attrs ] ) return super ( ContentViewFilterRule , self ) . read ( entity , attrs , ignore , params )
Do not read certain fields .
19,783
def publish ( self , synchronous = True , ** kwargs ) : kwargs = kwargs . copy ( ) if 'data' in kwargs and 'id' not in kwargs [ 'data' ] : kwargs [ 'data' ] [ 'id' ] = self . id kwargs . update ( self . _server_config . get_client_kwargs ( ) ) response = client . post ( self . path ( 'publish' ) , ** kwargs ) return _handle_response ( response , self . _server_config , synchronous )
Helper for publishing an existing content view .
19,784
def delete_from_environment ( self , environment , synchronous = True ) : if isinstance ( environment , Environment ) : environment_id = environment . id else : environment_id = environment response = client . delete ( '{0}/environments/{1}' . format ( self . path ( ) , environment_id ) , ** self . _server_config . get_client_kwargs ( ) ) return _handle_response ( response , self . _server_config , synchronous )
Delete this content view version from an environment .
19,785
def add ( self , synchronous = True , ** kwargs ) : kwargs = kwargs . copy ( ) if 'data' not in kwargs : kwargs [ 'data' ] = dict ( ) if 'component_ids' not in kwargs [ 'data' ] : kwargs [ 'data' ] [ 'components' ] = [ _payload ( self . get_fields ( ) , self . get_values ( ) ) ] kwargs . update ( self . _server_config . get_client_kwargs ( ) ) response = client . put ( self . path ( 'add' ) , ** kwargs ) return _handle_response ( response , self . _server_config , synchronous )
Add provided Content View Component .
19,786
def read ( self , entity = None , attrs = None , ignore = None , params = None ) : if attrs is None : attrs = self . read_json ( ) attrs [ 'override' ] = attrs . pop ( 'override?' ) attrs [ 'unlimited' ] = attrs . pop ( 'unlimited?' ) return super ( Filter , self ) . read ( entity , attrs , ignore , params )
Deal with different named data returned from the server
19,787
def poll ( self , poll_rate = None , timeout = None ) : return _poll_task ( self . id , self . _server_config , poll_rate , timeout )
Return the status of a task or timeout .
19,788
def read ( self , entity = None , attrs = None , ignore = None , params = None ) : if ignore is None : ignore = set ( ) ignore . add ( 'root_pass' ) ignore . add ( 'kickstart_repository' ) if attrs is None : attrs = self . read_json ( ) attrs [ 'parent_id' ] = attrs . pop ( 'ancestry' ) version = _get_version ( self . _server_config ) if version >= Version ( '6.1' ) and version < Version ( '6.2' ) : attrs2 = HostGroup ( self . _server_config , id = attrs [ 'id' ] ) . update_json ( [ ] ) for attr in ( 'content_source_id' , 'content_view_id' , 'lifecycle_environment_id' ) : attrs [ attr ] = attrs2 . get ( attr ) return super ( HostGroup , self ) . read ( entity , attrs , ignore , params )
Deal with several bugs .
19,789
def delete_puppetclass ( self , synchronous = True , ** kwargs ) : kwargs = kwargs . copy ( ) kwargs . update ( self . _server_config . get_client_kwargs ( ) ) path = "{0}/{1}" . format ( self . path ( 'puppetclass_ids' ) , kwargs [ 'data' ] . pop ( 'puppetclass_id' ) ) return _handle_response ( client . delete ( path , ** kwargs ) , self . _server_config , synchronous )
Remove a Puppet class from host group
19,790
def owner_type ( self , value ) : self . _owner_type = value if value == 'User' : self . _fields [ 'owner' ] = entity_fields . OneToOneField ( User ) if hasattr ( self , 'owner' ) : self . owner = User ( self . _server_config , id = self . owner . id if isinstance ( self . owner , Entity ) else self . owner ) elif value == 'Usergroup' : self . _fields [ 'owner' ] = entity_fields . OneToOneField ( UserGroup ) if hasattr ( self , 'owner' ) : self . owner = UserGroup ( self . _server_config , id = self . owner . id if isinstance ( self . owner , Entity ) else self . owner )
Set owner_type to the given value .
19,791
def get_values ( self ) : attrs = super ( Host , self ) . get_values ( ) if '_owner_type' in attrs and attrs [ '_owner_type' ] is not None : attrs [ 'owner_type' ] = attrs . pop ( '_owner_type' ) else : attrs . pop ( '_owner_type' ) return attrs
Correctly set the owner_type attribute .
19,792
def errata_applicability ( self , synchronous = True , ** kwargs ) : kwargs = kwargs . copy ( ) kwargs . update ( self . _server_config . get_client_kwargs ( ) ) response = client . put ( self . path ( 'errata/applicability' ) , ** kwargs ) return _handle_response ( response , self . _server_config , synchronous )
Force regenerate errata applicability
19,793
def read(self, entity=None, attrs=None, ignore=None, params=None):
    """Deal with oddly named and structured data returned by the server.

    Several attributes are renamed or only conditionally present, and
    ``image``/``interface`` must be rebuilt as entity objects by hand
    after the generic read.
    """
    if attrs is None:
        attrs = self.read_json()
    if ignore is None:
        ignore = set()
    if 'parameters' in attrs:
        # Server returns 'parameters'; the entity field is called
        # 'host_parameters_attributes'.
        attrs['host_parameters_attributes'] = attrs.pop('parameters')
    else:
        ignore.add('host_parameters_attributes')
    if 'content_facet_attributes' not in attrs:
        ignore.add('content_facet_attributes')
    ignore.add('compute_attributes')
    ignore.add('interfaces_attributes')
    ignore.add('root_pass')
    # 'image' and 'interface' are reconstructed manually below.
    ignore.add('image')
    ignore.add('interface')
    ignore.add('build_status_label')
    result = super(Host, self).read(entity, attrs, ignore, params)
    if attrs.get('image_id'):
        # Rebuild the image with its compute resource so its path
        # can be generated correctly.
        result.image = Image(
            server_config=self._server_config,
            id=attrs.get('image_id'),
            compute_resource=attrs.get('compute_resource_id'),
        )
    else:
        result.image = None
    if 'interfaces' in attrs and attrs['interfaces']:
        # Each interface needs a reference back to this host.
        result.interface = [
            Interface(
                self._server_config,
                host=result.id,
                id=interface['id'],
            )
            for interface in attrs['interfaces']
        ]
    if 'build_status_label' in attrs:
        result.build_status_label = attrs['build_status_label']
    return result
Deal with oddly named and structured data returned by the server .
19,794
def create_payload(self):
    """Rename the payload key ``prior_id`` to ``prior``.

    Server versions before 6.1 expect the field to be named ``prior``
    rather than ``prior_id``.
    """
    payload = super(LifecycleEnvironment, self).create_payload()
    pre_61 = _get_version(self._server_config) < Version('6.1')
    if pre_61 and 'prior_id' in payload:
        payload['prior'] = payload.pop('prior_id')
    return payload
Rename the payload key prior_id to prior .
19,795
def create_missing(self):
    """Automatically populate additional instance attributes.

    Every lifecycle environment other than "Library" must point at a
    prior environment; default ``prior`` to the organization's
    "Library" environment when it is unset.

    :raises APIResponseError: if exactly one "Library" environment
        cannot be found for the organization.
    """
    super(LifecycleEnvironment, self).create_missing()
    needs_prior = self.name != 'Library' and not hasattr(self, 'prior')
    if needs_prior:
        matches = self.search({'organization'}, {u'name': u'Library'})
        if len(matches) == 1:
            self.prior = matches[0]
        else:
            raise APIResponseError(
                u'Could not find the "Library" lifecycle environment for '
                u'organization {0}. Search results: {1}'.format(
                    self.organization, matches))
Automatically populate additional instance attributes .
19,796
def create_payload(self):
    """Wrap submitted data within an extra dict and rename ``path_``.

    The entity field is called ``path_`` to avoid clashing with the
    ``path`` method, but the server expects ``path``, nested under a
    top-level ``medium`` key.
    """
    renamed = {
        'path' if key == 'path_' else key: value
        for key, value in super(Media, self).create_payload().items()
    }
    return {u'medium': renamed}
Wrap submitted data within an extra dict and rename path_ .
19,797
def create_payload(self):
    """Remove ``smart_class_parameter_id`` / ``smart_variable_id``.

    When the corresponding entity attribute is set, its ID travels in
    the request path rather than the body, so strip it from the
    payload.

    :return: the payload dict.
    """
    payload = super(OverrideValue, self).create_payload()
    for field in ('smart_class_parameter', 'smart_variable'):
        if hasattr(self, field):
            # pop() tolerates an absent key, unlike the bare `del`
            # this replaces, which raised KeyError when the attribute
            # was set but the key was not in the payload.
            payload.pop('{0}_id'.format(field), None)
    return payload
Remove smart_class_parameter_id or smart_variable_id
19,798
def read(self, entity=None, attrs=None, ignore=None, params=None):
    """Read this parameter, skipping path-only fields.

    The ``_path_fields`` exist only so a proper URL can be built; the
    server never returns them, so they are added to ``ignore``. A
    default ``entity`` is constructed with the parent reference
    pre-set for the same reason.
    """
    if entity is None:
        parent_ref = {self._parent_type: self._parent_id}
        entity = type(self)(self._server_config, **parent_ref)
    ignore = set() if ignore is None else ignore
    ignore.update(self._path_fields)
    return super(Parameter, self).read(entity, attrs, ignore, params)
Ignore path related fields as they re never returned by the server and are only added to entity to be able to use proper path .
19,799
def search(self, fields=None, query=None, filters=None):
    """Search for entities, restoring the ``sync_plan`` attribute by hand.

    The raw result may reference a sync plan by ID only; that key is
    removed before instantiating each entity (the constructor cannot
    accept it), then a proper ``SyncPlan`` — scoped to the result's
    organization — is attached afterwards.
    """
    raw_results = self.search_normalize(
        self.search_json(fields, query)['results'])
    entities = []
    for raw in raw_results:
        sync_plan = raw.get('sync_plan')
        if sync_plan is not None:
            del raw['sync_plan']
        entity = type(self)(self._server_config, **raw)
        # Truthiness check (not just `is not None`) matches the key
        # removal above: a falsy ID is dropped without building a plan.
        if sync_plan:
            entity.sync_plan = SyncPlan(
                server_config=self._server_config,
                id=sync_plan,
                organization=Organization(
                    server_config=self._server_config,
                    id=raw.get('organization'),
                ),
            )
        entities.append(entity)
    if filters is not None:
        entities = self.search_filter(entities, filters)
    return entities
Search for entities, manually restoring the missing sync_plan attribute on each result.