idx
int64
0
63k
question
stringlengths
61
4.03k
target
stringlengths
6
1.23k
46,100
def query(self, query, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Run the given pundat query against an archiver and return the results.

    Parameters
    ----------
    query: str
        The pundat query string to execute.
    archiver: str, optional
        Archiver URI to query; defaults to the first configured archiver.
    timeout: number, optional
        Seconds to wait for a response before raising TimeoutException.

    Returns
    -------
    dict with any of the keys "error", "metadata", "timeseries".

    Raises
    ------
    TimeoutException if no response part arrived within the timeout.
    """
    if archiver == "":
        archiver = self.archivers[0]
    # random nonce ties the asynchronous response back to this request
    nonce = random.randint(0, 2 ** 32)
    ev = threading.Event()
    response = {}

    def _handleresult(msg):
        # accumulate whichever parts (error/metadata/timeseries) match
        # our nonce, then wake the waiting caller
        got_response = False
        error = getError(nonce, msg)
        if error is not None:
            got_response = True
            response["error"] = error
        metadata = getMetadata(nonce, msg)
        if metadata is not None:
            got_response = True
            response["metadata"] = metadata
        timeseries = getTimeseries(nonce, msg)
        if timeseries is not None:
            got_response = True
            response["timeseries"] = timeseries
        if got_response:
            ev.set()

    # NOTE(review): vk[:-1] presumably strips a trailing separator from the
    # verifying key — confirm against the bw2 client conventions
    vk = self.vk[:-1]
    self.c.subscribe(
        "{0}/s.giles/_/i.archiver/signal/{1},queries".format(archiver, vk),
        _handleresult)
    q_struct = msgpack.packb({"Query": query, "Nonce": nonce})
    po = PayloadObject((2, 0, 8, 1), None, q_struct)
    self.c.publish(
        "{0}/s.giles/_/i.archiver/slot/query".format(archiver),
        payload_objects=(po,))
    ev.wait(timeout)
    if len(response) == 0:
        raise TimeoutException("Query of {0} timed out".format(query))
    return response
Runs the given pundat query and returns the results as a Python object .
46,101
def uuids(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Return the UUIDs of all streams matching the given where-clause.
    """
    resp = self.query(
        "select uuid where {0}".format(where), archiver, timeout)
    return [rec["uuid"] for rec in resp["metadata"]]
Using the given where - clause finds all UUIDs that match
46,102
def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Return the metadata tags for all streams matching the WHERE clause.
    """
    result = self.query(
        "select * where {0}".format(where), archiver, timeout)
    return result.get('metadata', {})
Retrieves tags for all streams matching the given WHERE clause
46,103
def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Return the metadata tags for all streams with the provided UUIDs.

    A single UUID may be passed instead of a list.
    """
    if not isinstance(uuids, list):
        uuids = [uuids]
    where = " or ".join('uuid = "{0}"'.format(u) for u in uuids)
    result = self.query(
        "select * where {0}".format(where), archiver, timeout)
    return result.get('metadata', {})
Retrieves tags for all streams with the provided UUIDs
46,104
def data(self, where, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Retrieve all RAW data between the two timestamps for streams
    matching the WHERE clause.
    """
    q = "select data in ({0}, {1}) where {2}".format(start, end, where)
    return self.query(q, archiver, timeout).get('timeseries', {})
With the given WHERE clause retrieves all RAW data between the 2 given timestamps
46,105
def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Retrieve all RAW data between the two timestamps for the streams
    with the given UUIDs (a single UUID is also accepted).
    """
    if not isinstance(uuids, list):
        uuids = [uuids]
    where = " or ".join('uuid = "{0}"'.format(u) for u in uuids)
    q = "select data in ({0}, {1}) where {2}".format(start, end, where)
    return self.query(q, archiver, timeout).get('timeseries', {})
With the given list of UUIDs retrieves all RAW data between the 2 given timestamps
46,106
def stats(self, where, start, end, pw, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Retrieve statistical data at the given pointwidth between the two
    timestamps for streams matching the WHERE clause.
    """
    q = "select statistical({3}) data in ({0}, {1}) where {2}".format(
        start, end, where, pw)
    return self.query(q, archiver, timeout).get('timeseries', {})
With the given WHERE clause retrieves all statistical data between the 2 given timestamps using the given pointwidth
46,107
def window(self, where, start, end, width, archiver="", timeout=DEFAULT_TIMEOUT):
    """
    Retrieve windowed statistical data with the given window size
    between the two timestamps for streams matching the WHERE clause.
    """
    q = "select window({3}) data in ({0}, {1}) where {2}".format(
        start, end, where, width)
    return self.query(q, archiver, timeout).get('timeseries', {})
With the given WHERE clause retrieves all statistical data between the 2 given timestamps using the given window size
46,108
def brightness(self):
    """
    Return the current brightness (0-255).

    In warm-white ("ww") mode the value comes straight from the raw
    device state; otherwise it is the HSV "value" of the current color.
    """
    if self.mode != "ww":
        _, _, value = colorsys.rgb_to_hsv(*self.getRgb())
        return value
    return int(self.raw_state[9])
Return current brightness 0 - 255 .
46,109
def decode(var, encoding):
    """
    Return var as unicode text, decoding only when necessary.

    On python3 everything is simply stringified; on python2 byte
    strings are decoded with the given encoding when one is supplied.
    """
    if not PY2:
        return str(var)
    if isinstance(var, unicode):
        return var
    if isinstance(var, str) and encoding:
        return var.decode(encoding)
    return unicode(var)
Decode the value to unicode if it is not already.
46,110
def cfitsio_version(asfloat=False):
    """
    Return the cfitsio version, as a 3-decimal string by default or as
    a float when asfloat is True.
    """
    ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
    return float(ver) if asfloat else ver
Return the cfitsio version as a string .
46,111
def is_little_endian(array):
    """
    Return True if the array's data is little endian, False otherwise.

    Native-order ('=') data is little endian exactly when the machine is.
    """
    byteorder = array.dtype.base.byteorder
    if byteorder == '<':
        return True
    return byteorder == '=' and bool(numpy.little_endian)
Return True if array is little endian False otherwise .
46,112
def array_to_native(array, inplace=False):
    """
    Return the array converted to native byte order, byte-swapping only
    when the data order differs from the machine order.
    """
    machine_little = bool(numpy.little_endian)

    def _field_little(a):
        # same test as is_little_endian, inlined
        bo = a.dtype.base.byteorder
        return bo == '<' or (machine_little and bo == '=')

    if array.dtype.names is None:
        # single-byte types carry no byte order at all
        if array.dtype.base.byteorder == '|':
            return array
        data_little = _field_little(array)
    else:
        # a record array counts as little endian if any field is
        data_little = any(
            _field_little(array[f]) for f in array.dtype.names)

    if machine_little != data_little:
        return array.byteswap(inplace)
    return array
Convert an array to the native byte order .
46,113
def mks(val):
    """
    Coerce val to a str, decoding bytes as utf-8 on python3.
    """
    if sys.version_info > (3, 0, 0) and isinstance(val, bytes):
        return str(val, 'utf-8')
    return str(val)
Ensure the value is a string, accounting for the differences between Python 3 and Python 2.
46,114
def _get_col_dimstr ( tdim , is_string = False ) : dimstr = '' if tdim is None : dimstr = 'array[bad TDIM]' else : if is_string : if len ( tdim ) > 1 : dimstr = [ str ( d ) for d in tdim [ 1 : ] ] else : if len ( tdim ) > 1 or tdim [ 0 ] > 1 : dimstr = [ str ( d ) for d in tdim ] if dimstr != '' : dimstr = ',' . join ( dimstr ) dimstr = 'array[%s]' % dimstr return dimstr
not for variable length
46,115
def get_colname(self, colnum):
    """
    Get the name associated with the given column number.

    Parameters
    ----------
    colnum: int
        Zero-offset column number.

    Returns
    -------
    The column name.

    Raises
    ------
    ValueError if colnum is out of range.
    """
    if colnum < 0 or colnum > (len(self._colnames) - 1):
        # fixed: the original format string had a single %s placeholder
        # but a two-element argument tuple, which raised TypeError
        # instead of the intended ValueError
        raise ValueError(
            "colnum out of range [%d,%d-1]" % (0, len(self._colnames)))
    return self._colnames[colnum]
Get the name associated with the given column number
46,116
def write_column(self, column, data, **keys):
    """
    Write data to a column in this HDU.

    Parameters
    ----------
    column: str or int
        Column name or number.
    data: numpy array
        Data to write.
    firstrow: int, keyword-only, optional
        Zero-offset first row to write at; defaults to 0.
    """
    firstrow = keys.get('firstrow', 0)
    colnum = self._extract_colnum(column)
    # the C layer needs contiguous, native-byte-order data
    if not data.flags['C_CONTIGUOUS']:
        data_send = numpy.ascontiguousarray(data)
        array_to_native(data_send, inplace=True)
    else:
        data_send = array_to_native(data, inplace=False)
    if IS_PY3 and data_send.dtype.char == 'U':
        # unicode arrays are written as bytes on python3
        data_send = data_send.astype('S', copy=False)
    self._verify_column_data(colnum, data_send)
    # C API is 1-offset for both extension, column, and row
    self._FITS.write_column(
        self._ext + 1, colnum + 1, data_send,
        firstrow=firstrow + 1, write_bitcols=self.write_bitcols)
    del data_send
    self._update_info()
Write data to a column in this HDU
46,117
def _verify_column_data(self, colnum, data):
    """
    Verify the input data matches the column's type and element shape,
    raising ValueError on mismatch.
    """
    this_dt = data.dtype.descr[0]
    # per-row element shape of the input (empty tuple means scalar rows)
    if len(data.shape) > 2:
        this_shape = data.shape[1:]
    elif len(data.shape) == 2 and data.shape[1] > 1:
        this_shape = data.shape[1:]
    else:
        this_shape = ()
    # numpy type string without the byte-order character
    this_npy_type = this_dt[1][1:]
    npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
    info = self._info['colinfo'][colnum]
    if npy_type[0] in ['>', '<', '|']:
        npy_type = npy_type[1:]
    col_name = info['name']
    col_tdim = info['tdim']
    # expected element shape from the column's TDIM
    col_shape = _tdim2shape(
        col_tdim, col_name, is_string=(npy_type[0] == 'S'))
    if col_shape is None:
        if this_shape == ():
            this_shape = None
    if col_shape is not None and not isinstance(col_shape, tuple):
        col_shape = (col_shape,)
    # bools are stored as signed bytes
    if npy_type == 'i1' and this_npy_type == 'b1':
        this_npy_type = 'i1'
    if isinstance(self, AsciiTableHDU):
        # ascii tables promote smaller int/float inputs
        if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
            this_npy_type = 'i8'
        elif npy_type == 'f8' and this_npy_type == 'f4':
            this_npy_type = 'f8'
    if this_npy_type != npy_type:
        raise ValueError(
            "bad input data for column '%s': "
            "expected '%s', got '%s'" % (
                col_name, npy_type, this_npy_type))
    if this_shape != col_shape:
        raise ValueError(
            "bad input shape for column '%s': "
            "expected '%s', got '%s'" % (
                col_name, col_shape, this_shape))
verify the input data is of the correct type and shape
46,118
def write_var_column(self, column, data, firstrow=0, **keys):
    """
    Write data to a variable-length column in this HDU.

    Parameters
    ----------
    column: str or int
        Column name or number.
    data: numpy array
        Must be an object-dtype array; each element is one row's data.
    firstrow: int, optional
        Zero-offset first row to write at.
    """
    if not is_object(data):
        raise ValueError("Only object fields can be written to "
                         "variable-length arrays")
    colnum = self._extract_colnum(column)
    # C API is 1-offset
    self._FITS.write_var_column(self._ext + 1, colnum + 1, data,
                                firstrow=firstrow + 1)
    self._update_info()
Write data to a variable - length column in this HDU
46,119
def insert_column(self, name, data, colnum=None):
    """
    Insert a new column and write the data into it.

    Parameters
    ----------
    name: str
        New column name; must not already exist.
    data: numpy array
        Data for the new column; must describe a single column.
    colnum: int, optional
        Zero-offset position to insert at; appended at the end if None.
    """
    if name in self._colnames:
        raise ValueError("column '%s' already exists" % name)
    if IS_PY3 and data.dtype.char == 'U':
        # get the equivalent byte-string descr for unicode input
        descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
    else:
        descr = data.dtype.descr
    if len(descr) > 1:
        raise ValueError("you can only insert a single column, "
                         "requested: %s" % descr)
    this_descr = descr[0]
    this_descr = [name, this_descr[1]]
    if len(data.shape) > 1:
        this_descr += [data.shape[1:]]
    this_descr = tuple(this_descr)
    name, fmt, dims = _npy2fits(
        this_descr, table_type=self._table_type_str)
    if dims is not None:
        dims = [dims]
    if colnum is None:
        new_colnum = len(self._info['colinfo']) + 1
    else:
        new_colnum = colnum + 1
    self._FITS.insert_col(self._ext + 1, new_colnum, name, fmt, tdim=dims)
    self._update_info()
    self.write_column(name, data)
Insert a new column .
46,120
def append(self, data, **keys):
    """
    Append new rows to this table HDU by writing starting one past the
    current last row.
    """
    keys['firstrow'] = self._info['nrows']
    self.write(data, **keys)
Append new rows to a table HDU
46,121
def delete_rows(self, rows):
    """
    Delete rows from the table.

    Parameters
    ----------
    rows: sequence, slice or None
        Zero-offset rows to delete. None is a no-op. A contiguous slice
        uses the fast range-delete path; anything else becomes an
        explicit row list.
    """
    if rows is None:
        return
    if isinstance(rows, slice):
        rows = self._process_slice(rows)
        if rows.step is not None and rows.step != 1:
            # strided slice cannot use the range delete; expand to
            # explicit (1-offset) row numbers
            rows = numpy.arange(
                rows.start + 1,
                rows.stop + 1,
                rows.step,
            )
        else:
            # convert to a 1-offset inclusive-style range
            rows = slice(rows.start + 1, rows.stop + 1)
    else:
        rows = self._extract_rows(rows)
        # C API is 1-offset
        rows += 1
    if isinstance(rows, slice):
        self._FITS.delete_row_range(self._ext + 1, rows.start, rows.stop)
    else:
        if rows.size == 0:
            return
        self._FITS.delete_rows(self._ext + 1, rows)
    self._update_info()
Delete rows from the table
46,122
def resize(self, nrows, front=False):
    """
    Resize the table to the given number of rows, deleting or inserting
    rows as needed. When front is True rows are removed from or added
    at the beginning; otherwise at the end.

    Note: to grow at the end it is more efficient to use append() than
    to resize and then write.
    """
    nrows_current = self.get_nrows()
    if nrows == nrows_current:
        return

    if nrows < nrows_current:
        # shrinking: delete the excess rows
        if front:
            self.delete_rows(slice(0, nrows_current - nrows))
        else:
            self.delete_rows(slice(nrows, nrows_current))
    else:
        # growing: insert blank rows
        firstrow = 0 if front else nrows_current
        self._FITS.insert_rows(
            self._ext + 1, firstrow, nrows - nrows_current)

    self._update_info()
Resize the table to the given size, removing or adding rows as necessary. Note that if you are expanding the table at the end, it is more efficient to use the append function than to resize and then write.
46,123
def read(self, **keys):
    """
    Read data from this HDU, dispatching on the keywords given:
    'columns' reads a column subset, 'rows' reads a row subset,
    otherwise everything is read.
    """
    columns = keys.get('columns', None)
    if columns is not None:
        keys.pop('columns', None)
        return self.read_columns(columns, **keys)

    rows = keys.get('rows', None)
    if rows is not None:
        keys.pop('rows', None)
        return self.read_rows(rows, **keys)

    return self._read_all(**keys)
read data from this HDU
46,124
def _read_all(self, **keys):
    """
    Read all rows and columns from this HDU into a rec array, handling
    variable-length and TBIT columns via slower special-case paths.
    """
    dtype, offsets, isvar = self.get_rec_dtype(**keys)
    w, = numpy.where(isvar == True)
    has_tbit = self._check_tbit()
    if w.size > 0:
        # variable-length columns present: use the var-aware reader
        vstorage = keys.get('vstorage', self._vstorage)
        colnums = self._extract_colnums()
        rows = None
        array = self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    elif has_tbit:
        # bit columns need per-column reads to be fixed up
        colnums = self._extract_colnums()
        array = self.read_columns(colnums, **keys)
    else:
        # fast path: single C-level read of the whole table
        firstrow = 1
        nrows = self._info['nrows']
        array = numpy.zeros(nrows, dtype=dtype)
        self._FITS.read_as_rec(self._ext + 1, 1, nrows, array)
        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
        # apply TSCALE/TZERO scalings per column
        for colnum, name in enumerate(array.dtype.names):
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])
    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)
    self._maybe_trim_strings(array, **keys)
    return array
Read all data in the HDU .
46,125
def read_column(self, col, **keys):
    """
    Read a single column, returning an ordinary (non-record) array.
    """
    res = self.read_columns([col], **keys)
    data = res[res.dtype.names[0]]
    self._maybe_trim_strings(data, **keys)
    return data
Read the specified column
46,126
def read_rows(self, rows, **keys):
    """
    Read the specified (zero-offset) rows, returning a rec array.
    None means read everything.
    """
    if rows is None:
        return self._read_all()
    if self._info['hdutype'] == ASCII_TBL:
        # ascii tables go through the generic read path
        keys['rows'] = rows
        return self.read(**keys)
    rows = self._extract_rows(rows)
    dtype, offsets, isvar = self.get_rec_dtype(**keys)
    w, = numpy.where(isvar == True)
    if w.size > 0:
        # variable-length columns need the special reader
        vstorage = keys.get('vstorage', self._vstorage)
        colnums = self._extract_colnums()
        return self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        array = numpy.zeros(rows.size, dtype=dtype)
        self._FITS.read_rows_as_rec(self._ext + 1, array, rows)
        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
            array)
        # apply TSCALE/TZERO scalings per column
        for colnum, name in enumerate(array.dtype.names):
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])
    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)
    self._maybe_trim_strings(array, **keys)
    return array
Read the specified rows .
46,127
def read_columns(self, columns, **keys):
    """
    Read a subset of columns from this binary table HDU, returning a
    rec array (or a plain array when a single column is requested).
    """
    if self._info['hdutype'] == ASCII_TBL:
        keys['columns'] = columns
        return self.read(**keys)
    rows = keys.get('rows', None)
    colnums = self._extract_colnums(columns)
    if isinstance(colnums, int):
        # a single column was requested
        return self.read_column(columns, **keys)
    rows = self._extract_rows(rows)
    dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)
    w, = numpy.where(isvar == True)
    if w.size > 0:
        vstorage = keys.get('vstorage', self._vstorage)
        array = self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        if rows is None:
            nrows = self._info['nrows']
        else:
            nrows = rows.size
        array = numpy.zeros(nrows, dtype=dtype)
        # the C API wants 1-offset column numbers
        colnumsp = colnums[:].copy()
        colnumsp[:] += 1
        self._FITS.read_columns_as_rec(
            self._ext + 1, colnumsp, array, rows)
        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
            array)
        # apply TSCALE/TZERO scalings per requested column
        for i in xrange(colnums.size):
            colnum = int(colnums[i])
            name = array.dtype.names[i]
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])
    if (self._check_tbit(colnums=colnums)):
        array = self._fix_tbit_dtype(array, colnums)
    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)
    self._maybe_trim_strings(array, **keys)
    return array
read a subset of columns from this binary table HDU
46,128
def read_slice(self, firstrow, lastrow, step=1, **keys):
    """
    Read the row slice [firstrow, lastrow) with the given step from a
    table HDU, returning a rec array.
    """
    if self._info['hdutype'] == ASCII_TBL:
        # ascii tables read via an explicit row list
        rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
        keys['rows'] = rows
        return self.read_ascii(**keys)
    step = keys.get('step', 1)
    if self._info['hdutype'] == IMAGE_HDU:
        raise ValueError("slices currently only supported for tables")
    maxrow = self._info['nrows']
    if firstrow < 0 or lastrow > maxrow:
        raise ValueError(
            "slice must specify a sub-range of [%d,%d]" % (0, maxrow))
    dtype, offsets, isvar = self.get_rec_dtype(**keys)
    w, = numpy.where(isvar == True)
    if w.size > 0:
        # variable-length columns: expand the slice to explicit rows
        vstorage = keys.get('vstorage', self._vstorage)
        rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
        colnums = self._extract_colnums()
        array = self._read_rec_with_var(
            colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        if step != 1:
            # strided slices go through the row-list reader
            rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
            array = self.read(rows=rows)
        else:
            # contiguous slice: single C-level range read
            nrows = lastrow - firstrow
            array = numpy.zeros(nrows, dtype=dtype)
            self._FITS.read_as_rec(
                self._ext + 1, firstrow + 1, lastrow, array)
            array = \
                self._maybe_decode_fits_ascii_strings_to_unicode_py3(
                    array)
            # apply TSCALE/TZERO scalings per column
            for colnum, name in enumerate(array.dtype.names):
                self._rescale_and_convert_field_inplace(
                    array,
                    name,
                    self._info['colinfo'][colnum]['tscale'],
                    self._info['colinfo'][colnum]['tzero'])
    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)
    self._maybe_trim_strings(array, **keys)
    return array
Read the specified row slice from a table .
46,129
def get_rec_dtype(self, **keys):
    """
    Get the record dtype for the specified columns.

    Parameters
    ----------
    colnums: sequence of int, keyword-only, optional
        Column numbers; all columns if not sent.
    vstorage: str, keyword-only, optional
        Storage type for variable-length columns.

    Returns
    -------
    (dtype, offsets, isvararray): the numpy record dtype, an i8 array
    of per-field byte offsets, and a bool array flagging
    variable-length columns.
    """
    colnums = keys.get('colnums', None)
    vstorage = keys.get('vstorage', self._vstorage)
    if colnums is None:
        colnums = self._extract_colnums()

    descr = []
    # use the builtin bool: numpy.bool was a deprecated alias removed
    # in numpy 1.24 and raises AttributeError there
    isvararray = numpy.zeros(len(colnums), dtype=bool)
    for i, colnum in enumerate(colnums):
        dt, isvar = self.get_rec_column_descr(colnum, vstorage)
        descr.append(dt)
        isvararray[i] = isvar
    dtype = numpy.dtype(descr)

    offsets = numpy.zeros(len(colnums), dtype='i8')
    for i, n in enumerate(dtype.names):
        offsets[i] = dtype.fields[n][1]
    return dtype, offsets, isvararray
Get the dtype for the specified columns
46,130
def _check_tbit ( self , ** keys ) : colnums = keys . get ( 'colnums' , None ) if colnums is None : colnums = self . _extract_colnums ( ) has_tbit = False for i , colnum in enumerate ( colnums ) : npy_type , isvar , istbit = self . _get_tbl_numpy_dtype ( colnum ) if ( istbit ) : has_tbit = True break return has_tbit
Check if one of the columns is a TBIT column
46,131
def _fix_tbit_dtype(self, array, colnums):
    """
    Return a view of array with each TBIT column's dtype patched to
    boolean ('?'); non-bit columns are untouched.
    """
    descr = array.dtype.descr
    for i, colnum in enumerate(colnums):
        npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
        if (istbit):
            coldescr = list(descr[i])
            coldescr[1] = '?'
            descr[i] = tuple(coldescr)
    # reinterpret in place; no data copy
    return array.view(descr)
If necessary patch up the TBIT to convert to bool array
46,132
def _get_simple_dtype_and_shape(self, colnum, rows=None):
    """
    For a single-column read, return the basic numpy type string and
    the output array shape (scalar rows give just the row count).
    """
    npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
    info = self._info['colinfo'][colnum]
    name = info['name']
    if rows is None:
        nrows = self._info['nrows']
    else:
        nrows = rows.size
    shape = None
    tdim = info['tdim']
    # element shape from TDIM; None means scalar elements
    shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
    if shape is not None:
        if nrows > 1:
            # prepend the row dimension
            if not isinstance(shape, tuple):
                shape = (nrows, shape)
            else:
                shape = tuple([nrows] + list(shape))
    else:
        shape = nrows
    return npy_type, shape
When reading a single column we want the basic data type and the shape of the array .
46,133
def get_rec_column_descr(self, colnum, vstorage):
    """
    Get a numpy descriptor entry (name, type[, shape]) for the given
    column, plus a flag for variable-length columns.

    For variable-length columns, vstorage 'object' gives an object
    field; otherwise the max element count from TFORM is used, falling
    back to object storage (with a warning) when no usable max exists.
    """
    npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
    name = self._info['colinfo'][colnum]['name']
    if isvar:
        if vstorage == 'object':
            descr = (name, 'O')
        else:
            tform = self._info['colinfo'][colnum]['tform']
            max_size = _extract_vararray_max(tform)
            if max_size <= 0:
                # no usable maximum; warn and store as object instead
                name = self._info['colinfo'][colnum]['name']
                mess = 'Will read as an object field'
                if max_size < 0:
                    mess = "Column '%s': No maximum size: '%s'. %s"
                    mess = mess % (name, tform, mess)
                    warnings.warn(mess, FITSRuntimeWarning)
                else:
                    mess = "Column '%s': Max size is zero: '%s'. %s"
                    mess = mess % (name, tform, mess)
                    warnings.warn(mess, FITSRuntimeWarning)
                return self.get_rec_column_descr(colnum, 'object')
            if npy_type[0] == 'S':
                npy_type = 'S%d' % max_size
                descr = (name, npy_type)
            elif npy_type[0] == 'U':
                npy_type = 'U%d' % max_size
                descr = (name, npy_type)
            else:
                descr = (name, npy_type, max_size)
    else:
        tdim = self._info['colinfo'][colnum]['tdim']
        shape = _tdim2shape(
            tdim, name,
            is_string=(npy_type[0] == 'S' or npy_type[0] == 'U'))
        if shape is not None:
            descr = (name, npy_type, shape)
        else:
            descr = (name, npy_type)
    return descr, isvar
Get a descriptor entry for the specified column .
46,134
def _read_rec_with_var(self, colnums, rows, dtype, offsets, isvar,
                       vstorage):
    """
    Read columns into a rec array when variable-length columns are
    present: the fixed columns are read in one by-offset pass, then
    each variable column is read row by row into the output.
    """
    # C API is 1-offset
    colnumsp = colnums + 1
    if rows is None:
        nrows = self._info['nrows']
    else:
        nrows = rows.size
    array = numpy.zeros(nrows, dtype=dtype)
    # first pass: all fixed-length columns at their field offsets
    wnotvar, = numpy.where(isvar == False)
    if wnotvar.size > 0:
        thesecol = colnumsp[wnotvar]
        theseoff = offsets[wnotvar]
        self._FITS.read_columns_as_rec_byoffset(
            self._ext + 1, thesecol, theseoff, array, rows)
        for i in xrange(thesecol.size):
            name = array.dtype.names[wnotvar[i]]
            colnum = thesecol[i] - 1
            self._rescale_and_convert_field_inplace(
                array,
                name,
                self._info['colinfo'][colnum]['tscale'],
                self._info['colinfo'][colnum]['tzero'])
        array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
            array)
    # second pass: each variable-length column, row by row
    wvar, = numpy.where(isvar == True)
    if wvar.size > 0:
        thesecol = colnumsp[wvar]
        for i in xrange(thesecol.size):
            colnump = thesecol[i]
            name = array.dtype.names[wvar[i]]
            dlist = self._FITS.read_var_column_as_list(
                self._ext + 1, colnump, rows)
            if (isinstance(dlist[0], str) or
                    (IS_PY3 and isinstance(dlist[0], bytes))):
                is_string = True
            else:
                is_string = False
            if array[name].dtype.descr[0][1][1] == 'O':
                # object storage: assign each row's item directly
                for irow, item in enumerate(dlist):
                    if IS_PY3 and isinstance(item, bytes):
                        item = item.decode('ascii')
                    array[name][irow] = item
            else:
                # fixed-size storage: copy into the leading elements
                for irow, item in enumerate(dlist):
                    if IS_PY3 and isinstance(item, bytes):
                        item = item.decode('ascii')
                    if is_string:
                        array[name][irow] = item
                    else:
                        ncopy = len(item)
                        if IS_PY3:
                            ts = array[name].dtype.descr[0][1][1]
                            if ts != 'S' and ts != 'U':
                                array[name][irow][0:ncopy] = item[:]
                            else:
                                array[name][irow] = item
                        else:
                            array[name][irow][0:ncopy] = item[:]
    return array
Read columns from a table into a rec array including variable length columns . This is special because for efficiency it involves reading from the main table as normal but skipping the columns in the array that are variable . Then reading the variable length columns with accounting for strides appropriately .
46,135
def _extract_rows ( self , rows ) : if rows is not None : rows = numpy . array ( rows , ndmin = 1 , copy = False , dtype = 'i8' ) rows = numpy . unique ( rows ) maxrow = self . _info [ 'nrows' ] - 1 if rows [ 0 ] < 0 or rows [ - 1 ] > maxrow : raise ValueError ( "rows must be in [%d,%d]" % ( 0 , maxrow ) ) return rows
Extract an array of rows from an input scalar or sequence
46,136
def _process_slice(self, arg):
    """
    Normalize a python slice into one with concrete, in-range start
    and stop for passing to the C layer.
    """
    start = arg.start
    stop = arg.stop
    step = arg.step
    nrows = self._info['nrows']
    if step is None:
        step = 1
    if start is None:
        start = 0
    if stop is None:
        stop = nrows
    if start < 0:
        start = nrows + start
        if start < 0:
            raise IndexError("Index out of bounds")
    if stop < 0:
        # NOTE(review): this uses start, not stop, to resolve a negative
        # stop — looks suspicious; confirm against upstream intent
        stop = nrows + start + 1
    if stop < start:
        # will return an empty struct
        stop = start
    if stop > nrows:
        stop = nrows
    return slice(start, stop, step)
process the input slice for use calling the C code
46,137
def _slice2rows ( self , start , stop , step = None ) : nrows = self . _info [ 'nrows' ] if start is None : start = 0 if stop is None : stop = nrows if step is None : step = 1 tstart = self . _fix_range ( start ) tstop = self . _fix_range ( stop ) if tstart == 0 and tstop == nrows : return None if stop < start : raise ValueError ( "start is greater than stop in slice" ) return numpy . arange ( tstart , tstop , step , dtype = 'i8' )
Convert a slice to an explicit array of rows
46,138
def _fix_range ( self , num , isslice = True ) : nrows = self . _info [ 'nrows' ] if isslice : if num < 0 : num = nrows + ( 1 + num ) elif num > nrows : num = nrows else : if num < 0 : num = nrows + num elif num > ( nrows - 1 ) : num = nrows - 1 return num
Ensure the input is within range .
46,139
def _rescale_and_convert_field_inplace ( self , array , name , scale , zero ) : self . _rescale_array ( array [ name ] , scale , zero ) if array [ name ] . dtype == numpy . bool : array [ name ] = self . _convert_bool_array ( array [ name ] ) return array
Apply fits scalings . Also convert bool to proper numpy boolean values
46,140
def _rescale_array ( self , array , scale , zero ) : if scale != 1.0 : sval = numpy . array ( scale , dtype = array . dtype ) array *= sval if zero != 0.0 : zval = numpy . array ( zero , dtype = array . dtype ) array += zval
Scale the input array
46,141
def _maybe_trim_strings(self, array, **keys):
    """
    Trim trailing white space from all string fields of the array when
    requested either on this HDU or via the trim_strings keyword.
    """
    if self.trim_strings or keys.get('trim_strings', False):
        _trim_strings(array)
if requested trim trailing white space from all string fields in the input array
46,142
def _get_tbl_numpy_dtype(self, colnum, include_endianness=True):
    """
    Get the numpy type string for the given column, plus flags for
    variable-length (negative eqtype) and TBIT columns.
    """
    table_type = self._info['hdutype']
    table_type_string = _hdu_type_map[table_type]
    try:
        ftype = self._info['colinfo'][colnum]['eqtype']
        if table_type == ASCII_TBL:
            npy_type = _table_fits2npy_ascii[abs(ftype)]
        else:
            npy_type = _table_fits2npy[abs(ftype)]
    except KeyError:
        raise KeyError("unsupported %s fits data "
                       "type: %d" % (table_type_string, ftype))
    istbit = False
    if (ftype == 1):
        istbit = True
    isvar = False
    if ftype < 0:
        # negative eqtype marks a variable-length column
        isvar = True
    if include_endianness:
        # FITS binary tables are big endian
        if table_type == ASCII_TBL:
            addstr = ''
        else:
            addstr = '>'
        if npy_type not in ['u1', 'i1', 'S', 'U']:
            npy_type = addstr + npy_type
    if npy_type == 'S':
        width = self._info['colinfo'][colnum]['width']
        npy_type = 'S%d' % width
    elif npy_type == 'U':
        width = self._info['colinfo'][colnum]['width']
        npy_type = 'U%d' % width
    return npy_type, isvar, istbit
Get numpy type for the input column
46,143
def _process_args_as_rows_or_columns(self, arg, unpack=False):
    """
    Interpret arg as either a column designation (string or sequence
    of strings) or a row designation (number, sequence, array or
    slice), returning (result, flags) where flags may contain
    'isrows', 'isslice' and 'isscalar'.
    """
    flags = set()
    if isinstance(arg, (tuple, list, numpy.ndarray)):
        # a sequence of strings names columns; otherwise rows
        if isstring(arg[0]):
            result = arg
        else:
            result = arg
            flags.add('isrows')
    elif isstring(arg):
        # a single column name
        result = arg
    elif isinstance(arg, slice):
        if unpack:
            # expand the slice to explicit row numbers
            flags.add('isrows')
            result = self._slice2rows(arg.start, arg.stop, arg.step)
        else:
            flags.add('isrows')
            flags.add('isslice')
            result = self._process_slice(arg)
    else:
        # a scalar was requested
        result = arg
        flags.add('isrows')
        if numpy.ndim(arg) == 0:
            flags.add('isscalar')
    return result, flags
We must be able to interpret the args as either a column name or row number, or sequences thereof. Numpy arrays and slices are also fine.
46,144
def _extract_colnums ( self , columns = None ) : if columns is None : return numpy . arange ( self . _ncol , dtype = 'i8' ) if not isinstance ( columns , ( tuple , list , numpy . ndarray ) ) : return self . _extract_colnum ( columns ) colnums = numpy . zeros ( len ( columns ) , dtype = 'i8' ) for i in xrange ( colnums . size ) : colnums [ i ] = self . _extract_colnum ( columns [ i ] ) colnums = numpy . unique ( colnums ) return colnums
Extract an array of columns from the input
46,145
def _extract_colnum(self, col):
    """
    Get the zero-offset column number for a column name or number.

    Raises ValueError when the number is out of range or the name is
    not found (honoring self.case_sensitive).
    """
    if isinteger(col):
        colnum = col
        if (colnum < 0) or (colnum > (self._ncol - 1)):
            # fixed: the original format string had a single %d
            # placeholder but a two-element argument tuple, which
            # raised TypeError instead of the intended ValueError
            raise ValueError(
                "column number should be in [%d,%d]" % (0, self._ncol - 1))
    else:
        colstr = mks(col)
        try:
            if self.case_sensitive:
                mess = "column name '%s' not found (case sensitive)" % col
                colnum = self._colnames.index(colstr)
            else:
                mess = "column name '%s' not found (case insensitive)" % col
                colnum = self._colnames_lower.index(colstr.lower())
        except ValueError:
            raise ValueError(mess)
    return int(colnum)
Get the column number for the input column
46,146
def _update_info(self):
    """
    Call the parent method, verify this really is a table HDU, and
    cache convenience column-name data.
    """
    super(TableHDU, self)._update_info()
    if self._info['hdutype'] == IMAGE_HDU:
        mess = "Extension %s is not a Table HDU" % self.ext
        raise ValueError(mess)
    if 'colinfo' in self._info:
        # cached name lists for fast (case-insensitive) lookups
        self._colnames = [i['name'] for i in self._info['colinfo']]
        self._colnames_lower = [
            i['name'].lower() for i in self._info['colinfo']]
        self._ncol = len(self._colnames)
Call parent method and make sure this is in fact a table HDU . Set some convenience data .
46,147
def _get_next_buffered_row ( self ) : if self . _iter_row == self . _iter_nrows : raise StopIteration if self . _row_buffer_index >= self . _iter_row_buffer : self . _buffer_iter_rows ( self . _iter_row ) data = self . _row_buffer [ self . _row_buffer_index ] self . _iter_row += 1 self . _row_buffer_index += 1 return data
Get the next row for iteration .
46,148
def _buffer_iter_rows(self, start):
    """
    Fill the iteration buffer with up to _iter_row_buffer rows
    starting at the given row, and reset the buffer cursor.
    """
    self._row_buffer = self[start:start + self._iter_row_buffer]
    self._row_buffer_index = 0
Read in the buffer for iteration
46,149
def read(self, **keys):
    """Read data from this ascii table HDU into a structured array.

    Keywords
    --------
    rows : sequence, optional
        Row subset to read; default all rows.
    columns : sequence or scalar, optional
        Column subset; a scalar column delegates to read_column.
    lower / upper : bool, optional
        Force field names to lower/upper case.
    """
    rows = keys.get('rows', None)
    columns = keys.get('columns', None)
    colnums = self._extract_colnums(columns)
    if isinstance(colnums, int):
        # a single column was requested
        return self.read_column(columns, **keys)
    rows = self._extract_rows(rows)
    if rows is None:
        nrows = self._info['nrows']
    else:
        nrows = rows.size
    # NOTE(review): rows was already extracted above; this second call
    # looks redundant — confirm
    rows = self._extract_rows(rows)
    dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)
    array = numpy.zeros(nrows, dtype=dtype)
    # fixed-length columns are read directly into the output array
    wnotvar, = numpy.where(isvar == False)
    if wnotvar.size > 0:
        for i in wnotvar:
            colnum = colnums[i]
            name = array.dtype.names[i]
            a = array[name].copy()
            self._FITS.read_column(self._ext + 1, colnum + 1, a, rows)
            array[name] = a
            del a
    array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
    # variable-length columns come back as python lists
    wvar, = numpy.where(isvar == True)
    if wvar.size > 0:
        for i in wvar:
            colnum = colnums[i]
            name = array.dtype.names[i]
            dlist = self._FITS.read_var_column_as_list(self._ext + 1, colnum + 1, rows)
            if (isinstance(dlist[0], str) or
                    (IS_PY3 and isinstance(dlist[0], bytes))):
                is_string = True
            else:
                is_string = False
            if array[name].dtype.descr[0][1][1] == 'O':
                # object column: store each item as-is
                for irow, item in enumerate(dlist):
                    if IS_PY3 and isinstance(item, bytes):
                        item = item.decode('ascii')
                    array[name][irow] = item
            else:
                # copy into the fixed-size field, truncating/padding
                for irow, item in enumerate(dlist):
                    if IS_PY3 and isinstance(item, bytes):
                        item = item.decode('ascii')
                    if is_string:
                        array[name][irow] = item
                    else:
                        ncopy = len(item)
                        array[name][irow][0:ncopy] = item[:]
    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)
    self._maybe_trim_strings(array, **keys)
    return array
Read data from an ascii table HDU.
46,150
def read(self, **keys):
    """Read this column subset from disk and return a numpy array.

    A scalar subset reads a single column; otherwise the owning HDU's
    read() is used with this subset's columns (unless the caller passed
    an explicit columns= keyword).
    """
    if self.is_scalar:
        return self.fitshdu.read_column(self.columns, **keys)
    if keys.get('columns', None) is None:
        keys['columns'] = self.columns
    return self.fitshdu.read(**keys)
Read the data from disk and return as a numpy array
46,151
def read(filename, ext=None, extver=None, **keys):
    """Convenience function: open *filename* and read data from one HDU.

    If ext is None, the first extension with data is used.  With
    header=True in keys, returns (data, header) instead of just data.

    Raises IOError if no extension has data.
    """
    with FITS(filename, **keys) as fits:
        header = keys.pop('header', False)
        if ext is None:
            # find the first extension that actually has data
            for i in xrange(len(fits)):
                if fits[i].has_data():
                    ext = i
                    break
            if ext is None:
                raise IOError("No extensions have data")
        item = _make_item(ext, extver=extver)
        data = fits[item].read(**keys)
        if header:
            h = fits[item].read_header()
            return data, h
        else:
            return data
Convenience function to read data from the specified FITS HDU
46,152
def read_header(filename, ext=0, extver=None, case_sensitive=False, **keys):
    """Convenience function: read the header of one HDU as a FITSHDR.

    ext may be a number (0-offset) or a name.  cfitsio name lookup is
    case-insensitive, so with case_sensitive=True a manual scan over
    the HDUs is performed instead.

    Raises IOError if a case-sensitive name lookup finds no match.
    """
    dont_create = 0
    try:
        # numeric ext: cfitsio HDU numbers are 1-offset
        hdunum = ext + 1
    except TypeError:
        # ext is a name, not a number
        hdunum = None
    _fits = _fitsio_wrap.FITS(filename, READONLY, dont_create)
    if hdunum is None:
        extname = mks(ext)
        if extver is None:
            extver_num = 0
        else:
            extver_num = extver
        if not case_sensitive:
            # can use the cfitsio builtin search
            hdunum = _fits.movnam_hdu(ANY_HDU, extname, extver_num)
        else:
            # scan every HDU comparing names exactly
            found = False
            current_ext = 0
            while True:
                hdunum = current_ext + 1
                try:
                    hdu_type = _fits.movabs_hdu(hdunum)
                    name, vers = _fits.get_hdu_name_version(hdunum)
                    if name == extname:
                        if extver is None:
                            # take the first match regardless of version
                            found = True
                            break
                        else:
                            if extver_num == vers:
                                found = True
                                break
                except OSError:
                    # ran past the last HDU
                    break
                current_ext += 1
            if not found:
                raise IOError('hdu not found: %s (extver %s)' % (extname, extver))
    return FITSHDR(_fits.read_header(hdunum))
Convenience function to read the header from the specified FITS HDU
46,153
def read_scamp_head(fname, header=None):
    """Read a SCAMP .head file into a FITSHDR object.

    Parameters
    ----------
    fname : str
        Path to the .head file.
    header : optional
        Existing header (or records) to seed the FITSHDR with.
    """
    with open(fname) as fobj:
        lines = fobj.readlines()
    # drop the END card; keep everything else
    lines = [l.strip() for l in lines if l[0:3] != 'END']
    hdr = FITSHDR(header)
    for l in lines:
        hdr.add_record(l)
    return hdr
read a SCAMP . head file as a fits header FITSHDR object
46,154
def write(filename, data, extname=None, extver=None, units=None, compress=None,
          table_type='binary', header=None, clobber=False, **keys):
    """Convenience function: create a new HDU in *filename* and write *data*.

    With clobber=True any existing file is overwritten; otherwise the
    HDU is appended.  All other keywords are forwarded to FITS.write.
    """
    with FITS(filename, 'rw', clobber=clobber, **keys) as fits:
        fits.write(data,
                   table_type=table_type,
                   units=units,
                   extname=extname,
                   extver=extver,
                   compress=compress,
                   header=header,
                   **keys)
Convenience function to create a new HDU and write the data .
46,155
def array2tabledef(data, table_type='binary', write_bitcols=False):
    """Build FITS table column definitions from a structured array.

    Similar to descr2tabledef, but object (variable-length) columns get
    their type and max length extracted from the data itself.

    Returns (names, formats, dims).

    Raises ValueError for arrays without fields, integer types ascii
    tables cannot represent, empty names, or duplicate
    (case-insensitive) column names.
    """
    is_ascii = (table_type == 'ascii')
    if data.dtype.fields is None:
        raise ValueError("data must have fields")
    names = []
    # used only for duplicate detection (FITS names are case-insensitive)
    names_nocase = {}
    formats = []
    dims = []
    descr = data.dtype.descr
    for d in descr:
        # numpy dtype code without the byte-order character
        npy_dtype = d[1][1:]
        if is_ascii:
            if npy_dtype in ['u1', 'i1']:
                raise ValueError(
                    "1-byte integers are not supported for "
                    "ascii tables: '%s'" % npy_dtype)
            if npy_dtype in ['u2']:
                raise ValueError(
                    "unsigned 2-byte integers are not supported for "
                    "ascii tables: '%s'" % npy_dtype)
        if npy_dtype[0] == 'O':
            # variable-length column: inspect the data for type/max length
            name = d[0]
            form, dim = npy_obj2fits(data, name)
        elif npy_dtype[0] == "V":
            # void (padding) fields are skipped
            continue
        else:
            name, form, dim = _npy2fits(d, table_type=table_type,
                                        write_bitcols=write_bitcols)
        if name == '':
            raise ValueError("field name is an empty string")
        name_nocase = name.upper()
        if name_nocase in names_nocase:
            raise ValueError(
                "duplicate column name found: '%s'. Note "
                "FITS column names are not case sensitive" % name_nocase)
        names.append(name)
        names_nocase[name_nocase] = name_nocase
        formats.append(form)
        dims.append(dim)
    return names, formats, dims
Similar to descr2tabledef but if there are object columns a type and max length will be extracted and used for the tabledef
46,156
def descr2tabledef(descr, table_type='binary', write_bitcols=False):
    """Build FITS table column definitions from a numpy dtype descriptor.

    Returns (names, formats, dims).

    Raises ValueError for object columns (their max length cannot be
    known without data) and for empty field names.
    """
    names = []
    formats = []
    dims = []
    for d in descr:
        if d[1][1] == 'O':
            raise ValueError(
                'cannot automatically declare a var column without '
                'some data to determine max len')
        name, form, dim = _npy2fits(d, table_type=table_type,
                                    write_bitcols=write_bitcols)
        if name == '':
            raise ValueError("field name is an empty string")
        names.append(name)
        formats.append(form)
        dims.append(dim)
    return names, formats, dims
Create a FITS table def from the input numpy descriptor .
46,157
def get_tile_dims(tile_dims, imshape):
    """Validate compression tile dims against the image shape.

    Parameters
    ----------
    tile_dims : sequence or None
        Requested tile dimensions; None means use the default.
    imshape : sequence
        Shape of the image being written.

    Returns
    -------
    numpy i8 array of tile dims, or None when tile_dims is None.

    Raises
    ------
    ValueError
        If tile_dims has a different number of dimensions than imshape.
    """
    if tile_dims is None:
        return None
    td = numpy.array(tile_dims, dtype='i8')
    nd = len(imshape)
    if td.size != nd:
        # bug fix: the expected/got values were swapped in the message
        msg = "expected tile_dims to have %d dims, got %d" % (nd, td.size)
        raise ValueError(msg)
    return td
Just make sure the tile dims have the appropriate number of dimensions.
46,158
def _extract_table_type(type):
    """Normalize a table type spec to the numeric cfitsio constant.

    Accepts the strings 'binary'/'ascii' (case insensitive) or the
    numeric constants BINARY_TBL/ASCII_TBL.
    """
    if isinstance(type, str):
        lowered = type.lower()
        if lowered[0:7] == 'binary':
            return BINARY_TBL
        if lowered[0:6] == 'ascii':
            return ASCII_TBL
        raise ValueError(
            "table type string should begin with 'binary' or 'ascii' "
            "(case insensitive)")
    table_type = int(type)
    if table_type not in [BINARY_TBL, ASCII_TBL]:
        raise ValueError(
            "table type num should be BINARY_TBL (%d) or "
            "ASCII_TBL (%d)" % (BINARY_TBL, ASCII_TBL))
    return table_type
Get the numerical table type
46,159
def close(self):
    """Close the underlying fits file and null out cached state."""
    fits = getattr(self, '_FITS', None)
    if fits is not None:
        fits.close()
        self._FITS = None
    self._filename = None
    self.mode = None
    self.charmode = None
    self.intmode = None
    self.hdu_list = None
    self.hdu_map = None
Close the fits file and set relevant metadata to None
46,160
def movnam_hdu(self, extname, hdutype=ANY_HDU, extver=0):
    """Move to the HDU with the given name.

    Returns the (1-offset) hdu number reported by cfitsio.
    """
    extname = mks(extname)
    hdu = self._FITS.movnam_hdu(hdutype, extname, extver)
    return hdu
Move to the indicated HDU by name
46,161
def reopen(self):
    """Close and reopen the fits file with the same mode; rebuild the HDU list."""
    self._FITS.close()
    del self._FITS
    self._FITS = _fitsio_wrap.FITS(self._filename, self.intmode, 0)
    self.update_hdu_list()
close and reopen the fits file with the same mode
46,162
def write(self, data, units=None, extname=None, extver=None, compress=None,
          tile_dims=None, header=None, names=None, table_type='binary',
          write_bitcols=False, **keys):
    """Write *data* to a new HDU.

    None or a fields-less ndarray is written as an image; a structured
    array (or list/dict collection) is written as a table.  The
    image-only and table-only keywords are forwarded accordingly.
    """
    isimage = False
    if data is None:
        isimage = True
    elif isinstance(data, numpy.ndarray):
        # idiom fix: `is None` rather than `== None`
        if data.dtype.fields is None:
            # a plain ndarray with no fields is an image
            isimage = True
    if isimage:
        self.write_image(data, extname=extname, extver=extver,
                         compress=compress, tile_dims=tile_dims,
                         header=header)
    else:
        self.write_table(data, units=units, extname=extname, extver=extver,
                         header=header, names=names, table_type=table_type,
                         write_bitcols=write_bitcols)
Write the data to a new HDU .
46,163
def write_image(self, img, extname=None, extver=None, compress=None,
                tile_dims=None, header=None):
    """Create a new image extension and write *img* into it.

    If a header is given, its keys are written afterwards and the HDU
    info is refreshed (header keys can change the cached info).
    """
    self.create_image_hdu(img,
                          header=header,
                          extname=extname,
                          extver=extver,
                          compress=compress,
                          tile_dims=tile_dims)
    if header is not None:
        self[-1].write_keys(header)
        self[-1]._update_info()
Create a new image extension and write the data .
46,164
def create_image_hdu(self, img=None, dims=None, dtype=None, extname=None,
                     extver=None, compress=None, tile_dims=None, header=None):
    """Create a new empty image HDU and reload the hdu list.

    Either create from an input image (img=...), or from dims= and
    dtype=.  img=None with dims=None writes an empty (None) image,
    which is only allowed for extension zero.
    """
    if (img is not None) or (img is None and dims is None):
        from_image = True
    elif dims is not None:
        from_image = False
    if from_image:
        img2send = img
        if img is not None:
            dims = img.shape
            # dtype string without the byte-order character
            dtstr = img.dtype.descr[0][1][1:]
            if img.size == 0:
                raise ValueError("data must have at least 1 row")
            # make a contiguous, native-byte-order copy for cfitsio
            if not img.flags['C_CONTIGUOUS']:
                img2send = numpy.ascontiguousarray(img)
                array_to_native(img2send, inplace=True)
            else:
                img2send = array_to_native(img, inplace=False)
            if IS_PY3 and img2send.dtype.char == 'U':
                # python3 unicode strings must become ascii bytes
                img2send = img2send.astype('S', copy=False)
        else:
            # writing a None image; only allowed in limited cases
            self._ensure_empty_image_ok()
            compress = None
            tile_dims = None
        # dims are implicit in the image itself
        dims2send = None
    else:
        # create from dims and dtype
        if dtype is None:
            raise ValueError("send dtype= with dims=")
        dtype = numpy.dtype(dtype)
        dtstr = dtype.descr[0][1][1:]
        # a dummy array to carry the type information
        img2send = numpy.zeros(1, dtype=dtype)
        dims2send = numpy.array(dims, dtype='i8', ndmin=1)
    if img2send is not None:
        if img2send.dtype.fields is not None:
            raise ValueError("got record data type, expected regular ndarray")
    if extname is None:
        extname = ""
    else:
        if not isstring(extname):
            raise ValueError("extension name must be a string")
        extname = mks(extname)
    # NOTE(review): extname is never None at this point, so only the
    # extver check is effective here — confirm
    if extname is not None and extver is not None:
        extver = check_extver(extver)
    if extver is None:
        extver = 0
    comptype = get_compress_type(compress)
    tile_dims = get_tile_dims(tile_dims, dims)
    if img2send is not None:
        check_comptype_img(comptype, dtstr)
    if header is not None:
        nkeys = len(header)
    else:
        nkeys = 0
    self._FITS.create_image_hdu(img2send, nkeys,
                                dims=dims2send,
                                comptype=comptype,
                                tile_dims=tile_dims,
                                extname=extname,
                                extver=extver)
    self.update_hdu_list(rebuild=False)
Create a new empty image HDU and reload the hdu list . Either create from an input image or from input dims and dtype
46,165
def _ensure_empty_image_ok ( self ) : if self . ignore_empty : return if len ( self ) > 1 : raise RuntimeError ( "Cannot write None image at extension %d" % len ( self ) ) if 'ndims' in self [ 0 ] . _info : raise RuntimeError ( "Can only write None images to extension zero, " "which already exists" )
If ignore_empty was not set to True we only allow empty HDU for first HDU and if there is no data there already
46,166
def write_table(self, data, table_type='binary', names=None, formats=None,
                units=None, extname=None, extver=None, header=None,
                write_bitcols=False):
    """Create a new table extension and write *data* into it.

    Header keys (if any) are written before the data so that the table
    info is complete when the data lands.
    """
    self.create_table_hdu(data=data,
                          header=header,
                          names=names,
                          units=units,
                          extname=extname,
                          extver=extver,
                          table_type=table_type,
                          write_bitcols=write_bitcols)
    if header is not None:
        self[-1].write_keys(header)
        self[-1]._update_info()
    self[-1].write(data, names=names)
Create a new table extension and write the data .
46,167
def create_table_hdu(self, data=None, dtype=None, header=None, names=None,
                     formats=None, units=None, dims=None, extname=None,
                     extver=None, table_type='binary', write_bitcols=False):
    """Create a new empty table extension and reload the hdu list.

    The table definition can come from (in priority order) a data
    sample, a dtype, or explicit names=/formats= lists.
    """
    self.keys['write_bitcols'] = write_bitcols
    table_type_int = _extract_table_type(table_type)
    if data is not None:
        # derive the column definitions from the data itself
        if isinstance(data, numpy.ndarray):
            names, formats, dims = array2tabledef(
                data, table_type=table_type, write_bitcols=write_bitcols)
        elif isinstance(data, (list, dict)):
            names, formats, dims = collection2tabledef(
                data, names=names, table_type=table_type,
                write_bitcols=write_bitcols)
        else:
            raise ValueError("data must be an ndarray with fields or a dict")
    elif dtype is not None:
        dtype = numpy.dtype(dtype)
        names, formats, dims = descr2tabledef(
            dtype.descr,
            write_bitcols=write_bitcols,
            table_type=table_type,
        )
    else:
        # explicit names/formats were sent; validate the list inputs
        if names is None or formats is None:
            raise ValueError("send either dtype=, data=, or names= and formats=")
        if not isinstance(names, list) or not isinstance(formats, list):
            raise ValueError("names and formats should be lists")
        if len(names) != len(formats):
            raise ValueError("names and formats must be same length")
        if dims is not None:
            if not isinstance(dims, list):
                raise ValueError("dims should be a list")
            if len(dims) != len(names):
                raise ValueError("names and dims must be same length")
        if units is not None:
            if not isinstance(units, list):
                raise ValueError("units should be a list")
            if len(units) != len(names):
                raise ValueError("names and units must be same length")
    if extname is None:
        extname = ""
    else:
        if not isstring(extname):
            raise ValueError("extension name must be a string")
        extname = mks(extname)
    # NOTE(review): extname cannot be None here, so only the extver
    # branch of this condition is effective — confirm
    if extname is not None and extver is not None:
        extver = check_extver(extver)
    if extver is None:
        extver = 0
    if extname is None:
        extname = ""
    if header is not None:
        nkeys = len(header)
    else:
        nkeys = 0
    self._FITS.create_table_hdu(table_type_int, nkeys,
                                names, formats,
                                tunit=units, tdim=dims,
                                extname=extname, extver=extver)
    self.update_hdu_list(rebuild=False)
Create a new empty table extension and reload the hdu list .
46,168
def update_hdu_list(self, rebuild=True):
    """Re-scan the HDUs in the file.

    With rebuild=True (or when no list exists yet) the whole list is
    rebuilt; otherwise only newly-appeared HDUs are appended.
    """
    if not hasattr(self, 'hdu_list'):
        rebuild = True
    if rebuild:
        self.hdu_list = []
        self.hdu_map = {}
        ext = 0
    else:
        # continue scanning past what we already have
        ext = len(self)
    while True:
        try:
            self._append_hdu_info(ext)
        except (IOError, RuntimeError):
            # ran past the last HDU in the file
            break
        ext += 1
Force an update of the entire HDU list
46,169
def next(self):
    """Return the next HDU in iteration order, raising StopIteration at the end."""
    if self._iter_index >= len(self.hdu_list):
        raise StopIteration
    current = self.hdu_list[self._iter_index]
    self._iter_index += 1
    return current
Move to the next iteration
46,170
def _extract_item ( self , item ) : ver = 0 if isinstance ( item , tuple ) : ver_sent = True nitem = len ( item ) if nitem == 1 : ext = item [ 0 ] elif nitem == 2 : ext , ver = item else : ver_sent = False ext = item return ext , ver , ver_sent
Utility function to extract an item, meaning an extension number or name, plus an optional version.
46,171
def _update_info(self):
    """Refresh info via the parent class and verify this is an image HDU.

    Dimensions are stored reversed into C order, since cfitsio reports
    them in fortran order.
    """
    super(ImageHDU, self)._update_info()
    if self._info['hdutype'] != IMAGE_HDU:
        mess = "Extension %s is not a Image HDU" % self.ext
        raise ValueError(mess)
    if 'dims' in self._info:
        self._info['dims'] = list(reversed(self._info['dims']))
Call parent method and make sure this is in fact an image HDU. Set dims in C order.
46,172
def reshape(self, dims):
    """Reshape the on-disk image to the requested dimensions."""
    newdims = numpy.array(dims, ndmin=1, dtype='i8')
    # cfitsio HDU numbers are 1-offset
    self._FITS.reshape_image(self._ext + 1, newdims)
reshape an existing image to the requested dimensions
46,173
def write(self, img, start=0, **keys):
    """Write *img* into this image HDU.

    Parameters
    ----------
    img : ndarray
        Plain (non-record) array of data to write.
    start : scalar or sequence, optional
        Flat offset, or per-dimension start position, at which to write.
    """
    dims = self.get_dims()
    if img.dtype.fields is not None:
        raise ValueError("got recarray, expected regular ndarray")
    if img.size == 0:
        raise ValueError("data must have at least 1 row")
    # make a contiguous, native-byte-order version to hand to cfitsio
    if not img.flags['C_CONTIGUOUS']:
        img_send = numpy.ascontiguousarray(img)
        array_to_native(img_send, inplace=True)
    else:
        img_send = array_to_native(img, inplace=False)
    if IS_PY3 and img_send.dtype.char == 'U':
        # python3 unicode strings must become ascii bytes
        img_send = img_send.astype('S', copy=False)
    if not numpy.isscalar(start):
        # convert a per-dimension start into a flat offset
        offset = _convert_full_start_to_offset(dims, start)
    else:
        offset = start
    if self.has_data():
        # grow the on-disk image when the write extends past its bounds
        self._expand_if_needed(dims, img.shape, start, offset)
    self._FITS.write_image(self._ext + 1, img_send, offset + 1)
    self._update_info()
Write the image into this HDU
46,174
def read(self, **keys):
    """Read the whole image as a numpy array; return None when the HDU is empty."""
    if not self.has_data():
        return None
    dtype, shape = self._get_dtype_and_shape()
    result = numpy.zeros(shape, dtype=dtype)
    self._FITS.read_image(self._ext + 1, result)
    return result
Read the image .
46,175
def _get_dtype_and_shape ( self ) : npy_dtype = self . _get_image_numpy_dtype ( ) if self . _info [ 'ndims' ] != 0 : shape = self . _info [ 'dims' ] else : raise IOError ( "no image present in HDU" ) return npy_dtype , shape
Get the numpy dtype and shape for image
46,176
def _get_image_numpy_dtype(self):
    """Return the numpy dtype string for this image's FITS equivalent type.

    Raises
    ------
    KeyError
        If 'img_equiv_type' is absent from the header info, or if its
        value is not a supported fits type.
    """
    # bug fix: look up the header value outside the try block.  The
    # original wrapped both lookups in one try and then referenced
    # `ftype` in the except clause; when 'img_equiv_type' was missing,
    # ftype was unbound and the handler raised NameError, masking the
    # real error.
    ftype = self._info['img_equiv_type']
    try:
        npy_type = _image_bitpix2npy[ftype]
    except KeyError:
        raise KeyError("unsupported fits data type: %d" % ftype)
    return npy_type
Get the numpy dtype for the image
46,177
def _read_image_slice(self, arg):
    """Read a rectangular slice of the image.

    Parameters
    ----------
    arg : slice or tuple
        One slice (or int) per image dimension, e.g. [2:5] or [2:5, 8:25].

    Returns the selected sub-array.
    """
    if 'ndims' not in self._info:
        raise ValueError("Attempt to slice empty extension")
    if isinstance(arg, slice):
        # promote a single slice to a 1-tuple and recurse
        return self._read_image_slice((arg,))
    if not isinstance(arg, tuple):
        raise ValueError("arguments must be slices, one for each "
                         "dimension, e.g. [2:5] or [2:5,8:25] etc.")
    # should be a tuple of slices, one for each dimension
    nd = len(arg)
    if nd != self._info['ndims']:
        raise ValueError("Got slice dimensions %d, "
                         "expected %d" % (nd, self._info['ndims']))
    targ = arg
    arg = []
    for a in targ:
        if isinstance(a, slice):
            arg.append(a)
        elif isinstance(a, int):
            # an int selects a single element: a 1-length slice
            arg.append(slice(a, a + 1, 1))
        else:
            raise ValueError("arguments must be slices, e.g. 2:12")
    dims = self._info['dims']
    arrdims = []
    first = []
    last = []
    steps = []
    # normalize each slice and convert to cfitsio's 1-offset,
    # inclusive-end convention
    dim = 0
    for slc in arg:
        start = slc.start
        stop = slc.stop
        step = slc.step
        if start is None:
            start = 0
        if stop is None:
            stop = dims[dim]
        if step is None:
            step = 1
        if step < 1:
            raise ValueError("slice steps must be >= 1")
        if start < 0:
            # negative index counts from the end
            start = dims[dim] + start
            if start < 0:
                raise IndexError("Index out of bounds")
        if stop < 0:
            # NOTE(review): this uses the (possibly adjusted) start
            # rather than the dimension alone — confirm intent
            stop = dims[dim] + start + 1
        # move to 1-offset
        start = start + 1
        if stop < start:
            raise ValueError("python slices but include at least one "
                             "element, got %s" % slc)
        if stop > dims[dim]:
            stop = dims[dim]
        first.append(start)
        last.append(stop)
        steps.append(step)
        arrdims.append(stop - start + 1)
        dim += 1
    # fits dimensions are reversed from numpy
    first.reverse()
    last.reverse()
    steps.reverse()
    first = numpy.array(first, dtype='i8')
    last = numpy.array(last, dtype='i8')
    steps = numpy.array(steps, dtype='i8')
    npy_dtype = self._get_image_numpy_dtype()
    array = numpy.zeros(arrdims, dtype=npy_dtype)
    self._FITS.read_image_slice(self._ext + 1, first, last, steps, array)
    return array
workhorse to read a slice
46,178
def _expand_if_needed(self, dims, write_dims, start, offset):
    """Expand the on-disk image when the intended write extends past its bounds.

    Parameters
    ----------
    dims : sequence
        Current on-disk image dimensions.
    write_dims : sequence
        Shape of the data being written.
    start : scalar or sequence
        Start position of the write, per-dimension or exactly 0.
    offset : int
        Flat offset corresponding to start.
    """
    from operator import mul
    if numpy.isscalar(start):
        start_is_scalar = True
    else:
        start_is_scalar = False
    # calculate the first dimension of the new image
    existing_size = reduce(mul, dims, 1)
    required_size = offset + reduce(mul, write_dims, 1)
    if required_size > existing_size:
        # we need to expand the image
        print(" required size:", required_size, "existing size:", existing_size)
        ndim = len(dims)
        idim = len(write_dims)
        if start_is_scalar:
            if start == 0:
                start = [0] * ndim
            else:
                raise ValueError("When expanding "
                                 "an existing image while writing, the start keyword "
                                 "must have the same number of dimensions "
                                 "as the image or be exactly 0, got %s " % start)
        if idim != ndim:
            raise ValueError("When expanding "
                             "an existing image while writing, the input image "
                             "must have the same number of dimensions "
                             "as the original. "
                             "Got %d instead of %d" % (idim, ndim))
        # each dimension grows just enough to hold the write
        new_dims = []
        for i in xrange(ndim):
            required_dim = start[i] + write_dims[i]
            if required_dim < dims[i]:
                # careful not to shrink the image!
                dimsize = dims[i]
            else:
                dimsize = required_dim
            new_dims.append(dimsize)
        print(" reshaping image to:", new_dims)
        self.reshape(new_dims)
Expand the on-disk image if the intended write will extend beyond the existing dimensions.
46,179
def get_extname(self):
    """Return the extension name, falling back to hduname; may be empty."""
    name = self._info['extname'].strip()
    if not name:
        name = self._info['hduname'].strip()
    return name
Get the name for this extension; it can be an empty string.
46,180
def get_extver(self):
    """Return the extension version, falling back to hduver when it is 0."""
    ver = self._info['extver']
    return ver if ver != 0 else self._info['hduver']
Get the version for this extension .
46,181
def get_exttype(self, num=False):
    """Return the HDU type; numeric with num=True, else its string name."""
    hdutype = self._info['hdutype']
    if num:
        return hdutype
    return _hdu_type_map[hdutype]
Get the extension type
46,182
def verify_checksum(self):
    """Verify the DATASUM/CHECKSUM for this HDU; raise ValueError on failure."""
    result = self._FITS.verify_checksum(self._ext + 1)
    if result['dataok'] != 1:
        raise ValueError("data checksum failed")
    if result['hduok'] != 1:
        raise ValueError("hdu checksum failed")
Verify the checksum in the header for this HDU .
46,183
def write_comment(self, comment):
    """Write a COMMENT card into this HDU's header."""
    self._FITS.write_comment(self._ext + 1, str(comment))
Write a comment into the header
46,184
def write_key(self, name, value, comment=""):
    """Write a single header keyword, dispatching on the python type of *value*.

    None -> undefined key; bool -> logical; strings, floats and ints go
    to their dedicated writers; tuples/lists are joined with commas;
    anything else is stringified with a FITSRuntimeWarning.
    """
    if value is None:
        self._FITS.write_undefined_key(self._ext + 1, str(name), str(comment))
    elif isinstance(value, bool):
        if value:
            v = 1
        else:
            v = 0
        self._FITS.write_logical_key(self._ext + 1, str(name), v, str(comment))
    elif isinstance(value, _stypes):
        self._FITS.write_string_key(self._ext + 1, str(name), str(value), str(comment))
    elif isinstance(value, _ftypes):
        self._FITS.write_double_key(self._ext + 1, str(name), float(value), str(comment))
    elif isinstance(value, _itypes):
        self._FITS.write_long_key(self._ext + 1, str(name), int(value), str(comment))
    elif isinstance(value, (tuple, list)):
        # store sequences as a single comma-separated string
        vl = [str(el) for el in value]
        sval = ','.join(vl)
        self._FITS.write_string_key(self._ext + 1, str(name), sval, str(comment))
    else:
        # fall back to a string, warning the caller
        sval = str(value)
        mess = ("warning, keyword '%s' has non-standard "
                "value type %s, "
                "Converting to string: '%s'")
        warnings.warn(mess % (name, type(value), sval), FITSRuntimeWarning)
        self._FITS.write_string_key(self._ext + 1, str(name), sval, str(comment))
Write the input value to the header
46,185
def write_keys(self, records_in, clean=True):
    """Write all records in *records_in* into this HDU's header.

    Parameters
    ----------
    records_in : FITSHDR or compatible
        Header records to write.
    clean : bool
        When True, strip reserved/structural keywords first.
    """
    if isinstance(records_in, FITSHDR):
        hdr = records_in
    else:
        hdr = FITSHDR(records_in)
    if clean:
        # table HDUs additionally drop BUNIT/BSCALE/BZERO
        is_table = hasattr(self, '_table_type_str')
        hdr.clean(is_table=is_table)
    for r in hdr.records():
        name = r['name'].upper()
        value = r['value']
        if name == 'COMMENT':
            self.write_comment(value)
        elif name == 'HISTORY':
            self.write_history(value)
        elif name == 'CONTINUE':
            self._write_continue(value)
        else:
            comment = r.get('comment', '')
            self.write_key(name, value, comment=comment)
Write the keywords to the header .
46,186
def _update_info ( self ) : try : self . _FITS . movabs_hdu ( self . _ext + 1 ) except IOError : raise RuntimeError ( "no such hdu" ) self . _info = self . _FITS . get_hdu_info ( self . _ext + 1 )
Update metadata for this HDU
46,187
def _get_repr_list(self):
    """Build the repr lines common to all HDU types.

    Returns (text_lines, spacing).
    """
    spacing = '  '
    info = self._info
    text = [
        '',
        "%sfile: %s" % (spacing, self._filename),
        "%sextension: %d" % (spacing, info['hdunum'] - 1),
        "%stype: %s" % (spacing, _hdu_type_map[info['hdutype']]),
    ]
    extname = self.get_extname()
    if extname != "":
        text.append("%sextname: %s" % (spacing, extname))
    extver = self.get_extver()
    if extver != 0:
        text.append("%sextver: %s" % (spacing, extver))
    return text, spacing
Get some representation data common to all HDU types
46,188
def add_record(self, record_in):
    """Add a header record, replacing any existing one with the same name.

    COMMENT/HISTORY/CONTINUE records may legitimately repeat, so those
    always append.  Dicts with name/value are shallow-copied; anything
    else goes through FITSRecord (which strips quotes from strings).
    """
    if (isinstance(record_in, dict) and
            'name' in record_in and 'value' in record_in):
        record = {}
        record.update(record_in)
    else:
        record = FITSRecord(record_in)
    key = record['name'].upper()
    key_exists = key in self._record_map
    if not key_exists or key in ('COMMENT', 'HISTORY', 'CONTINUE'):
        # append a new entry
        self._record_list.append(record)
        index = len(self._record_list) - 1
        self._index_map[key] = index
    else:
        # replace the existing entry in place
        index = self._index_map[key]
        self._record_list[index] = record
    # the map update applies to both branches
    self._record_map[key] = record
Add a new record . Strip quotes from around strings .
46,189
def get_comment(self, item):
    """Return the comment for header entry *item*, or None if it has none.

    Raises KeyError when the entry does not exist.
    """
    key = item.upper()
    try:
        record = self._record_map[key]
    except KeyError:
        raise KeyError("unknown record: %s" % key)
    return record.get('comment', None)
Get the comment for the requested entry
46,190
def delete(self, name):
    """Delete the entry (or each of a list/tuple of entries) if present."""
    if isinstance(name, (list, tuple)):
        for entry in name:
            self.delete(entry)
        return
    if name in self._record_map:
        del self._record_map[name]
        self._record_list = [rec for rec in self._record_list
                             if rec['name'] != name]
Delete the specified entry if it exists .
46,191
def clean ( self , is_table = False ) : rmnames = [ 'SIMPLE' , 'EXTEND' , 'XTENSION' , 'BITPIX' , 'PCOUNT' , 'GCOUNT' , 'THEAP' , 'EXTNAME' , 'BLANK' , 'ZQUANTIZ' , 'ZDITHER0' , 'ZIMAGE' , 'ZCMPTYPE' , 'ZSIMPLE' , 'ZTENSION' , 'ZPCOUNT' , 'ZGCOUNT' , 'ZBITPIX' , 'ZEXTEND' , 'CHECKSUM' , 'DATASUM' ] if is_table : rmnames += [ 'BUNIT' , 'BSCALE' , 'BZERO' , ] self . delete ( rmnames ) r = self . _record_map . get ( 'NAXIS' , None ) if r is not None : naxis = int ( r [ 'value' ] ) self . delete ( 'NAXIS' ) rmnames = [ 'NAXIS%d' % i for i in xrange ( 1 , naxis + 1 ) ] self . delete ( rmnames ) r = self . _record_map . get ( 'ZNAXIS' , None ) self . delete ( 'ZNAXIS' ) if r is not None : znaxis = int ( r [ 'value' ] ) rmnames = [ 'ZNAXIS%d' % i for i in xrange ( 1 , znaxis + 1 ) ] self . delete ( rmnames ) rmnames = [ 'ZTILE%d' % i for i in xrange ( 1 , znaxis + 1 ) ] self . delete ( rmnames ) rmnames = [ 'ZNAME%d' % i for i in xrange ( 1 , znaxis + 1 ) ] self . delete ( rmnames ) rmnames = [ 'ZVAL%d' % i for i in xrange ( 1 , znaxis + 1 ) ] self . delete ( rmnames ) r = self . _record_map . get ( 'TFIELDS' , None ) if r is not None : tfields = int ( r [ 'value' ] ) self . delete ( 'TFIELDS' ) if tfields > 0 : nbase = [ 'TFORM' , 'TTYPE' , 'TDIM' , 'TUNIT' , 'TSCAL' , 'TZERO' , 'TNULL' , 'TDISP' , 'TDMIN' , 'TDMAX' , 'TDESC' , 'TROTA' , 'TRPIX' , 'TRVAL' , 'TDELT' , 'TCUNI' , ] for i in xrange ( 1 , tfields + 1 ) : names = [ '%s%d' % ( n , i ) for n in nbase ] self . delete ( names )
Remove reserved keywords from the header .
46,192
def get(self, item, default_value=None):
    """Return the value for header key *item*, or *default_value* if absent."""
    found, name = self._contains_and_name(item)
    if not found:
        return default_value
    return self._record_map[name]['value']
Get the requested header entry by keyword name
46,193
def next(self):
    """Return the next header keyword name for iteration."""
    if self._current >= len(self._record_list):
        raise StopIteration
    key = self._record_list[self._current]['name']
    self._current += 1
    return key
for iteration over the header entries
46,194
def set_record(self, record, **kw):
    """Validate *record* and populate this record's dict fields.

    Accepts a raw card string, a FITSRecord, or a dict with either
    name/value fields or a card_string field.

    Raises ValueError for anything else.
    """
    if isstring(record):
        # parse a raw 80-character card
        card = FITSCard(record)
        self.update(card)
        self.verify()
    else:
        if isinstance(record, FITSRecord):
            self.update(record)
        elif isinstance(record, dict):
            if 'name' in record and 'value' in record:
                self.update(record)
            elif 'card_string' in record:
                # recurse through the string path
                self.set_record(record['card_string'])
            else:
                raise ValueError('record must have name,value fields '
                                 'or a card_string field')
        else:
            raise ValueError("record must be a string card or "
                             "dictionary or FITSRecord")
check the record is valid and set keys in the dict
46,195
def _check_equals ( self ) : card_string = self [ 'card_string' ] if len ( card_string ) < 9 : self . _has_equals = False elif card_string [ 8 ] == '=' : self . _has_equals = True else : self . _has_equals = False
Check for '=' in position 8 and set the attribute _has_equals.
46,196
def _convert_value ( self , value_orig ) : import ast if value_orig is None : return value_orig if value_orig . startswith ( "'" ) and value_orig . endswith ( "'" ) : value = value_orig [ 1 : - 1 ] else : try : avalue = ast . parse ( value_orig ) . body [ 0 ] . value if isinstance ( avalue , ast . BinOp ) : value = value_orig else : value = ast . literal_eval ( value_orig ) except Exception : value = self . _convert_string ( value_orig ) if isinstance ( value , int ) and '_' in value_orig : value = value_orig return value
Things like 6 and 1.25 are converted with ast.literal_eval.
46,197
def _make_reads_for_assembly(number_of_wanted_reads, total_reads, reads_in1,
                             reads_in2, reads_out1, reads_out2,
                             random_seed=None):
    """Make fastq files that are a random subset of the input read pair.

    Returns the total number of reads written.  If the number of wanted
    reads is >= total_reads, symlinks are made instead of new files.

    Raises Error when a read in reads_in1 has no mate in reads_in2.
    """
    random.seed(random_seed)
    if number_of_wanted_reads < total_reads:
        reads_written = 0
        # each pair is kept with (approximately) this percentage chance
        percent_wanted = 100 * number_of_wanted_reads / total_reads
        file_reader1 = pyfastaq.sequences.file_reader(reads_in1)
        file_reader2 = pyfastaq.sequences.file_reader(reads_in2)
        out1 = pyfastaq.utils.open_file_write(reads_out1)
        out2 = pyfastaq.utils.open_file_write(reads_out2)
        for read1 in file_reader1:
            try:
                read2 = next(file_reader2)
            except StopIteration:
                pyfastaq.utils.close(out1)
                pyfastaq.utils.close(out2)
                raise Error('Error subsetting reads. No mate found for read ' + read1.id)
            if random.randint(0, 100) <= percent_wanted:
                print(read1, file=out1)
                print(read2, file=out2)
                reads_written += 2
        pyfastaq.utils.close(out1)
        pyfastaq.utils.close(out2)
        return reads_written
    else:
        # wanted >= total: just link the inputs
        os.symlink(reads_in1, reads_out1)
        os.symlink(reads_in2, reads_out2)
        return total_reads
Makes fastq files that are random subset of input files . Returns total number of reads in output files . If the number of wanted reads is > = total reads then just makes symlinks instead of making new copies of the input files .
46,198
def load_mutations(gene_coords, mutation_to_drug_json, variants_txt, upstream_before=100):
    '''Load mutations from mykrobe-style files.

    mutation_to_drug_json is a JSON file mapping mutation name
    ("gene_variant") to a list of drugs. variants_txt is a tab-separated
    text file of (gene, variant, DNA-or-protein) lines, as used by
    mykrobe's make_probes. gene_coords is a dict of gene coords, as made
    by the function genbank_to_gene_coords.

    Returns a tuple:
      (list of mutation dicts,
       set of genes whose variants include indels (these are skipped),
       set of genes needing upstream sequence,
       set of genes with at least one non-upstream variant).

    Fixes vs. previous version: the two strand branches computed the
    identical upstream position, so they are collapsed; the bare
    `except:` is narrowed to AttributeError (the only failure mode is
    re.match returning None); the unreachable trailing `else` and the
    redundant `assert gene != 'rrs'` are removed.
    '''
    with open(mutation_to_drug_json) as f:
        drug_data = json.load(f)

    mutations = []
    genes_with_indels = set()
    genes_need_upstream = set()
    genes_non_upstream = set()
    # ref bases, signed position, alt bases (X = any base in mykrobe probes)
    variant_regex = re.compile(r'([ACGT]+)(-?[0-9]+)([ACGTX]+)')

    with open(variants_txt) as f:
        for line in f:
            gene, variant, d_or_p = line.rstrip().split('\t')
            # rrs is an rRNA gene, hence non-coding.
            coding = 0 if gene == 'rrs' else 1
            d = {'gene': gene, 'var': variant, 'coding': coding, 'upstream': False}
            drug_data_key = d['gene'] + '_' + d['var']
            if drug_data_key not in drug_data:
                # Mutation still loaded, just without 'drugs' info.
                print('KEY', drug_data_key, 'NOT FOUND', file=sys.stderr)
            else:
                d['drugs'] = ','.join(sorted(drug_data[drug_data_key]))

            # Only DNA variants in coding genes need coordinate handling;
            # protein variants and rrs DNA variants are used verbatim.
            if d_or_p == 'DNA' and gene != 'rrs':
                re_match = variant_regex.match(d['var'])
                try:
                    ref, pos, alt = re_match.groups()
                except AttributeError:
                    # re.match returned None: the variant did not parse.
                    print('regex error:', d['var'], file=sys.stderr)
                    continue
                pos = int(pos)
                if len(ref) != len(alt):
                    # Indels are not handled; remember the gene and skip.
                    genes_with_indels.add(d['gene'])
                    continue
                elif pos > 0:
                    # In-gene DNA SNPs are skipped entirely.
                    continue
                elif pos == 0:
                    print('Zero coord!', d, file=sys.stderr)
                    continue
                else:
                    # pos < 0: variant lies upstream of the gene start.
                    d['upstream'] = True
                    # Fail loudly (KeyError) on genes missing from gene_coords,
                    # as the previous version did.
                    _ = gene_coords[d['gene']]
                    # Position within the extracted upstream+gene sequence is
                    # strand-independent (both strand branches were identical).
                    variant_pos_in_output_seq = upstream_before + pos + 1
                    assert variant_pos_in_output_seq > 0
                    d['var'] = ref + str(variant_pos_in_output_seq) + alt
                    d['original_mutation'] = variant
                    genes_need_upstream.add(d['gene'])

            mutations.append(d)
            if not d['upstream']:
                genes_non_upstream.add(d['gene'])

    return mutations, genes_with_indels, genes_need_upstream, genes_non_upstream
Load mutations from mykrobe-style files. mutation_to_drug_json is a JSON file mapping mutation -> list of drugs. variants_txt is a text file of variants used by mykrobe's make_probes. gene_coords should be a dict of gene coords made by the function genbank_to_gene_coords.
46,199
def write_prepareref_fasta_file(outfile, gene_coords, genes_need_upstream, genes_non_upstream, upstream_before=100, upstream_after=100):
    '''Writes the fasta file to be used with the -f option of prepareref.

    Sequences are sliced from the bundled M. tuberculosis reference
    (NC_000962.3). Genes on the reverse strand (start > end) are
    reverse-complemented; upstream records get an '_upstream' id suffix.
    '''
    seqs = {}
    pyfastaq.tasks.file_to_dict(os.path.join(data_dir, 'NC_000962.3.fa.gz'), seqs)
    ref_seq = seqs['NC_000962.3']

    with open(outfile, 'w') as f_out:
        # Plain gene sequences.
        for gene in genes_non_upstream:
            start = gene_coords[gene]['start']
            end = gene_coords[gene]['end']
            if start < end:
                record = pyfastaq.sequences.Fasta(gene, ref_seq[start:end + 1])
            else:
                # Reverse strand: slice forward coords, then revcomp.
                record = pyfastaq.sequences.Fasta(gene, ref_seq[end:start + 1])
                record.revcomp()
            print(record, file=f_out)

        # Genes needing upstream context around the gene start.
        for gene in genes_need_upstream:
            start = gene_coords[gene]['start']
            end = gene_coords[gene]['end']
            if start < end:
                record = pyfastaq.sequences.Fasta(gene, ref_seq[start - upstream_before:start + upstream_after])
            else:
                record = pyfastaq.sequences.Fasta(gene, ref_seq[start - upstream_after + 1:start + upstream_before + 1])
                record.revcomp()
            record.id += '_upstream'
            print(record, file=f_out)
Writes the fasta file to be used with the -f option of prepareref.