idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
234,000
def resample_data(self, data, freq, resampler='mean'):
    """Resample a time-indexed dataframe to the given frequency.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe with a DatetimeIndex.
    freq : str
        Pandas offset alias (e.g. 'h', '15T', 'd').
    resampler : str
        Aggregation to apply per bucket: 'mean' or 'max'.

    Returns
    -------
    pd.DataFrame
        The resampled dataframe.

    Raises
    ------
    ValueError
        If resampler is neither 'mean' nor 'max'.
    """
    buckets = data.resample(freq)
    if resampler == 'mean':
        return buckets.mean()
    if resampler == 'max':
        return buckets.max()
    raise ValueError("Resampler can be 'mean' or 'max' only.")
Resample dataframe .
93
5
234,001
def interpolate_data(self, data, limit, method):
    """Interpolate missing values in the dataframe.

    Parameters
    ----------
    data : pd.DataFrame
        Dataframe possibly containing NaNs.
    limit : int
        Maximum number of consecutive NaNs to fill.
    method : str
        Interpolation method accepted by DataFrame.interpolate
        (e.g. 'linear', 'time').

    Returns
    -------
    pd.DataFrame
        Dataframe with gaps filled up to `limit`.

    Fix: the original passed ``how="index"``, which is not a parameter of
    ``DataFrame.interpolate`` (likely confused with ``dropna(how=...)``);
    newer pandas rejects unknown keywords, so it is dropped here.
    """
    return data.interpolate(method=method, limit=limit)
Interpolate dataframe .
38
6
234,002
def remove_na(self, data, remove_na_how):
    """Drop rows with missing values from the dataframe.

    `remove_na_how` follows pandas ``dropna`` semantics: 'any' drops a row
    if any column is NaN, 'all' only when every column is NaN.
    """
    cleaned = data.dropna(how=remove_na_how)
    return cleaned
Remove NAs from dataframe .
33
7
234,003
def remove_outlier(self, data, sd_val):
    """Drop rows containing outliers from the dataframe.

    NaN rows are removed first (z-scores are undefined on NaNs); a row is
    kept only when the absolute z-score of every column is strictly below
    `sd_val` standard deviations.
    """
    complete = data.dropna()
    z = np.abs(stats.zscore(complete))
    within_band = (z < float(sd_val)).all(axis=1)
    return complete[within_band]
Remove outliers from dataframe .
58
7
234,004
def remove_out_of_bounds(self, data, low_bound, high_bound):
    """Drop rows with values outside (low_bound, high_bound).

    NaN rows are removed first; a row survives only when every column lies
    strictly inside the open interval.
    """
    complete = data.dropna()
    above_low = (complete > low_bound).all(axis=1)
    below_high = (complete < high_bound).all(axis=1)
    return complete[above_low & below_high]
Remove out of bound datapoints from dataframe .
67
12
234,005
def _set_TS_index ( self , data ) : # Set index data . index = pd . to_datetime ( data . index , error = "ignore" ) # Format types to numeric for col in data . columns : data [ col ] = pd . to_numeric ( data [ col ] , errors = "coerce" ) return data
Convert index to datetime and all other columns to numeric
78
12
234,006
def _utc_to_local ( self , data , local_zone = "America/Los_Angeles" ) : # Accounts for localtime shift data . index = data . index . tz_localize ( pytz . utc ) . tz_convert ( local_zone ) # Gets rid of extra offset information so can compare with csv data data . index = data . index . tz_localize ( None ) return data
Adjust index of dataframe according to timezone that is requested by user .
96
15
234,007
def _local_to_utc ( self , timestamp , local_zone = "America/Los_Angeles" ) : timestamp_new = pd . to_datetime ( timestamp , infer_datetime_format = True , errors = 'coerce' ) timestamp_new = timestamp_new . tz_localize ( local_zone ) . tz_convert ( pytz . utc ) timestamp_new = timestamp_new . strftime ( '%Y-%m-%d %H:%M:%S' ) return timestamp_new
Convert local timestamp to UTC .
123
7
234,008
def find_uuid(self, obj, column_name):
    """Return the context key (uuid) whose '?point' entry contains column_name.

    Parameters
    ----------
    obj : object
        Object exposing a ``context`` dict mapping keys to dicts with a
        '?point' entry (assumed string/collection supporting ``in`` —
        TODO confirm against callers).
    column_name : str
        Substring/member to look for in each '?point' entry.

    Returns
    -------
    The first matching key, or None when nothing matches.

    Fix: the original assigned the match to an unused local (`uuid = i`)
    and its return was not conditioned on a match, so a key could be
    returned even when no '?point' contained `column_name`.
    """
    for key in obj.context.keys():
        if column_name in obj.context[key]['?point']:
            return key
    return None
Find uuid .
53
4
234,009
def identify_missing(self, df, check_start=True):
    """Build a 0/1 missing-data mask for the first column of *df*.

    Parameters
    ----------
    df : pd.DataFrame
        Single-column (or first-column-of-interest) dataframe.
    check_start : bool
        When True and the stream starts missing, the first flag is reset
        to 0 so a later diff() treats the initial gap as an event boundary
        instead of a pre-existing state.

    Returns
    -------
    (pd.DataFrame, str)
        The 0/1 mask and the name of its first column.

    Fixes: replaced bitwise '&' (precedence trap — 'check_start & x == 1'
    parses as '(check_start & x) == 1') with boolean 'and', and replaced
    chained/positional indexing with .iloc to avoid SettingWithCopy and
    deprecated positional Series[0] access.
    """
    data_missing = df.isnull() * 1
    col_name = str(data_missing.columns[0])
    # When there is no data stream at the beginning, zero the first flag
    if check_start and data_missing[col_name].iloc[0] == 1:
        data_missing.iloc[0, data_missing.columns.get_loc(col_name)] = 0
    return data_missing, col_name
Identify missing data .
128
5
234,010
def diff_boolean(self, df, column_name=None, uuid=None, duration=True, min_event_filter='3 hours'):
    """Turn a 0/1 missing-data mask into a table of missing-data events.

    Parameters
    ----------
    df : pd.DataFrame
        0/1 mask with a DatetimeIndex (as produced by identify_missing).
    column_name : str
        Name of the mask column to keep events for (rows where it is 1).
    uuid : str or None
        Column name used for the event end timestamp; defaults to 'End'.
    duration : bool
        Keep the 'duration' column in the output.
    min_event_filter : str
        Minimum event length (pandas Timedelta string) to report.

    Returns
    -------
    pd.DataFrame
        One row per missing-data event longer than `min_event_filter`,
        indexed by formatted start time, with end time (and optionally
        duration) columns.
    """
    if uuid == None:
        uuid = 'End'
    # Keep only the transition points (0->1 start, 1->0 end of a gap)
    data_gaps = df[(df.diff() == 1) | (df.diff() == -1)].dropna()
    # Time to the next transition = length of the event
    data_gaps["duration"] = abs(data_gaps.index.to_series().diff(periods=-1))
    # End timestamp of each event
    data_gaps[uuid] = data_gaps.index + (data_gaps["duration"])
    # Drop events shorter than the filter
    data_gaps = data_gaps[data_gaps["duration"] > pd.Timedelta(min_event_filter)]
    # Keep only rows where the gap actually started (mask value 1)
    data_gaps = data_gaps[data_gaps[column_name] == 1]
    data_gaps.pop(column_name)
    if not duration:
        data_gaps.pop('duration')
    # Format start/end timestamps as strings for reporting
    data_gaps.index = data_gaps.index.strftime(date_format="%Y-%m-%d %H:%M:%S")
    data_gaps[uuid] = data_gaps[uuid].dt.strftime(date_format="%Y-%m-%d %H:%M:%S")
    return data_gaps
Takes the dataframe of missing values and returns a dataframe that indicates the length of each event where data was continuously missing
314
25
234,011
def analyze_quality_table(self, obj, low_bound=None, high_bound=None):
    """Compute per-column data-quality metrics for an MDAL query result.

    Parameters
    ----------
    obj : object
        Object exposing the queried dataframe as ``obj.df``.
    low_bound, high_bound : numeric or None
        When given, values outside [low_bound, high_bound) are masked to
        NaN before the missing-data analysis.

    Returns
    -------
    pd.DataFrame
        3 x n_columns frame with rows '% Missing', 'AVG Length Missing'
        and 'Std dev. Missing'.
    """
    data = obj.df
    N_rows = 3
    N_cols = data.shape[1]
    # NOTE(review): columns=[data.columns] wraps the Index in a list,
    # which creates a MultiIndex — presumably intended as columns=data.columns;
    # confirm against callers.
    d = pd.DataFrame(np.zeros((N_rows, N_cols)),
                     index=['% Missing', 'AVG Length Missing', 'Std dev. Missing'],
                     columns=[data.columns])
    if low_bound:
        data = data.where(data >= low_bound)
    if high_bound:
        data = data.where(data < high_bound)
    for i in range(N_cols):
        data_per_meter = data.iloc[:, [i]]
        data_missing, meter = self.identify_missing(data_per_meter)
        percentage = data_missing.sum() / (data.shape[0]) * 100
        data_gaps = self.diff_boolean(data_missing, column_name=meter)
        missing_mean = data_gaps.mean()
        std_dev = data_gaps.std()
        d.loc["% Missing", meter] = percentage[meter]
        d.loc["AVG Length Missing", meter] = missing_mean['duration']
        d.loc["Std dev. Missing", meter] = std_dev['duration']
    return d
Takes in the object returned by the MDAL query and analyzes the quality of the data for each column in the df . Returns a df of data quality metrics
310
34
234,012
def analyze_quality_graph(self, obj):
    """Plot data-quality graphs for each column of an MDAL query result.

    For every column of ``obj.df`` this prints the percentage of missing
    data, plots missing data over the time interval, and plots missing
    data frequency per hour of day. Output is via print/matplotlib side
    effects; nothing is returned.
    """
    data = obj.df
    for i in range(data.shape[1]):
        data_per_meter = data.iloc[:, [i]]
        # need to make this work or change the structure
        data_missing, meter = self.identify_missing(data_per_meter)
        percentage = data_missing.sum() / (data.shape[0]) * 100
        print('Percentage Missing of ' + meter + ' data: ' + str(int(percentage)) + '%')
        # Missing-data events over the whole interval
        data_missing.plot(figsize=(18, 5), x_compat=True,
                          title=meter + " Missing Data over the Time interval")
        data_gaps = self.diff_boolean(data_missing, column_name=meter)
        # Aggregate missing flags by hour of day
        data_missing['Hour'] = data_missing.index.hour
        ymax = int(data_missing.groupby('Hour').sum().max() + 10)
        data_missing.groupby('Hour').sum().plot(
            ylim=(0, ymax), figsize=(18, 5),
            title=meter + " Time of Day of Missing Data")
        print(data_gaps)
Takes in the object returned by the MDAL query and analyzes the quality of the data for each column in the df in the form of graphs . The graphs returned show missing data events over time and missing data frequency during each hour of the day
280
52
234,013
def clean_data(self, resample=True, freq='h', resampler='mean',
               interpolate=True, limit=1, method='linear',
               remove_na=True, remove_na_how='any',
               remove_outliers=True, sd_val=3,
               remove_out_of_bounds=True, low_bound=0, high_bound=9998):
    """Run the configured cleaning pipeline over self.original_data.

    Applies, in order and each optionally: resampling, interpolation,
    NaN removal, z-score outlier removal and out-of-bounds removal, then
    stores the result in ``self.cleaned_data``. Parameters are forwarded
    to the corresponding helper methods.

    Fixes: the original called ``self.remove_outliers`` which does not
    exist (the method is ``remove_outlier``), so that step always raised
    AttributeError; the no-op ``except Exception as e: raise e`` wrappers
    are removed — exceptions propagate identically without them.
    """
    data = self.original_data
    if resample:
        data = self.resample_data(data, freq, resampler)
    if interpolate:
        data = self.interpolate_data(data, limit=limit, method=method)
    if remove_na:
        data = self.remove_na(data, remove_na_how)
    if remove_outliers:
        data = self.remove_outlier(data, sd_val)
    if remove_out_of_bounds:
        data = self.remove_out_of_bounds(data, low_bound, high_bound)
    self.cleaned_data = data
Clean dataframe .
285
4
234,014
def write_json(self):
    """Append self.result as JSON to results-<count>.json in the results folder."""
    path = self.results_folder_name + '/results-' + str(self.get_global_count()) + '.json'
    with open(path, 'a') as out_file:
        json.dump(self.result, out_file)
Dump data into json file .
57
7
234,015
def site_analysis(self, folder_name, site_install_mapping, end_date):
    """Summarize per-site savings JSON files into a single dataframe.

    Reads every ``*.json`` result file in `folder_name`, derives absolute,
    percentage and annualized energy/dollar savings plus model metrics,
    and returns one row per site, indexed by site name.

    Parameters
    ----------
    folder_name : str
        Folder containing the result json files (assumed to end with a
        path separator, since filenames are concatenated directly —
        TODO confirm).
    site_install_mapping : dict
        site name -> installation date string 'YYYY-MM-DD'.
    end_date : str
        Analysis end date 'YYYY-MM-DD'.

    Raises
    ------
    TypeError
        If folder_name is missing or not a string.
    """
    def count_number_of_days(site, end_date):
        """ Counts the number of days between two dates.

        Parameters
        ----------
        site    : str
            Key to a dic containing site_name -> pelican installation date.
        end_date    : str
            End date.

        Returns
        -------
        int
            Number of days
        """
        start_date = site_install_mapping[site]
        start_date = start_date.split('-')
        start = date(int(start_date[0]), int(start_date[1]), int(start_date[2]))
        end_date = end_date.split('-')
        end = date(int(end_date[0]), int(end_date[1]), int(end_date[2]))
        delta = end - start
        return delta.days
    if not folder_name or not isinstance(folder_name, str):
        raise TypeError("folder_name should be type string")
    else:
        list_json_files = []
        df = pd.DataFrame()
        temp_df = pd.DataFrame()
        json_files = [f for f in os.listdir(folder_name) if f.endswith('.json')]
        for json_file in json_files:
            with open(folder_name + json_file) as f:
                js = json.load(f)
                num_days = count_number_of_days(js['Site'], end_date)
                e_abs_sav = round(js['Energy Savings (absolute)'] / 1000, 2)  # Energy Absolute Savings
                e_perc_sav = round(js['Energy Savings (%)'], 2)  # Energy Percent Savings
                ann_e_abs_sav = (e_abs_sav / num_days) * 365  # Annualized Energy Absolute Savings
                d_abs_sav = round(js['User Comments']['Dollar Savings (absolute)'], 2)  # Dollar Absolute Savings
                d_perc_sav = round(js['User Comments']['Dollar Savings (%)'], 2)  # Dollar Percent Savings
                ann_d_abs_sav = (d_abs_sav / num_days) * 365  # Annualized Dollar Absolute Savings
                temp_df = pd.DataFrame({
                    'Site': js['Site'],
                    '#Days since Pelican Installation': num_days,
                    'Energy Savings (%)': e_perc_sav,
                    'Energy Savings (kWh)': e_abs_sav,
                    'Annualized Energy Savings (kWh)': ann_e_abs_sav,
                    'Dollar Savings (%)': d_perc_sav,
                    'Dollar Savings ($)': d_abs_sav,
                    'Annualized Dollar Savings ($)': ann_d_abs_sav,
                    'Best Model': js['Model']['Optimal Model\'s Metrics']['name'],
                    'Adj R2': round(js['Model']['Optimal Model\'s Metrics']['adj_cross_val_score'], 2),
                    'RMSE': round(js['Model']['Optimal Model\'s Metrics']['rmse'], 2),
                    'MAPE': js['Model']['Optimal Model\'s Metrics']['mape'],
                    'Uncertainity': js['Uncertainity'],
                }, index=[0])
                # NOTE(review): DataFrame.append was removed in pandas 2.0;
                # pd.concat([df, temp_df]) is the modern equivalent.
                df = df.append(temp_df)
        df.set_index('Site', inplace=True)
        return df
Summarize site data into a single table .
820
10
234,016
def search(self, file_name, imported_data=None):
    """Grid-search model configurations over resample frequency and time features.

    For each resampling frequency and each single time feature (year,
    month, week, time-of-day, day-of-week) this rewrites the input json
    config, re-runs the pipeline via read_json, and tracks the run with
    the highest adjusted R2, printing the winning result file at the end.

    Parameters
    ----------
    file_name : str
        Path to the json configuration file.
    imported_data : pd.DataFrame or None
        Pre-imported data; when None, data is imported per the json's
        'Import' section.
    """
    resample_freq = ['15T', 'h', 'd']
    # One-hot rows: exactly one time feature enabled per iteration i
    time_freq = {
        'year': [True, False, False, False, False],
        'month': [False, True, False, False, False],
        'week': [False, False, True, False, False],
        'tod': [False, False, False, True, False],
        'dow': [False, False, False, False, True],
    }
    optimal_score = float('-inf')
    optimal_model = None
    # CSV Files
    if not imported_data:
        with open(file_name) as f:
            input_json = json.load(f)
            import_json = input_json['Import']
            imported_data = self.import_data(file_name=import_json['File Name'],
                                             folder_name=import_json['Folder Name'],
                                             head_row=import_json['Head Row'],
                                             index_col=import_json['Index Col'],
                                             convert_col=import_json['Convert Col'],
                                             concat_files=import_json['Concat Files'],
                                             save_file=import_json['Save File'])
    with open(file_name) as f:
        input_json = json.load(f)
        for x in resample_freq:
            # Resample data interval
            input_json['Clean']['Frequency'] = x
            for i in range(len(time_freq.items())):
                # Add time features
                input_json['Preprocess']['Year'] = time_freq['year'][i]
                input_json['Preprocess']['Month'] = time_freq['month'][i]
                input_json['Preprocess']['Week'] = time_freq['week'][i]
                input_json['Preprocess']['Time of Day'] = time_freq['tod'][i]
                input_json['Preprocess']['Day of Week'] = time_freq['dow'][i]
                # Putting comment in json file to indicate which parameters have been changed
                time_feature = None
                for key in time_freq:
                    if time_freq[key][i]:
                        time_feature = key
                self.result['Comment'] = 'Freq: ' + x + ', ' + 'Time Feature: ' + time_feature
                # Read parameters in input_json
                self.read_json(file_name=None, input_json=input_json,
                               imported_data=imported_data)
                # Keep track of highest adj_r2 score
                if self.result['Model']['Optimal Model\'s Metrics']['adj_r2'] > optimal_score:
                    optimal_score = self.result['Model']['Optimal Model\'s Metrics']['adj_r2']
                    optimal_model_file_name = self.results_folder_name + '/results-' + str(self.get_global_count()) + '.json'
                # Wrapper.global_count += 1
    print('Most optimal model: ', optimal_model_file_name)
    freq = self.result['Comment'].split(' ')[1][:-1]
    time_feat = self.result['Comment'].split(' ')[-1]
    print('Freq: ', freq, 'Time Feature: ', time_feat)
Run models on different data configurations .
817
7
234,017
def clean_data(self, data, rename_col=None, drop_col=None, resample=True, freq='h',
               resampler='mean', interpolate=True, limit=1, method='linear',
               remove_na=True, remove_na_how='any', remove_outliers=True, sd_val=3,
               remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'),
               save_file=True):
    """Clean a dataframe per user options and store it in self.cleaned_data.

    Delegates the actual cleaning to a Clean_Data instance, optionally
    renames/drops columns afterwards, logs the configuration in
    ``self.result['Clean']``, optionally saves a csv, and returns the
    cleaned dataframe.

    Raises
    ------
    TypeError
        If `data` is not a pandas DataFrame.
    """
    # Check to ensure data is a pandas dataframe
    if not isinstance(data, pd.DataFrame):
        raise TypeError('data has to be a pandas dataframe.')
    # Create instance and clean the data
    clean_data_obj = Clean_Data(data)
    clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler,
                              interpolate=interpolate, limit=limit, method=method,
                              remove_na=remove_na, remove_na_how=remove_na_how,
                              remove_outliers=remove_outliers, sd_val=sd_val,
                              remove_out_of_bounds=remove_out_of_bounds,
                              low_bound=low_bound, high_bound=high_bound)
    # Correlation plot
    # fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)
    # fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')
    if rename_col:
        # Rename columns of dataframe
        clean_data_obj.rename_columns(rename_col)
    if drop_col:
        # Drop columns of dataframe
        clean_data_obj.drop_columns(drop_col)
    # Store cleaned data in wrapper class
    self.cleaned_data = clean_data_obj.cleaned_data
    # Logging
    self.result['Clean'] = {
        'Rename Col': rename_col,
        'Drop Col': drop_col,
        'Resample': resample,
        'Frequency': freq,
        'Resampler': resampler,
        'Interpolate': interpolate,
        'Limit': limit,
        'Method': method,
        'Remove NA': remove_na,
        'Remove NA How': remove_na_how,
        'Remove Outliers': remove_outliers,
        'SD Val': sd_val,
        'Remove Out of Bounds': remove_out_of_bounds,
        'Low Bound': low_bound,
        # inf is not json-serializable, so stringify it
        'High Bound': str(high_bound) if high_bound == float('inf') else high_bound,
        'Save File': save_file
    }
    if save_file:
        f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv'
        self.cleaned_data.to_csv(f)
        self.result['Clean']['Saved File'] = f
    else:
        self.result['Clean']['Saved File'] = ''
    return self.cleaned_data
Cleans dataframe according to user specifications and stores result in self . cleaned_data .
715
18
234,018
def prevmonday(num):
    """Return the Monday `num` weeks before the current week's Monday.

    Relies on ``get_today()`` for the current date; subtracting
    ``weekday()`` days lands on this week's Monday, then `num` whole
    weeks are subtracted.
    """
    today = get_today()
    back_to_monday = timedelta(days=today.weekday(), weeks=num)
    return today - back_to_monday
Return unix SECOND timestamp of num mondays ago
40
12
234,019
def med_filt(x, k=201):
    """Apply a length-k median filter to a 1D array x.

    Builds an (len(x), k) matrix whose columns are shifted copies of x and
    takes the row-wise median. The left boundary is padded by repeating
    the first element; the trailing boundary is padded with the global
    median of x (NOTE(review): the original docstring claims endpoint
    repetition on both sides — the code uses the median on the right;
    confirm which is intended).

    Parameters
    ----------
    x : np.ndarray
        1D input (higher-rank input is squeezed; must reduce to 1D).
    k : int
        Odd filter window length.
    """
    if x.ndim > 1:
        x = np.squeeze(x)
    med = np.median(x)
    assert k % 2 == 1, "Median filter length must be odd."
    assert x.ndim == 1, "Input must be one-dimensional."
    k2 = (k - 1) // 2
    y = np.zeros((len(x), k), dtype=x.dtype)
    # center column is the signal itself
    y[:, k2] = x
    for i in range(k2):
        j = k2 - i
        # columns left of center: signal shifted right, front-padded with x[0]
        y[j:, i] = x[:-j]
        y[:j, i] = x[0]
        # columns right of center: signal shifted left, back-padded with the median
        y[:-j, -(i + 1)] = x[j:]
        y[-j:, -(i + 1)] = med
    return np.median(y, axis=1)
Apply a length - k median filter to a 1D array x . Boundaries are extended by repeating endpoints .
198
23
234,020
def preprocess_data(self, data, hdh_cpoint=65, cdh_cpoint=65, col_hdh_cdh=None,
                    col_degree=None, degree=None, standardize=False, normalize=False,
                    year=False, month=False, week=False, tod=False, dow=False,
                    save_file=True):
    """Preprocess a dataframe per user options and store it in self.preprocessed_data.

    Optionally adds heating/cooling degree-hour features, polynomial
    column features, standardization/normalization and time features,
    logs the configuration in ``self.result['Preprocess']``, optionally
    saves a csv, and returns the preprocessed dataframe.

    Raises
    ------
    SystemError
        If `data` is not a pandas DataFrame (NOTE(review): TypeError
        would be conventional; kept as-is for caller compatibility).
    """
    # Check to ensure data is a pandas dataframe
    if not isinstance(data, pd.DataFrame):
        raise SystemError('data has to be a pandas dataframe.')
    # Create instance
    preprocess_data_obj = Preprocess_Data(data)
    if col_hdh_cdh:
        preprocess_data_obj.add_degree_days(col=col_hdh_cdh,
                                            hdh_cpoint=hdh_cpoint,
                                            cdh_cpoint=cdh_cpoint)
    preprocess_data_obj.add_col_features(col=col_degree, degree=degree)
    if standardize:
        preprocess_data_obj.standardize()
    if normalize:
        preprocess_data_obj.normalize()
    preprocess_data_obj.add_time_features(year=year, month=month, week=week,
                                          tod=tod, dow=dow)
    # Store preprocessed data in wrapper class
    self.preprocessed_data = preprocess_data_obj.preprocessed_data
    # Logging
    self.result['Preprocess'] = {
        'HDH CPoint': hdh_cpoint,
        'CDH CPoint': cdh_cpoint,
        'HDH CDH Calc Col': col_hdh_cdh,
        'Col Degree': col_degree,
        'Degree': degree,
        'Standardize': standardize,
        'Normalize': normalize,
        'Year': year,
        'Month': month,
        'Week': week,
        'Time of Day': tod,
        'Day of Week': dow,
        'Save File': save_file
    }
    if save_file:
        f = self.results_folder_name + '/preprocessed_data-' + str(self.get_global_count()) + '.csv'
        self.preprocessed_data.to_csv(f)
        self.result['Preprocess']['Saved File'] = f
    else:
        self.result['Preprocess']['Saved File'] = ''
    return self.preprocessed_data
Preprocesses dataframe according to user specifications and stores result in self . preprocessed_data .
549
21
234,021
def make_dataframe(result):
    """Convert a data-API result into a dict of uuid -> pandas DataFrame.

    Accepts either the full response dict (with a 'timeseries' key) or the
    timeseries mapping directly. Five-column rows are treated as
    statistical data; otherwise rows are (time, value) pairs. Times are
    nanosecond epochs and become the frame's index.
    """
    import pandas as pd
    frames = {}
    if isinstance(result, dict) and 'timeseries' in result:
        result = result['timeseries']
    for uuid, rows in result.items():
        frame = pd.DataFrame(rows)
        if len(frame.columns) == 5:
            # statistical data
            frame.columns = ['time', 'min', 'mean', 'max', 'count']
        else:
            frame.columns = ['time', 'value']
        frame['time'] = pd.to_datetime(frame['time'], unit='ns')
        frame = frame.set_index(frame.pop('time'))
        frames[uuid] = frame
    return frames
Turns the results of one of the data API calls into a pandas dataframe
174
17
234,022
def query(self, query, archiver="", timeout=DEFAULT_TIMEOUT):
    """Run the given pundat query and return the decoded response dict.

    Publishes the msgpack-encoded query to the archiver's query slot,
    subscribes for the matching signal, and blocks (up to `timeout`
    seconds) until a response with this call's nonce arrives.

    Parameters
    ----------
    query : str
        Pundat query string.
    archiver : str
        Archiver URI; defaults to the first configured archiver.
    timeout : numeric
        Seconds to wait for a response.

    Returns
    -------
    dict
        May contain 'error', 'metadata' and/or 'timeseries' keys.

    Raises
    ------
    TimeoutException
        If no response arrives within `timeout`.
    """
    if archiver == "":
        archiver = self.archivers[0]
    # nonce ties the async response back to this particular query
    nonce = random.randint(0, 2 ** 32)
    ev = threading.Event()
    response = {}

    def _handleresult(msg):
        # decode, throw away if not correct nonce
        got_response = False
        error = getError(nonce, msg)
        if error is not None:
            got_response = True
            response["error"] = error
        metadata = getMetadata(nonce, msg)
        if metadata is not None:
            got_response = True
            response["metadata"] = metadata
        timeseries = getTimeseries(nonce, msg)
        if timeseries is not None:
            got_response = True
            response["timeseries"] = timeseries
        if got_response:
            ev.set()
    vk = self.vk[:-1]  # remove last part of VK because archiver doesn't expect it
    # set up receiving
    self.c.subscribe("{0}/s.giles/_/i.archiver/signal/{1},queries".format(archiver, vk), _handleresult)
    # execute query
    q_struct = msgpack.packb({"Query": query, "Nonce": nonce})
    po = PayloadObject((2, 0, 8, 1), None, q_struct)
    self.c.publish("{0}/s.giles/_/i.archiver/slot/query".format(archiver), payload_objects=(po,))
    ev.wait(timeout)
    if len(response) == 0:
        # no results
        raise TimeoutException("Query of {0} timed out".format(query))
    return response
Runs the given pundat query and returns the results as a Python object .
393
16
234,023
def uuids(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
    """Return the list of UUIDs whose streams match the given where-clause."""
    resp = self.query("select uuid where {0}".format(where), archiver, timeout)
    return [record["uuid"] for record in resp["metadata"]]
Using the given where - clause finds all UUIDs that match
82
13
234,024
def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
    """Retrieve tags (metadata) for all streams matching the WHERE clause."""
    result = self.query("select * where {0}".format(where), archiver, timeout)
    return result.get('metadata', {})
Retrieves tags for all streams matching the given WHERE clause
54
12
234,025
def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
    """Retrieve tags for all streams with the provided UUID(s).

    A single uuid may be passed; it is wrapped in a list.
    """
    if not isinstance(uuids, list):
        uuids = [uuids]
    clauses = ['uuid = "{0}"'.format(uuid) for uuid in uuids]
    where = " or ".join(clauses)
    result = self.query("select * where {0}".format(where), archiver, timeout)
    return result.get('metadata', {})
Retrieves tags for all streams with the provided UUIDs
112
13
234,026
def data(self, where, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
    """Retrieve all raw data between the two timestamps for streams matching WHERE."""
    q = "select data in ({0}, {1}) where {2}".format(start, end, where)
    return self.query(q, archiver, timeout).get('timeseries', {})
With the given WHERE clause retrieves all RAW data between the 2 given timestamps
70
17
234,027
def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
    """Retrieve all raw data between the two timestamps for the given UUID(s)."""
    if not isinstance(uuids, list):
        uuids = [uuids]
    clauses = ['uuid = "{0}"'.format(uuid) for uuid in uuids]
    where = " or ".join(clauses)
    q = "select data in ({0}, {1}) where {2}".format(start, end, where)
    return self.query(q, archiver, timeout).get('timeseries', {})
With the given list of UUIDs retrieves all RAW data between the 2 given timestamps
128
20
234,028
def stats(self, where, start, end, pw, archiver="", timeout=DEFAULT_TIMEOUT):
    """Retrieve statistical data between the timestamps at pointwidth `pw` for streams matching WHERE."""
    q = "select statistical({3}) data in ({0}, {1}) where {2}".format(start, end, where, pw)
    return self.query(q, archiver, timeout).get('timeseries', {})
With the given WHERE clause retrieves all statistical data between the 2 given timestamps using the given pointwidth
80
22
234,029
def window(self, where, start, end, width, archiver="", timeout=DEFAULT_TIMEOUT):
    """Retrieve windowed statistical data between the timestamps using window size `width` for streams matching WHERE."""
    q = "select window({3}) data in ({0}, {1}) where {2}".format(start, end, where, width)
    return self.query(q, archiver, timeout).get('timeseries', {})
With the given WHERE clause retrieves all statistical data between the 2 given timestamps using the given window size
78
22
234,030
def brightness(self):
    """Return current brightness 0-255.

    In warm-white mode the value is read directly from raw_state[9];
    otherwise it is the V component of the RGB color converted to HSV.
    """
    if self.mode == "ww":
        return int(self.raw_state[9])
    _, _, value = colorsys.rgb_to_hsv(*self.getRgb())
    return value
Return current brightness 0 - 255 .
56
7
234,031
def decode(var, encoding):
    """Return var as unicode text, decoding it first if necessary.

    Under python 3 this is just str(var). Under python 2, unicode input
    is returned as-is; byte strings are decoded with `encoding` when one
    is given, otherwise coerced via unicode().
    """
    if not PY2:
        return str(var)
    if isinstance(var, unicode):
        return var
    if isinstance(var, str) and encoding:
        return var.decode(encoding)
    return unicode(var)
If not already unicode decode it .
75
8
234,032
def cfitsio_version(asfloat=False):
    """Return the cfitsio version as a '%0.3f' string, or as a float if requested."""
    # use string version to avoid roundoffs
    version_string = '%0.3f' % _fitsio_wrap.cfitsio_version()
    return float(version_string) if asfloat else version_string
Return the cfitsio version as a string .
57
10
234,033
def is_little_endian(array):
    """Return True if `array` holds little-endian data, False otherwise.

    '<' is always little endian; '=' (native) counts as little endian only
    on a little-endian machine.
    """
    byteorder = array.dtype.base.byteorder
    if byteorder == '<':
        return True
    return bool(numpy.little_endian) and byteorder == '='
Return True if array is little endian False otherwise .
65
11
234,034
def array_to_native(array, inplace=False):
    """Return `array` converted to native byte order, byteswapping only when needed.

    Byte-order-free dtypes (strings, 1-byte ints) are returned untouched.
    For structured arrays, all fields are assumed to share one byte order
    and the first little-endian field found decides.
    """
    machine_little = bool(numpy.little_endian)
    data_little = False
    if array.dtype.names is None:
        if array.dtype.base.byteorder == '|':
            # strings and 1 byte integers have no byte order
            return array
        data_little = is_little_endian(array)
    else:
        # assume all fields share a byte order: find one little-endian field
        for fname in array.dtype.names:
            if is_little_endian(array[fname]):
                data_little = True
                break
    # swap only when machine and data endianness disagree (XOR)
    if machine_little != data_little:
        return array.byteswap(inplace)
    return array
Convert an array to the native byte order .
183
10
234,035
def mks(val):
    """Make sure the value is a str, decoding bytes as utf-8 under python 3."""
    if sys.version_info > (3, 0, 0) and isinstance(val, bytes):
        return str(val, 'utf-8')
    return str(val)
make sure the value is a string paying mind to python3 vs 2
65
14
234,036
def _get_col_dimstr ( tdim , is_string = False ) : dimstr = '' if tdim is None : dimstr = 'array[bad TDIM]' else : if is_string : if len ( tdim ) > 1 : dimstr = [ str ( d ) for d in tdim [ 1 : ] ] else : if len ( tdim ) > 1 or tdim [ 0 ] > 1 : dimstr = [ str ( d ) for d in tdim ] if dimstr != '' : dimstr = ',' . join ( dimstr ) dimstr = 'array[%s]' % dimstr return dimstr
not for variable length
138
4
234,037
def get_colname(self, colnum):
    """Return the name associated with the given column number.

    Raises ValueError when colnum is outside [0, ncols-1].

    Fix: the original formatted the error as
    ``"...[0,%s-1]" % (0, len(self._colnames))`` — one '%s' with a
    two-tuple — so any out-of-range access raised TypeError instead of
    the intended ValueError.
    """
    ncols = len(self._colnames)
    if colnum < 0 or colnum > ncols - 1:
        raise ValueError("colnum out of range [0,%s-1]" % ncols)
    return self._colnames[colnum]
Get the name associated with the given column number
76
9
234,038
def write_column(self, column, data, **keys):
    """Write data to a column in this HDU.

    Parameters
    ----------
    column : str or int
        Column name or number.
    data : np.ndarray
        Data to write; made contiguous/native-byte-order as needed, and
        unicode is downcast to ascii bytes on python 3.
    keys : dict
        'firstrow' (0-offset) selects where writing starts; default 0.
    """
    firstrow = keys.get('firstrow', 0)
    colnum = self._extract_colnum(column)
    # need it to be contiguous and native byte order. For now, make a
    # copy. but we may be able to avoid this with some care.
    if not data.flags['C_CONTIGUOUS']:
        # this always makes a copy
        data_send = numpy.ascontiguousarray(data)
        # this is a copy, we can make sure it is native
        # and modify in place if needed
        array_to_native(data_send, inplace=True)
    else:
        # we can avoid the copy with a try-finally block and
        # some logic
        data_send = array_to_native(data, inplace=False)
    if IS_PY3 and data_send.dtype.char == 'U':
        # for python3, we convert unicode to ascii
        # this will error if the character is not in ascii
        data_send = data_send.astype('S', copy=False)
    self._verify_column_data(colnum, data_send)
    # C layer is 1-offset for extension, column and row numbers
    self._FITS.write_column(self._ext + 1, colnum + 1, data_send,
                            firstrow=firstrow + 1,
                            write_bitcols=self.write_bitcols)
    del data_send
    self._update_info()
Write data to a column in this HDU
324
9
234,039
def _verify_column_data(self, colnum, data):
    """Verify the input data matches the column's declared type and shape.

    Compares the numpy dtype/shape of `data` against the table column's
    TDIM-derived shape and type, allowing a few documented equivalences
    (b1 vs i1 everywhere; widening int/float for ascii tables).

    Raises
    ------
    ValueError
        On a type or shape mismatch.
    """
    this_dt = data.dtype.descr[0]
    # per-row shape of the input: trailing dims, with (n, 1) treated as scalar rows
    if len(data.shape) > 2:
        this_shape = data.shape[1:]
    elif len(data.shape) == 2 and data.shape[1] > 1:
        this_shape = data.shape[1:]
    else:
        this_shape = ()
    # strip the byteorder char from the dtype string, e.g. '<i4' -> 'i4'
    this_npy_type = this_dt[1][1:]
    npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
    info = self._info['colinfo'][colnum]
    if npy_type[0] in ['>', '<', '|']:
        npy_type = npy_type[1:]
    col_name = info['name']
    col_tdim = info['tdim']
    col_shape = _tdim2shape(col_tdim, col_name, is_string=(npy_type[0] == 'S'))
    # normalize scalar columns so () input compares equal to None
    if col_shape is None:
        if this_shape == ():
            this_shape = None
    if col_shape is not None and not isinstance(col_shape, tuple):
        col_shape = (col_shape,)
    """
    print('column name:',col_name)
    print(data.shape)
    print('col tdim', info['tdim'])
    print('column dtype:',npy_type)
    print('input dtype:',this_npy_type)
    print('column shape:',col_shape)
    print('input shape:',this_shape)
    print()
    """
    # this mismatch is OK
    if npy_type == 'i1' and this_npy_type == 'b1':
        this_npy_type = 'i1'
    if isinstance(self, AsciiTableHDU):
        # we don't enforce types exact for ascii
        if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
            this_npy_type = 'i8'
        elif npy_type == 'f8' and this_npy_type == 'f4':
            this_npy_type = 'f8'
    if this_npy_type != npy_type:
        raise ValueError(
            "bad input data for column '%s': "
            "expected '%s', got '%s'" % (col_name, npy_type, this_npy_type))
    if this_shape != col_shape:
        raise ValueError(
            "bad input shape for column '%s': "
            "expected '%s', got '%s'" % (col_name, col_shape, this_shape))
verify the input data is of the correct type and shape
623
12
234,040
def write_var_column(self, column, data, firstrow=0, **keys):
    """Write data to a variable-length column in this HDU.

    `data` must be an object-dtype array (one variable-length entry per
    row). `firstrow` is 0-offset; the C layer is 1-offset.
    """
    if not is_object(data):
        raise ValueError("Only object fields can be written to "
                         "variable-length arrays")
    col_index = self._extract_colnum(column)
    self._FITS.write_var_column(self._ext + 1, col_index + 1, data,
                                firstrow=firstrow + 1)
    self._update_info()
Write data to a variable - length column in this HDU
108
12
234,041
def insert_column(self, name, data, colnum=None):
    """Insert a new column into the table and write `data` to it.

    Parameters
    ----------
    name : str
        New column name; must not already exist.
    data : np.ndarray
        Single-field array providing the dtype, per-row shape and values.
    colnum : int or None
        0-offset position to insert at; appended at the end when None.
    """
    if name in self._colnames:
        raise ValueError("column '%s' already exists" % name)
    if IS_PY3 and data.dtype.char == 'U':
        # fast dtype conversion using an empty array
        # we could hack at the actual text description, but using
        # the numpy API is probably safer
        # this also avoids doing a dtype conversion on every array
        # element which could b expensive
        descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
    else:
        descr = data.dtype.descr
    if len(descr) > 1:
        raise ValueError("you can only insert a single column, "
                         "requested: %s" % descr)
    # rebuild the descr entry under the new column name, adding the
    # per-row shape when the input has trailing dimensions
    this_descr = descr[0]
    this_descr = [name, this_descr[1]]
    if len(data.shape) > 1:
        this_descr += [data.shape[1:]]
    this_descr = tuple(this_descr)
    name, fmt, dims = _npy2fits(this_descr, table_type=self._table_type_str)
    if dims is not None:
        dims = [dims]
    # C layer expects 1-offset column numbers
    if colnum is None:
        new_colnum = len(self._info['colinfo']) + 1
    else:
        new_colnum = colnum + 1
    self._FITS.insert_col(self._ext + 1, new_colnum, name, fmt, tdim=dims)
    self._update_info()
    self.write_column(name, data)
Insert a new column .
380
5
234,042
def append(self, data, **keys):
    """Append new rows to this table HDU by writing starting at the current row count."""
    keys['firstrow'] = self._info['nrows']
    self.write(data, **keys)
Append new rows to a table HDU
45
9
234,043
def delete_rows(self, rows):
    """Delete rows from the table.

    Parameters
    ----------
    rows : slice, sequence or None
        0-offset rows to delete. None is a no-op. A unit-step slice maps
        to a fast range delete; a stepped slice or explicit row list is
        deleted row-by-row.
    """
    if rows is None:
        return
    # extract and convert to 1-offset for C routine
    if isinstance(rows, slice):
        rows = self._process_slice(rows)
        if rows.step is not None and rows.step != 1:
            # stepped slice: expand to explicit 1-offset row numbers
            rows = numpy.arange(rows.start + 1, rows.stop + 1, rows.step,)
        else:
            # rows must be 1-offset
            rows = slice(rows.start + 1, rows.stop + 1)
    else:
        rows = self._extract_rows(rows)
        # rows must be 1-offset
        rows += 1
    if isinstance(rows, slice):
        self._FITS.delete_row_range(self._ext + 1, rows.start, rows.stop)
    else:
        if rows.size == 0:
            return
        self._FITS.delete_rows(self._ext + 1, rows)
    self._update_info()
Delete rows from the table
208
5
234,044
def resize(self, nrows, front=False):
    """Resize the table to `nrows` rows, deleting or inserting as needed.

    When shrinking, rows are removed from the front or the back according
    to `front`; when growing, blank rows are inserted at the front or
    appended at the end. Note: to expand at the end it is more efficient
    to use append() than to resize and then write.
    """
    current = self.get_nrows()
    if nrows == current:
        return
    if nrows < current:
        shrink_by = current - nrows
        if front:
            # delete from the front
            sel = slice(0, shrink_by)
        else:
            # delete from the back
            sel = slice(nrows, current)
        self.delete_rows(sel)
    else:
        grow_by = nrows - current
        # zero inserts at the front; otherwise append after the last row
        firstrow = 0 if front else current
        self._FITS.insert_rows(self._ext + 1, firstrow, grow_by)
    self._update_info()
Resize the table to the given size removing or adding rows as necessary . Note if expanding the table at the end it is more efficient to use the append function than resizing and then writing .
173
39
234,045
def read(self, **keys):
    """Read data from this HDU, dispatching on 'columns'/'rows' keywords.

    'columns' takes precedence over 'rows'; note that on the columns path
    the 'rows' keyword (if any) stays in keys and is forwarded to
    read_columns. With neither keyword, everything is read.
    """
    columns = keys.get('columns', None)
    rows = keys.get('rows', None)
    if columns is not None:
        keys.pop('columns', None)
        return self.read_columns(columns, **keys)
    if rows is not None:
        keys.pop('rows', None)
        return self.read_rows(rows, **keys)
    return self._read_all(**keys)
read data from this HDU
124
6
234,046
def _read_all ( self , * * keys ) : dtype , offsets , isvar = self . get_rec_dtype ( * * keys ) w , = numpy . where ( isvar == True ) # noqa has_tbit = self . _check_tbit ( ) if w . size > 0 : vstorage = keys . get ( 'vstorage' , self . _vstorage ) colnums = self . _extract_colnums ( ) rows = None array = self . _read_rec_with_var ( colnums , rows , dtype , offsets , isvar , vstorage ) elif has_tbit : # drop down to read_columns since we can't stuff into a # contiguous array colnums = self . _extract_colnums ( ) array = self . read_columns ( colnums , * * keys ) else : firstrow = 1 # noqa - not used? nrows = self . _info [ 'nrows' ] array = numpy . zeros ( nrows , dtype = dtype ) self . _FITS . read_as_rec ( self . _ext + 1 , 1 , nrows , array ) array = self . _maybe_decode_fits_ascii_strings_to_unicode_py3 ( array ) for colnum , name in enumerate ( array . dtype . names ) : self . _rescale_and_convert_field_inplace ( array , name , self . _info [ 'colinfo' ] [ colnum ] [ 'tscale' ] , self . _info [ 'colinfo' ] [ colnum ] [ 'tzero' ] ) lower = keys . get ( 'lower' , False ) upper = keys . get ( 'upper' , False ) if self . lower or lower : _names_to_lower_if_recarray ( array ) elif self . upper or upper : _names_to_upper_if_recarray ( array ) self . _maybe_trim_strings ( array , * * keys ) return array
Read all data in the HDU .
457
8
234,047
def read_column ( self , col , * * keys ) : res = self . read_columns ( [ col ] , * * keys ) colname = res . dtype . names [ 0 ] data = res [ colname ] self . _maybe_trim_strings ( data , * * keys ) return data
Read the specified column
68
4
234,048
def read_rows ( self , rows , * * keys ) : if rows is None : # we actually want all rows! return self . _read_all ( ) if self . _info [ 'hdutype' ] == ASCII_TBL : keys [ 'rows' ] = rows return self . read ( * * keys ) rows = self . _extract_rows ( rows ) dtype , offsets , isvar = self . get_rec_dtype ( * * keys ) w , = numpy . where ( isvar == True ) # noqa if w . size > 0 : vstorage = keys . get ( 'vstorage' , self . _vstorage ) colnums = self . _extract_colnums ( ) return self . _read_rec_with_var ( colnums , rows , dtype , offsets , isvar , vstorage ) else : array = numpy . zeros ( rows . size , dtype = dtype ) self . _FITS . read_rows_as_rec ( self . _ext + 1 , array , rows ) array = self . _maybe_decode_fits_ascii_strings_to_unicode_py3 ( array ) for colnum , name in enumerate ( array . dtype . names ) : self . _rescale_and_convert_field_inplace ( array , name , self . _info [ 'colinfo' ] [ colnum ] [ 'tscale' ] , self . _info [ 'colinfo' ] [ colnum ] [ 'tzero' ] ) lower = keys . get ( 'lower' , False ) upper = keys . get ( 'upper' , False ) if self . lower or lower : _names_to_lower_if_recarray ( array ) elif self . upper or upper : _names_to_upper_if_recarray ( array ) self . _maybe_trim_strings ( array , * * keys ) return array
Read the specified rows .
427
5
234,049
def read_columns ( self , columns , * * keys ) : if self . _info [ 'hdutype' ] == ASCII_TBL : keys [ 'columns' ] = columns return self . read ( * * keys ) rows = keys . get ( 'rows' , None ) # if columns is None, returns all. Guaranteed to be unique and sorted colnums = self . _extract_colnums ( columns ) if isinstance ( colnums , int ) : # scalar sent, don't read as a recarray return self . read_column ( columns , * * keys ) # if rows is None still returns None, and is correctly interpreted # by the reader to mean all rows = self . _extract_rows ( rows ) # this is the full dtype for all columns dtype , offsets , isvar = self . get_rec_dtype ( colnums = colnums , * * keys ) w , = numpy . where ( isvar == True ) # noqa if w . size > 0 : vstorage = keys . get ( 'vstorage' , self . _vstorage ) array = self . _read_rec_with_var ( colnums , rows , dtype , offsets , isvar , vstorage ) else : if rows is None : nrows = self . _info [ 'nrows' ] else : nrows = rows . size array = numpy . zeros ( nrows , dtype = dtype ) colnumsp = colnums [ : ] . copy ( ) colnumsp [ : ] += 1 self . _FITS . read_columns_as_rec ( self . _ext + 1 , colnumsp , array , rows ) array = self . _maybe_decode_fits_ascii_strings_to_unicode_py3 ( array ) for i in xrange ( colnums . size ) : colnum = int ( colnums [ i ] ) name = array . dtype . names [ i ] self . _rescale_and_convert_field_inplace ( array , name , self . _info [ 'colinfo' ] [ colnum ] [ 'tscale' ] , self . _info [ 'colinfo' ] [ colnum ] [ 'tzero' ] ) if ( self . _check_tbit ( colnums = colnums ) ) : array = self . _fix_tbit_dtype ( array , colnums ) lower = keys . get ( 'lower' , False ) upper = keys . get ( 'upper' , False ) if self . lower or lower : _names_to_lower_if_recarray ( array ) elif self . upper or upper : _names_to_upper_if_recarray ( array ) self . _maybe_trim_strings ( array , * * keys ) return array
read a subset of columns from this binary table HDU
622
11
234,050
def read_slice ( self , firstrow , lastrow , step = 1 , * * keys ) : if self . _info [ 'hdutype' ] == ASCII_TBL : rows = numpy . arange ( firstrow , lastrow , step , dtype = 'i8' ) keys [ 'rows' ] = rows return self . read_ascii ( * * keys ) step = keys . get ( 'step' , 1 ) if self . _info [ 'hdutype' ] == IMAGE_HDU : raise ValueError ( "slices currently only supported for tables" ) maxrow = self . _info [ 'nrows' ] if firstrow < 0 or lastrow > maxrow : raise ValueError ( "slice must specify a sub-range of [%d,%d]" % ( 0 , maxrow ) ) dtype , offsets , isvar = self . get_rec_dtype ( * * keys ) w , = numpy . where ( isvar == True ) # noqa if w . size > 0 : vstorage = keys . get ( 'vstorage' , self . _vstorage ) rows = numpy . arange ( firstrow , lastrow , step , dtype = 'i8' ) colnums = self . _extract_colnums ( ) array = self . _read_rec_with_var ( colnums , rows , dtype , offsets , isvar , vstorage ) else : if step != 1 : rows = numpy . arange ( firstrow , lastrow , step , dtype = 'i8' ) array = self . read ( rows = rows ) else : # no +1 because lastrow is non-inclusive nrows = lastrow - firstrow array = numpy . zeros ( nrows , dtype = dtype ) # only first needs to be +1. This is becuase the c code is # inclusive self . _FITS . read_as_rec ( self . _ext + 1 , firstrow + 1 , lastrow , array ) array = self . _maybe_decode_fits_ascii_strings_to_unicode_py3 ( array ) for colnum , name in enumerate ( array . dtype . names ) : self . _rescale_and_convert_field_inplace ( array , name , self . _info [ 'colinfo' ] [ colnum ] [ 'tscale' ] , self . _info [ 'colinfo' ] [ colnum ] [ 'tzero' ] ) lower = keys . get ( 'lower' , False ) upper = keys . get ( 'upper' , False ) if self . lower or lower : _names_to_lower_if_recarray ( array ) elif self . upper or upper : _names_to_upper_if_recarray ( array ) self . _maybe_trim_strings ( array , * * keys ) return array
Read the specified row slice from a table .
638
9
234,051
def get_rec_dtype ( self , * * keys ) : colnums = keys . get ( 'colnums' , None ) vstorage = keys . get ( 'vstorage' , self . _vstorage ) if colnums is None : colnums = self . _extract_colnums ( ) descr = [ ] isvararray = numpy . zeros ( len ( colnums ) , dtype = numpy . bool ) for i , colnum in enumerate ( colnums ) : dt , isvar = self . get_rec_column_descr ( colnum , vstorage ) descr . append ( dt ) isvararray [ i ] = isvar dtype = numpy . dtype ( descr ) offsets = numpy . zeros ( len ( colnums ) , dtype = 'i8' ) for i , n in enumerate ( dtype . names ) : offsets [ i ] = dtype . fields [ n ] [ 1 ] return dtype , offsets , isvararray
Get the dtype for the specified columns
228
8
234,052
def _check_tbit ( self , * * keys ) : colnums = keys . get ( 'colnums' , None ) if colnums is None : colnums = self . _extract_colnums ( ) has_tbit = False for i , colnum in enumerate ( colnums ) : npy_type , isvar , istbit = self . _get_tbl_numpy_dtype ( colnum ) if ( istbit ) : has_tbit = True break return has_tbit
Check if one of the columns is a TBIT column
120
11
234,053
def _fix_tbit_dtype ( self , array , colnums ) : descr = array . dtype . descr for i , colnum in enumerate ( colnums ) : npy_type , isvar , istbit = self . _get_tbl_numpy_dtype ( colnum ) if ( istbit ) : coldescr = list ( descr [ i ] ) coldescr [ 1 ] = '?' descr [ i ] = tuple ( coldescr ) return array . view ( descr )
If necessary patch up the TBIT to convert to bool array
120
12
234,054
def _get_simple_dtype_and_shape ( self , colnum , rows = None ) : # basic datatype npy_type , isvar , istbit = self . _get_tbl_numpy_dtype ( colnum ) info = self . _info [ 'colinfo' ] [ colnum ] name = info [ 'name' ] if rows is None : nrows = self . _info [ 'nrows' ] else : nrows = rows . size shape = None tdim = info [ 'tdim' ] shape = _tdim2shape ( tdim , name , is_string = ( npy_type [ 0 ] == 'S' ) ) if shape is not None : if nrows > 1 : if not isinstance ( shape , tuple ) : # vector shape = ( nrows , shape ) else : # multi-dimensional shape = tuple ( [ nrows ] + list ( shape ) ) else : # scalar shape = nrows return npy_type , shape
When reading a single column we want the basic data type and the shape of the array .
219
18
234,055
def get_rec_column_descr ( self , colnum , vstorage ) : npy_type , isvar , istbit = self . _get_tbl_numpy_dtype ( colnum ) name = self . _info [ 'colinfo' ] [ colnum ] [ 'name' ] if isvar : if vstorage == 'object' : descr = ( name , 'O' ) else : tform = self . _info [ 'colinfo' ] [ colnum ] [ 'tform' ] max_size = _extract_vararray_max ( tform ) if max_size <= 0 : name = self . _info [ 'colinfo' ] [ colnum ] [ 'name' ] mess = 'Will read as an object field' if max_size < 0 : mess = "Column '%s': No maximum size: '%s'. %s" mess = mess % ( name , tform , mess ) warnings . warn ( mess , FITSRuntimeWarning ) else : mess = "Column '%s': Max size is zero: '%s'. %s" mess = mess % ( name , tform , mess ) warnings . warn ( mess , FITSRuntimeWarning ) # we are forced to read this as an object array return self . get_rec_column_descr ( colnum , 'object' ) if npy_type [ 0 ] == 'S' : # variable length string columns cannot # themselves be arrays I don't think npy_type = 'S%d' % max_size descr = ( name , npy_type ) elif npy_type [ 0 ] == 'U' : # variable length string columns cannot # themselves be arrays I don't think npy_type = 'U%d' % max_size descr = ( name , npy_type ) else : descr = ( name , npy_type , max_size ) else : tdim = self . _info [ 'colinfo' ] [ colnum ] [ 'tdim' ] shape = _tdim2shape ( tdim , name , is_string = ( npy_type [ 0 ] == 'S' or npy_type [ 0 ] == 'U' ) ) if shape is not None : descr = ( name , npy_type , shape ) else : descr = ( name , npy_type ) return descr , isvar
Get a descriptor entry for the specified column .
522
9
234,056
def _read_rec_with_var ( self , colnums , rows , dtype , offsets , isvar , vstorage ) : colnumsp = colnums + 1 if rows is None : nrows = self . _info [ 'nrows' ] else : nrows = rows . size array = numpy . zeros ( nrows , dtype = dtype ) # read from the main table first wnotvar , = numpy . where ( isvar == False ) # noqa if wnotvar . size > 0 : # this will be contiguous (not true for slices) thesecol = colnumsp [ wnotvar ] theseoff = offsets [ wnotvar ] self . _FITS . read_columns_as_rec_byoffset ( self . _ext + 1 , thesecol , theseoff , array , rows ) for i in xrange ( thesecol . size ) : name = array . dtype . names [ wnotvar [ i ] ] colnum = thesecol [ i ] - 1 self . _rescale_and_convert_field_inplace ( array , name , self . _info [ 'colinfo' ] [ colnum ] [ 'tscale' ] , self . _info [ 'colinfo' ] [ colnum ] [ 'tzero' ] ) array = self . _maybe_decode_fits_ascii_strings_to_unicode_py3 ( array ) # now read the variable length arrays we may be able to speed this up # by storing directly instead of reading first into a list wvar , = numpy . where ( isvar == True ) # noqa if wvar . size > 0 : # this will be contiguous (not true for slices) thesecol = colnumsp [ wvar ] for i in xrange ( thesecol . size ) : colnump = thesecol [ i ] name = array . dtype . names [ wvar [ i ] ] dlist = self . _FITS . read_var_column_as_list ( self . _ext + 1 , colnump , rows ) if ( isinstance ( dlist [ 0 ] , str ) or ( IS_PY3 and isinstance ( dlist [ 0 ] , bytes ) ) ) : is_string = True else : is_string = False if array [ name ] . dtype . descr [ 0 ] [ 1 ] [ 1 ] == 'O' : # storing in object array # get references to each, no copy made for irow , item in enumerate ( dlist ) : if IS_PY3 and isinstance ( item , bytes ) : item = item . decode ( 'ascii' ) array [ name ] [ irow ] = item else : for irow , item in enumerate ( dlist ) : if IS_PY3 and isinstance ( item , bytes ) : item = item . 
decode ( 'ascii' ) if is_string : array [ name ] [ irow ] = item else : ncopy = len ( item ) if IS_PY3 : ts = array [ name ] . dtype . descr [ 0 ] [ 1 ] [ 1 ] if ts != 'S' and ts != 'U' : array [ name ] [ irow ] [ 0 : ncopy ] = item [ : ] else : array [ name ] [ irow ] = item else : array [ name ] [ irow ] [ 0 : ncopy ] = item [ : ] return array
Read columns from a table into a rec array including variable length columns . This is special because for efficiency it involves reading from the main table as normal but skipping the columns in the array that are variable . Then reading the variable length columns with accounting for strides appropriately .
747
52
234,057
def _extract_rows ( self , rows ) : if rows is not None : rows = numpy . array ( rows , ndmin = 1 , copy = False , dtype = 'i8' ) # returns unique, sorted rows = numpy . unique ( rows ) maxrow = self . _info [ 'nrows' ] - 1 if rows [ 0 ] < 0 or rows [ - 1 ] > maxrow : raise ValueError ( "rows must be in [%d,%d]" % ( 0 , maxrow ) ) return rows
Extract an array of rows from an input scalar or sequence
117
13
234,058
def _process_slice ( self , arg ) : start = arg . start stop = arg . stop step = arg . step nrows = self . _info [ 'nrows' ] if step is None : step = 1 if start is None : start = 0 if stop is None : stop = nrows if start < 0 : start = nrows + start if start < 0 : raise IndexError ( "Index out of bounds" ) if stop < 0 : stop = nrows + start + 1 if stop < start : # will return an empty struct stop = start if stop > nrows : stop = nrows return slice ( start , stop , step )
process the input slice for use calling the C code
137
10
234,059
def _slice2rows ( self , start , stop , step = None ) : nrows = self . _info [ 'nrows' ] if start is None : start = 0 if stop is None : stop = nrows if step is None : step = 1 tstart = self . _fix_range ( start ) tstop = self . _fix_range ( stop ) if tstart == 0 and tstop == nrows : # this is faster: if all fields are also requested, then a # single fread will be done return None if stop < start : raise ValueError ( "start is greater than stop in slice" ) return numpy . arange ( tstart , tstop , step , dtype = 'i8' )
Convert a slice to an explicit array of rows
156
10
234,060
def _fix_range ( self , num , isslice = True ) : nrows = self . _info [ 'nrows' ] if isslice : # include the end if num < 0 : num = nrows + ( 1 + num ) elif num > nrows : num = nrows else : # single element if num < 0 : num = nrows + num elif num > ( nrows - 1 ) : num = nrows - 1 return num
Ensure the input is within range .
100
8
234,061
def _rescale_and_convert_field_inplace ( self , array , name , scale , zero ) : self . _rescale_array ( array [ name ] , scale , zero ) if array [ name ] . dtype == numpy . bool : array [ name ] = self . _convert_bool_array ( array [ name ] ) return array
Apply fits scalings . Also convert bool to proper numpy boolean values
79
14
234,062
def _rescale_array ( self , array , scale , zero ) : if scale != 1.0 : sval = numpy . array ( scale , dtype = array . dtype ) array *= sval if zero != 0.0 : zval = numpy . array ( zero , dtype = array . dtype ) array += zval
Scale the input array
75
4
234,063
def _maybe_trim_strings ( self , array , * * keys ) : trim_strings = keys . get ( 'trim_strings' , False ) if self . trim_strings or trim_strings : _trim_strings ( array )
if requested trim trailing white space from all string fields in the input array
54
14
234,064
def _get_tbl_numpy_dtype ( self , colnum , include_endianness = True ) : table_type = self . _info [ 'hdutype' ] table_type_string = _hdu_type_map [ table_type ] try : ftype = self . _info [ 'colinfo' ] [ colnum ] [ 'eqtype' ] if table_type == ASCII_TBL : npy_type = _table_fits2npy_ascii [ abs ( ftype ) ] else : npy_type = _table_fits2npy [ abs ( ftype ) ] except KeyError : raise KeyError ( "unsupported %s fits data " "type: %d" % ( table_type_string , ftype ) ) istbit = False if ( ftype == 1 ) : istbit = True isvar = False if ftype < 0 : isvar = True if include_endianness : # if binary we will read the big endian bytes directly, # if ascii we read into native byte order if table_type == ASCII_TBL : addstr = '' else : addstr = '>' if npy_type not in [ 'u1' , 'i1' , 'S' , 'U' ] : npy_type = addstr + npy_type if npy_type == 'S' : width = self . _info [ 'colinfo' ] [ colnum ] [ 'width' ] npy_type = 'S%d' % width elif npy_type == 'U' : width = self . _info [ 'colinfo' ] [ colnum ] [ 'width' ] npy_type = 'U%d' % width return npy_type , isvar , istbit
Get numpy type for the input column
396
8
234,065
def _process_args_as_rows_or_columns ( self , arg , unpack = False ) : flags = set ( ) # if isinstance ( arg , ( tuple , list , numpy . ndarray ) ) : # a sequence was entered if isstring ( arg [ 0 ] ) : result = arg else : result = arg flags . add ( 'isrows' ) elif isstring ( arg ) : # a single string was entered result = arg elif isinstance ( arg , slice ) : if unpack : flags . add ( 'isrows' ) result = self . _slice2rows ( arg . start , arg . stop , arg . step ) else : flags . add ( 'isrows' ) flags . add ( 'isslice' ) result = self . _process_slice ( arg ) else : # a single object was entered. # Probably should apply some more checking on this result = arg flags . add ( 'isrows' ) if numpy . ndim ( arg ) == 0 : flags . add ( 'isscalar' ) return result , flags
We must be able to interpret the args as either a column name or row number or sequences thereof . Numpy arrays and slices are also fine .
232
30
234,066
def _extract_colnums ( self , columns = None ) : if columns is None : return numpy . arange ( self . _ncol , dtype = 'i8' ) if not isinstance ( columns , ( tuple , list , numpy . ndarray ) ) : # is a scalar return self . _extract_colnum ( columns ) colnums = numpy . zeros ( len ( columns ) , dtype = 'i8' ) for i in xrange ( colnums . size ) : colnums [ i ] = self . _extract_colnum ( columns [ i ] ) # returns unique sorted colnums = numpy . unique ( colnums ) return colnums
Extract an array of columns from the input
158
9
234,067
def _extract_colnum ( self , col ) : if isinteger ( col ) : colnum = col if ( colnum < 0 ) or ( colnum > ( self . _ncol - 1 ) ) : raise ValueError ( "column number should be in [0,%d]" % ( 0 , self . _ncol - 1 ) ) else : colstr = mks ( col ) try : if self . case_sensitive : mess = "column name '%s' not found (case sensitive)" % col colnum = self . _colnames . index ( colstr ) else : mess = "column name '%s' not found (case insensitive)" % col colnum = self . _colnames_lower . index ( colstr . lower ( ) ) except ValueError : raise ValueError ( mess ) return int ( colnum )
Get the column number for the input column
181
8
234,068
def _update_info ( self ) : super ( TableHDU , self ) . _update_info ( ) if self . _info [ 'hdutype' ] == IMAGE_HDU : mess = "Extension %s is not a Table HDU" % self . ext raise ValueError ( mess ) if 'colinfo' in self . _info : self . _colnames = [ i [ 'name' ] for i in self . _info [ 'colinfo' ] ] self . _colnames_lower = [ i [ 'name' ] . lower ( ) for i in self . _info [ 'colinfo' ] ] self . _ncol = len ( self . _colnames )
Call parent method and make sure this is in fact a table HDU . Set some convenience data .
152
20
234,069
def _get_next_buffered_row ( self ) : if self . _iter_row == self . _iter_nrows : raise StopIteration if self . _row_buffer_index >= self . _iter_row_buffer : self . _buffer_iter_rows ( self . _iter_row ) data = self . _row_buffer [ self . _row_buffer_index ] self . _iter_row += 1 self . _row_buffer_index += 1 return data
Get the next row for iteration .
107
7
234,070
def _buffer_iter_rows ( self , start ) : self . _row_buffer = self [ start : start + self . _iter_row_buffer ] # start back at the front of the buffer self . _row_buffer_index = 0
Read in the buffer for iteration
54
6
234,071
def read ( self , * * keys ) : rows = keys . get ( 'rows' , None ) columns = keys . get ( 'columns' , None ) # if columns is None, returns all. Guaranteed to be unique and sorted colnums = self . _extract_colnums ( columns ) if isinstance ( colnums , int ) : # scalar sent, don't read as a recarray return self . read_column ( columns , * * keys ) rows = self . _extract_rows ( rows ) if rows is None : nrows = self . _info [ 'nrows' ] else : nrows = rows . size # if rows is None still returns None, and is correctly interpreted # by the reader to mean all rows = self . _extract_rows ( rows ) # this is the full dtype for all columns dtype , offsets , isvar = self . get_rec_dtype ( colnums = colnums , * * keys ) array = numpy . zeros ( nrows , dtype = dtype ) # note reading into existing data wnotvar , = numpy . where ( isvar == False ) # noqa if wnotvar . size > 0 : for i in wnotvar : colnum = colnums [ i ] name = array . dtype . names [ i ] a = array [ name ] . copy ( ) self . _FITS . read_column ( self . _ext + 1 , colnum + 1 , a , rows ) array [ name ] = a del a array = self . _maybe_decode_fits_ascii_strings_to_unicode_py3 ( array ) wvar , = numpy . where ( isvar == True ) # noqa if wvar . size > 0 : for i in wvar : colnum = colnums [ i ] name = array . dtype . names [ i ] dlist = self . _FITS . read_var_column_as_list ( self . _ext + 1 , colnum + 1 , rows ) if ( isinstance ( dlist [ 0 ] , str ) or ( IS_PY3 and isinstance ( dlist [ 0 ] , bytes ) ) ) : is_string = True else : is_string = False if array [ name ] . dtype . descr [ 0 ] [ 1 ] [ 1 ] == 'O' : # storing in object array # get references to each, no copy made for irow , item in enumerate ( dlist ) : if IS_PY3 and isinstance ( item , bytes ) : item = item . decode ( 'ascii' ) array [ name ] [ irow ] = item else : for irow , item in enumerate ( dlist ) : if IS_PY3 and isinstance ( item , bytes ) : item = item . 
decode ( 'ascii' ) if is_string : array [ name ] [ irow ] = item else : ncopy = len ( item ) array [ name ] [ irow ] [ 0 : ncopy ] = item [ : ] lower = keys . get ( 'lower' , False ) upper = keys . get ( 'upper' , False ) if self . lower or lower : _names_to_lower_if_recarray ( array ) elif self . upper or upper : _names_to_upper_if_recarray ( array ) self . _maybe_trim_strings ( array , * * keys ) return array
read data from an ascii table HDU
753
11
234,072
def read ( self , * * keys ) : if self . is_scalar : data = self . fitshdu . read_column ( self . columns , * * keys ) else : c = keys . get ( 'columns' , None ) if c is None : keys [ 'columns' ] = self . columns data = self . fitshdu . read ( * * keys ) return data
Read the data from disk and return as a numpy array
86
12
234,073
def read ( filename , ext = None , extver = None , * * keys ) : with FITS ( filename , * * keys ) as fits : header = keys . pop ( 'header' , False ) if ext is None : for i in xrange ( len ( fits ) ) : if fits [ i ] . has_data ( ) : ext = i break if ext is None : raise IOError ( "No extensions have data" ) item = _make_item ( ext , extver = extver ) data = fits [ item ] . read ( * * keys ) if header : h = fits [ item ] . read_header ( ) return data , h else : return data
Convenience function to read data from the specified FITS HDU
144
14
234,074
def read_header ( filename , ext = 0 , extver = None , case_sensitive = False , * * keys ) : dont_create = 0 try : hdunum = ext + 1 except TypeError : hdunum = None _fits = _fitsio_wrap . FITS ( filename , READONLY , dont_create ) if hdunum is None : extname = mks ( ext ) if extver is None : extver_num = 0 else : extver_num = extver if not case_sensitive : # the builtin movnam_hdu is not case sensitive hdunum = _fits . movnam_hdu ( ANY_HDU , extname , extver_num ) else : # for case sensitivity we'll need to run through # all the hdus found = False current_ext = 0 while True : hdunum = current_ext + 1 try : hdu_type = _fits . movabs_hdu ( hdunum ) # noqa - not used name , vers = _fits . get_hdu_name_version ( hdunum ) if name == extname : if extver is None : # take the first match found = True break else : if extver_num == vers : found = True break except OSError : break current_ext += 1 if not found : raise IOError ( 'hdu not found: %s (extver %s)' % ( extname , extver ) ) return FITSHDR ( _fits . read_header ( hdunum ) )
Convenience function to read the header from the specified FITS HDU
341
15
234,075
def read_scamp_head ( fname , header = None ) : with open ( fname ) as fobj : lines = fobj . readlines ( ) lines = [ l . strip ( ) for l in lines if l [ 0 : 3 ] != 'END' ] # if header is None an empty FITSHDR is created hdr = FITSHDR ( header ) for l in lines : hdr . add_record ( l ) return hdr
read a SCAMP . head file as a fits header FITSHDR object
99
16
234,076
def write ( filename , data , extname = None , extver = None , units = None , compress = None , table_type = 'binary' , header = None , clobber = False , * * keys ) : with FITS ( filename , 'rw' , clobber = clobber , * * keys ) as fits : fits . write ( data , table_type = table_type , units = units , extname = extname , extver = extver , compress = compress , header = header , * * keys )
Convenience function to create a new HDU and write the data .
115
15
234,077
def array2tabledef ( data , table_type = 'binary' , write_bitcols = False ) : is_ascii = ( table_type == 'ascii' ) if data . dtype . fields is None : raise ValueError ( "data must have fields" ) names = [ ] names_nocase = { } formats = [ ] dims = [ ] descr = data . dtype . descr for d in descr : # these have the form '<f4' or '|S25', etc. Extract the pure type npy_dtype = d [ 1 ] [ 1 : ] if is_ascii : if npy_dtype in [ 'u1' , 'i1' ] : raise ValueError ( "1-byte integers are not supported for " "ascii tables: '%s'" % npy_dtype ) if npy_dtype in [ 'u2' ] : raise ValueError ( "unsigned 2-byte integers are not supported for " "ascii tables: '%s'" % npy_dtype ) if npy_dtype [ 0 ] == 'O' : # this will be a variable length column 1Pt(len) where t is the # type and len is max length. Each element must be convertible to # the same type as the first name = d [ 0 ] form , dim = npy_obj2fits ( data , name ) elif npy_dtype [ 0 ] == "V" : continue else : name , form , dim = _npy2fits ( d , table_type = table_type , write_bitcols = write_bitcols ) if name == '' : raise ValueError ( "field name is an empty string" ) """ if is_ascii: if dim is not None: raise ValueError("array columns are not supported for " "ascii tables") """ name_nocase = name . upper ( ) if name_nocase in names_nocase : raise ValueError ( "duplicate column name found: '%s'. Note " "FITS column names are not case sensitive" % name_nocase ) names . append ( name ) names_nocase [ name_nocase ] = name_nocase formats . append ( form ) dims . append ( dim ) return names , formats , dims
Similar to descr2tabledef but if there are object columns a type and max length will be extracted and used for the tabledef
519
29
234,078
def descr2tabledef ( descr , table_type = 'binary' , write_bitcols = False ) : names = [ ] formats = [ ] dims = [ ] for d in descr : """ npy_dtype = d[1][1:] if is_ascii and npy_dtype in ['u1','i1']: raise ValueError("1-byte integers are not supported for " "ascii tables") """ if d [ 1 ] [ 1 ] == 'O' : raise ValueError ( 'cannot automatically declare a var column without ' 'some data to determine max len' ) name , form , dim = _npy2fits ( d , table_type = table_type , write_bitcols = write_bitcols ) if name == '' : raise ValueError ( "field name is an empty string" ) """ if is_ascii: if dim is not None: raise ValueError("array columns are not supported " "for ascii tables") """ names . append ( name ) formats . append ( form ) dims . append ( dim ) return names , formats , dims
Create a FITS table def from the input numpy descriptor .
247
13
234,079
def get_tile_dims ( tile_dims , imshape ) : if tile_dims is None : td = None else : td = numpy . array ( tile_dims , dtype = 'i8' ) nd = len ( imshape ) if td . size != nd : msg = "expected tile_dims to have %d dims, got %d" % ( td . size , nd ) raise ValueError ( msg ) return td
Just make sure the tile dims have the appropriate number of dimensions
102
13
234,080
def _extract_table_type ( type ) : if isinstance ( type , str ) : type = type . lower ( ) if type [ 0 : 7 ] == 'binary' : table_type = BINARY_TBL elif type [ 0 : 6 ] == 'ascii' : table_type = ASCII_TBL else : raise ValueError ( "table type string should begin with 'binary' or 'ascii' " "(case insensitive)" ) else : type = int ( type ) if type not in [ BINARY_TBL , ASCII_TBL ] : raise ValueError ( "table type num should be BINARY_TBL (%d) or " "ASCII_TBL (%d)" % ( BINARY_TBL , ASCII_TBL ) ) table_type = type return table_type
Get the numerical table type
181
5
234,081
def close ( self ) : if hasattr ( self , '_FITS' ) : if self . _FITS is not None : self . _FITS . close ( ) self . _FITS = None self . _filename = None self . mode = None self . charmode = None self . intmode = None self . hdu_list = None self . hdu_map = None
Close the fits file and set relevant metadata to None
84
10
234,082
def movnam_hdu ( self , extname , hdutype = ANY_HDU , extver = 0 ) : extname = mks ( extname ) hdu = self . _FITS . movnam_hdu ( hdutype , extname , extver ) return hdu
Move to the indicated HDU by name
66
8
234,083
def reopen ( self ) : self . _FITS . close ( ) del self . _FITS self . _FITS = _fitsio_wrap . FITS ( self . _filename , self . intmode , 0 ) self . update_hdu_list ( )
close and reopen the fits file with the same mode
58
10
234,084
def write ( self , data , units = None , extname = None , extver = None , compress = None , tile_dims = None , header = None , names = None , table_type = 'binary' , write_bitcols = False , * * keys ) : isimage = False if data is None : isimage = True elif isinstance ( data , numpy . ndarray ) : if data . dtype . fields == None : # noqa - probably should be is None isimage = True if isimage : self . write_image ( data , extname = extname , extver = extver , compress = compress , tile_dims = tile_dims , header = header ) else : self . write_table ( data , units = units , extname = extname , extver = extver , header = header , names = names , table_type = table_type , write_bitcols = write_bitcols )
Write the data to a new HDU .
208
9
234,085
def write_image ( self , img , extname = None , extver = None , compress = None , tile_dims = None , header = None ) : self . create_image_hdu ( img , header = header , extname = extname , extver = extver , compress = compress , tile_dims = tile_dims ) if header is not None : self [ - 1 ] . write_keys ( header ) self [ - 1 ] . _update_info ( )
Create a new image extension and write the data .
106
10
234,086
def create_image_hdu ( self , img = None , dims = None , dtype = None , extname = None , extver = None , compress = None , tile_dims = None , header = None ) : if ( img is not None ) or ( img is None and dims is None ) : from_image = True elif dims is not None : from_image = False if from_image : img2send = img if img is not None : dims = img . shape dtstr = img . dtype . descr [ 0 ] [ 1 ] [ 1 : ] if img . size == 0 : raise ValueError ( "data must have at least 1 row" ) # data must be c-contiguous and native byte order if not img . flags [ 'C_CONTIGUOUS' ] : # this always makes a copy img2send = numpy . ascontiguousarray ( img ) array_to_native ( img2send , inplace = True ) else : img2send = array_to_native ( img , inplace = False ) if IS_PY3 and img2send . dtype . char == 'U' : # for python3, we convert unicode to ascii # this will error if the character is not in ascii img2send = img2send . astype ( 'S' , copy = False ) else : self . _ensure_empty_image_ok ( ) compress = None tile_dims = None # we get dims from the input image dims2send = None else : # img was None and dims was sent if dtype is None : raise ValueError ( "send dtype= with dims=" ) # this must work! dtype = numpy . dtype ( dtype ) dtstr = dtype . descr [ 0 ] [ 1 ] [ 1 : ] # use the example image to build the type in C img2send = numpy . zeros ( 1 , dtype = dtype ) # sending an array simplifies access dims2send = numpy . array ( dims , dtype = 'i8' , ndmin = 1 ) if img2send is not None : if img2send . dtype . 
fields is not None : raise ValueError ( "got record data type, expected regular ndarray" ) if extname is None : # will be ignored extname = "" else : if not isstring ( extname ) : raise ValueError ( "extension name must be a string" ) extname = mks ( extname ) if extname is not None and extver is not None : extver = check_extver ( extver ) if extver is None : # will be ignored extver = 0 comptype = get_compress_type ( compress ) tile_dims = get_tile_dims ( tile_dims , dims ) if img2send is not None : check_comptype_img ( comptype , dtstr ) if header is not None : nkeys = len ( header ) else : nkeys = 0 self . _FITS . create_image_hdu ( img2send , nkeys , dims = dims2send , comptype = comptype , tile_dims = tile_dims , extname = extname , extver = extver ) # don't rebuild the whole list unless this is the first hdu # to be created self . update_hdu_list ( rebuild = False )
Create a new empty image HDU and reload the hdu list . Either create from an input image or from input dims and dtype
756
28
234,087
def _ensure_empty_image_ok ( self ) : if self . ignore_empty : return if len ( self ) > 1 : raise RuntimeError ( "Cannot write None image at extension %d" % len ( self ) ) if 'ndims' in self [ 0 ] . _info : raise RuntimeError ( "Can only write None images to extension zero, " "which already exists" )
If ignore_empty was not set to True we only allow empty HDU for first HDU and if there is no data there already
86
27
234,088
def write_table ( self , data , table_type = 'binary' , names = None , formats = None , units = None , extname = None , extver = None , header = None , write_bitcols = False ) : """ if data.dtype.fields == None: raise ValueError("data must have fields") if data.size == 0: raise ValueError("data must have at least 1 row") """ self . create_table_hdu ( data = data , header = header , names = names , units = units , extname = extname , extver = extver , table_type = table_type , write_bitcols = write_bitcols ) if header is not None : self [ - 1 ] . write_keys ( header ) self [ - 1 ] . _update_info ( ) self [ - 1 ] . write ( data , names = names )
Create a new table extension and write the data .
193
10
234,089
def create_table_hdu ( self , data = None , dtype = None , header = None , names = None , formats = None , units = None , dims = None , extname = None , extver = None , table_type = 'binary' , write_bitcols = False ) : # record this for the TableHDU object self . keys [ 'write_bitcols' ] = write_bitcols # can leave as turn table_type_int = _extract_table_type ( table_type ) if data is not None : if isinstance ( data , numpy . ndarray ) : names , formats , dims = array2tabledef ( data , table_type = table_type , write_bitcols = write_bitcols ) elif isinstance ( data , ( list , dict ) ) : names , formats , dims = collection2tabledef ( data , names = names , table_type = table_type , write_bitcols = write_bitcols ) else : raise ValueError ( "data must be an ndarray with fields or a dict" ) elif dtype is not None : dtype = numpy . dtype ( dtype ) names , formats , dims = descr2tabledef ( dtype . descr , write_bitcols = write_bitcols , table_type = table_type , ) else : if names is None or formats is None : raise ValueError ( "send either dtype=, data=, or names= and formats=" ) if not isinstance ( names , list ) or not isinstance ( formats , list ) : raise ValueError ( "names and formats should be lists" ) if len ( names ) != len ( formats ) : raise ValueError ( "names and formats must be same length" ) if dims is not None : if not isinstance ( dims , list ) : raise ValueError ( "dims should be a list" ) if len ( dims ) != len ( names ) : raise ValueError ( "names and dims must be same length" ) if units is not None : if not isinstance ( units , list ) : raise ValueError ( "units should be a list" ) if len ( units ) != len ( names ) : raise ValueError ( "names and units must be same length" ) if extname is None : # will be ignored extname = "" else : if not isstring ( extname ) : raise ValueError ( "extension name must be a string" ) extname = mks ( extname ) if extname is not None and extver is not None : extver = check_extver ( extver ) if extver is None : # will be 
ignored extver = 0 if extname is None : # will be ignored extname = "" if header is not None : nkeys = len ( header ) else : nkeys = 0 # note we can create extname in the c code for tables, but not images self . _FITS . create_table_hdu ( table_type_int , nkeys , names , formats , tunit = units , tdim = dims , extname = extname , extver = extver ) # don't rebuild the whole list unless this is the first hdu # to be created self . update_hdu_list ( rebuild = False )
Create a new empty table extension and reload the hdu list .
723
13
234,090
def update_hdu_list ( self , rebuild = True ) : if not hasattr ( self , 'hdu_list' ) : rebuild = True if rebuild : self . hdu_list = [ ] self . hdu_map = { } # we don't know how many hdus there are, so iterate # until we can't open any more ext_start = 0 else : # start from last ext_start = len ( self ) ext = ext_start while True : try : self . _append_hdu_info ( ext ) except IOError : break except RuntimeError : break ext = ext + 1
Force an update of the entire HDU list
134
9
234,091
def next ( self ) : if self . _iter_index == len ( self . hdu_list ) : raise StopIteration hdu = self . hdu_list [ self . _iter_index ] self . _iter_index += 1 return hdu
Move to the next iteration
56
5
234,092
def _extract_item ( self , item ) : ver = 0 if isinstance ( item , tuple ) : ver_sent = True nitem = len ( item ) if nitem == 1 : ext = item [ 0 ] elif nitem == 2 : ext , ver = item else : ver_sent = False ext = item return ext , ver , ver_sent
utility function to extract an item meaning a extension number name plus version .
78
15
234,093
def _update_info ( self ) : super ( ImageHDU , self ) . _update_info ( ) if self . _info [ 'hdutype' ] != IMAGE_HDU : mess = "Extension %s is not a Image HDU" % self . ext raise ValueError ( mess ) # convert to c order if 'dims' in self . _info : self . _info [ 'dims' ] = list ( reversed ( self . _info [ 'dims' ] ) )
Call parent method and make sure this is in fact a image HDU . Set dims in C order
110
21
234,094
def reshape ( self , dims ) : adims = numpy . array ( dims , ndmin = 1 , dtype = 'i8' ) self . _FITS . reshape_image ( self . _ext + 1 , adims )
reshape an existing image to the requested dimensions
56
9
234,095
def write ( self , img , start = 0 , * * keys ) : dims = self . get_dims ( ) if img . dtype . fields is not None : raise ValueError ( "got recarray, expected regular ndarray" ) if img . size == 0 : raise ValueError ( "data must have at least 1 row" ) # data must be c-contiguous and native byte order if not img . flags [ 'C_CONTIGUOUS' ] : # this always makes a copy img_send = numpy . ascontiguousarray ( img ) array_to_native ( img_send , inplace = True ) else : img_send = array_to_native ( img , inplace = False ) if IS_PY3 and img_send . dtype . char == 'U' : # for python3, we convert unicode to ascii # this will error if the character is not in ascii img_send = img_send . astype ( 'S' , copy = False ) if not numpy . isscalar ( start ) : # convert to scalar offset # note we use the on-disk data type to get itemsize offset = _convert_full_start_to_offset ( dims , start ) else : offset = start # see if we need to resize the image if self . has_data ( ) : self . _expand_if_needed ( dims , img . shape , start , offset ) self . _FITS . write_image ( self . _ext + 1 , img_send , offset + 1 ) self . _update_info ( )
Write the image into this HDU
350
7
234,096
def read ( self , * * keys ) : if not self . has_data ( ) : return None dtype , shape = self . _get_dtype_and_shape ( ) array = numpy . zeros ( shape , dtype = dtype ) self . _FITS . read_image ( self . _ext + 1 , array ) return array
Read the image .
77
4
234,097
def _get_dtype_and_shape ( self ) : npy_dtype = self . _get_image_numpy_dtype ( ) if self . _info [ 'ndims' ] != 0 : shape = self . _info [ 'dims' ] else : raise IOError ( "no image present in HDU" ) return npy_dtype , shape
Get the numpy dtype and shape for image
83
10
234,098
def _get_image_numpy_dtype ( self ) : try : ftype = self . _info [ 'img_equiv_type' ] npy_type = _image_bitpix2npy [ ftype ] except KeyError : raise KeyError ( "unsupported fits data type: %d" % ftype ) return npy_type
Get the numpy dtype for the image
79
9
234,099
def _read_image_slice ( self , arg ) : if 'ndims' not in self . _info : raise ValueError ( "Attempt to slice empty extension" ) if isinstance ( arg , slice ) : # one-dimensional, e.g. 2:20 return self . _read_image_slice ( ( arg , ) ) if not isinstance ( arg , tuple ) : raise ValueError ( "arguments must be slices, one for each " "dimension, e.g. [2:5] or [2:5,8:25] etc." ) # should be a tuple of slices, one for each dimension # e.g. [2:3, 8:100] nd = len ( arg ) if nd != self . _info [ 'ndims' ] : raise ValueError ( "Got slice dimensions %d, " "expected %d" % ( nd , self . _info [ 'ndims' ] ) ) targ = arg arg = [ ] for a in targ : if isinstance ( a , slice ) : arg . append ( a ) elif isinstance ( a , int ) : arg . append ( slice ( a , a + 1 , 1 ) ) else : raise ValueError ( "arguments must be slices, e.g. 2:12" ) dims = self . _info [ 'dims' ] arrdims = [ ] first = [ ] last = [ ] steps = [ ] # check the args and reverse dimensions since # fits is backwards from numpy dim = 0 for slc in arg : start = slc . start stop = slc . stop step = slc . step if start is None : start = 0 if stop is None : stop = dims [ dim ] if step is None : step = 1 if step < 1 : raise ValueError ( "slice steps must be >= 1" ) if start < 0 : start = dims [ dim ] + start if start < 0 : raise IndexError ( "Index out of bounds" ) if stop < 0 : stop = dims [ dim ] + start + 1 # move to 1-offset start = start + 1 if stop < start : raise ValueError ( "python slices but include at least one " "element, got %s" % slc ) if stop > dims [ dim ] : stop = dims [ dim ] first . append ( start ) last . append ( stop ) steps . append ( step ) arrdims . append ( stop - start + 1 ) dim += 1 first . reverse ( ) last . reverse ( ) steps . reverse ( ) first = numpy . array ( first , dtype = 'i8' ) last = numpy . array ( last , dtype = 'i8' ) steps = numpy . array ( steps , dtype = 'i8' ) npy_dtype = self . _get_image_numpy_dtype ( ) array = numpy . zeros ( arrdims , dtype = npy_dtype ) self . _FITS . 
read_image_slice ( self . _ext + 1 , first , last , steps , array ) return array
workhorse to read a slice
669
6