idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
242,400
def background_at_centroid(self):
    """The value of the background at the position of the source centroid.

    Returns a NaN Quantity (with the background unit) when the source
    is completely masked or the centroid is not finite, and ``None``
    when no background was provided.
    """
    from scipy.ndimage import map_coordinates

    if self._background is not None:
        # centroid can still be NaN if all data values are <= 0
        if (self._is_completely_masked or
                np.any(~np.isfinite(self.centroid))):
            return np.nan * self._background_unit    # unit for table
        else:
            # bilinear (order=1) interpolation of the background image
            # at the (y, x) centroid position
            value = map_coordinates(self._background,
                                    [[self.ycentroid.value],
                                     [self.xcentroid.value]],
                                    order=1, mode='nearest')[0]
            return value * self._background_unit
    else:
        return None
The value of the background at the position of the source centroid .
157
14
242,401
def perimeter(self):
    """The total perimeter of the source segment, approximated as lines
    through the centers of the border pixels using 4-connectivity.

    Returns NaN (in pixel units) when the source is completely masked.
    """
    if self._is_completely_masked:
        return np.nan * u.pix    # unit for table
    else:
        from skimage.measure import perimeter
        # NOTE(review): ``neighbourhood`` was renamed ``neighborhood``
        # (and later ``neighbourhood`` removed) in newer scikit-image
        # releases -- confirm against the pinned version.
        return perimeter(~self._total_mask, neighbourhood=4) * u.pix
The total perimeter of the source segment, approximated as lines through the centers of the border pixels using 4-connectivity.
59
23
242,402
def inertia_tensor(self):
    """The inertia tensor of the source for the rotation around its
    center of mass, in units of pix**2."""
    moments = self.moments_central
    # assemble the symmetric 2x2 tensor from the second-order central
    # moments; the off-diagonal term enters with a negative sign
    tensor = np.array([[moments[0, 2], -moments[1, 1]],
                       [-moments[1, 1], moments[2, 0]]])
    return tensor * u.pix ** 2
The inertia tensor of the source for the rotation around its center of mass .
67
16
242,403
def covariance(self):
    """The covariance matrix of the 2D Gaussian function that has the
    same second-order moments as the source, in units of pix**2.

    Returns an all-NaN 2x2 matrix when the zeroth-order moment is zero
    (e.g. a source with no positive flux).
    """
    mu = self.moments_central
    if mu[0, 0] != 0:
        # normalize the second-order central moments by the zeroth moment
        m = mu / mu[0, 0]
        covariance = self._check_covariance(
            np.array([[m[0, 2], m[1, 1]],
                      [m[1, 1], m[2, 0]]]))
        return covariance * u.pix ** 2
    else:
        return np.empty((2, 2)) * np.nan * u.pix ** 2
The covariance matrix of the 2D Gaussian function that has the same second - order moments as the source .
120
23
242,404
def covariance_eigvals(self):
    """The two eigenvalues of the covariance matrix, in decreasing
    order, in units of pix**2.

    NaN values are returned when the covariance contains NaN or
    implies a negative variance.
    """
    if np.isnan(np.sum(self.covariance)):
        return (np.nan, np.nan) * u.pix ** 2
    eigvals = np.linalg.eigvals(self.covariance)
    if np.any(eigvals < 0):    # negative variance
        return (np.nan, np.nan) * u.pix ** 2    # pragma: no cover
    return (np.max(eigvals), np.min(eigvals)) * u.pix ** 2
The two eigenvalues of the covariance matrix in decreasing order .
132
14
242,405
def eccentricity(self):
    """The eccentricity of the 2D Gaussian function that has the same
    second-order moments as the source."""
    semimajor, semiminor = self.covariance_eigvals
    # a zero semimajor eigenvalue means a degenerate (point) source
    if semimajor == 0:
        return 0.    # pragma: no cover
    return np.sqrt(1. - (semiminor / semimajor))
The eccentricity of the 2D Gaussian function that has the same second - order moments as the source .
53
22
242,406
def orientation(self):
    """The angle (in radians) between the x axis and the major axis of
    the 2D Gaussian function that has the same second-order moments as
    the source.  The angle increases counterclockwise."""
    flat = self.covariance.flat
    var_x, cov_xy, var_y = flat[0], flat[2], flat[3]
    if var_x < 0 or var_y < 0:    # negative variance
        return np.nan * u.rad    # pragma: no cover
    return 0.5 * np.arctan2(2. * cov_xy, (var_x - var_y))
The angle in radians between the x axis and the major axis of the 2D Gaussian function that has the same second - order moments as the source . The angle increases in the counter - clockwise direction .
69
43
242,407
def _mesh_values ( data , box_size ) : data = np . ma . asanyarray ( data ) ny , nx = data . shape nyboxes = ny // box_size nxboxes = nx // box_size # include only complete boxes ny_crop = nyboxes * box_size nx_crop = nxboxes * box_size data = data [ 0 : ny_crop , 0 : nx_crop ] # a reshaped 2D masked array with mesh data along the x axis data = np . ma . swapaxes ( data . reshape ( nyboxes , box_size , nxboxes , box_size ) , 1 , 2 ) . reshape ( nyboxes * nxboxes , box_size * box_size ) # include only boxes without any masked pixels idx = np . where ( np . ma . count_masked ( data , axis = 1 ) == 0 ) return data [ idx ]
Extract all the data values in boxes of size box_size .
212
14
242,408
def std_blocksum(data, block_sizes, mask=None):
    """Calculate the standard deviation of block-summed data values at
    sizes of ``block_sizes``.

    Parameters
    ----------
    data : array_like
        The 2D data array (may be a masked array).
    block_sizes : int or array_like of int
        The box size(s) over which to sum.
    mask : array_like of bool, optional
        Additional mask combined with any mask ``data`` carries.

    Returns
    -------
    result : `~numpy.ndarray`
        One standard deviation per block size.

    Raises
    ------
    ValueError
        If ``mask`` and ``data`` have different shapes.
    """
    data = np.ma.asanyarray(data)
    if mask is not None and mask is not np.ma.nomask:
        mask = np.asanyarray(mask)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        # Combine the masks on a *new* masked array; the original
        # ``data.mask |= mask`` mutated the caller's array in place.
        data = np.ma.masked_array(data.data,
                                  mask=np.ma.getmaskarray(data) | mask)

    stds = []
    for block_size in np.atleast_1d(block_sizes):
        mesh_values = _mesh_values(data, block_size)
        block_sums = np.sum(mesh_values, axis=1)
        stds.append(np.std(block_sums))
    return np.array(stds)
Calculate the standard deviation of block - summed data values at sizes of block_sizes .
176
20
242,409
def nstar(self, image, star_groups):
    """Fit, as appropriate, a compound or single model to the given
    ``star_groups``.  Groups are fitted sequentially, and ``image`` is
    successively subtracted by each previously fitted group.

    Parameters
    ----------
    image : 2D array_like
        The image to fit.
    star_groups : `~astropy.table.Table`
        Initial parameter estimates, grouped by ``group_id``.

    Returns
    -------
    result_tab : `~astropy.table.Table`
        Fitted parameters (with uncertainty columns when available).
    image : 2D array_like
        The residual image after subtracting all fitted groups.
    """
    result_tab = Table()
    for param_tab_name in self._pars_to_output.keys():
        result_tab.add_column(Column(name=param_tab_name))

    unc_tab = Table()
    for param, isfixed in self.psf_model.fixed.items():
        if not isfixed:
            unc_tab.add_column(Column(name=param + "_unc"))

    y, x = np.indices(image.shape)

    star_groups = star_groups.group_by('group_id')
    for n in range(len(star_groups.groups)):
        group_psf = get_grouped_psf_model(self.psf_model,
                                          star_groups.groups[n],
                                          self._pars_to_set)
        # select only the pixels that overlap any star in the group
        # (``np.bool`` was removed in numpy >= 1.24; use builtin bool)
        usepixel = np.zeros_like(image, dtype=bool)

        for row in star_groups.groups[n]:
            usepixel[overlap_slices(large_array_shape=image.shape,
                                    small_array_shape=self.fitshape,
                                    position=(row['y_0'], row['x_0']),
                                    mode='trim')[0]] = True

        fit_model = self.fitter(group_psf, x[usepixel], y[usepixel],
                                image[usepixel])
        param_table = self._model_params2table(
            fit_model, len(star_groups.groups[n]))
        result_tab = vstack([result_tab, param_table])

        if 'param_cov' in self.fitter.fit_info.keys():
            unc_tab = vstack([unc_tab,
                              self._get_uncertainties(
                                  len(star_groups.groups[n]))])

        try:
            from astropy.nddata.utils import NoOverlapError
        except ImportError:
            raise ImportError("astropy 1.1 or greater is required in "
                              "order to use this class.")
        # do not subtract if the fitting did not go well
        try:
            image = subtract_psf(image, self.psf_model, param_table,
                                 subshape=self.fitshape)
        except NoOverlapError:
            pass

    if 'param_cov' in self.fitter.fit_info.keys():
        result_tab = hstack([result_tab, unc_tab])

    return result_tab, image
Fit as appropriate a compound or single model to the given star_groups . Groups are fitted sequentially from the smallest to the biggest . In each iteration image is subtracted by the previous fitted group .
565
40
242,410
def _get_uncertainties(self, star_group_size):
    """Retrieve uncertainties on fitted parameters from the fitter
    object.

    Parameters
    ----------
    star_group_size : int
        Number of stars in the fitted group.

    Returns
    -------
    unc_tab : `~astropy.table.Table`
        One ``<param>_unc`` column per non-fixed model parameter, one
        row per star.
    """
    unc_tab = Table()
    for param_name in self.psf_model.param_names:
        if not self.psf_model.fixed[param_name]:
            unc_tab.add_column(Column(name=param_name + "_unc",
                                      data=np.empty(star_group_size)))

    if 'param_cov' in self.fitter.fit_info.keys():
        if self.fitter.fit_info['param_cov'] is not None:
            # uncertainties are the sqrt of the covariance diagonal;
            # consecutive slices of length n_fit_params belong to
            # consecutive stars in the group
            k = 0
            n_fit_params = len(unc_tab.colnames)
            for i in range(star_group_size):
                unc_tab[i] = np.sqrt(np.diag(
                    self.fitter.fit_info['param_cov']))[k:k + n_fit_params]
                k = k + n_fit_params
    return unc_tab
Retrieve uncertainties on fitted parameters from the fitter object .
221
12
242,411
def _model_params2table(self, fit_model, star_group_size):
    """Place fitted parameters into an astropy table.

    Parameters
    ----------
    fit_model : fitted model
        A compound model when ``star_group_size > 1``.
    star_group_size : int
        Number of stars in the fitted group.

    Returns
    -------
    param_tab : `~astropy.table.Table`
        One row per star with the fitted parameter values.
    """
    param_tab = Table()

    for param_tab_name in self._pars_to_output.keys():
        param_tab.add_column(Column(name=param_tab_name,
                                    data=np.empty(star_group_size)))

    if star_group_size > 1:
        # compound model: each star's parameters carry a ``_<i>`` suffix
        for i in range(star_group_size):
            for param_tab_name, param_name in self._pars_to_output.items():
                param_tab[param_tab_name][i] = getattr(
                    fit_model, param_name + '_' + str(i)).value
    else:
        for param_tab_name, param_name in self._pars_to_output.items():
            param_tab[param_tab_name] = getattr(fit_model,
                                                param_name).value

    return param_tab
Place fitted parameters into an astropy table .
218
9
242,412
def _do_photometry(self, param_tab, n_start=1):
    """Helper function which performs the iterations of the photometry
    process: detect sources, estimate fluxes via aperture photometry,
    group, fit, and subtract, repeating until the finder detects no
    more sources or ``niters`` is reached.

    Parameters
    ----------
    param_tab : table
        Table of parameter names (carried through from the caller).
    n_start : int, optional
        Iteration number at which counting starts.

    Returns
    -------
    output_table : `~astropy.table.Table`
        Fit results, including an ``iter_detected`` column recording
        the iteration at which each source was found.
    """
    output_table = Table()
    self._define_fit_param_names()

    for (init_parname, fit_parname) in zip(self._pars_to_set.keys(),
                                           self._pars_to_output.keys()):
        output_table.add_column(Column(name=init_parname))
        output_table.add_column(Column(name=fit_parname))

    sources = self.finder(self._residual_image)

    n = n_start
    while (sources is not None and
           (self.niters is None or n <= self.niters)):
        # initial flux guesses come from aperture photometry on the
        # current residual image
        apertures = CircularAperture((sources['xcentroid'],
                                      sources['ycentroid']),
                                     r=self.aperture_radius)
        sources['aperture_flux'] = aperture_photometry(
            self._residual_image, apertures)['aperture_sum']

        init_guess_tab = Table(names=['id', 'x_0', 'y_0', 'flux_0'],
                               data=[sources['id'],
                                     sources['xcentroid'],
                                     sources['ycentroid'],
                                     sources['aperture_flux']])
        # carry over any other fittable parameters at their current
        # model values
        for param_tab_name, param_name in self._pars_to_set.items():
            if param_tab_name not in (['x_0', 'y_0', 'flux_0']):
                init_guess_tab.add_column(
                    Column(name=param_tab_name,
                           data=(getattr(self.psf_model, param_name) *
                                 np.ones(len(sources)))))

        star_groups = self.group_maker(init_guess_tab)
        table, self._residual_image = super().nstar(
            self._residual_image, star_groups)

        star_groups = star_groups.group_by('group_id')
        table = hstack([star_groups, table])

        table['iter_detected'] = n * np.ones(table['x_fit'].shape,
                                             dtype=np.int32)
        output_table = vstack([output_table, table])

        # do not warn if no sources are found beyond the first iteration
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', NoDetectionsWarning)
            sources = self.finder(self._residual_image)

        n += 1

    return output_table
Helper function which performs the iterations of the photometry process .
617
12
242,413
def pixel_scale_angle_at_skycoord(skycoord, wcs, offset=1. * u.arcsec):
    """Calculate the pixel scale and WCS rotation angle at the position
    of a SkyCoord coordinate.

    Parameters
    ----------
    skycoord : `~astropy.coordinates.SkyCoord`
        Coordinate at which to evaluate the scale and angle.
    wcs : WCS object
        The world coordinate system.
    offset : Quantity (angle), optional
        Small angular offset used for the finite-difference estimate.

    Returns
    -------
    scale : Quantity (arcsec / pixel)
    angle : Quantity (deg)
    """
    # We take a point directly "above" (in latitude) the input position
    # and convert it to pixel coordinates, then we use the pixel deltas
    # between the input and offset point to calculate the pixel scale and
    # angle.

    # Find the coordinates as a representation object
    coord = skycoord.represent_as('unitspherical')

    # Add a small perturbation in the latitude direction (since longitude
    # is more difficult because it is not directly an angle)
    coord_new = UnitSphericalRepresentation(coord.lon, coord.lat + offset)
    coord_offset = skycoord.realize_frame(coord_new)

    # Find pixel coordinates of offset coordinates and pixel deltas
    x_offset, y_offset = skycoord_to_pixel(coord_offset, wcs, mode='all')
    x, y = skycoord_to_pixel(skycoord, wcs, mode='all')
    dx = x_offset - x
    dy = y_offset - y

    scale = offset.to(u.arcsec) / (np.hypot(dx, dy) * u.pixel)
    angle = (np.arctan2(dy, dx) * u.radian).to(u.deg)

    return scale, angle
Calculate the pixel scale and WCS rotation angle at the position of a SkyCoord coordinate .
298
20
242,414
def pixel_to_icrs_coords(x, y, wcs):
    """Convert pixel coordinates to ICRS Right Ascension and
    Declination, each returned as a Quantity in degrees."""
    coords = pixel_to_skycoord(x, y, wcs).icrs
    return (coords.ra.degree * u.deg, coords.dec.degree * u.deg)
Convert pixel coordinates to ICRS Right Ascension and Declination .
88
13
242,415
def filter_data(data, kernel, mode='constant', fill_value=0.0,
                check_normalization=False):
    """Convolve a 2D image with a 2D kernel.

    Parameters
    ----------
    data : 2D array_like
        The image to filter.
    kernel : 2D array_like, Kernel2D, or None
        The convolution kernel; when None, ``data`` is returned
        unchanged.
    mode, fill_value : passed to `scipy.ndimage.convolve`.
    check_normalization : bool, optional
        Warn when the kernel does not sum to 1.

    Returns
    -------
    The convolved (float) image, or the input ``data`` when ``kernel``
    is None.
    """
    from scipy import ndimage

    if kernel is not None:
        if isinstance(kernel, Kernel2D):
            kernel_array = kernel.array
        else:
            kernel_array = kernel

        if check_normalization:
            if not np.allclose(np.sum(kernel_array), 1.0):
                warnings.warn('The kernel is not normalized.',
                              AstropyUserWarning)

        # NOTE: astropy.convolution.convolve fails with zero-sum
        # kernels (used in findstars) (cf. astropy #1647)
        # NOTE: if data is int and kernel is float, ndimage.convolve
        # will return an int image - here we make the data float so
        # that a float image is always returned
        return ndimage.convolve(data.astype(float), kernel_array,
                                mode=mode, cval=fill_value)
    else:
        return data
Convolve a 2D image with a 2D kernel .
223
13
242,416
def prepare_psf_model(psfmodel, xname=None, yname=None, fluxname=None,
                      renormalize_psf=True):
    """Convert a 2D PSF model to one suitable for use with
    ``BasicPSFPhotometry`` or its subclasses.

    Parameters
    ----------
    psfmodel : 2D model
        The model to wrap.
    xname, yname, fluxname : str or None, optional
        Names of the model parameters providing center and flux; when
        None, Shift / Const2D models are composed in to provide them.
    renormalize_psf : bool, optional
        Rescale the model so that it integrates to 1 over the plane.

    Returns
    -------
    outmod : compound model
        A model with ``xname``, ``yname``, and ``fluxname`` attributes
        set so the photometry machinery knows which parameters to fit.
    """
    if xname is None:
        xinmod = models.Shift(0, name='x_offset')
        xname = 'offset_0'
    else:
        xinmod = models.Identity(1)
        xname = xname + '_2'
    xinmod.fittable = True

    if yname is None:
        yinmod = models.Shift(0, name='y_offset')
        yname = 'offset_1'
    else:
        yinmod = models.Identity(1)
        yname = yname + '_2'
    yinmod.fittable = True

    outmod = (xinmod & yinmod) | psfmodel

    if fluxname is None:
        outmod = outmod * models.Const2D(1, name='flux_scaling')
        fluxname = 'amplitude_3'
    else:
        fluxname = fluxname + '_2'

    if renormalize_psf:
        # we do the import here because other machinery works w/o scipy
        from scipy import integrate

        integrand = integrate.dblquad(psfmodel, -np.inf, np.inf,
                                      lambda x: -np.inf,
                                      lambda x: np.inf)[0]
        normmod = models.Const2D(1. / integrand,
                                 name='renormalize_scaling')
        outmod = outmod * normmod

    # final setup of the output model - fix all the non-offset/scale
    # parameters
    for pnm in outmod.param_names:
        outmod.fixed[pnm] = pnm not in (xname, yname, fluxname)

    # and set the names so that BasicPSFPhotometry knows what to do
    outmod.xname = xname
    outmod.yname = yname
    outmod.fluxname = fluxname

    # now some convenience aliases if reasonable
    outmod.psfmodel = outmod[2]
    if 'x_0' not in outmod.param_names and 'y_0' not in outmod.param_names:
        outmod.x_0 = getattr(outmod, xname)
        outmod.y_0 = getattr(outmod, yname)
    if 'flux' not in outmod.param_names:
        outmod.flux = getattr(outmod, fluxname)

    return outmod
Convert a 2D PSF model to one suitable for use with BasicPSFPhotometry or its subclasses .
536
24
242,417
def get_grouped_psf_model(template_psf_model, star_group, pars_to_set):
    """Construct a joint PSF model as the sum of copies of
    ``template_psf_model``, one copy per row of ``star_group``, with
    the parameters named in ``pars_to_set`` taken from the table."""
    group_psf = None
    for star in star_group:
        star_model = template_psf_model.copy()
        for table_name, model_name in pars_to_set.items():
            setattr(star_model, model_name, star[table_name])
        # the first star's model seeds the sum
        if group_psf is None:
            group_psf = star_model
        else:
            group_psf = group_psf + star_model
    return group_psf
Construct a joint PSF model which consists of a sum of PSF s templated on a specific model but whose parameters are given by a table of objects .
147
33
242,418
def _call_fitter ( fitter , psf , x , y , data , weights ) : if np . all ( weights == 1. ) : return fitter ( psf , x , y , data ) else : return fitter ( psf , x , y , data , weights = weights )
Not all fitters have to support a weight array . This function includes the weight in the fitter call only if really needed .
65
26
242,419
def detect_threshold(data, snr, background=None, error=None, mask=None,
                     mask_value=None, sigclip_sigma=3.0,
                     sigclip_iters=None):
    """Calculate a pixel-wise threshold image that can be used to
    detect sources.

    The threshold is ``background + (error * snr)``; when background
    and/or error are not given, they are estimated from sigma-clipped
    statistics of the image.

    Parameters
    ----------
    data : 2D array_like
        The image.
    snr : float
        Signal-to-noise ratio per pixel used for the threshold.
    background, error : float, 2D array, or None, optional
        Background level and noise; scalars are broadcast to the image
        shape, and 2D inputs must match ``data``'s shape.
    mask, mask_value, sigclip_sigma, sigclip_iters : optional
        Passed to the sigma-clipped statistics when estimating.

    Returns
    -------
    threshold : 2D array
        The per-pixel detection threshold.
    """
    if background is None or error is None:
        # NOTE(review): this compares against the string '3.1'; if
        # ``astropy_version`` is a plain str the comparison is
        # lexicographic and misorders e.g. '10.0' vs '3.1' -- confirm
        # it is a Version-like object.
        if astropy_version < '3.1':
            data_mean, data_median, data_std = sigma_clipped_stats(
                data, mask=mask, mask_value=mask_value,
                sigma=sigclip_sigma, iters=sigclip_iters)
        else:
            data_mean, data_median, data_std = sigma_clipped_stats(
                data, mask=mask, mask_value=mask_value,
                sigma=sigclip_sigma, maxiters=sigclip_iters)

        bkgrd_image = np.zeros_like(data) + data_mean
        bkgrdrms_image = np.zeros_like(data) + data_std

    if background is None:
        background = bkgrd_image
    else:
        if np.isscalar(background):
            background = np.zeros_like(data) + background
        else:
            if background.shape != data.shape:
                raise ValueError('If input background is 2D, then it '
                                 'must have the same shape as the input '
                                 'data.')

    if error is None:
        error = bkgrdrms_image
    else:
        if np.isscalar(error):
            error = np.zeros_like(data) + error
        else:
            if error.shape != data.shape:
                raise ValueError('If input error is 2D, then it '
                                 'must have the same shape as the input '
                                 'data.')

    return background + (error * snr)
Calculate a pixel - wise threshold image that can be used to detect sources .
385
17
242,420
def run_cmd(cmd):
    """Run a command in a subprocess given as a list of command-line
    arguments.

    Returns a 3-tuple ``(returncode, stdout, stderr)`` with the output
    streams decoded to str.  Raises ``_CommandNotFound`` when the
    executable does not exist, and ``_AHBootstrapSystemExit`` for other
    OS-level errors (unless DEBUG, in which case the original exception
    propagates).
    """
    try:
        p = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        # XXX: May block if either stdout or stderr fill their buffers;
        # however for the commands this is currently used for that is
        # unlikely (they should have very brief output)
        stdout, stderr = p.communicate()
    except OSError as e:
        if DEBUG:
            raise

        if e.errno == errno.ENOENT:
            msg = 'Command not found: `{0}`'.format(' '.join(cmd))
            raise _CommandNotFound(msg, cmd)
        else:
            raise _AHBootstrapSystemExit(
                'An unexpected error occurred when running the '
                '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))

    # Can fail of the default locale is not configured properly. See
    # https://github.com/astropy/astropy/issues/2749. For the purposes under
    # consideration 'latin1' is an acceptable fallback.
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        # Due to an OSX oddity locale.getdefaultlocale() can also crash
        # depending on the user's locale/language settings. See:
        # http://bugs.python.org/issue18378
        stdio_encoding = 'latin1'

    # Unlikely to fail at this point but even then let's be flexible
    if not isinstance(stdout, str):
        stdout = stdout.decode(stdio_encoding, 'replace')
    if not isinstance(stderr, str):
        stderr = stderr.decode(stdio_encoding, 'replace')

    return (p.returncode, stdout, stderr)
Run a command in a subprocess given as a list of command - line arguments .
418
17
242,421
def to_sky(self, wcs, mode='all'):
    """Convert the aperture to a `SkyEllipticalAperture` object defined
    in celestial coordinates.

    Parameters
    ----------
    wcs : WCS object
        The world coordinate system used for the conversion.
    mode : str, optional
        Passed through to the parameter conversion (default 'all').

    Returns
    -------
    aperture : `SkyEllipticalAperture`
    """
    sky_params = self._to_sky_params(wcs, mode=mode)
    return SkyEllipticalAperture(**sky_params)
Convert the aperture to a SkyEllipticalAperture object defined in celestial coordinates .
51
18
242,422
def to_sky(self, wcs, mode='all'):
    """Convert the aperture to a `SkyEllipticalAnnulus` object defined
    in celestial coordinates.

    Parameters
    ----------
    wcs : WCS object
        The world coordinate system used for the conversion.
    mode : str, optional
        Passed through to the parameter conversion (default 'all').

    Returns
    -------
    aperture : `SkyEllipticalAnnulus`
    """
    sky_params = self._to_sky_params(wcs, mode=mode)
    return SkyEllipticalAnnulus(**sky_params)
Convert the aperture to a SkyEllipticalAnnulus object defined in celestial coordinates .
51
18
242,423
def to_pixel(self, wcs, mode='all'):
    """Convert the aperture to an `EllipticalAperture` object defined
    in pixel coordinates.

    Parameters
    ----------
    wcs : WCS object
        The world coordinate system used for the conversion.
    mode : str, optional
        Passed through to the parameter conversion (default 'all').

    Returns
    -------
    aperture : `EllipticalAperture`
    """
    pixel_params = self._to_pixel_params(wcs, mode=mode)
    return EllipticalAperture(**pixel_params)
Convert the aperture to an EllipticalAperture object defined in pixel coordinates .
50
17
242,424
def to_pixel(self, wcs, mode='all'):
    """Convert the aperture to an `EllipticalAnnulus` object defined
    in pixel coordinates.

    Parameters
    ----------
    wcs : WCS object
        The world coordinate system used for the conversion.
    mode : str, optional
        Passed through to the parameter conversion (default 'all').

    Returns
    -------
    aperture : `EllipticalAnnulus`
    """
    pixel_params = self._to_pixel_params(wcs, mode=mode)
    return EllipticalAnnulus(**pixel_params)
Convert the aperture to an EllipticalAnnulus object defined in pixel coordinates .
50
17
242,425
def _area ( sma , eps , phi , r ) : aux = r * math . cos ( phi ) / sma signal = aux / abs ( aux ) if abs ( aux ) >= 1. : aux = signal return abs ( sma ** 2 * ( 1. - eps ) / 2. * math . acos ( aux ) )
Compute elliptical sector area .
77
7
242,426
def find_center(self, image, threshold=0.1, verbose=True):
    """Find the center of a galaxy.

    Scans an 11x11-pixel window around the current (or frame) center,
    computing at each position a figure of merit that contrasts the
    mean inside an inner mask against an outer mask; the geometry's
    center is updated only when the best figure of merit exceeds
    ``threshold``.
    """
    self._centerer_mask_half_size = len(IN_MASK) / 2
    self.centerer_threshold = threshold

    # number of pixels in each mask
    sz = len(IN_MASK)
    self._centerer_ones_in = np.ma.masked_array(np.ones(shape=(sz, sz)),
                                                mask=IN_MASK)
    self._centerer_ones_out = np.ma.masked_array(np.ones(shape=(sz, sz)),
                                                 mask=OUT_MASK)
    self._centerer_in_mask_npix = np.sum(self._centerer_ones_in)
    self._centerer_out_mask_npix = np.sum(self._centerer_ones_out)

    # Check if center coordinates point to somewhere inside the frame.
    # If not, set then to frame center.
    shape = image.shape
    _x0 = self.x0
    _y0 = self.y0
    if (_x0 is None or _x0 < 0 or _x0 >= shape[1] or
            _y0 is None or _y0 < 0 or _y0 >= shape[0]):
        _x0 = shape[1] / 2
        _y0 = shape[0] / 2

    max_fom = 0.
    max_i = 0
    max_j = 0

    # scan all positions inside window
    window_half_size = 5
    for i in range(int(_x0 - window_half_size),
                   int(_x0 + window_half_size) + 1):
        for j in range(int(_y0 - window_half_size),
                       int(_y0 + window_half_size) + 1):

            # ensure that it stays inside image frame
            i1 = int(max(0, i - self._centerer_mask_half_size))
            j1 = int(max(0, j - self._centerer_mask_half_size))
            i2 = int(min(shape[1] - 1, i + self._centerer_mask_half_size))
            j2 = int(min(shape[0] - 1, j + self._centerer_mask_half_size))

            window = image[j1:j2, i1:i2]

            # averages in inner and outer regions.
            inner = np.ma.masked_array(window, mask=IN_MASK)
            outer = np.ma.masked_array(window, mask=OUT_MASK)
            inner_avg = np.sum(inner) / self._centerer_in_mask_npix
            outer_avg = np.sum(outer) / self._centerer_out_mask_npix

            # standard deviation and figure of merit
            inner_std = np.std(inner)
            outer_std = np.std(outer)
            stddev = np.sqrt(inner_std ** 2 + outer_std ** 2)

            fom = (inner_avg - outer_avg) / stddev

            if fom > max_fom:
                max_fom = fom
                max_i = i
                max_j = j

    # figure of merit > threshold: update geometry with new coordinates.
    if max_fom > threshold:
        self.x0 = float(max_i)
        self.y0 = float(max_j)

        if verbose:
            log.info("Found center at x0 = {0:5.1f}, y0 = {1:5.1f}"
                     .format(self.x0, self.y0))
    else:
        if verbose:
            log.info('Result is below the threshold -- keeping the '
                     'original coordinates.')
Find the center of a galaxy .
840
7
242,427
def radius(self, angle):
    """Calculate the polar radius on the ellipse for a given polar
    angle (radians, measured from the major axis)."""
    flattening = 1. - self.eps
    denom = np.sqrt((flattening * np.cos(angle)) ** 2 +
                    (np.sin(angle)) ** 2)
    return self.sma * flattening / denom
Calculate the polar radius for a given polar angle .
63
12
242,428
def initialize_sector_geometry(self, phi):
    """Initialize geometry attributes associated with an elliptical
    sector at the given polar angle ``phi``.

    Sets ``sector_area`` and updates ``sector_angular_width``, and
    returns the x and y coordinates of the sector's four vertices.
    """
    # These polar radii bound the region between the inner
    # and outer ellipses that define the sector.
    sma1, sma2 = self.bounding_ellipses()
    eps_ = 1. - self.eps

    # polar vector at one side of the elliptical sector
    self._phi1 = phi - self.sector_angular_width / 2.
    r1 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi1)) ** 2 +
                                  (math.sin(self._phi1)) ** 2))
    r2 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi1)) ** 2 +
                                  (math.sin(self._phi1)) ** 2))

    # polar vector at the other side of the elliptical sector
    self._phi2 = phi + self.sector_angular_width / 2.
    r3 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi2)) ** 2 +
                                  (math.sin(self._phi2)) ** 2))
    r4 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi2)) ** 2 +
                                  (math.sin(self._phi2)) ** 2))

    # sector area
    sa1 = _area(sma1, self.eps, self._phi1, r1)
    sa2 = _area(sma2, self.eps, self._phi1, r2)
    sa3 = _area(sma2, self.eps, self._phi2, r3)
    sa4 = _area(sma1, self.eps, self._phi2, r4)
    self.sector_area = abs((sa3 - sa2) - (sa4 - sa1))

    # angular width of sector. It is calculated such that the sectors
    # come out with roughly constant area along the ellipse.
    self.sector_angular_width = max(
        min((self._area_factor / (r3 - r4) / r4), self._phi_max),
        self._phi_min)

    # compute the 4 vertices that define the elliptical sector.
    vertex_x = np.zeros(shape=4, dtype=float)
    vertex_y = np.zeros(shape=4, dtype=float)

    # vertices are labelled in counterclockwise sequence
    vertex_x[0:2] = np.array([r1, r2]) * math.cos(self._phi1 + self.pa)
    vertex_x[2:4] = np.array([r4, r3]) * math.cos(self._phi2 + self.pa)
    vertex_y[0:2] = np.array([r1, r2]) * math.sin(self._phi1 + self.pa)
    vertex_y[2:4] = np.array([r4, r3]) * math.sin(self._phi2 + self.pa)
    vertex_x += self.x0
    vertex_y += self.y0

    return vertex_x, vertex_y
Initialize geometry attributes associated with an elliptical sector at the given polar angle phi .
757
18
242,429
def bounding_ellipses(self):
    """Compute the semimajor axes of the two ellipses that bound the
    annulus where integrations take place."""
    half_step = self.astep / 2.
    if self.linear_growth:
        # additive step
        return self.sma - half_step, self.sma + half_step
    # multiplicative step
    return self.sma * (1. - half_step), self.sma * (1. + half_step)
Compute the semimajor axis of the two ellipses that bound the annulus where integrations take place .
98
24
242,430
def update_sma(self, step):
    """Return an updated semimajor-axis value given the current value
    and ``step`` (additive when growth is linear, multiplicative
    otherwise)."""
    if self.linear_growth:
        return self.sma + step
    return self.sma * (1. + step)
Calculate an updated value for the semimajor axis given the current value and the step value .
46
21
242,431
def reset_sma(self, step):
    """Reverse the direction of semimajor-axis growth from outwards to
    inwards, returning the new sma and the new (negated) step."""
    if self.linear_growth:
        return self.sma - step, -step
    # geometric growth: invert the multiplicative factor
    shrink = 1. / (1. + step)
    return self.sma * shrink, shrink - 1.
Change the direction of semimajor axis growth from outwards to inwards .
64
16
242,432
def resize_psf(psf, input_pixel_scale, output_pixel_scale, order=3):
    """Resize a PSF using spline interpolation of the requested order,
    rescaling the values so that total flux is conserved."""
    from scipy.ndimage import zoom
    scale_ratio = input_pixel_scale / output_pixel_scale
    # divide by ratio**2 to conserve total flux after resampling
    return zoom(psf, scale_ratio, order=order) / scale_ratio ** 2
Resize a PSF using spline interpolation of the requested order .
65
15
242,433
def _select_meshes ( self , data ) : # the number of masked pixels in each mesh nmasked = np . ma . count_masked ( data , axis = 1 ) # meshes that contain more than ``exclude_percentile`` percent # masked pixels are excluded: # - for exclude_percentile=0, good meshes will be only where # nmasked=0 # - meshes where nmasked=self.box_npixels are *always* excluded # (second conditional needed for exclude_percentile=100) threshold_npixels = self . exclude_percentile / 100. * self . box_npixels mesh_idx = np . where ( ( nmasked <= threshold_npixels ) & ( nmasked != self . box_npixels ) ) [ 0 ] # good meshes if len ( mesh_idx ) == 0 : raise ValueError ( 'All meshes contain > {0} ({1} percent per ' 'mesh) masked pixels. Please check your data ' 'or decrease "exclude_percentile".' . format ( threshold_npixels , self . exclude_percentile ) ) return mesh_idx
Define the x and y indices with respect to the low - resolution mesh image of the meshes to use for the background interpolation .
247
27
242,434
def _prepare_data(self):
    """Prepare the data: resize it so it tiles evenly into boxes,
    reshape it into one row per mesh, and apply the first
    mesh-rejection cut."""
    self.nyboxes = self.data.shape[0] // self.box_size[0]
    self.nxboxes = self.data.shape[1] // self.box_size[1]
    yextra = self.data.shape[0] % self.box_size[0]
    xextra = self.data.shape[1] % self.box_size[1]

    if (xextra + yextra) == 0:
        # no resizing of the data is necessary
        data_ma = np.ma.masked_array(self.data, mask=self.mask)
    else:
        # pad or crop the data
        if self.edge_method == 'pad':
            data_ma = self._pad_data(yextra, xextra)
            self.nyboxes = data_ma.shape[0] // self.box_size[0]
            self.nxboxes = data_ma.shape[1] // self.box_size[1]
        elif self.edge_method == 'crop':
            data_ma = self._crop_data()
        else:
            raise ValueError('edge_method must be "pad" or "crop"')

    self.nboxes = self.nxboxes * self.nyboxes

    # a reshaped 2D masked array with mesh data along the x axis
    mesh_data = np.ma.swapaxes(
        data_ma.reshape(self.nyboxes, self.box_size[0],
                        self.nxboxes, self.box_size[1]),
        1, 2).reshape(self.nyboxes * self.nxboxes, self.box_npixels)

    # first cut on rejecting meshes
    self.mesh_idx = self._select_meshes(mesh_data)
    self._mesh_data = mesh_data[self.mesh_idx, :]

    return
Prepare the data .
419
5
242,435
def _make_2d_array ( self , data ) : if data . shape != self . mesh_idx . shape : raise ValueError ( 'data and mesh_idx must have the same shape' ) if np . ma . is_masked ( data ) : raise ValueError ( 'data must not be a masked array' ) data2d = np . zeros ( self . _mesh_shape ) . astype ( data . dtype ) data2d [ self . mesh_yidx , self . mesh_xidx ] = data if len ( self . mesh_idx ) == self . nboxes : # no meshes were masked return data2d else : # some meshes were masked mask2d = np . ones ( data2d . shape ) . astype ( np . bool ) mask2d [ self . mesh_yidx , self . mesh_xidx ] = False return np . ma . masked_array ( data2d , mask = mask2d )
Convert a 1D array of mesh values to a masked 2D mesh array given the 1D mesh indices mesh_idx .
216
27
242,436
def _interpolate_meshes(self, data, n_neighbors=10, eps=0., power=1.,
                        reg=0.):
    """Use IDW interpolation to fill in any masked pixels in the
    low-resolution 2D mesh background and background RMS images.

    Parameters
    ----------
    data : 1D array
        One value per good mesh.
    n_neighbors, eps, power, reg : optional
        Options passed to the IDW interpolator call.

    Returns
    -------
    2D array of shape ``_mesh_shape`` with every mesh filled.
    """
    yx = np.column_stack([self.mesh_yidx, self.mesh_xidx])
    # evaluate the interpolator on the full (y, x) mesh grid
    coords = np.array(list(product(range(self.nyboxes),
                                   range(self.nxboxes))))
    f = ShepardIDWInterpolator(yx, data)
    img1d = f(coords, n_neighbors=n_neighbors, power=power, eps=eps,
              reg=reg)
    return img1d.reshape(self._mesh_shape)
Use IDW interpolation to fill in any masked pixels in the low - resolution 2D mesh background and background RMS images .
161
26
242,437
def _selective_filter ( self , data , indices ) : data_out = np . copy ( data ) for i , j in zip ( * indices ) : yfs , xfs = self . filter_size hyfs , hxfs = yfs // 2 , xfs // 2 y0 , y1 = max ( i - hyfs , 0 ) , min ( i - hyfs + yfs , data . shape [ 0 ] ) x0 , x1 = max ( j - hxfs , 0 ) , min ( j - hxfs + xfs , data . shape [ 1 ] ) data_out [ i , j ] = np . median ( data [ y0 : y1 , x0 : x1 ] ) return data_out
Selectively filter only pixels above filter_threshold in the background mesh .
162
15
242,438
def _filter_meshes(self):
    """Apply a 2D median filter to the low-resolution 2D mesh,
    including only pixels inside the image at the borders.

    When ``filter_threshold`` is set, only meshes above the threshold
    are filtered.
    """
    from scipy.ndimage import generic_filter
    try:
        nanmedian_func = np.nanmedian    # numpy >= 1.9
    except AttributeError:    # pragma: no cover
        from scipy.stats import nanmedian
        nanmedian_func = nanmedian

    if self.filter_threshold is None:
        # filter the entire arrays
        self.background_mesh = generic_filter(
            self.background_mesh, nanmedian_func, size=self.filter_size,
            mode='constant', cval=np.nan)
        self.background_rms_mesh = generic_filter(
            self.background_rms_mesh, nanmedian_func,
            size=self.filter_size, mode='constant', cval=np.nan)
    else:
        # selectively filter
        indices = np.nonzero(self.background_mesh > self.filter_threshold)
        self.background_mesh = self._selective_filter(
            self.background_mesh, indices)
        self.background_rms_mesh = self._selective_filter(
            self.background_rms_mesh, indices)

    return
Apply a 2D median filter to the low - resolution 2D mesh including only pixels inside the image at the borders .
266
24
242,439
def _calc_bkg_bkgrms(self):
    """Calculate the background and background RMS estimate in each of
    the meshes."""
    if self.sigma_clip is not None:
        data_sigclip = self.sigma_clip(self._mesh_data, axis=1)
    else:
        data_sigclip = self._mesh_data
    del self._mesh_data

    # preform mesh rejection on sigma-clipped data (i.e. for any
    # newly-masked pixels)
    idx = self._select_meshes(data_sigclip)
    self.mesh_idx = self.mesh_idx[idx]    # indices for the output mesh
    self._data_sigclip = data_sigclip[idx]    # always a 2D masked array

    self._mesh_shape = (self.nyboxes, self.nxboxes)
    self.mesh_yidx, self.mesh_xidx = np.unravel_index(self.mesh_idx,
                                                      self._mesh_shape)

    # These properties are needed later to calculate
    # background_mesh_ma and background_rms_mesh_ma. Note that _bkg1d
    # and _bkgrms1d are masked arrays, but the mask should always be
    # False.
    self._bkg1d = self.bkg_estimator(self._data_sigclip, axis=1)
    self._bkgrms1d = self.bkgrms_estimator(self._data_sigclip, axis=1)

    # make the unfiltered 2D mesh arrays (these are not masked)
    if len(self._bkg1d) == self.nboxes:
        bkg = self._make_2d_array(self._bkg1d)
        bkgrms = self._make_2d_array(self._bkgrms1d)
    else:
        # some meshes were rejected; fill the gaps via interpolation
        bkg = self._interpolate_meshes(self._bkg1d)
        bkgrms = self._interpolate_meshes(self._bkgrms1d)

    self._background_mesh_unfiltered = bkg
    self._background_rms_mesh_unfiltered = bkgrms
    self.background_mesh = bkg
    self.background_rms_mesh = bkgrms

    # filter the 2D mesh arrays
    if not np.array_equal(self.filter_size, [1, 1]):
        self._filter_meshes()

    return
Calculate the background and background RMS estimate in each of the meshes .
564
16
242,440
def _calc_coordinates ( self ) : # the position coordinates used to initialize an interpolation self . y = ( self . mesh_yidx * self . box_size [ 0 ] + ( self . box_size [ 0 ] - 1 ) / 2. ) self . x = ( self . mesh_xidx * self . box_size [ 1 ] + ( self . box_size [ 1 ] - 1 ) / 2. ) self . yx = np . column_stack ( [ self . y , self . x ] ) # the position coordinates used when calling an interpolator nx , ny = self . data . shape self . data_coords = np . array ( list ( product ( range ( ny ) , range ( nx ) ) ) )
Calculate the coordinates to use when calling an interpolator .
168
13
242,441
def plot_meshes ( self , ax = None , marker = '+' , color = 'blue' , outlines = False , * * kwargs ) : import matplotlib . pyplot as plt kwargs [ 'color' ] = color if ax is None : ax = plt . gca ( ) ax . scatter ( self . x , self . y , marker = marker , color = color ) if outlines : from . . aperture import RectangularAperture xy = np . column_stack ( [ self . x , self . y ] ) apers = RectangularAperture ( xy , self . box_size [ 1 ] , self . box_size [ 0 ] , 0. ) apers . plot ( ax = ax , * * kwargs ) return
Plot the low - resolution mesh boxes on a matplotlib Axes instance .
168
16
242,442
def extract ( self ) : # the sample values themselves are kept cached to prevent # multiple calls to the integrator code. if self . values is not None : return self . values else : s = self . _extract ( ) self . values = s return s
Extract sample data by scanning an elliptical path over the image array .
55
15
242,443
def update ( self ) : step = self . geometry . astep # Update the mean value first, using extraction from main sample. s = self . extract ( ) self . mean = np . mean ( s [ 2 ] ) # Get sample with same geometry but at a different distance from # center. Estimate gradient from there. gradient , gradient_error = self . _get_gradient ( step ) # Check for meaningful gradient. If no meaningful gradient, try # another sample, this time using larger radius. Meaningful # gradient means something shallower, but still close to within # a factor 3 from previous gradient estimate. If no previous # estimate is available, guess it. previous_gradient = self . gradient if not previous_gradient : previous_gradient = - 0.05 # good enough, based on usage if gradient >= ( previous_gradient / 3. ) : # gradient is negative! gradient , gradient_error = self . _get_gradient ( 2 * step ) # If still no meaningful gradient can be measured, try with # previous one, slightly shallower. A factor 0.8 is not too far # from what is expected from geometrical sampling steps of 10-20% # and a deVaucouleurs law or an exponential disk (at least at its # inner parts, r <~ 5 req). Gradient error is meaningless in this # case. if gradient >= ( previous_gradient / 3. ) : gradient = previous_gradient * 0.8 gradient_error = None self . gradient = gradient self . gradient_error = gradient_error if gradient_error : self . gradient_relative_error = gradient_error / np . abs ( gradient ) else : self . gradient_relative_error = None
Update this ~photutils . isophote . EllipseSample instance .
359
16
242,444
def _extract_stars ( data , catalog , size = ( 11 , 11 ) , use_xy = True ) : colnames = catalog . colnames if ( 'x' not in colnames or 'y' not in colnames ) or not use_xy : xcenters , ycenters = skycoord_to_pixel ( catalog [ 'skycoord' ] , data . wcs , origin = 0 , mode = 'all' ) else : xcenters = catalog [ 'x' ] . data . astype ( np . float ) ycenters = catalog [ 'y' ] . data . astype ( np . float ) if 'id' in colnames : ids = catalog [ 'id' ] else : ids = np . arange ( len ( catalog ) , dtype = np . int ) + 1 if data . uncertainty is None : weights = np . ones_like ( data . data ) else : if data . uncertainty . uncertainty_type == 'weights' : weights = np . asanyarray ( data . uncertainty . array , dtype = np . float ) else : warnings . warn ( 'The data uncertainty attribute has an unsupported ' 'type. Only uncertainty_type="weights" can be ' 'used to set weights. Weights will be set to 1.' , AstropyUserWarning ) weights = np . ones_like ( data . data ) if data . mask is not None : weights [ data . mask ] = 0. stars = [ ] for xcenter , ycenter , obj_id in zip ( xcenters , ycenters , ids ) : try : large_slc , small_slc = overlap_slices ( data . data . shape , size , ( ycenter , xcenter ) , mode = 'strict' ) data_cutout = data . data [ large_slc ] weights_cutout = weights [ large_slc ] except ( PartialOverlapError , NoOverlapError ) : stars . append ( None ) continue origin = ( large_slc [ 1 ] . start , large_slc [ 0 ] . start ) cutout_center = ( xcenter - origin [ 0 ] , ycenter - origin [ 1 ] ) star = EPSFStar ( data_cutout , weights_cutout , cutout_center = cutout_center , origin = origin , wcs_large = data . wcs , id_label = obj_id ) stars . append ( star ) return stars
Extract cutout images from a single image centered on stars defined in the single input catalog .
527
19
242,445
def estimate_flux ( self ) : from . epsf import _interpolate_missing_data if np . any ( self . mask ) : data_interp = _interpolate_missing_data ( self . data , method = 'cubic' , mask = self . mask ) data_interp = _interpolate_missing_data ( data_interp , method = 'nearest' , mask = self . mask ) flux = np . sum ( data_interp , dtype = np . float64 ) else : flux = np . sum ( self . data , dtype = np . float64 ) return flux
Estimate the star s flux by summing values in the input cutout array .
138
17
242,446
def _xy_idx ( self ) : yidx , xidx = np . indices ( self . _data . shape ) return xidx [ ~ self . mask ] . ravel ( ) , yidx [ ~ self . mask ] . ravel ( )
1D arrays of x and y indices of unmasked pixels in the cutout reference frame .
59
20
242,447
def find_group ( self , star , starlist ) : star_distance = np . hypot ( star [ 'x_0' ] - starlist [ 'x_0' ] , star [ 'y_0' ] - starlist [ 'y_0' ] ) distance_criteria = star_distance < self . crit_separation return np . asarray ( starlist [ distance_criteria ] [ 'id' ] )
Find the ids of those stars in starlist which are at a distance less than crit_separation from star .
94
24
242,448
def _from_float ( cls , xmin , xmax , ymin , ymax ) : ixmin = int ( np . floor ( xmin + 0.5 ) ) ixmax = int ( np . ceil ( xmax + 0.5 ) ) iymin = int ( np . floor ( ymin + 0.5 ) ) iymax = int ( np . ceil ( ymax + 0.5 ) ) return cls ( ixmin , ixmax , iymin , iymax )
Return the smallest bounding box that fully contains a given rectangle defined by float coordinate values .
116
18
242,449
def slices ( self ) : return ( slice ( self . iymin , self . iymax ) , slice ( self . ixmin , self . ixmax ) )
The bounding box as a tuple of slice objects .
38
11
242,450
def as_patch ( self , * * kwargs ) : from matplotlib . patches import Rectangle return Rectangle ( xy = ( self . extent [ 0 ] , self . extent [ 2 ] ) , width = self . shape [ 1 ] , height = self . shape [ 0 ] , * * kwargs )
Return a matplotlib . patches . Rectangle that represents the bounding box .
70
17
242,451
def to_aperture ( self ) : from . rectangle import RectangularAperture xpos = ( self . extent [ 1 ] + self . extent [ 0 ] ) / 2. ypos = ( self . extent [ 3 ] + self . extent [ 2 ] ) / 2. xypos = ( xpos , ypos ) h , w = self . shape return RectangularAperture ( xypos , w = w , h = h , theta = 0. )
Return a ~photutils . aperture . RectangularAperture that represents the bounding box .
102
19
242,452
def plot ( self , origin = ( 0 , 0 ) , ax = None , fill = False , * * kwargs ) : aper = self . to_aperture ( ) aper . plot ( origin = origin , ax = ax , fill = fill , * * kwargs )
Plot the BoundingBox on a matplotlib ~matplotlib . axes . Axes instance .
63
21
242,453
def _find_stars ( data , kernel , threshold_eff , min_separation = None , mask = None , exclude_border = False ) : convolved_data = filter_data ( data , kernel . data , mode = 'constant' , fill_value = 0.0 , check_normalization = False ) # define a local footprint for the peak finder if min_separation is None : # daofind footprint = kernel . mask . astype ( np . bool ) else : # define a circular footprint idx = np . arange ( - min_separation , min_separation + 1 ) xx , yy = np . meshgrid ( idx , idx ) footprint = np . array ( ( xx ** 2 + yy ** 2 ) <= min_separation ** 2 , dtype = int ) # pad the data and convolved image by the kernel x/y radius to allow # for detections near the edges if not exclude_border : ypad = kernel . yradius xpad = kernel . xradius pad = ( ( ypad , ypad ) , ( xpad , xpad ) ) # mode must be a string for numpy < 0.11 # (see https://github.com/numpy/numpy/issues/7112) mode = str ( 'constant' ) data = np . pad ( data , pad , mode = mode , constant_values = [ 0. ] ) if mask is not None : mask = np . pad ( mask , pad , mode = mode , constant_values = [ 0. ] ) convolved_data = np . pad ( convolved_data , pad , mode = mode , constant_values = [ 0. ] ) # find local peaks in the convolved data with warnings . catch_warnings ( ) : # suppress any NoDetectionsWarning from find_peaks warnings . filterwarnings ( 'ignore' , category = NoDetectionsWarning ) tbl = find_peaks ( convolved_data , threshold_eff , footprint = footprint , mask = mask ) if tbl is None : return None coords = np . transpose ( [ tbl [ 'y_peak' ] , tbl [ 'x_peak' ] ] ) star_cutouts = [ ] for ( ypeak , xpeak ) in coords : # now extract the object from the data, centered on the peak # pixel in the convolved image, with the same size as the kernel x0 = xpeak - kernel . xradius x1 = xpeak + kernel . xradius + 1 y0 = ypeak - kernel . yradius y1 = ypeak + kernel . yradius + 1 if x0 < 0 or x1 > data . shape [ 1 ] : continue # pragma: no cover if y0 < 0 or y1 > data . 
shape [ 0 ] : continue # pragma: no cover slices = ( slice ( y0 , y1 ) , slice ( x0 , x1 ) ) data_cutout = data [ slices ] convdata_cutout = convolved_data [ slices ] # correct pixel values for the previous image padding if not exclude_border : x0 -= kernel . xradius x1 -= kernel . xradius y0 -= kernel . yradius y1 -= kernel . yradius xpeak -= kernel . xradius ypeak -= kernel . yradius slices = ( slice ( y0 , y1 ) , slice ( x0 , x1 ) ) star_cutouts . append ( _StarCutout ( data_cutout , convdata_cutout , slices , xpeak , ypeak , kernel , threshold_eff ) ) return star_cutouts
Find stars in an image .
775
6
242,454
def roundness2 ( self ) : if np . isnan ( self . hx ) or np . isnan ( self . hy ) : return np . nan else : return 2.0 * ( self . hx - self . hy ) / ( self . hx + self . hy )
The star roundness .
62
5
242,455
def detect_sources ( data , threshold , npixels , filter_kernel = None , connectivity = 8 , mask = None ) : from scipy import ndimage if ( npixels <= 0 ) or ( int ( npixels ) != npixels ) : raise ValueError ( 'npixels must be a positive integer, got ' '"{0}"' . format ( npixels ) ) image = ( filter_data ( data , filter_kernel , mode = 'constant' , fill_value = 0.0 , check_normalization = True ) > threshold ) if mask is not None : if mask . shape != image . shape : raise ValueError ( 'mask must have the same shape as the input ' 'image.' ) image &= ~ mask if connectivity == 4 : selem = ndimage . generate_binary_structure ( 2 , 1 ) elif connectivity == 8 : selem = ndimage . generate_binary_structure ( 2 , 2 ) else : raise ValueError ( 'Invalid connectivity={0}. ' 'Options are 4 or 8' . format ( connectivity ) ) segm_img , nobj = ndimage . label ( image , structure = selem ) # remove objects with less than npixels # NOTE: for typical data, making the cutout images is ~10x faster # than using segm_img directly segm_slices = ndimage . find_objects ( segm_img ) for i , slices in enumerate ( segm_slices ) : cutout = segm_img [ slices ] segment_mask = ( cutout == ( i + 1 ) ) if np . count_nonzero ( segment_mask ) < npixels : cutout [ segment_mask ] = 0 # now relabel to make consecutive label indices segm_img , nobj = ndimage . label ( segm_img , structure = selem ) if nobj == 0 : warnings . warn ( 'No sources were found.' , NoDetectionsWarning ) return None else : return SegmentationImage ( segm_img )
Detect sources above a specified threshold value in an image and return a ~photutils . segmentation . SegmentationImage object .
442
26
242,456
def make_source_mask ( data , snr , npixels , mask = None , mask_value = None , filter_fwhm = None , filter_size = 3 , filter_kernel = None , sigclip_sigma = 3.0 , sigclip_iters = 5 , dilate_size = 11 ) : from scipy import ndimage threshold = detect_threshold ( data , snr , background = None , error = None , mask = mask , mask_value = None , sigclip_sigma = sigclip_sigma , sigclip_iters = sigclip_iters ) kernel = None if filter_kernel is not None : kernel = filter_kernel if filter_fwhm is not None : sigma = filter_fwhm * gaussian_fwhm_to_sigma kernel = Gaussian2DKernel ( sigma , x_size = filter_size , y_size = filter_size ) if kernel is not None : kernel . normalize ( ) segm = detect_sources ( data , threshold , npixels , filter_kernel = kernel ) selem = np . ones ( ( dilate_size , dilate_size ) ) return ndimage . binary_dilation ( segm . data . astype ( np . bool ) , selem )
Make a source mask using source segmentation and binary dilation .
284
13
242,457
def data_ma ( self ) : mask = ( self . _segment_img [ self . slices ] != self . label ) return np . ma . masked_array ( self . _segment_img [ self . slices ] , mask = mask )
A 2D ~numpy . ma . MaskedArray cutout image of the segment using the minimal bounding box .
54
25
242,458
def _reset_lazy_properties ( self ) : for key , value in self . __class__ . __dict__ . items ( ) : if isinstance ( value , lazyproperty ) : self . __dict__ . pop ( key , None )
Reset all lazy properties .
53
6
242,459
def segments ( self ) : segments = [ ] for label , slc in zip ( self . labels , self . slices ) : segments . append ( Segment ( self . data , label , slc , self . get_area ( label ) ) ) return segments
A list of Segment objects .
55
7
242,460
def get_index ( self , label ) : self . check_labels ( label ) return np . searchsorted ( self . labels , label )
Find the index of the input label .
32
8
242,461
def get_indices ( self , labels ) : self . check_labels ( labels ) return np . searchsorted ( self . labels , labels )
Find the indices of the input labels .
33
8
242,462
def slices ( self ) : from scipy . ndimage import find_objects return [ slc for slc in find_objects ( self . _data ) if slc is not None ]
A list of tuples where each tuple contains two slices representing the minimal box that contains the labeled region .
42
21
242,463
def missing_labels ( self ) : return np . array ( sorted ( set ( range ( 0 , self . max_label + 1 ) ) . difference ( np . insert ( self . labels , 0 , 0 ) ) ) )
A 1D ~numpy . ndarray of the sorted non - zero labels that are missing in the consecutive sequence from zero to the maximum label number .
49
32
242,464
def reassign_label ( self , label , new_label , relabel = False ) : self . reassign_labels ( label , new_label , relabel = relabel )
Reassign a label number to a new number .
40
11
242,465
def reassign_labels ( self , labels , new_label , relabel = False ) : self . check_labels ( labels ) labels = np . atleast_1d ( labels ) if len ( labels ) == 0 : return idx = np . zeros ( self . max_label + 1 , dtype = int ) idx [ self . labels ] = self . labels idx [ labels ] = new_label # calling the data setter resets all cached properties self . data = idx [ self . data ] if relabel : self . relabel_consecutive ( )
Reassign one or more label numbers .
129
9
242,466
def relabel_consecutive ( self , start_label = 1 ) : if start_label <= 0 : raise ValueError ( 'start_label must be > 0.' ) if self . is_consecutive and ( self . labels [ 0 ] == start_label ) : return new_labels = np . zeros ( self . max_label + 1 , dtype = np . int ) new_labels [ self . labels ] = np . arange ( self . nlabels ) + start_label self . data = new_labels [ self . data ]
Reassign the label numbers consecutively such that there are no missing label numbers .
124
17
242,467
def keep_label ( self , label , relabel = False ) : self . keep_labels ( label , relabel = relabel )
Keep only the specified label .
30
6
242,468
def keep_labels ( self , labels , relabel = False ) : self . check_labels ( labels ) labels = np . atleast_1d ( labels ) labels_tmp = list ( set ( self . labels ) - set ( labels ) ) self . remove_labels ( labels_tmp , relabel = relabel )
Keep only the specified labels .
73
6
242,469
def remove_label ( self , label , relabel = False ) : self . remove_labels ( label , relabel = relabel )
Remove the label number .
30
5
242,470
def remove_labels ( self , labels , relabel = False ) : self . check_labels ( labels ) self . reassign_label ( labels , new_label = 0 ) if relabel : self . relabel_consecutive ( )
Remove one or more labels .
54
6
242,471
def remove_border_labels ( self , border_width , partial_overlap = True , relabel = False ) : if border_width >= min ( self . shape ) / 2 : raise ValueError ( 'border_width must be smaller than half the ' 'image size in either dimension' ) border = np . zeros ( self . shape , dtype = np . bool ) border [ : border_width , : ] = True border [ - border_width : , : ] = True border [ : , : border_width ] = True border [ : , - border_width : ] = True self . remove_masked_labels ( border , partial_overlap = partial_overlap , relabel = relabel )
Remove labeled segments near the image border .
156
8
242,472
def remove_masked_labels ( self , mask , partial_overlap = True , relabel = False ) : if mask . shape != self . shape : raise ValueError ( 'mask must have the same shape as the ' 'segmentation image' ) remove_labels = self . _get_labels ( self . data [ mask ] ) if not partial_overlap : interior_labels = self . _get_labels ( self . data [ ~ mask ] ) remove_labels = list ( set ( remove_labels ) - set ( interior_labels ) ) self . remove_labels ( remove_labels , relabel = relabel )
Remove labeled segments located within a masked region .
145
9
242,473
def outline_segments ( self , mask_background = False ) : from scipy . ndimage import grey_erosion , grey_dilation # mode='constant' ensures outline is included on the image borders selem = np . array ( [ [ 0 , 1 , 0 ] , [ 1 , 1 , 1 ] , [ 0 , 1 , 0 ] ] ) eroded = grey_erosion ( self . data , footprint = selem , mode = 'constant' , cval = 0. ) dilated = grey_dilation ( self . data , footprint = selem , mode = 'constant' , cval = 0. ) outlines = ( ( dilated != eroded ) & ( self . data != 0 ) ) . astype ( int ) outlines *= self . data if mask_background : outlines = np . ma . masked_where ( outlines == 0 , outlines ) return outlines
Outline the labeled segments .
192
6
242,474
def _overlap_slices ( self , shape ) : if len ( shape ) != 2 : raise ValueError ( 'input shape must have 2 elements.' ) xmin = self . bbox . ixmin xmax = self . bbox . ixmax ymin = self . bbox . iymin ymax = self . bbox . iymax if xmin >= shape [ 1 ] or ymin >= shape [ 0 ] or xmax <= 0 or ymax <= 0 : # no overlap of the aperture with the data return None , None slices_large = ( slice ( max ( ymin , 0 ) , min ( ymax , shape [ 0 ] ) ) , slice ( max ( xmin , 0 ) , min ( xmax , shape [ 1 ] ) ) ) slices_small = ( slice ( max ( - ymin , 0 ) , min ( ymax - ymin , shape [ 0 ] - ymin ) ) , slice ( max ( - xmin , 0 ) , min ( xmax - xmin , shape [ 1 ] - xmin ) ) ) return slices_large , slices_small
Calculate the slices for the overlapping part of the bounding box and an array of the given shape .
239
22
242,475
def to_image ( self , shape ) : if len ( shape ) != 2 : raise ValueError ( 'input shape must have 2 elements.' ) image = np . zeros ( shape ) if self . bbox . ixmin < 0 or self . bbox . iymin < 0 : return self . _to_image_partial_overlap ( image ) try : image [ self . bbox . slices ] = self . data except ValueError : # partial or no overlap image = self . _to_image_partial_overlap ( image ) return image
Return an image of the mask in a 2D array of the given shape taking any edge effects into account .
120
22
242,476
def cutout ( self , data , fill_value = 0. , copy = False ) : data = np . asanyarray ( data ) if data . ndim != 2 : raise ValueError ( 'data must be a 2D array.' ) partial_overlap = False if self . bbox . ixmin < 0 or self . bbox . iymin < 0 : partial_overlap = True if not partial_overlap : # try this for speed -- the result may still be a partial # overlap, in which case the next block will be triggered if copy : cutout = np . copy ( data [ self . bbox . slices ] ) else : cutout = data [ self . bbox . slices ] if partial_overlap or ( cutout . shape != self . shape ) : slices_large , slices_small = self . _overlap_slices ( data . shape ) if slices_small is None : return None # no overlap # cutout is a copy cutout = np . zeros ( self . shape , dtype = data . dtype ) cutout [ : ] = fill_value cutout [ slices_small ] = data [ slices_large ] if isinstance ( data , u . Quantity ) : cutout = u . Quantity ( cutout , unit = data . unit ) return cutout
Create a cutout from the input data over the mask bounding box taking any edge effects into account .
283
21
242,477
def multiply ( self , data , fill_value = 0. ) : cutout = self . cutout ( data , fill_value = fill_value ) if cutout is None : return None else : return cutout * self . data
Multiply the aperture mask with the input data taking any edge effects into account .
50
17
242,478
def deblend_sources ( data , segment_img , npixels , filter_kernel = None , labels = None , nlevels = 32 , contrast = 0.001 , mode = 'exponential' , connectivity = 8 , relabel = True ) : if not isinstance ( segment_img , SegmentationImage ) : segment_img = SegmentationImage ( segment_img ) if segment_img . shape != data . shape : raise ValueError ( 'The data and segmentation image must have ' 'the same shape' ) if labels is None : labels = segment_img . labels labels = np . atleast_1d ( labels ) segment_img . check_labels ( labels ) data = filter_data ( data , filter_kernel , mode = 'constant' , fill_value = 0.0 ) last_label = segment_img . max_label segm_deblended = deepcopy ( segment_img ) for label in labels : source_slice = segment_img . slices [ segment_img . get_index ( label ) ] source_data = data [ source_slice ] source_segm = SegmentationImage ( np . copy ( segment_img . data [ source_slice ] ) ) source_segm . keep_labels ( label ) # include only one label source_deblended = _deblend_source ( source_data , source_segm , npixels , nlevels = nlevels , contrast = contrast , mode = mode , connectivity = connectivity ) if not np . array_equal ( source_deblended . data . astype ( bool ) , source_segm . data . astype ( bool ) ) : raise ValueError ( 'Deblending failed for source "{0}". Please ' 'ensure you used the same pixel connectivity ' 'in detect_sources and deblend_sources. If ' 'this issue persists, then please inform the ' 'developers.' . format ( label ) ) if source_deblended . nlabels > 1 : # replace the original source with the deblended source source_mask = ( source_deblended . data > 0 ) segm_tmp = segm_deblended . data segm_tmp [ source_slice ] [ source_mask ] = ( source_deblended . data [ source_mask ] + last_label ) segm_deblended . data = segm_tmp # needed to call data setter last_label += source_deblended . nlabels if relabel : segm_deblended . relabel_consecutive ( ) return segm_deblended
Deblend overlapping sources labeled in a segmentation image .
567
12
242,479
def _moments_central ( data , center = None , order = 1 ) : data = np . asarray ( data ) . astype ( float ) if data . ndim != 2 : raise ValueError ( 'data must be a 2D array.' ) if center is None : from . . centroids import centroid_com center = centroid_com ( data ) indices = np . ogrid [ [ slice ( 0 , i ) for i in data . shape ] ] ypowers = ( indices [ 0 ] - center [ 1 ] ) ** np . arange ( order + 1 ) xpowers = np . transpose ( indices [ 1 ] - center [ 0 ] ) ** np . arange ( order + 1 ) return np . dot ( np . dot ( np . transpose ( ypowers ) , data ) , xpowers )
Calculate the central image moments up to the specified order .
178
13
242,480
def first_and_second_harmonic_function ( phi , c ) : return ( c [ 0 ] + c [ 1 ] * np . sin ( phi ) + c [ 2 ] * np . cos ( phi ) + c [ 3 ] * np . sin ( 2 * phi ) + c [ 4 ] * np . cos ( 2 * phi ) )
Compute the harmonic function value used to calculate the corrections for ellipse fitting .
81
17
242,481
def _radial_distance ( shape ) : if len ( shape ) != 2 : raise ValueError ( 'shape must have only 2 elements' ) position = ( np . asarray ( shape ) - 1 ) / 2. x = np . arange ( shape [ 1 ] ) - position [ 1 ] y = np . arange ( shape [ 0 ] ) - position [ 0 ] xx , yy = np . meshgrid ( x , y ) return np . sqrt ( xx ** 2 + yy ** 2 )
Return an array where each value is the Euclidean distance from the array center .
110
17
242,482
def load_spitzer_image ( show_progress = False ) : # pragma: no cover path = get_path ( 'spitzer_example_image.fits' , location = 'remote' , show_progress = show_progress ) hdu = fits . open ( path ) [ 0 ] return hdu
Load a 4 . 5 micron Spitzer image .
67
11
242,483
def load_spitzer_catalog ( show_progress = False ) : # pragma: no cover path = get_path ( 'spitzer_example_catalog.xml' , location = 'remote' , show_progress = show_progress ) table = Table . read ( path ) return table
Load a 4 . 5 micron Spitzer catalog .
64
11
242,484
def load_irac_psf ( channel , show_progress = False ) : # pragma: no cover channel = int ( channel ) if channel < 1 or channel > 4 : raise ValueError ( 'channel must be 1, 2, 3, or 4' ) fn = 'irac_ch{0}_flight.fits' . format ( channel ) path = get_path ( fn , location = 'remote' , show_progress = show_progress ) hdu = fits . open ( path ) [ 0 ] return hdu
Load a Spitzer IRAC PSF image .
114
10
242,485
def fit_isophote ( self , sma , step = 0.1 , conver = DEFAULT_CONVERGENCE , minit = DEFAULT_MINIT , maxit = DEFAULT_MAXIT , fflag = DEFAULT_FFLAG , maxgerr = DEFAULT_MAXGERR , sclip = 3. , nclip = 0 , integrmode = BILINEAR , linear = False , maxrit = None , noniterate = False , going_inwards = False , isophote_list = None ) : geometry = self . _geometry # if available, geometry from last fitted isophote will be # used as initial guess for next isophote. if isophote_list is not None and len ( isophote_list ) > 0 : geometry = isophote_list [ - 1 ] . sample . geometry # do the fit if noniterate or ( maxrit and sma > maxrit ) : isophote = self . _non_iterative ( sma , step , linear , geometry , sclip , nclip , integrmode ) else : isophote = self . _iterative ( sma , step , linear , geometry , sclip , nclip , integrmode , conver , minit , maxit , fflag , maxgerr , going_inwards ) # store result in list if isophote_list is not None and isophote . valid : isophote_list . append ( isophote ) return isophote
Fit a single isophote with a given semimajor axis length .
323
15
242,486
def to_sky ( self , wcs , mode = 'all' ) : sky_params = self . _to_sky_params ( wcs , mode = mode ) return SkyCircularAperture ( * * sky_params )
Convert the aperture to a SkyCircularAperture object defined in celestial coordinates .
50
17
242,487
def to_sky ( self , wcs , mode = 'all' ) : sky_params = self . _to_sky_params ( wcs , mode = mode ) return SkyCircularAnnulus ( * * sky_params )
Convert the aperture to a SkyCircularAnnulus object defined in celestial coordinates .
50
17
242,488
def to_pixel ( self , wcs , mode = 'all' ) : pixel_params = self . _to_pixel_params ( wcs , mode = mode ) return CircularAperture ( * * pixel_params )
Convert the aperture to a CircularAperture object defined in pixel coordinates .
49
16
242,489
def to_pixel ( self , wcs , mode = 'all' ) : pixel_params = self . _to_pixel_params ( wcs , mode = mode ) return CircularAnnulus ( * * pixel_params )
Convert the aperture to a CircularAnnulus object defined in pixel coordinates .
49
16
242,490
def apply_poisson_noise ( data , random_state = None ) : data = np . asanyarray ( data ) if np . any ( data < 0 ) : raise ValueError ( 'data must not contain any negative values' ) prng = check_random_state ( random_state ) return prng . poisson ( data )
Apply Poisson noise to an array where the value of each element in the input array represents the expected number of counts .
74
24
242,491
def make_noise_image ( shape , type = 'gaussian' , mean = None , stddev = None , random_state = None ) : if mean is None : raise ValueError ( '"mean" must be input' ) prng = check_random_state ( random_state ) if type == 'gaussian' : if stddev is None : raise ValueError ( '"stddev" must be input for Gaussian noise' ) image = prng . normal ( loc = mean , scale = stddev , size = shape ) elif type == 'poisson' : image = prng . poisson ( lam = mean , size = shape ) else : raise ValueError ( 'Invalid type: {0}. Use one of ' '{"gaussian", "poisson"}.' . format ( type ) ) return image
Make a noise image containing Gaussian or Poisson noise .
180
12
242,492
def make_random_models_table ( n_sources , param_ranges , random_state = None ) : prng = check_random_state ( random_state ) sources = Table ( ) for param_name , ( lower , upper ) in param_ranges . items ( ) : # Generate a column for every item in param_ranges, even if it # is not in the model (e.g. flux). However, such columns will # be ignored when rendering the image. sources [ param_name ] = prng . uniform ( lower , upper , n_sources ) return sources
Make a ~astropy . table . Table containing randomly generated parameters for an Astropy model to simulate a set of sources .
130
25
242,493
def make_random_gaussians_table ( n_sources , param_ranges , random_state = None ) : sources = make_random_models_table ( n_sources , param_ranges , random_state = random_state ) # convert Gaussian2D flux to amplitude if 'flux' in param_ranges and 'amplitude' not in param_ranges : model = Gaussian2D ( x_stddev = 1 , y_stddev = 1 ) if 'x_stddev' in sources . colnames : xstd = sources [ 'x_stddev' ] else : xstd = model . x_stddev . value # default if 'y_stddev' in sources . colnames : ystd = sources [ 'y_stddev' ] else : ystd = model . y_stddev . value # default sources = sources . copy ( ) sources [ 'amplitude' ] = sources [ 'flux' ] / ( 2. * np . pi * xstd * ystd ) return sources
Make a ~astropy . table . Table containing randomly generated parameters for 2D Gaussian sources .
237
20
242,494
def make_model_sources_image ( shape , model , source_table , oversample = 1 ) : image = np . zeros ( shape , dtype = np . float64 ) y , x = np . indices ( shape ) params_to_set = [ ] for param in source_table . colnames : if param in model . param_names : params_to_set . append ( param ) # Save the initial parameter values so we can set them back when # done with the loop. It's best not to copy a model, because some # models (e.g. PSF models) may have substantial amounts of data in # them. init_params = { param : getattr ( model , param ) for param in params_to_set } try : for i , source in enumerate ( source_table ) : for param in params_to_set : setattr ( model , param , source [ param ] ) if oversample == 1 : image += model ( x , y ) else : image += discretize_model ( model , ( 0 , shape [ 1 ] ) , ( 0 , shape [ 0 ] ) , mode = 'oversample' , factor = oversample ) finally : for param , value in init_params . items ( ) : setattr ( model , param , value ) return image
Make an image containing sources generated from a user - specified model .
280
13
242,495
def make_4gaussians_image ( noise = True ) : table = Table ( ) table [ 'amplitude' ] = [ 50 , 70 , 150 , 210 ] table [ 'x_mean' ] = [ 160 , 25 , 150 , 90 ] table [ 'y_mean' ] = [ 70 , 40 , 25 , 60 ] table [ 'x_stddev' ] = [ 15.2 , 5.1 , 3. , 8.1 ] table [ 'y_stddev' ] = [ 2.6 , 2.5 , 3. , 4.7 ] table [ 'theta' ] = np . array ( [ 145. , 20. , 0. , 60. ] ) * np . pi / 180. shape = ( 100 , 200 ) data = make_gaussian_sources_image ( shape , table ) + 5. if noise : data += make_noise_image ( shape , type = 'gaussian' , mean = 0. , stddev = 5. , random_state = 12345 ) return data
Make an example image containing four 2D Gaussians plus a constant background .
229
16
242,496
def make_100gaussians_image(noise=True):
    """
    Make an example image containing 100 2D Gaussians plus a constant
    background of 5.

    Parameters
    ----------
    noise : bool, optional
        Whether to add Gaussian noise (mean 0, stddev 2, fixed seed) to
        the image.  Default is `True`.

    Returns
    -------
    image : 2D `~numpy.ndarray`
        Image of shape ``(300, 500)`` containing the 100 randomly
        generated (fixed seed) Gaussian sources.
    """
    n_sources = 100

    # Parameter ranges; insertion order is preserved for reproducible
    # random draws.
    param_ranges = OrderedDict()
    param_ranges['flux'] = [500, 1000]
    param_ranges['x_mean'] = [0, 500]
    param_ranges['y_mean'] = [0, 300]
    param_ranges['x_stddev'] = [1, 5]
    param_ranges['y_stddev'] = [1, 5]
    param_ranges['theta'] = [0, 2 * np.pi]

    sources = make_random_gaussians_table(n_sources, param_ranges,
                                          random_state=12345)

    shape = (300, 500)
    image = make_gaussian_sources_image(shape, sources) + 5.

    if noise:
        image += make_noise_image(shape, type='gaussian', mean=0.,
                                  stddev=2., random_state=12345)

    return image
Make an example image containing 100 2D Gaussians plus a constant background .
265
16
242,497
def make_wcs(shape, galactic=False):
    """
    Create a simple celestial WCS object in either the ICRS or Galactic
    coordinate frame.

    Parameters
    ----------
    shape : 2-tuple of int
        Image shape as ``(ny, nx)``.
    galactic : bool, optional
        If `True` use a Galactic (GLON/GLAT) frame; otherwise use ICRS
        (RA/Dec).  Default is `False`.

    Returns
    -------
    wcs : `~astropy.wcs.WCS`
        A 2D celestial WCS with a rotated CD matrix.
    """
    wcs = WCS(naxis=2)

    rho = np.pi / 3.        # rotation angle of the CD matrix
    scale = 0.1 / 3600.     # pixel scale: 0.1 arcsec expressed in degrees

    # NOTE(review): lexicographic string comparison of versions is
    # fragile (e.g. '10.0' < '3.1' is True) -- kept for identical
    # behavior; confirm before astropy major versions >= 10.
    if astropy_version < '3.1':
        # astropy < 3.1 stored the image size in private attributes
        wcs._naxis1 = shape[1]    # nx
        wcs._naxis2 = shape[0]    # ny
    else:
        wcs.pixel_shape = shape

    wcs.wcs.crpix = [shape[1] / 2, shape[0] / 2]    # 1-indexed (x, y)
    wcs.wcs.crval = [197.8925, -1.36555556]
    wcs.wcs.cunit = ['deg', 'deg']
    wcs.wcs.cd = [[-scale * np.cos(rho), scale * np.sin(rho)],
                  [scale * np.sin(rho), scale * np.cos(rho)]]

    if galactic:
        wcs.wcs.ctype = ['GLON-CAR', 'GLAT-CAR']
    else:
        wcs.wcs.radesys = 'ICRS'
        wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']

    return wcs
Create a simple celestial WCS object in either the ICRS or Galactic coordinate frame .
293
16
242,498
def make_imagehdu(data, wcs=None):
    """
    Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D
    image.

    Parameters
    ----------
    data : array_like
        The 2D input image.

    wcs : `~astropy.wcs.WCS`, optional
        World coordinate solution; if given, its header is attached to
        the HDU.

    Returns
    -------
    hdu : `~astropy.io.fits.ImageHDU`
        The FITS image HDU.

    Raises
    ------
    ValueError
        If ``data`` is not 2D.
    """
    data = np.asanyarray(data)
    if data.ndim != 2:
        raise ValueError('data must be a 2D array')

    header = wcs.to_header() if wcs is not None else None
    return fits.ImageHDU(data, header=header)
Create a FITS `~astropy.io.fits.ImageHDU` containing the input 2D image.
82
22
242,499
def centroid_com(data, mask=None):
    """
    Calculate the centroid of an n-dimensional array as its center of
    mass determined from image moments.

    Parameters
    ----------
    data : array_like
        The input n-dimensional array.  Non-finite values (NaNs and
        infs) are automatically masked (set to zero) with a warning.
    mask : array_like (bool), optional
        Boolean mask with the same shape as ``data``; `True` values are
        excluded (treated as zero weight).

    Returns
    -------
    centroid : `~numpy.ndarray`
        Centroid coordinates in ``(x, y[, ...])`` order, i.e. the
        reverse of the array-axis order.

    Raises
    ------
    ValueError
        If ``data`` and ``mask`` have different shapes.
    """
    # astype makes a float copy; the ``np.float`` alias used originally
    # was removed in NumPy 1.24, so use the builtin ``float``.
    data = data.astype(float)

    if mask is not None and mask is not np.ma.nomask:
        mask = np.asarray(mask, dtype=bool)
        if data.shape != mask.shape:
            raise ValueError('data and mask must have the same shape.')
        data[mask] = 0.

    badidx = ~np.isfinite(data)
    if np.any(badidx):
        warnings.warn('Input data contains non-finite values (e.g. NaNs '
                      'or infs), which were automatically masked.',
                      AstropyUserWarning)
        data[badidx] = 0.

    total = np.sum(data)
    # Indexing np.ogrid with a list of slices is deprecated; a tuple is
    # the supported form and is equivalent.
    indices = np.ogrid[tuple(slice(0, i) for i in data.shape)]

    # note the output array is reversed to give (x, y) order
    return np.array([np.sum(indices[axis] * data) / total
                     for axis in range(data.ndim)])[::-1]
Calculate the centroid of an n - dimensional array as its center of mass determined from moments .
233
21