repo
stringlengths
7
55
path
stringlengths
4
223
func_name
stringlengths
1
134
original_string
stringlengths
75
104k
language
stringclasses
1 value
code
stringlengths
75
104k
code_tokens
listlengths
19
28.4k
docstring
stringlengths
1
46.9k
docstring_tokens
listlengths
1
1.97k
sha
stringlengths
40
40
url
stringlengths
87
315
partition
stringclasses
1 value
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
WCSHelper.get_pixbeam
def get_pixbeam(self, ra, dec):
    """
    Determine the beam in pixels at the given location in sky coordinates.

    Parameters
    ----------
    ra, dec : float
        The sky coordinates at which the beam is determined.
        If `ra` is None the reference pixel of the image is used instead.

    Returns
    -------
    beam : :class:`AegeanTools.fits_image.Beam`
        A beam object, with a/b/pa in pixel coordinates.
        None if the pixel-space beam parameters are not finite.
    """
    if ra is None:
        # no position given: default to the reference pixel
        ra, dec = self.pix2sky(self.refpix)
    pos = [ra, dec]
    beam = self.get_beam(ra, dec)
    # convert the sky ellipse (a, b, pa) into pixel coordinates;
    # the first two return values (the centre) are not needed here
    _, _, major, minor, theta = self.sky2pix_ellipse(pos, beam.a, beam.b, beam.pa)
    if major < minor:
        # enforce major >= minor, rotating the position angle to match
        major, minor = minor, major
        theta -= 90
        if theta < -180:
            theta += 180
    if not np.isfinite(theta):
        theta = 0
    if not all(np.isfinite([major, minor, theta])):
        beam = None
    else:
        beam = Beam(major, minor, theta)
    return beam
python
def get_pixbeam(self, ra, dec): """ Determine the beam in pixels at the given location in sky coordinates. Parameters ---------- ra , dec : float The sly coordinates at which the beam is determined. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` A beam object, with a/b/pa in pixel coordinates. """ if ra is None: ra, dec = self.pix2sky(self.refpix) pos = [ra, dec] beam = self.get_beam(ra, dec) _, _, major, minor, theta = self.sky2pix_ellipse(pos, beam.a, beam.b, beam.pa) if major < minor: major, minor = minor, major theta -= 90 if theta < -180: theta += 180 if not np.isfinite(theta): theta = 0 if not all(np.isfinite([major, minor, theta])): beam = None else: beam = Beam(major, minor, theta) return beam
[ "def", "get_pixbeam", "(", "self", ",", "ra", ",", "dec", ")", ":", "if", "ra", "is", "None", ":", "ra", ",", "dec", "=", "self", ".", "pix2sky", "(", "self", ".", "refpix", ")", "pos", "=", "[", "ra", ",", "dec", "]", "beam", "=", "self", "....
Determine the beam in pixels at the given location in sky coordinates. Parameters ---------- ra , dec : float The sky coordinates at which the beam is determined. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` A beam object, with a/b/pa in pixel coordinates.
[ "Determine", "the", "beam", "in", "pixels", "at", "the", "given", "location", "in", "sky", "coordinates", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L348-L381
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
WCSHelper.get_beamarea_deg2
def get_beamarea_deg2(self, ra, dec):
    """
    Calculate the area of the synthesized beam in square degrees.

    Parameters
    ----------
    ra, dec : float
        The sky coordinates at which the calculation is made.

    Returns
    -------
    area : float
        The beam area in square degrees.
    """
    # beam area at the reference coordinates (deg**2)
    area = np.pi * abs(self.beam.a * self.beam.b)
    if self.lat is not None:
        # scale with the latitude offset from the reference latitude
        area /= np.cos(np.radians(dec - self.lat))
    return area
python
def get_beamarea_deg2(self, ra, dec): """ Calculate the area of the synthesized beam in square degrees. Parameters ---------- ra, dec : float The sky coordinates at which the calculation is made. Returns ------- area : float The beam area in square degrees. """ barea = abs(self.beam.a * self.beam.b * np.pi) # in deg**2 at reference coords if self.lat is not None: barea /= np.cos(np.radians(dec - self.lat)) return barea
[ "def", "get_beamarea_deg2", "(", "self", ",", "ra", ",", "dec", ")", ":", "barea", "=", "abs", "(", "self", ".", "beam", ".", "a", "*", "self", ".", "beam", ".", "b", "*", "np", ".", "pi", ")", "# in deg**2 at reference coords", "if", "self", ".", ...
Calculate the area of the synthesized beam in square degrees. Parameters ---------- ra, dec : float The sky coordinates at which the calculation is made. Returns ------- area : float The beam area in square degrees.
[ "Calculate", "the", "area", "of", "the", "synthesized", "beam", "in", "square", "degrees", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L383-L400
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
WCSHelper.get_beamarea_pix
def get_beamarea_pix(self, ra, dec):
    """
    Calculate the beam area in square pixels.

    Parameters
    ----------
    ra, dec : float
        The sky coordinates at which the calculation is made.

    Returns
    -------
    area : float
        The beam area in square pixels.
    """
    # area of a single pixel, in deg**2 at the reference coordinates
    pix_area = abs(self.pixscale[0] * self.pixscale[1])
    # beam area in deg**2, converted into pixel units
    return self.get_beamarea_deg2(ra, dec) / pix_area
python
def get_beamarea_pix(self, ra, dec): """ Calculate the beam area in square pixels. Parameters ---------- ra, dec : float The sky coordinates at which the calculation is made dec Returns ------- area : float The beam area in square pixels. """ parea = abs(self.pixscale[0] * self.pixscale[1]) # in deg**2 at reference coords barea = self.get_beamarea_deg2(ra, dec) return barea / parea
[ "def", "get_beamarea_pix", "(", "self", ",", "ra", ",", "dec", ")", ":", "parea", "=", "abs", "(", "self", ".", "pixscale", "[", "0", "]", "*", "self", ".", "pixscale", "[", "1", "]", ")", "# in deg**2 at reference coords", "barea", "=", "self", ".", ...
Calculate the beam area in square pixels. Parameters ---------- ra, dec : float The sky coordinates at which the calculation is made. Returns ------- area : float The beam area in square pixels.
[ "Calculate", "the", "beam", "area", "in", "square", "pixels", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L402-L419
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
WCSHelper.sky_sep
def sky_sep(self, pix1, pix2):
    """
    Calculate the great-circle (GCD) sky separation in degrees
    between two pixel positions.

    Parameters
    ----------
    pix1, pix2 : (float, float)
        The (x,y) pixel coordinates for the two positions.

    Returns
    -------
    dist : float
        The distance between the two points (degrees).
    """
    # convert both pixel positions to sky coordinates, then take the
    # great-circle distance between them
    first = self.pix2sky(pix1)
    second = self.pix2sky(pix2)
    return gcd(first[0], first[1], second[0], second[1])
python
def sky_sep(self, pix1, pix2): """ calculate the GCD sky separation (degrees) between two pixels. Parameters ---------- pix1, pix2 : (float, float) The (x,y) pixel coordinates for the two positions. Returns ------- dist : float The distance between the two points (degrees). """ pos1 = self.pix2sky(pix1) pos2 = self.pix2sky(pix2) sep = gcd(pos1[0], pos1[1], pos2[0], pos2[1]) return sep
[ "def", "sky_sep", "(", "self", ",", "pix1", ",", "pix2", ")", ":", "pos1", "=", "self", ".", "pix2sky", "(", "pix1", ")", "pos2", "=", "self", ".", "pix2sky", "(", "pix2", ")", "sep", "=", "gcd", "(", "pos1", "[", "0", "]", ",", "pos1", "[", ...
calculate the GCD sky separation (degrees) between two pixels. Parameters ---------- pix1, pix2 : (float, float) The (x,y) pixel coordinates for the two positions. Returns ------- dist : float The distance between the two points (degrees).
[ "calculate", "the", "GCD", "sky", "separation", "(", "degrees", ")", "between", "two", "pixels", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L421-L438
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
PSFHelper.get_psf_sky
def get_psf_sky(self, ra, dec):
    """
    Determine the local psf at a given sky location.
    The psf is returned in degrees.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    a, b, pa : float
        The psf semi-major axis, semi-minor axis, and position angle in
        (degrees). If a psf map is defined then the psf from that map is
        returned, otherwise the image restoring beam is returned.
    """
    # Without a psf map we fall back to the beam from the fits header
    # (including ZA scaling).
    if self.data is None:
        beam = self.wcshelper.get_beam(ra, dec)
        return beam.a, beam.b, beam.pa

    x, y = self.sky2pix([ra, dec])
    # Interpolation is left to whoever made the psf image; here the
    # coordinates are simply clamped to the image boundaries.
    x = int(np.clip(x, 0, self.data.shape[1] - 1))
    y = int(np.clip(y, 0, self.data.shape[2] - 1))
    return self.data[:, x, y]
python
def get_psf_sky(self, ra, dec): """ Determine the local psf at a given sky location. The psf is returned in degrees. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- a, b, pa : float The psf semi-major axis, semi-minor axis, and position angle in (degrees). If a psf is defined then it is the psf that is returned, otherwise the image restoring beam is returned. """ # If we don't have a psf map then we just fall back to using the beam # from the fits header (including ZA scaling) if self.data is None: beam = self.wcshelper.get_beam(ra, dec) return beam.a, beam.b, beam.pa x, y = self.sky2pix([ra, dec]) # We leave the interpolation in the hands of whoever is making these images # clamping the x,y coords at the image boundaries just makes sense x = int(np.clip(x, 0, self.data.shape[1] - 1)) y = int(np.clip(y, 0, self.data.shape[2] - 1)) psf_sky = self.data[:, x, y] return psf_sky
[ "def", "get_psf_sky", "(", "self", ",", "ra", ",", "dec", ")", ":", "# If we don't have a psf map then we just fall back to using the beam", "# from the fits header (including ZA scaling)", "if", "self", ".", "data", "is", "None", ":", "beam", "=", "self", ".", "wcshelp...
Determine the local psf at a given sky location. The psf is returned in degrees. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- a, b, pa : float The psf semi-major axis, semi-minor axis, and position angle in (degrees). If a psf is defined then it is the psf that is returned, otherwise the image restoring beam is returned.
[ "Determine", "the", "local", "psf", "at", "a", "given", "sky", "location", ".", "The", "psf", "is", "returned", "in", "degrees", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L474-L504
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
PSFHelper.get_psf_pix
def get_psf_pix(self, ra, dec):
    """
    Determine the local psf (a,b,pa) at a given sky location.
    The psf is in pixel coordinates.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    a, b, pa : float
        The psf semi-major axis (pixels), semi-minor axis (pixels), and
        rotation angle (degrees). If a psf is defined then it is the psf
        that is returned, otherwise the image restoring beam is returned.
    """
    sky_psf = self.get_psf_sky(ra, dec)
    # sky2pix_ellipse returns (x, y, a, b, pa); only the shape terms
    # are of interest here
    ellipse = self.wcshelper.sky2pix_ellipse(
        [ra, dec], sky_psf[0], sky_psf[1], sky_psf[2])
    return ellipse[2:]
python
def get_psf_pix(self, ra, dec): """ Determine the local psf (a,b,pa) at a given sky location. The psf is in pixel coordinates. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- a, b, pa : float The psf semi-major axis (pixels), semi-minor axis (pixels), and rotation angle (degrees). If a psf is defined then it is the psf that is returned, otherwise the image restoring beam is returned. """ psf_sky = self.get_psf_sky(ra, dec) psf_pix = self.wcshelper.sky2pix_ellipse([ra, dec], psf_sky[0], psf_sky[1], psf_sky[2])[2:] return psf_pix
[ "def", "get_psf_pix", "(", "self", ",", "ra", ",", "dec", ")", ":", "psf_sky", "=", "self", ".", "get_psf_sky", "(", "ra", ",", "dec", ")", "psf_pix", "=", "self", ".", "wcshelper", ".", "sky2pix_ellipse", "(", "[", "ra", ",", "dec", "]", ",", "psf...
Determine the local psf (a,b,pa) at a given sky location. The psf is in pixel coordinates. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- a, b, pa : float The psf semi-major axis (pixels), semi-minor axis (pixels), and rotation angle (degrees). If a psf is defined then it is the psf that is returned, otherwise the image restoring beam is returned.
[ "Determine", "the", "local", "psf", "(", "a", "b", "pa", ")", "at", "a", "given", "sky", "location", ".", "The", "psf", "is", "in", "pixel", "coordinates", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L506-L527
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
PSFHelper.get_pixbeam
def get_pixbeam(self, ra, dec):
    """
    Get the psf at the location specified in pixel coordinates.
    The psf is also in pixel coordinates.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    beam : :class:`AegeanTools.fits_image.Beam`
        The psf semi-major axis (pixels), semi-minor axis (pixels), and
        rotation angle (degrees). If a psf map is defined then that psf
        is returned, otherwise the image restoring beam is returned.
        None when the local psf is not finite.
    """
    # If there is no psf image then just use the fits header
    # (plus lat scaling) from the wcshelper
    if self.data is None:
        return self.wcshelper.get_pixbeam(ra, dec)
    # get the beam from the psf image data
    psf = self.get_psf_pix(ra, dec)
    if not np.all(np.isfinite(psf)):
        # bug fix: log.warn is a deprecated alias of log.warning
        log.warning("PSF requested, returned Null")
        return None
    return Beam(psf[0], psf[1], psf[2])
python
def get_pixbeam(self, ra, dec): """ Get the psf at the location specified in pixel coordinates. The psf is also in pixel coordinates. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- a, b, pa : float The psf semi-major axis (pixels), semi-minor axis (pixels), and rotation angle (degrees). If a psf is defined then it is the psf that is returned, otherwise the image restoring beam is returned. """ # If there is no psf image then just use the fits header (plus lat scaling) from the wcshelper if self.data is None: return self.wcshelper.get_pixbeam(ra, dec) # get the beam from the psf image data psf = self.get_psf_pix(ra, dec) if not np.all(np.isfinite(psf)): log.warn("PSF requested, returned Null") return None return Beam(psf[0], psf[1], psf[2])
[ "def", "get_pixbeam", "(", "self", ",", "ra", ",", "dec", ")", ":", "# If there is no psf image then just use the fits header (plus lat scaling) from the wcshelper", "if", "self", ".", "data", "is", "None", ":", "return", "self", ".", "wcshelper", ".", "get_pixbeam", ...
Get the psf at the location specified in pixel coordinates. The psf is also in pixel coordinates. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- a, b, pa : float The psf semi-major axis (pixels), semi-minor axis (pixels), and rotation angle (degrees). If a psf is defined then it is the psf that is returned, otherwise the image restoring beam is returned.
[ "Get", "the", "psf", "at", "the", "location", "specified", "in", "pixel", "coordinates", ".", "The", "psf", "is", "also", "in", "pixel", "coordinates", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L552-L578
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
PSFHelper.get_beam
def get_beam(self, ra, dec):
    """
    Get the psf as a :class:`AegeanTools.fits_image.Beam` object.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    beam : :class:`AegeanTools.fits_image.Beam`
        The psf at the given location, or None when the local psf is
        not finite.
    """
    # no psf map: the global restoring beam applies everywhere
    if self.data is None:
        return self.wcshelper.beam
    psf = self.get_psf_sky(ra, dec)
    if not all(np.isfinite(psf)):
        return None
    return Beam(psf[0], psf[1], psf[2])
python
def get_beam(self, ra, dec): """ Get the psf as a :class:`AegeanTools.fits_image.Beam` object. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- beam : :class:`AegeanTools.fits_image.Beam` The psf at the given location. """ if self.data is None: return self.wcshelper.beam else: psf = self.get_psf_sky(ra, dec) if not all(np.isfinite(psf)): return None return Beam(psf[0], psf[1], psf[2])
[ "def", "get_beam", "(", "self", ",", "ra", ",", "dec", ")", ":", "if", "self", ".", "data", "is", "None", ":", "return", "self", ".", "wcshelper", ".", "beam", "else", ":", "psf", "=", "self", ".", "get_psf_sky", "(", "ra", ",", "dec", ")", "if",...
Get the psf as a :class:`AegeanTools.fits_image.Beam` object. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- beam : :class:`AegeanTools.fits_image.Beam` The psf at the given location.
[ "Get", "the", "psf", "as", "a", ":", "class", ":", "AegeanTools", ".", "fits_image", ".", "Beam", "object", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L580-L600
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
PSFHelper.get_beamarea_pix
def get_beamarea_pix(self, ra, dec):
    """
    Calculate the area of the beam in square pixels.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    area : float
        The area of the beam in square pixels; zero when no beam can be
        determined at this location.
    """
    beam = self.get_pixbeam(ra, dec)
    if beam is None:
        return 0
    # area enclosed by the beam ellipse parameters: pi * a * b
    return np.pi * beam.a * beam.b
python
def get_beamarea_pix(self, ra, dec): """ Calculate the area of the beam in square pixels. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- area : float The area of the beam in square pixels. """ beam = self.get_pixbeam(ra, dec) if beam is None: return 0 return beam.a * beam.b * np.pi
[ "def", "get_beamarea_pix", "(", "self", ",", "ra", ",", "dec", ")", ":", "beam", "=", "self", ".", "get_pixbeam", "(", "ra", ",", "dec", ")", "if", "beam", "is", "None", ":", "return", "0", "return", "beam", ".", "a", "*", "beam", ".", "b", "*", ...
Calculate the area of the beam in square pixels. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- area : float The area of the beam in square pixels.
[ "Calculate", "the", "area", "of", "the", "beam", "in", "square", "pixels", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L602-L619
train
PaulHancock/Aegean
AegeanTools/wcs_helpers.py
PSFHelper.get_beamarea_deg2
def get_beamarea_deg2(self, ra, dec):
    """
    Calculate the area of the beam in square degrees.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    area : float
        The area of the beam in square degrees; zero when no beam can
        be determined at this location.
    """
    beam = self.get_beam(ra, dec)
    # pi * a * b for the beam ellipse parameters, or 0 with no beam
    return 0 if beam is None else np.pi * beam.a * beam.b
python
def get_beamarea_deg2(self, ra, dec): """ Calculate the area of the beam in square degrees. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- area : float The area of the beam in square degrees. """ beam = self.get_beam(ra, dec) if beam is None: return 0 return beam.a * beam.b * np.pi
[ "def", "get_beamarea_deg2", "(", "self", ",", "ra", ",", "dec", ")", ":", "beam", "=", "self", ".", "get_beam", "(", "ra", ",", "dec", ")", "if", "beam", "is", "None", ":", "return", "0", "return", "beam", ".", "a", "*", "beam", ".", "b", "*", ...
Calculate the area of the beam in square degrees. Parameters ---------- ra, dec : float The sky position (degrees). Returns ------- area : float The area of the beam in square degrees.
[ "Calculate", "the", "area", "of", "the", "beam", "in", "square", "degrees", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/wcs_helpers.py#L621-L639
train
PaulHancock/Aegean
AegeanTools/msq2.py
MarchingSquares.find_start_point
def find_start_point(self):
    """
    Find the first location in our array that is not empty (zero).

    Returns
    -------
    pos : (int, int) or None
        Index of the first non-zero cell, scanning in row-major order;
        None (implicitly) when every cell is zero.
    """
    for (i, j), value in np.ndenumerate(self.data):
        if value != 0:  # or not np.isfinite(value)
            return i, j
python
def find_start_point(self): """ Find the first location in our array that is not empty """ for i, row in enumerate(self.data): for j, _ in enumerate(row): if self.data[i, j] != 0: # or not np.isfinite(self.data[i,j]): return i, j
[ "def", "find_start_point", "(", "self", ")", ":", "for", "i", ",", "row", "in", "enumerate", "(", "self", ".", "data", ")", ":", "for", "j", ",", "_", "in", "enumerate", "(", "row", ")", ":", "if", "self", ".", "data", "[", "i", ",", "j", "]", ...
Find the first location in our array that is not empty
[ "Find", "the", "first", "location", "in", "our", "array", "that", "is", "not", "empty" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L36-L43
train
PaulHancock/Aegean
AegeanTools/msq2.py
MarchingSquares.step
def step(self, x, y):
    """
    Move from the current location to the next.

    Parameters
    ----------
    x, y : int
        The current location
    """
    # Encode the occupancy of the four cells that meet at corner (x, y)
    # as a 4-bit state.
    state = 0
    if self.solid(x - 1, y - 1):
        state |= 1  # up-left
    if self.solid(x, y - 1):
        state |= 2  # up-right
    if self.solid(x - 1, y):
        state |= 4  # down-left
    if self.solid(x, y):
        state |= 8  # down-right

    self.prev = self.next

    # Unambiguous states map straight to a direction.
    straight = {
        1: self.UP, 5: self.UP, 13: self.UP,
        2: self.RIGHT, 3: self.RIGHT, 7: self.RIGHT,
        4: self.LEFT, 12: self.LEFT, 14: self.LEFT,
        8: self.DOWN, 10: self.DOWN, 11: self.DOWN,
    }
    if state in straight:
        self.next = straight[state]
    elif state == 6:
        # saddle point: resolve using the previous direction
        self.next = self.LEFT if self.prev == self.UP else self.RIGHT
    elif state == 9:
        # saddle point: resolve using the previous direction
        self.next = self.UP if self.prev == self.RIGHT else self.DOWN
    else:
        # state 0 or 15: not on a boundary
        self.next = self.NOWHERE
    return
python
def step(self, x, y): """ Move from the current location to the next Parameters ---------- x, y : int The current location """ up_left = self.solid(x - 1, y - 1) up_right = self.solid(x, y - 1) down_left = self.solid(x - 1, y) down_right = self.solid(x, y) state = 0 self.prev = self.next # which cells are filled? if up_left: state |= 1 if up_right: state |= 2 if down_left: state |= 4 if down_right: state |= 8 # what is the next step? if state in [1, 5, 13]: self.next = self.UP elif state in [2, 3, 7]: self.next = self.RIGHT elif state in [4, 12, 14]: self.next = self.LEFT elif state in [8, 10, 11]: self.next = self.DOWN elif state == 6: if self.prev == self.UP: self.next = self.LEFT else: self.next = self.RIGHT elif state == 9: if self.prev == self.RIGHT: self.next = self.UP else: self.next = self.DOWN else: self.next = self.NOWHERE return
[ "def", "step", "(", "self", ",", "x", ",", "y", ")", ":", "up_left", "=", "self", ".", "solid", "(", "x", "-", "1", ",", "y", "-", "1", ")", "up_right", "=", "self", ".", "solid", "(", "x", ",", "y", "-", "1", ")", "down_left", "=", "self",...
Move from the current location to the next Parameters ---------- x, y : int The current location
[ "Move", "from", "the", "current", "location", "to", "the", "next" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L45-L92
train
PaulHancock/Aegean
AegeanTools/msq2.py
MarchingSquares.solid
def solid(self, x, y):
    """
    Determine whether the pixel x,y is nonzero.

    Parameters
    ----------
    x, y : int
        The pixel of interest.

    Returns
    -------
    solid : bool
        True if the pixel is inside the data bounds and not zero.
    """
    inside = (0 <= x < self.xsize) and (0 <= y < self.ysize)
    return bool(inside and self.data[x, y] != 0)
python
def solid(self, x, y): """ Determine whether the pixel x,y is nonzero Parameters ---------- x, y : int The pixel of interest. Returns ------- solid : bool True if the pixel is not zero. """ if not(0 <= x < self.xsize) or not(0 <= y < self.ysize): return False if self.data[x, y] == 0: return False return True
[ "def", "solid", "(", "self", ",", "x", ",", "y", ")", ":", "if", "not", "(", "0", "<=", "x", "<", "self", ".", "xsize", ")", "or", "not", "(", "0", "<=", "y", "<", "self", ".", "ysize", ")", ":", "return", "False", "if", "self", ".", "data"...
Determine whether the pixel x,y is nonzero Parameters ---------- x, y : int The pixel of interest. Returns ------- solid : bool True if the pixel is not zero.
[ "Determine", "whether", "the", "pixel", "x", "y", "is", "nonzero" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L94-L112
train
PaulHancock/Aegean
AegeanTools/msq2.py
MarchingSquares.walk_perimeter
def walk_perimeter(self, startx, starty):
    """
    Starting at a point on the perimeter of a region, 'walk' the
    perimeter until we return to the starting point, recording the
    path taken.

    Parameters
    ----------
    startx, starty : int
        The starting location. Assumed to be on the perimeter of a region.

    Returns
    -------
    perimeter : list
        A list of pixel coordinates [ (x1,y1), ...] that constitute the
        perimeter of the region.
    """
    # clamp the starting point to the (inclusive) bounds of the grid
    startx = min(max(startx, 0), self.xsize)
    starty = min(max(starty, 0), self.ysize)

    points = []
    x, y = startx, starty
    while True:
        # decide the direction of the next move
        self.step(x, y)
        if 0 <= x <= self.xsize and 0 <= y <= self.ysize:
            points.append((x, y))
        if self.next == self.UP:
            y -= 1
        elif self.next == self.DOWN:
            y += 1
        elif self.next == self.LEFT:
            x -= 1
        elif self.next == self.RIGHT:
            x += 1
        elif self.next == self.NOWHERE:
            # some kind of error: stop walking
            break
        # stop when we return to the starting location
        if x == startx and y == starty:
            break
    return points
python
def walk_perimeter(self, startx, starty): """ Starting at a point on the perimeter of a region, 'walk' the perimeter to return to the starting point. Record the path taken. Parameters ---------- startx, starty : int The starting location. Assumed to be on the perimeter of a region. Returns ------- perimeter : list A list of pixel coordinates [ [x1,y1], ...] that constitute the perimeter of the region. """ # checks startx = max(startx, 0) startx = min(startx, self.xsize) starty = max(starty, 0) starty = min(starty, self.ysize) points = [] x, y = startx, starty while True: self.step(x, y) if 0 <= x <= self.xsize and 0 <= y <= self.ysize: points.append((x, y)) if self.next == self.UP: y -= 1 elif self.next == self.LEFT: x -= 1 elif self.next == self.DOWN: y += 1 elif self.next == self.RIGHT: x += 1 # stop if we meet some kind of error elif self.next == self.NOWHERE: break # stop when we return to the starting location if x == startx and y == starty: break return points
[ "def", "walk_perimeter", "(", "self", ",", "startx", ",", "starty", ")", ":", "# checks", "startx", "=", "max", "(", "startx", ",", "0", ")", "startx", "=", "min", "(", "startx", ",", "self", ".", "xsize", ")", "starty", "=", "max", "(", "starty", ...
Starting at a point on the perimeter of a region, 'walk' the perimeter to return to the starting point. Record the path taken. Parameters ---------- startx, starty : int The starting location. Assumed to be on the perimeter of a region. Returns ------- perimeter : list A list of pixel coordinates [ [x1,y1], ...] that constitute the perimeter of the region.
[ "Starting", "at", "a", "point", "on", "the", "perimeter", "of", "a", "region", "walk", "the", "perimeter", "to", "return", "to", "the", "starting", "point", ".", "Record", "the", "path", "taken", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L114-L157
train
PaulHancock/Aegean
AegeanTools/msq2.py
MarchingSquares.do_march
def do_march(self):
    """
    March about and trace the outline of our object.

    Returns
    -------
    perimeter : list
        The pixels on the perimeter of the region [(x1, y1), ...]
    """
    # walk from the first non-empty cell
    start_x, start_y = self.find_start_point()
    return self.walk_perimeter(start_x, start_y)
python
def do_march(self): """ March about and trace the outline of our object Returns ------- perimeter : list The pixels on the perimeter of the region [[x1, y1], ...] """ x, y = self.find_start_point() perimeter = self.walk_perimeter(x, y) return perimeter
[ "def", "do_march", "(", "self", ")", ":", "x", ",", "y", "=", "self", ".", "find_start_point", "(", ")", "perimeter", "=", "self", ".", "walk_perimeter", "(", "x", ",", "y", ")", "return", "perimeter" ]
March about and trace the outline of our object Returns ------- perimeter : list The pixels on the perimeter of the region [[x1, y1], ...]
[ "March", "about", "and", "trace", "the", "outline", "of", "our", "object" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L159-L170
train
PaulHancock/Aegean
AegeanTools/msq2.py
MarchingSquares._blank_within
def _blank_within(self, perimeter): """ Blank all the pixels within the given perimeter. Parameters ---------- perimeter : list The perimeter of the region. """ # Method: # scan around the perimeter filling 'up' from each pixel # stopping when we reach the other boundary for p in perimeter: # if we are on the edge of the data then there is nothing to fill if p[0] >= self.data.shape[0] or p[1] >= self.data.shape[1]: continue # if this pixel is blank then don't fill if self.data[p] == 0: continue # blank this pixel self.data[p] = 0 # blank until we reach the other perimeter for i in range(p[1]+1, self.data.shape[1]): q = p[0], i # stop when we reach another part of the perimeter if q in perimeter: break # fill everything in between, even inclusions self.data[q] = 0 return
python
def _blank_within(self, perimeter): """ Blank all the pixels within the given perimeter. Parameters ---------- perimeter : list The perimeter of the region. """ # Method: # scan around the perimeter filling 'up' from each pixel # stopping when we reach the other boundary for p in perimeter: # if we are on the edge of the data then there is nothing to fill if p[0] >= self.data.shape[0] or p[1] >= self.data.shape[1]: continue # if this pixel is blank then don't fill if self.data[p] == 0: continue # blank this pixel self.data[p] = 0 # blank until we reach the other perimeter for i in range(p[1]+1, self.data.shape[1]): q = p[0], i # stop when we reach another part of the perimeter if q in perimeter: break # fill everything in between, even inclusions self.data[q] = 0 return
[ "def", "_blank_within", "(", "self", ",", "perimeter", ")", ":", "# Method:", "# scan around the perimeter filling 'up' from each pixel", "# stopping when we reach the other boundary", "for", "p", "in", "perimeter", ":", "# if we are on the edge of the data then there is nothing to f...
Blank all the pixels within the given perimeter. Parameters ---------- perimeter : list The perimeter of the region.
[ "Blank", "all", "the", "pixels", "within", "the", "given", "perimeter", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L172-L205
train
PaulHancock/Aegean
AegeanTools/msq2.py
MarchingSquares.do_march_all
def do_march_all(self):
    """
    Recursive march in the case that we have a fragmented shape.

    Returns
    -------
    perimeters : [perimeter1, ...]
        The perimeters of all the regions in the image.

    See Also
    --------
    :func:`AegeanTools.msq2.MarchingSquares.do_march`
    """
    # the data will be modified as islands are blanked, so keep a copy
    # to restore afterwards
    backup = copy(self.data)

    # repeatedly find an island, trace its perimeter, then blank it so
    # the next search finds a different island
    perimeters = []
    start = self.find_start_point()
    while start is not None:
        perim = self.walk_perimeter(start[0], start[1])
        perimeters.append(perim)
        self._blank_within(perim)
        start = self.find_start_point()

    # restore the data
    self.data = backup
    return perimeters
python
def do_march_all(self): """ Recursive march in the case that we have a fragmented shape. Returns ------- perimeters : [perimeter1, ...] The perimeters of all the regions in the image. See Also -------- :func:`AegeanTools.msq2.MarchingSquares.do_march` """ # copy the data since we are going to be modifying it data_copy = copy(self.data) # iterate through finding an island, creating a perimeter, # and then blanking the island perimeters = [] p = self.find_start_point() while p is not None: x, y = p perim = self.walk_perimeter(x, y) perimeters.append(perim) self._blank_within(perim) p = self.find_start_point() # restore the data self.data = data_copy return perimeters
[ "def", "do_march_all", "(", "self", ")", ":", "# copy the data since we are going to be modifying it", "data_copy", "=", "copy", "(", "self", ".", "data", ")", "# iterate through finding an island, creating a perimeter,", "# and then blanking the island", "perimeters", "=", "["...
Recursive march in the case that we have a fragmented shape. Returns ------- perimeters : [perimeter1, ...] The perimeters of all the regions in the image. See Also -------- :func:`AegeanTools.msq2.MarchingSquares.do_march`
[ "Recursive", "march", "in", "the", "case", "that", "we", "have", "a", "fragmented", "shape", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/msq2.py#L207-L236
train
PaulHancock/Aegean
AegeanTools/fitting.py
elliptical_gaussian
def elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta): """ Generate a model 2d Gaussian with the given parameters. Evaluate this model at the given locations x,y. Parameters ---------- x, y : numeric or array-like locations at which to evaluate the gaussian amp : float Peak value. xo, yo : float Center of the gaussian. sx, sy : float major/minor axes in sigmas theta : float position angle (degrees) CCW from x-axis Returns ------- data : numeric or array-like Gaussian function evaluated at the x,y locations. """ try: sint, cost = math.sin(np.radians(theta)), math.cos(np.radians(theta)) except ValueError as e: if 'math domain error' in e.args: sint, cost = np.nan, np.nan xxo = x - xo yyo = y - yo exp = (xxo * cost + yyo * sint) ** 2 / sx ** 2 \ + (xxo * sint - yyo * cost) ** 2 / sy ** 2 exp *= -1. / 2 return amp * np.exp(exp)
python
def elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta): """ Generate a model 2d Gaussian with the given parameters. Evaluate this model at the given locations x,y. Parameters ---------- x, y : numeric or array-like locations at which to evaluate the gaussian amp : float Peak value. xo, yo : float Center of the gaussian. sx, sy : float major/minor axes in sigmas theta : float position angle (degrees) CCW from x-axis Returns ------- data : numeric or array-like Gaussian function evaluated at the x,y locations. """ try: sint, cost = math.sin(np.radians(theta)), math.cos(np.radians(theta)) except ValueError as e: if 'math domain error' in e.args: sint, cost = np.nan, np.nan xxo = x - xo yyo = y - yo exp = (xxo * cost + yyo * sint) ** 2 / sx ** 2 \ + (xxo * sint - yyo * cost) ** 2 / sy ** 2 exp *= -1. / 2 return amp * np.exp(exp)
[ "def", "elliptical_gaussian", "(", "x", ",", "y", ",", "amp", ",", "xo", ",", "yo", ",", "sx", ",", "sy", ",", "theta", ")", ":", "try", ":", "sint", ",", "cost", "=", "math", ".", "sin", "(", "np", ".", "radians", "(", "theta", ")", ")", ","...
Generate a model 2d Gaussian with the given parameters. Evaluate this model at the given locations x,y. Parameters ---------- x, y : numeric or array-like locations at which to evaluate the gaussian amp : float Peak value. xo, yo : float Center of the gaussian. sx, sy : float major/minor axes in sigmas theta : float position angle (degrees) CCW from x-axis Returns ------- data : numeric or array-like Gaussian function evaluated at the x,y locations.
[ "Generate", "a", "model", "2d", "Gaussian", "with", "the", "given", "parameters", ".", "Evaluate", "this", "model", "at", "the", "given", "locations", "x", "y", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L30-L63
train
PaulHancock/Aegean
AegeanTools/fitting.py
Cmatrix
def Cmatrix(x, y, sx, sy, theta): """ Construct a correlation matrix corresponding to the data. The matrix assumes a gaussian correlation function. Parameters ---------- x, y : array-like locations at which to evaluate the correlation matirx sx, sy : float major/minor axes of the gaussian correlation function (sigmas) theta : float position angle of the gaussian correlation function (degrees) Returns ------- data : array-like The C-matrix. """ C = np.vstack([elliptical_gaussian(x, y, 1, i, j, sx, sy, theta) for i, j in zip(x, y)]) return C
python
def Cmatrix(x, y, sx, sy, theta): """ Construct a correlation matrix corresponding to the data. The matrix assumes a gaussian correlation function. Parameters ---------- x, y : array-like locations at which to evaluate the correlation matirx sx, sy : float major/minor axes of the gaussian correlation function (sigmas) theta : float position angle of the gaussian correlation function (degrees) Returns ------- data : array-like The C-matrix. """ C = np.vstack([elliptical_gaussian(x, y, 1, i, j, sx, sy, theta) for i, j in zip(x, y)]) return C
[ "def", "Cmatrix", "(", "x", ",", "y", ",", "sx", ",", "sy", ",", "theta", ")", ":", "C", "=", "np", ".", "vstack", "(", "[", "elliptical_gaussian", "(", "x", ",", "y", ",", "1", ",", "i", ",", "j", ",", "sx", ",", "sy", ",", "theta", ")", ...
Construct a correlation matrix corresponding to the data. The matrix assumes a gaussian correlation function. Parameters ---------- x, y : array-like locations at which to evaluate the correlation matirx sx, sy : float major/minor axes of the gaussian correlation function (sigmas) theta : float position angle of the gaussian correlation function (degrees) Returns ------- data : array-like The C-matrix.
[ "Construct", "a", "correlation", "matrix", "corresponding", "to", "the", "data", ".", "The", "matrix", "assumes", "a", "gaussian", "correlation", "function", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L66-L87
train
PaulHancock/Aegean
AegeanTools/fitting.py
Bmatrix
def Bmatrix(C): """ Calculate a matrix which is effectively the square root of the correlation matrix C Parameters ---------- C : 2d array A covariance matrix Returns ------- B : 2d array A matrix B such the B.dot(B') = inv(C) """ # this version of finding the square root of the inverse matrix # suggested by Cath Trott L, Q = eigh(C) # force very small eigenvalues to have some minimum non-zero value minL = 1e-9*L[-1] L[L < minL] = minL S = np.diag(1 / np.sqrt(L)) B = Q.dot(S) return B
python
def Bmatrix(C): """ Calculate a matrix which is effectively the square root of the correlation matrix C Parameters ---------- C : 2d array A covariance matrix Returns ------- B : 2d array A matrix B such the B.dot(B') = inv(C) """ # this version of finding the square root of the inverse matrix # suggested by Cath Trott L, Q = eigh(C) # force very small eigenvalues to have some minimum non-zero value minL = 1e-9*L[-1] L[L < minL] = minL S = np.diag(1 / np.sqrt(L)) B = Q.dot(S) return B
[ "def", "Bmatrix", "(", "C", ")", ":", "# this version of finding the square root of the inverse matrix", "# suggested by Cath Trott", "L", ",", "Q", "=", "eigh", "(", "C", ")", "# force very small eigenvalues to have some minimum non-zero value", "minL", "=", "1e-9", "*", "...
Calculate a matrix which is effectively the square root of the correlation matrix C Parameters ---------- C : 2d array A covariance matrix Returns ------- B : 2d array A matrix B such the B.dot(B') = inv(C)
[ "Calculate", "a", "matrix", "which", "is", "effectively", "the", "square", "root", "of", "the", "correlation", "matrix", "C" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L90-L113
train
PaulHancock/Aegean
AegeanTools/fitting.py
jacobian
def jacobian(pars, x, y): """ Analytical calculation of the Jacobian for an elliptical gaussian Will work for a model that contains multiple Gaussians, and for which some components are not being fit (don't vary). Parameters ---------- pars : lmfit.Model The model parameters x, y : list Locations at which the jacobian is being evaluated Returns ------- j : 2d array The Jacobian. See Also -------- :func:`AegeanTools.fitting.emp_jacobian` """ matrix = [] for i in range(pars['components'].value): prefix = "c{0}_".format(i) amp = pars[prefix + 'amp'].value xo = pars[prefix + 'xo'].value yo = pars[prefix + 'yo'].value sx = pars[prefix + 'sx'].value sy = pars[prefix + 'sy'].value theta = pars[prefix + 'theta'].value # The derivative with respect to component i doesn't depend on any other components # thus the model should not contain the other components model = elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) # precompute for speed sint = np.sin(np.radians(theta)) cost = np.cos(np.radians(theta)) xxo = x - xo yyo = y - yo xcos, ycos = xxo * cost, yyo * cost xsin, ysin = xxo * sint, yyo * sint if pars[prefix + 'amp'].vary: dmds = model / amp matrix.append(dmds) if pars[prefix + 'xo'].vary: dmdxo = cost * (xcos + ysin) / sx ** 2 + sint * (xsin - ycos) / sy ** 2 dmdxo *= model matrix.append(dmdxo) if pars[prefix + 'yo'].vary: dmdyo = sint * (xcos + ysin) / sx ** 2 - cost * (xsin - ycos) / sy ** 2 dmdyo *= model matrix.append(dmdyo) if pars[prefix + 'sx'].vary: dmdsx = model / sx ** 3 * (xcos + ysin) ** 2 matrix.append(dmdsx) if pars[prefix + 'sy'].vary: dmdsy = model / sy ** 3 * (xsin - ycos) ** 2 matrix.append(dmdsy) if pars[prefix + 'theta'].vary: dmdtheta = model * (sy ** 2 - sx ** 2) * (xsin - ycos) * (xcos + ysin) / sx ** 2 / sy ** 2 matrix.append(dmdtheta) return np.array(matrix)
python
def jacobian(pars, x, y): """ Analytical calculation of the Jacobian for an elliptical gaussian Will work for a model that contains multiple Gaussians, and for which some components are not being fit (don't vary). Parameters ---------- pars : lmfit.Model The model parameters x, y : list Locations at which the jacobian is being evaluated Returns ------- j : 2d array The Jacobian. See Also -------- :func:`AegeanTools.fitting.emp_jacobian` """ matrix = [] for i in range(pars['components'].value): prefix = "c{0}_".format(i) amp = pars[prefix + 'amp'].value xo = pars[prefix + 'xo'].value yo = pars[prefix + 'yo'].value sx = pars[prefix + 'sx'].value sy = pars[prefix + 'sy'].value theta = pars[prefix + 'theta'].value # The derivative with respect to component i doesn't depend on any other components # thus the model should not contain the other components model = elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) # precompute for speed sint = np.sin(np.radians(theta)) cost = np.cos(np.radians(theta)) xxo = x - xo yyo = y - yo xcos, ycos = xxo * cost, yyo * cost xsin, ysin = xxo * sint, yyo * sint if pars[prefix + 'amp'].vary: dmds = model / amp matrix.append(dmds) if pars[prefix + 'xo'].vary: dmdxo = cost * (xcos + ysin) / sx ** 2 + sint * (xsin - ycos) / sy ** 2 dmdxo *= model matrix.append(dmdxo) if pars[prefix + 'yo'].vary: dmdyo = sint * (xcos + ysin) / sx ** 2 - cost * (xsin - ycos) / sy ** 2 dmdyo *= model matrix.append(dmdyo) if pars[prefix + 'sx'].vary: dmdsx = model / sx ** 3 * (xcos + ysin) ** 2 matrix.append(dmdsx) if pars[prefix + 'sy'].vary: dmdsy = model / sy ** 3 * (xsin - ycos) ** 2 matrix.append(dmdsy) if pars[prefix + 'theta'].vary: dmdtheta = model * (sy ** 2 - sx ** 2) * (xsin - ycos) * (xcos + ysin) / sx ** 2 / sy ** 2 matrix.append(dmdtheta) return np.array(matrix)
[ "def", "jacobian", "(", "pars", ",", "x", ",", "y", ")", ":", "matrix", "=", "[", "]", "for", "i", "in", "range", "(", "pars", "[", "'components'", "]", ".", "value", ")", ":", "prefix", "=", "\"c{0}_\"", ".", "format", "(", "i", ")", "amp", "=...
Analytical calculation of the Jacobian for an elliptical gaussian Will work for a model that contains multiple Gaussians, and for which some components are not being fit (don't vary). Parameters ---------- pars : lmfit.Model The model parameters x, y : list Locations at which the jacobian is being evaluated Returns ------- j : 2d array The Jacobian. See Also -------- :func:`AegeanTools.fitting.emp_jacobian`
[ "Analytical", "calculation", "of", "the", "Jacobian", "for", "an", "elliptical", "gaussian", "Will", "work", "for", "a", "model", "that", "contains", "multiple", "Gaussians", "and", "for", "which", "some", "components", "are", "not", "being", "fit", "(", "don"...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L116-L188
train
PaulHancock/Aegean
AegeanTools/fitting.py
lmfit_jacobian
def lmfit_jacobian(pars, x, y, errs=None, B=None, emp=False): """ Wrapper around :func:`AegeanTools.fitting.jacobian` and :func:`AegeanTools.fitting.emp_jacobian` which gives the output in a format that is required for lmfit. Parameters ---------- pars : lmfit.Model The model parameters x, y : list Locations at which the jacobian is being evaluated errs : list a vector of 1\sigma errors (optional). Default = None B : 2d-array a B-matrix (optional) see :func:`AegeanTools.fitting.Bmatrix` emp : bool If true the use the empirical Jacobian, otherwise use the analytical one. Default = False. Returns ------- j : 2d-array A Jacobian. See Also -------- :func:`AegeanTools.fitting.Bmatrix` :func:`AegeanTools.fitting.jacobian` :func:`AegeanTools.fitting.emp_jacobian` """ if emp: matrix = emp_jacobian(pars, x, y) else: # calculate in the normal way matrix = jacobian(pars, x, y) # now munge this to be as expected for lmfit matrix = np.vstack(matrix) if errs is not None: matrix /= errs # matrix = matrix.dot(errs) if B is not None: matrix = matrix.dot(B) matrix = np.transpose(matrix) return matrix
python
def lmfit_jacobian(pars, x, y, errs=None, B=None, emp=False): """ Wrapper around :func:`AegeanTools.fitting.jacobian` and :func:`AegeanTools.fitting.emp_jacobian` which gives the output in a format that is required for lmfit. Parameters ---------- pars : lmfit.Model The model parameters x, y : list Locations at which the jacobian is being evaluated errs : list a vector of 1\sigma errors (optional). Default = None B : 2d-array a B-matrix (optional) see :func:`AegeanTools.fitting.Bmatrix` emp : bool If true the use the empirical Jacobian, otherwise use the analytical one. Default = False. Returns ------- j : 2d-array A Jacobian. See Also -------- :func:`AegeanTools.fitting.Bmatrix` :func:`AegeanTools.fitting.jacobian` :func:`AegeanTools.fitting.emp_jacobian` """ if emp: matrix = emp_jacobian(pars, x, y) else: # calculate in the normal way matrix = jacobian(pars, x, y) # now munge this to be as expected for lmfit matrix = np.vstack(matrix) if errs is not None: matrix /= errs # matrix = matrix.dot(errs) if B is not None: matrix = matrix.dot(B) matrix = np.transpose(matrix) return matrix
[ "def", "lmfit_jacobian", "(", "pars", ",", "x", ",", "y", ",", "errs", "=", "None", ",", "B", "=", "None", ",", "emp", "=", "False", ")", ":", "if", "emp", ":", "matrix", "=", "emp_jacobian", "(", "pars", ",", "x", ",", "y", ")", "else", ":", ...
Wrapper around :func:`AegeanTools.fitting.jacobian` and :func:`AegeanTools.fitting.emp_jacobian` which gives the output in a format that is required for lmfit. Parameters ---------- pars : lmfit.Model The model parameters x, y : list Locations at which the jacobian is being evaluated errs : list a vector of 1\sigma errors (optional). Default = None B : 2d-array a B-matrix (optional) see :func:`AegeanTools.fitting.Bmatrix` emp : bool If true the use the empirical Jacobian, otherwise use the analytical one. Default = False. Returns ------- j : 2d-array A Jacobian. See Also -------- :func:`AegeanTools.fitting.Bmatrix` :func:`AegeanTools.fitting.jacobian` :func:`AegeanTools.fitting.emp_jacobian`
[ "Wrapper", "around", ":", "func", ":", "AegeanTools", ".", "fitting", ".", "jacobian", "and", ":", "func", ":", "AegeanTools", ".", "fitting", ".", "emp_jacobian", "which", "gives", "the", "output", "in", "a", "format", "that", "is", "required", "for", "lm...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L228-L279
train
PaulHancock/Aegean
AegeanTools/fitting.py
hessian
def hessian(pars, x, y): """ Create a hessian matrix corresponding to the source model 'pars' Only parameters that vary will contribute to the hessian. Thus there will be a total of nvar x nvar entries, each of which is a len(x) x len(y) array. Parameters ---------- pars : lmfit.Parameters The model x, y : list locations at which to evaluate the Hessian Returns ------- h : np.array Hessian. Shape will be (nvar, nvar, len(x), len(y)) See Also -------- :func:`AegeanTools.fitting.emp_hessian` """ j = 0 # keeping track of the number of variable parameters # total number of variable parameters ntvar = np.sum([pars[k].vary for k in pars.keys() if k != 'components']) # construct an empty matrix of the correct size hmat = np.zeros((ntvar, ntvar, x.shape[0], x.shape[1])) npvar = 0 for i in range(pars['components'].value): prefix = "c{0}_".format(i) amp = pars[prefix + 'amp'].value xo = pars[prefix + 'xo'].value yo = pars[prefix + 'yo'].value sx = pars[prefix + 'sx'].value sy = pars[prefix + 'sy'].value theta = pars[prefix + 'theta'].value amp_var = pars[prefix + 'amp'].vary xo_var = pars[prefix + 'xo'].vary yo_var = pars[prefix + 'yo'].vary sx_var = pars[prefix + 'sx'].vary sy_var = pars[prefix + 'sy'].vary theta_var = pars[prefix + 'theta'].vary # precomputed for speed model = elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) sint = np.sin(np.radians(theta)) sin2t = np.sin(np.radians(2*theta)) cost = np.cos(np.radians(theta)) cos2t = np.cos(np.radians(2*theta)) sx2 = sx**2 sy2 = sy**2 xxo = x-xo yyo = y-yo xcos, ycos = xxo*cost, yyo*cost xsin, ysin = xxo*sint, yyo*sint if amp_var: k = npvar # second round of keeping track of variable params # H(amp,amp)/G = 0 hmat[j][k] = 0 k += 1 if xo_var: # H(amp,xo)/G = 1.0*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))/(amp*sx**2*sy**2) hmat[j][k] = (xsin - ycos)*sint/sy2 + (xcos + ysin)*cost/sx2 hmat[j][k] *= model k += 1 if yo_var: # H(amp,yo)/G = 1.0*(-sx**2*((x - 
xo)*sin(t) + (-y + yo)*cos(t))*cos(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t))/(amp*sx**2*sy**2) hmat[j][k] = -(xsin - ycos)*cost/sy2 + (xcos + ysin)*sint/sx2 hmat[j][k] *= model/amp k += 1 if sx_var: # H(amp,sx)/G = 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2/(amp*sx**3) hmat[j][k] = (xcos + ysin)**2 hmat[j][k] *= model/(amp*sx**3) k += 1 if sy_var: # H(amp,sy) = 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2/(amp*sy**3) hmat[j][k] = (xsin - ycos)**2 hmat[j][k] *= model/(amp*sy**3) k += 1 if theta_var: # H(amp,t) = (-1.0*sx**2 + sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(amp*sx**2*sy**2) hmat[j][k] = (xsin - ycos)*(xcos + ysin) hmat[j][k] *= sy2-sx2 hmat[j][k] *= model/(amp*sx2*sy2) # k += 1 j += 1 if xo_var: k = npvar if amp_var: # H(xo,amp)/G = H(amp,xo) hmat[j][k] = hmat[k][j] k += 1 # if xo_var: # H(xo,xo)/G = 1.0*(-sx**2*sy**2*(sx**2*sin(t)**2 + sy**2*cos(t)**2) + (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))**2)/(sx**4*sy**4) hmat[j][k] = -sx2*sy2*(sx2*sint**2 + sy2*cost**2) hmat[j][k] += (sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost)**2 hmat[j][k] *= model/ (sx2**2*sy2**2) k += 1 if yo_var: # H(xo,yo)/G = 1.0*(sx**2*sy**2*(sx**2 - sy**2)*sin(2*t)/2 - (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**4*sy**4) hmat[j][k] = sx2*sy2*(sx2 - sy2)*sin2t/2 hmat[j][k] -= (sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost)*(sx2*(xsin -ycos)*cost - sy2*(xcos + ysin)*sint) hmat[j][k] *= model / (sx**4*sy**4) k += 1 if sx_var: # H(xo,sx) = ((x - xo)*cos(t) + (y - yo)*sin(t))*(-2.0*sx**2*sy**2*cos(t) + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx**5*sy**2) hmat[j][k] = (xcos + ysin) hmat[j][k] *= 
-2*sx2*sy2*cost + (xcos + ysin)*(sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost) hmat[j][k] *= model / (sx**5*sy2) k += 1 if sy_var: # H(xo,sy) = ((x - xo)*sin(t) + (-y + yo)*cos(t))*(-2.0*sx**2*sy**2*sin(t) + 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx2*sy**5) hmat[j][k] = (xsin - ycos) hmat[j][k] *= -2*sx2*sy2*sint + (xsin - ycos)*(sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost) hmat[j][k] *= model/(sx2*sy**5) k += 1 if theta_var: # H(xo,t) = 1.0*(sx**2*sy**2*(sx**2 - sy**2)*(x*sin(2*t) - xo*sin(2*t) - y*cos(2*t) + yo*cos(2*t)) + (-sx**2 + 1.0*sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx**4*sy**4) # second part hmat[j][k] = (sy2-sx2)*(xsin - ycos)*(xcos + ysin) hmat[j][k] *= sx2*(xsin -ycos)*sint + sy2*(xcos + ysin)*cost # first part hmat[j][k] += sx2*sy2*(sx2 - sy2)*(xxo*sin2t -yyo*cos2t) hmat[j][k] *= model/(sx**4*sy**4) # k += 1 j += 1 if yo_var: k = npvar if amp_var: # H(yo,amp)/G = H(amp,yo) hmat[j][k] = hmat[0][2] k += 1 if xo_var: # H(yo,xo)/G = H(xo,yo)/G hmat[j][k] =hmat[1][2] k += 1 # if yo_var: # H(yo,yo)/G = 1.0*(-sx**2*sy**2*(sx**2*cos(t)**2 + sy**2*sin(t)**2) + (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t))**2)/(sx**4*sy**4) hmat[j][k] = (sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint)**2 / (sx2**2*sy2**2) hmat[j][k] -= cost**2/sy2 + sint**2/sx2 hmat[j][k] *= model k += 1 if sx_var: # H(yo,sx)/G = -((x - xo)*cos(t) + (y - yo)*sin(t))*(2.0*sx**2*sy**2*sin(t) + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) - (y - yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**5*sy**2) hmat[j][k] = -1*(xcos + ysin) hmat[j][k] *= 2*sx2*sy2*sint + (xcos + ysin)*(sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint) hmat[j][k] 
*= model/(sx**5*sy2) k += 1 if sy_var: # H(yo,sy)/G = ((x - xo)*sin(t) + (-y + yo)*cos(t))*(2.0*sx**2*sy**2*cos(t) - 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**2*sy**5) hmat[j][k] = (xsin -ycos) hmat[j][k] *= 2*sx2*sy2*cost - (xsin - ycos)*(sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint) hmat[j][k] *= model/(sx2*sy**5) k += 1 if theta_var: # H(yo,t)/G = 1.0*(sx**2*sy**2*(sx**2*(-x*cos(2*t) + xo*cos(2*t) - y*sin(2*t) + yo*sin(2*t)) + sy**2*(x*cos(2*t) - xo*cos(2*t) + y*sin(2*t) - yo*sin(2*t))) + (1.0*sx**2 - sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**4*sy**4) hmat[j][k] = (sx2 - sy2)*(xsin - ycos)*(xcos + ysin) hmat[j][k] *= (sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint) hmat[j][k] += sx2*sy2*(sx2-sy2)*(-x*cos2t + xo*cos2t - y*sin2t + yo*sin2t) hmat[j][k] *= model/(sx**4*sy**4) # k += 1 j += 1 if sx_var: k = npvar if amp_var: # H(sx,amp)/G = H(amp,sx)/G hmat[j][k] = hmat[k][j] k += 1 if xo_var: # H(sx,xo)/G = H(xo,sx)/G hmat[j][k] = hmat[k][j] k += 1 if yo_var: # H(sx,yo)/G = H(yo/sx)/G hmat[j][k] = hmat[k][j] k += 1 # if sx_var: # H(sx,sx)/G = (-3.0*sx**2 + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2)*((x - xo)*cos(t) + (y - yo)*sin(t))**2/sx**6 hmat[j][k] = -3*sx2 + (xcos + ysin)**2 hmat[j][k] *= (xcos + ysin)**2 hmat[j][k] *= model/sx**6 k += 1 if sy_var: # H(sx,sy)/G = 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2*((x - xo)*cos(t) + (y - yo)*sin(t))**2/(sx**3*sy**3) hmat[j][k] = (xsin - ycos)**2 * (xcos + ysin)**2 hmat[j][k] *= model/(sx**3*sy**3) k += 1 if theta_var: # H(sx,t)/G = (-2.0*sx**2*sy**2 + 1.0*(-sx**2 + sy**2)*((x - xo)*cos(t) + (y - yo)*sin(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(sx**5*sy**2) hmat[j][k] = -2*sx2*sy2 + (sy2 - sx2)*(xcos + ysin)**2 hmat[j][k] 
*= (xsin -ycos)*(xcos + ysin) hmat[j][k] *= model/(sx**5*sy**2) # k += 1 j += 1 if sy_var: k = npvar if amp_var: # H(sy,amp)/G = H(amp,sy)/G hmat[j][k] = hmat[k][j] k += 1 if xo_var: # H(sy,xo)/G = H(xo,sy)/G hmat[j][k] = hmat[k][j] k += 1 if yo_var: # H(sy,yo)/G = H(yo/sy)/G hmat[j][k] = hmat[k][j] k += 1 if sx_var: # H(sy,sx)/G = H(sx,sy)/G hmat[j][k] = hmat[k][j] k += 1 # if sy_var: # H(sy,sy)/G = (-3.0*sy**2 + 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))**2/sy**6 hmat[j][k] = -3*sy2 + (xsin - ycos)**2 hmat[j][k] *= (xsin - ycos)**2 hmat[j][k] *= model/sy**6 k += 1 if theta_var: # H(sy,t)/G = (2.0*sx**2*sy**2 + 1.0*(-sx**2 + sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(sx**2*sy**5) hmat[j][k] = 2*sx2*sy2 + (sy2 - sx2)*(xsin - ycos)**2 hmat[j][k] *= (xsin - ycos)*(xcos + ysin) hmat[j][k] *= model/(sx**2*sy**5) # k += 1 j += 1 if theta_var: k = npvar if amp_var: # H(t,amp)/G = H(amp,t)/G hmat[j][k] = hmat[k][j] k += 1 if xo_var: # H(t,xo)/G = H(xo,t)/G hmat[j][k] = hmat[k][j] k += 1 if yo_var: # H(t,yo)/G = H(yo/t)/G hmat[j][k] = hmat[k][j] k += 1 if sx_var: # H(t,sx)/G = H(sx,t)/G hmat[j][k] = hmat[k][j] k += 1 if sy_var: # H(t,sy)/G = H(sy,t)/G hmat[j][k] = hmat[k][j] k += 1 # if theta_var: # H(t,t)/G = (sx**2*sy**2*(sx**2*(((x - xo)*sin(t) + (-y + yo)*cos(t))**2 - 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2) + sy**2*(-1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2 + ((x - xo)*cos(t) + (y - yo)*sin(t))**2)) + (sx**2 - 1.0*sy**2)**2*((x - xo)*sin(t) + (-y + yo)*cos(t))**2*((x - xo)*cos(t) + (y - yo)*sin(t))**2)/(sx**4*sy**4) hmat[j][k] = sx2*sy2 hmat[j][k] *= sx2*((xsin - ycos)**2 - (xcos + ysin)**2) + sy2*((xcos + ysin)**2 - (xsin - ycos)**2) hmat[j][k] += (sx2 - sy2)**2*(xsin - ycos)**2*(xcos + ysin)**2 hmat[j][k] *= model/(sx**4*sy**4) # j += 1 # save the number of variables for the next iteration # as we need to start our indexing at this number npvar 
= k return np.array(hmat)
python
def hessian(pars, x, y): """ Create a hessian matrix corresponding to the source model 'pars' Only parameters that vary will contribute to the hessian. Thus there will be a total of nvar x nvar entries, each of which is a len(x) x len(y) array. Parameters ---------- pars : lmfit.Parameters The model x, y : list locations at which to evaluate the Hessian Returns ------- h : np.array Hessian. Shape will be (nvar, nvar, len(x), len(y)) See Also -------- :func:`AegeanTools.fitting.emp_hessian` """ j = 0 # keeping track of the number of variable parameters # total number of variable parameters ntvar = np.sum([pars[k].vary for k in pars.keys() if k != 'components']) # construct an empty matrix of the correct size hmat = np.zeros((ntvar, ntvar, x.shape[0], x.shape[1])) npvar = 0 for i in range(pars['components'].value): prefix = "c{0}_".format(i) amp = pars[prefix + 'amp'].value xo = pars[prefix + 'xo'].value yo = pars[prefix + 'yo'].value sx = pars[prefix + 'sx'].value sy = pars[prefix + 'sy'].value theta = pars[prefix + 'theta'].value amp_var = pars[prefix + 'amp'].vary xo_var = pars[prefix + 'xo'].vary yo_var = pars[prefix + 'yo'].vary sx_var = pars[prefix + 'sx'].vary sy_var = pars[prefix + 'sy'].vary theta_var = pars[prefix + 'theta'].vary # precomputed for speed model = elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) sint = np.sin(np.radians(theta)) sin2t = np.sin(np.radians(2*theta)) cost = np.cos(np.radians(theta)) cos2t = np.cos(np.radians(2*theta)) sx2 = sx**2 sy2 = sy**2 xxo = x-xo yyo = y-yo xcos, ycos = xxo*cost, yyo*cost xsin, ysin = xxo*sint, yyo*sint if amp_var: k = npvar # second round of keeping track of variable params # H(amp,amp)/G = 0 hmat[j][k] = 0 k += 1 if xo_var: # H(amp,xo)/G = 1.0*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))/(amp*sx**2*sy**2) hmat[j][k] = (xsin - ycos)*sint/sy2 + (xcos + ysin)*cost/sx2 hmat[j][k] *= model k += 1 if yo_var: # H(amp,yo)/G = 1.0*(-sx**2*((x - 
xo)*sin(t) + (-y + yo)*cos(t))*cos(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t))/(amp*sx**2*sy**2) hmat[j][k] = -(xsin - ycos)*cost/sy2 + (xcos + ysin)*sint/sx2 hmat[j][k] *= model/amp k += 1 if sx_var: # H(amp,sx)/G = 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2/(amp*sx**3) hmat[j][k] = (xcos + ysin)**2 hmat[j][k] *= model/(amp*sx**3) k += 1 if sy_var: # H(amp,sy) = 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2/(amp*sy**3) hmat[j][k] = (xsin - ycos)**2 hmat[j][k] *= model/(amp*sy**3) k += 1 if theta_var: # H(amp,t) = (-1.0*sx**2 + sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(amp*sx**2*sy**2) hmat[j][k] = (xsin - ycos)*(xcos + ysin) hmat[j][k] *= sy2-sx2 hmat[j][k] *= model/(amp*sx2*sy2) # k += 1 j += 1 if xo_var: k = npvar if amp_var: # H(xo,amp)/G = H(amp,xo) hmat[j][k] = hmat[k][j] k += 1 # if xo_var: # H(xo,xo)/G = 1.0*(-sx**2*sy**2*(sx**2*sin(t)**2 + sy**2*cos(t)**2) + (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))**2)/(sx**4*sy**4) hmat[j][k] = -sx2*sy2*(sx2*sint**2 + sy2*cost**2) hmat[j][k] += (sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost)**2 hmat[j][k] *= model/ (sx2**2*sy2**2) k += 1 if yo_var: # H(xo,yo)/G = 1.0*(sx**2*sy**2*(sx**2 - sy**2)*sin(2*t)/2 - (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**4*sy**4) hmat[j][k] = sx2*sy2*(sx2 - sy2)*sin2t/2 hmat[j][k] -= (sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost)*(sx2*(xsin -ycos)*cost - sy2*(xcos + ysin)*sint) hmat[j][k] *= model / (sx**4*sy**4) k += 1 if sx_var: # H(xo,sx) = ((x - xo)*cos(t) + (y - yo)*sin(t))*(-2.0*sx**2*sy**2*cos(t) + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx**5*sy**2) hmat[j][k] = (xcos + ysin) hmat[j][k] *= 
-2*sx2*sy2*cost + (xcos + ysin)*(sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost) hmat[j][k] *= model / (sx**5*sy2) k += 1 if sy_var: # H(xo,sy) = ((x - xo)*sin(t) + (-y + yo)*cos(t))*(-2.0*sx**2*sy**2*sin(t) + 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx2*sy**5) hmat[j][k] = (xsin - ycos) hmat[j][k] *= -2*sx2*sy2*sint + (xsin - ycos)*(sx2*(xsin - ycos)*sint + sy2*(xcos + ysin)*cost) hmat[j][k] *= model/(sx2*sy**5) k += 1 if theta_var: # H(xo,t) = 1.0*(sx**2*sy**2*(sx**2 - sy**2)*(x*sin(2*t) - xo*sin(2*t) - y*cos(2*t) + yo*cos(2*t)) + (-sx**2 + 1.0*sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*sin(t) + sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*cos(t)))/(sx**4*sy**4) # second part hmat[j][k] = (sy2-sx2)*(xsin - ycos)*(xcos + ysin) hmat[j][k] *= sx2*(xsin -ycos)*sint + sy2*(xcos + ysin)*cost # first part hmat[j][k] += sx2*sy2*(sx2 - sy2)*(xxo*sin2t -yyo*cos2t) hmat[j][k] *= model/(sx**4*sy**4) # k += 1 j += 1 if yo_var: k = npvar if amp_var: # H(yo,amp)/G = H(amp,yo) hmat[j][k] = hmat[0][2] k += 1 if xo_var: # H(yo,xo)/G = H(xo,yo)/G hmat[j][k] =hmat[1][2] k += 1 # if yo_var: # H(yo,yo)/G = 1.0*(-sx**2*sy**2*(sx**2*cos(t)**2 + sy**2*sin(t)**2) + (sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t))**2)/(sx**4*sy**4) hmat[j][k] = (sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint)**2 / (sx2**2*sy2**2) hmat[j][k] -= cost**2/sy2 + sint**2/sx2 hmat[j][k] *= model k += 1 if sx_var: # H(yo,sx)/G = -((x - xo)*cos(t) + (y - yo)*sin(t))*(2.0*sx**2*sy**2*sin(t) + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) - (y - yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**5*sy**2) hmat[j][k] = -1*(xcos + ysin) hmat[j][k] *= 2*sx2*sy2*sint + (xcos + ysin)*(sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint) hmat[j][k] 
*= model/(sx**5*sy2) k += 1 if sy_var: # H(yo,sy)/G = ((x - xo)*sin(t) + (-y + yo)*cos(t))*(2.0*sx**2*sy**2*cos(t) - 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**2*sy**5) hmat[j][k] = (xsin -ycos) hmat[j][k] *= 2*sx2*sy2*cost - (xsin - ycos)*(sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint) hmat[j][k] *= model/(sx2*sy**5) k += 1 if theta_var: # H(yo,t)/G = 1.0*(sx**2*sy**2*(sx**2*(-x*cos(2*t) + xo*cos(2*t) - y*sin(2*t) + yo*sin(2*t)) + sy**2*(x*cos(2*t) - xo*cos(2*t) + y*sin(2*t) - yo*sin(2*t))) + (1.0*sx**2 - sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))*(sx**2*((x - xo)*sin(t) + (-y + yo)*cos(t))*cos(t) - sy**2*((x - xo)*cos(t) + (y - yo)*sin(t))*sin(t)))/(sx**4*sy**4) hmat[j][k] = (sx2 - sy2)*(xsin - ycos)*(xcos + ysin) hmat[j][k] *= (sx2*(xsin - ycos)*cost - sy2*(xcos + ysin)*sint) hmat[j][k] += sx2*sy2*(sx2-sy2)*(-x*cos2t + xo*cos2t - y*sin2t + yo*sin2t) hmat[j][k] *= model/(sx**4*sy**4) # k += 1 j += 1 if sx_var: k = npvar if amp_var: # H(sx,amp)/G = H(amp,sx)/G hmat[j][k] = hmat[k][j] k += 1 if xo_var: # H(sx,xo)/G = H(xo,sx)/G hmat[j][k] = hmat[k][j] k += 1 if yo_var: # H(sx,yo)/G = H(yo/sx)/G hmat[j][k] = hmat[k][j] k += 1 # if sx_var: # H(sx,sx)/G = (-3.0*sx**2 + 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2)*((x - xo)*cos(t) + (y - yo)*sin(t))**2/sx**6 hmat[j][k] = -3*sx2 + (xcos + ysin)**2 hmat[j][k] *= (xcos + ysin)**2 hmat[j][k] *= model/sx**6 k += 1 if sy_var: # H(sx,sy)/G = 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2*((x - xo)*cos(t) + (y - yo)*sin(t))**2/(sx**3*sy**3) hmat[j][k] = (xsin - ycos)**2 * (xcos + ysin)**2 hmat[j][k] *= model/(sx**3*sy**3) k += 1 if theta_var: # H(sx,t)/G = (-2.0*sx**2*sy**2 + 1.0*(-sx**2 + sy**2)*((x - xo)*cos(t) + (y - yo)*sin(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(sx**5*sy**2) hmat[j][k] = -2*sx2*sy2 + (sy2 - sx2)*(xcos + ysin)**2 hmat[j][k] 
*= (xsin -ycos)*(xcos + ysin) hmat[j][k] *= model/(sx**5*sy**2) # k += 1 j += 1 if sy_var: k = npvar if amp_var: # H(sy,amp)/G = H(amp,sy)/G hmat[j][k] = hmat[k][j] k += 1 if xo_var: # H(sy,xo)/G = H(xo,sy)/G hmat[j][k] = hmat[k][j] k += 1 if yo_var: # H(sy,yo)/G = H(yo/sy)/G hmat[j][k] = hmat[k][j] k += 1 if sx_var: # H(sy,sx)/G = H(sx,sy)/G hmat[j][k] = hmat[k][j] k += 1 # if sy_var: # H(sy,sy)/G = (-3.0*sy**2 + 1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))**2/sy**6 hmat[j][k] = -3*sy2 + (xsin - ycos)**2 hmat[j][k] *= (xsin - ycos)**2 hmat[j][k] *= model/sy**6 k += 1 if theta_var: # H(sy,t)/G = (2.0*sx**2*sy**2 + 1.0*(-sx**2 + sy**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))**2)*((x - xo)*sin(t) + (-y + yo)*cos(t))*((x - xo)*cos(t) + (y - yo)*sin(t))/(sx**2*sy**5) hmat[j][k] = 2*sx2*sy2 + (sy2 - sx2)*(xsin - ycos)**2 hmat[j][k] *= (xsin - ycos)*(xcos + ysin) hmat[j][k] *= model/(sx**2*sy**5) # k += 1 j += 1 if theta_var: k = npvar if amp_var: # H(t,amp)/G = H(amp,t)/G hmat[j][k] = hmat[k][j] k += 1 if xo_var: # H(t,xo)/G = H(xo,t)/G hmat[j][k] = hmat[k][j] k += 1 if yo_var: # H(t,yo)/G = H(yo/t)/G hmat[j][k] = hmat[k][j] k += 1 if sx_var: # H(t,sx)/G = H(sx,t)/G hmat[j][k] = hmat[k][j] k += 1 if sy_var: # H(t,sy)/G = H(sy,t)/G hmat[j][k] = hmat[k][j] k += 1 # if theta_var: # H(t,t)/G = (sx**2*sy**2*(sx**2*(((x - xo)*sin(t) + (-y + yo)*cos(t))**2 - 1.0*((x - xo)*cos(t) + (y - yo)*sin(t))**2) + sy**2*(-1.0*((x - xo)*sin(t) + (-y + yo)*cos(t))**2 + ((x - xo)*cos(t) + (y - yo)*sin(t))**2)) + (sx**2 - 1.0*sy**2)**2*((x - xo)*sin(t) + (-y + yo)*cos(t))**2*((x - xo)*cos(t) + (y - yo)*sin(t))**2)/(sx**4*sy**4) hmat[j][k] = sx2*sy2 hmat[j][k] *= sx2*((xsin - ycos)**2 - (xcos + ysin)**2) + sy2*((xcos + ysin)**2 - (xsin - ycos)**2) hmat[j][k] += (sx2 - sy2)**2*(xsin - ycos)**2*(xcos + ysin)**2 hmat[j][k] *= model/(sx**4*sy**4) # j += 1 # save the number of variables for the next iteration # as we need to start our indexing at this number npvar 
= k return np.array(hmat)
[ "def", "hessian", "(", "pars", ",", "x", ",", "y", ")", ":", "j", "=", "0", "# keeping track of the number of variable parameters", "# total number of variable parameters", "ntvar", "=", "np", ".", "sum", "(", "[", "pars", "[", "k", "]", ".", "vary", "for", ...
Create a hessian matrix corresponding to the source model 'pars' Only parameters that vary will contribute to the hessian. Thus there will be a total of nvar x nvar entries, each of which is a len(x) x len(y) array. Parameters ---------- pars : lmfit.Parameters The model x, y : list locations at which to evaluate the Hessian Returns ------- h : np.array Hessian. Shape will be (nvar, nvar, len(x), len(y)) See Also -------- :func:`AegeanTools.fitting.emp_hessian`
[ "Create", "a", "hessian", "matrix", "corresponding", "to", "the", "source", "model", "pars", "Only", "parameters", "that", "vary", "will", "contribute", "to", "the", "hessian", ".", "Thus", "there", "will", "be", "a", "total", "of", "nvar", "x", "nvar", "e...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L282-L572
train
PaulHancock/Aegean
AegeanTools/fitting.py
emp_hessian
def emp_hessian(pars, x, y): """ Calculate the hessian matrix empirically. Create a hessian matrix corresponding to the source model 'pars' Only parameters that vary will contribute to the hessian. Thus there will be a total of nvar x nvar entries, each of which is a len(x) x len(y) array. Parameters ---------- pars : lmfit.Parameters The model x, y : list locations at which to evaluate the Hessian Returns ------- h : np.array Hessian. Shape will be (nvar, nvar, len(x), len(y)) Notes ----- Uses :func:`AegeanTools.fitting.emp_jacobian` to calculate the first order derivatives. See Also -------- :func:`AegeanTools.fitting.hessian` """ eps = 1e-5 matrix = [] for i in range(pars['components'].value): model = emp_jacobian(pars, x, y) prefix = "c{0}_".format(i) for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']: if pars[prefix+p].vary: pars[prefix+p].value += eps dm2didj = emp_jacobian(pars, x, y) - model matrix.append(dm2didj/eps) pars[prefix+p].value -= eps matrix = np.array(matrix) return matrix
python
def emp_hessian(pars, x, y): """ Calculate the hessian matrix empirically. Create a hessian matrix corresponding to the source model 'pars' Only parameters that vary will contribute to the hessian. Thus there will be a total of nvar x nvar entries, each of which is a len(x) x len(y) array. Parameters ---------- pars : lmfit.Parameters The model x, y : list locations at which to evaluate the Hessian Returns ------- h : np.array Hessian. Shape will be (nvar, nvar, len(x), len(y)) Notes ----- Uses :func:`AegeanTools.fitting.emp_jacobian` to calculate the first order derivatives. See Also -------- :func:`AegeanTools.fitting.hessian` """ eps = 1e-5 matrix = [] for i in range(pars['components'].value): model = emp_jacobian(pars, x, y) prefix = "c{0}_".format(i) for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']: if pars[prefix+p].vary: pars[prefix+p].value += eps dm2didj = emp_jacobian(pars, x, y) - model matrix.append(dm2didj/eps) pars[prefix+p].value -= eps matrix = np.array(matrix) return matrix
[ "def", "emp_hessian", "(", "pars", ",", "x", ",", "y", ")", ":", "eps", "=", "1e-5", "matrix", "=", "[", "]", "for", "i", "in", "range", "(", "pars", "[", "'components'", "]", ".", "value", ")", ":", "model", "=", "emp_jacobian", "(", "pars", ","...
Calculate the hessian matrix empirically. Create a hessian matrix corresponding to the source model 'pars' Only parameters that vary will contribute to the hessian. Thus there will be a total of nvar x nvar entries, each of which is a len(x) x len(y) array. Parameters ---------- pars : lmfit.Parameters The model x, y : list locations at which to evaluate the Hessian Returns ------- h : np.array Hessian. Shape will be (nvar, nvar, len(x), len(y)) Notes ----- Uses :func:`AegeanTools.fitting.emp_jacobian` to calculate the first order derivatives. See Also -------- :func:`AegeanTools.fitting.hessian`
[ "Calculate", "the", "hessian", "matrix", "empirically", ".", "Create", "a", "hessian", "matrix", "corresponding", "to", "the", "source", "model", "pars", "Only", "parameters", "that", "vary", "will", "contribute", "to", "the", "hessian", ".", "Thus", "there", ...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L575-L615
train
PaulHancock/Aegean
AegeanTools/fitting.py
nan_acf
def nan_acf(noise): """ Calculate the autocorrelation function of the noise where the noise is a 2d array that may contain nans Parameters ---------- noise : 2d-array Noise image. Returns ------- acf : 2d-array The ACF. """ corr = np.zeros(noise.shape) ix,jx = noise.shape for i in range(ix): si_min = slice(i, None, None) si_max = slice(None, ix-i, None) for j in range(jx): sj_min = slice(j, None, None) sj_max = slice(None, jx-j, None) if np.all(np.isnan(noise[si_min, sj_min])) or np.all(np.isnan(noise[si_max, sj_max])): corr[i, j] = np.nan else: corr[i, j] = np.nansum(noise[si_min, sj_min] * noise[si_max, sj_max]) # return the normalised acf return corr / np.nanmax(corr)
python
def nan_acf(noise): """ Calculate the autocorrelation function of the noise where the noise is a 2d array that may contain nans Parameters ---------- noise : 2d-array Noise image. Returns ------- acf : 2d-array The ACF. """ corr = np.zeros(noise.shape) ix,jx = noise.shape for i in range(ix): si_min = slice(i, None, None) si_max = slice(None, ix-i, None) for j in range(jx): sj_min = slice(j, None, None) sj_max = slice(None, jx-j, None) if np.all(np.isnan(noise[si_min, sj_min])) or np.all(np.isnan(noise[si_max, sj_max])): corr[i, j] = np.nan else: corr[i, j] = np.nansum(noise[si_min, sj_min] * noise[si_max, sj_max]) # return the normalised acf return corr / np.nanmax(corr)
[ "def", "nan_acf", "(", "noise", ")", ":", "corr", "=", "np", ".", "zeros", "(", "noise", ".", "shape", ")", "ix", ",", "jx", "=", "noise", ".", "shape", "for", "i", "in", "range", "(", "ix", ")", ":", "si_min", "=", "slice", "(", "i", ",", "N...
Calculate the autocorrelation function of the noise where the noise is a 2d array that may contain nans Parameters ---------- noise : 2d-array Noise image. Returns ------- acf : 2d-array The ACF.
[ "Calculate", "the", "autocorrelation", "function", "of", "the", "noise", "where", "the", "noise", "is", "a", "2d", "array", "that", "may", "contain", "nans" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L618-L647
train
PaulHancock/Aegean
AegeanTools/fitting.py
make_ita
def make_ita(noise, acf=None): """ Create the matrix ita of the noise where the noise may be a masked array where ita(x,y) is the correlation between pixel pairs that have the same separation as x and y. Parameters ---------- noise : 2d-array The noise image acf : 2d-array The autocorrelation matrix. (None = calculate from data). Default = None. Returns ------- ita : 2d-array The matrix ita """ if acf is None: acf = nan_acf(noise) # s should be the number of non-masked pixels s = np.count_nonzero(np.isfinite(noise)) # the indices of the non-masked pixels xm, ym = np.where(np.isfinite(noise)) ita = np.zeros((s, s)) # iterate over the pixels for i, (x1, y1) in enumerate(zip(xm, ym)): for j, (x2, y2) in enumerate(zip(xm, ym)): k = abs(x1-x2) l = abs(y1-y2) ita[i, j] = acf[k, l] return ita
python
def make_ita(noise, acf=None): """ Create the matrix ita of the noise where the noise may be a masked array where ita(x,y) is the correlation between pixel pairs that have the same separation as x and y. Parameters ---------- noise : 2d-array The noise image acf : 2d-array The autocorrelation matrix. (None = calculate from data). Default = None. Returns ------- ita : 2d-array The matrix ita """ if acf is None: acf = nan_acf(noise) # s should be the number of non-masked pixels s = np.count_nonzero(np.isfinite(noise)) # the indices of the non-masked pixels xm, ym = np.where(np.isfinite(noise)) ita = np.zeros((s, s)) # iterate over the pixels for i, (x1, y1) in enumerate(zip(xm, ym)): for j, (x2, y2) in enumerate(zip(xm, ym)): k = abs(x1-x2) l = abs(y1-y2) ita[i, j] = acf[k, l] return ita
[ "def", "make_ita", "(", "noise", ",", "acf", "=", "None", ")", ":", "if", "acf", "is", "None", ":", "acf", "=", "nan_acf", "(", "noise", ")", "# s should be the number of non-masked pixels", "s", "=", "np", ".", "count_nonzero", "(", "np", ".", "isfinite",...
Create the matrix ita of the noise where the noise may be a masked array where ita(x,y) is the correlation between pixel pairs that have the same separation as x and y. Parameters ---------- noise : 2d-array The noise image acf : 2d-array The autocorrelation matrix. (None = calculate from data). Default = None. Returns ------- ita : 2d-array The matrix ita
[ "Create", "the", "matrix", "ita", "of", "the", "noise", "where", "the", "noise", "may", "be", "a", "masked", "array", "where", "ita", "(", "x", "y", ")", "is", "the", "correlation", "between", "pixel", "pairs", "that", "have", "the", "same", "separation"...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L650-L682
train
PaulHancock/Aegean
AegeanTools/fitting.py
RB_bias
def RB_bias(data, pars, ita=None, acf=None): """ Calculate the expected bias on each of the parameters in the model pars. Only parameters that are allowed to vary will have a bias. Calculation follows the description of Refrieger & Brown 1998 (cite). Parameters ---------- data : 2d-array data that was fit pars : lmfit.Parameters The model ita : 2d-array The ita matrix (optional). acf : 2d-array The acf for the data. Returns ------- bias : array The bias on each of the parameters """ log.info("data {0}".format(data.shape)) nparams = np.sum([pars[k].vary for k in pars.keys() if k != 'components']) # masked pixels xm, ym = np.where(np.isfinite(data)) # all pixels x, y = np.indices(data.shape) # Create the jacobian as an AxN array accounting for the masked pixels j = np.array(np.vsplit(lmfit_jacobian(pars, xm, ym).T, nparams)).reshape(nparams, -1) h = hessian(pars, x, y) # mask the hessian to be AxAxN array h = h[:, :, xm, ym] Hij = np.einsum('ik,jk', j, j) Dij = np.linalg.inv(Hij) Bijk = np.einsum('ip,jkp', j, h) Eilkm = np.einsum('il,km', Dij, Dij) Cimn_1 = -1 * np.einsum('krj,ir,km,jn', Bijk, Dij, Dij, Dij) Cimn_2 = -1./2 * np.einsum('rkj,ir,km,jn', Bijk, Dij, Dij, Dij) Cimn = Cimn_1 + Cimn_2 if ita is None: # N is the noise (data-model) N = data - ntwodgaussian_lmfit(pars)(x, y) if acf is None: acf = nan_acf(N) ita = make_ita(N, acf=acf) log.info('acf.shape {0}'.format(acf.shape)) log.info('acf[0] {0}'.format(acf[0])) log.info('ita.shape {0}'.format(ita.shape)) log.info('ita[0] {0}'.format(ita[0])) # Included for completeness but not required # now mask/ravel the noise # N = N[np.isfinite(N)].ravel() # Pi = np.einsum('ip,p', j, N) # Qij = np.einsum('ijp,p', h, N) Vij = np.einsum('ip,jq,pq', j, j, ita) Uijk = np.einsum('ip,jkq,pq', j, h, ita) bias_1 = np.einsum('imn, mn', Cimn, Vij) bias_2 = np.einsum('ilkm, mlk', Eilkm, Uijk) bias = bias_1 + bias_2 log.info('bias {0}'.format(bias)) return bias
python
def RB_bias(data, pars, ita=None, acf=None): """ Calculate the expected bias on each of the parameters in the model pars. Only parameters that are allowed to vary will have a bias. Calculation follows the description of Refrieger & Brown 1998 (cite). Parameters ---------- data : 2d-array data that was fit pars : lmfit.Parameters The model ita : 2d-array The ita matrix (optional). acf : 2d-array The acf for the data. Returns ------- bias : array The bias on each of the parameters """ log.info("data {0}".format(data.shape)) nparams = np.sum([pars[k].vary for k in pars.keys() if k != 'components']) # masked pixels xm, ym = np.where(np.isfinite(data)) # all pixels x, y = np.indices(data.shape) # Create the jacobian as an AxN array accounting for the masked pixels j = np.array(np.vsplit(lmfit_jacobian(pars, xm, ym).T, nparams)).reshape(nparams, -1) h = hessian(pars, x, y) # mask the hessian to be AxAxN array h = h[:, :, xm, ym] Hij = np.einsum('ik,jk', j, j) Dij = np.linalg.inv(Hij) Bijk = np.einsum('ip,jkp', j, h) Eilkm = np.einsum('il,km', Dij, Dij) Cimn_1 = -1 * np.einsum('krj,ir,km,jn', Bijk, Dij, Dij, Dij) Cimn_2 = -1./2 * np.einsum('rkj,ir,km,jn', Bijk, Dij, Dij, Dij) Cimn = Cimn_1 + Cimn_2 if ita is None: # N is the noise (data-model) N = data - ntwodgaussian_lmfit(pars)(x, y) if acf is None: acf = nan_acf(N) ita = make_ita(N, acf=acf) log.info('acf.shape {0}'.format(acf.shape)) log.info('acf[0] {0}'.format(acf[0])) log.info('ita.shape {0}'.format(ita.shape)) log.info('ita[0] {0}'.format(ita[0])) # Included for completeness but not required # now mask/ravel the noise # N = N[np.isfinite(N)].ravel() # Pi = np.einsum('ip,p', j, N) # Qij = np.einsum('ijp,p', h, N) Vij = np.einsum('ip,jq,pq', j, j, ita) Uijk = np.einsum('ip,jkq,pq', j, h, ita) bias_1 = np.einsum('imn, mn', Cimn, Vij) bias_2 = np.einsum('ilkm, mlk', Eilkm, Uijk) bias = bias_1 + bias_2 log.info('bias {0}'.format(bias)) return bias
[ "def", "RB_bias", "(", "data", ",", "pars", ",", "ita", "=", "None", ",", "acf", "=", "None", ")", ":", "log", ".", "info", "(", "\"data {0}\"", ".", "format", "(", "data", ".", "shape", ")", ")", "nparams", "=", "np", ".", "sum", "(", "[", "pa...
Calculate the expected bias on each of the parameters in the model pars. Only parameters that are allowed to vary will have a bias. Calculation follows the description of Refrieger & Brown 1998 (cite). Parameters ---------- data : 2d-array data that was fit pars : lmfit.Parameters The model ita : 2d-array The ita matrix (optional). acf : 2d-array The acf for the data. Returns ------- bias : array The bias on each of the parameters
[ "Calculate", "the", "expected", "bias", "on", "each", "of", "the", "parameters", "in", "the", "model", "pars", ".", "Only", "parameters", "that", "are", "allowed", "to", "vary", "will", "have", "a", "bias", ".", "Calculation", "follows", "the", "description"...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L685-L757
train
PaulHancock/Aegean
AegeanTools/fitting.py
bias_correct
def bias_correct(params, data, acf=None): """ Calculate and apply a bias correction to the given fit parameters Parameters ---------- params : lmfit.Parameters The model parameters. These will be modified. data : 2d-array The data which was used in the fitting acf : 2d-array ACF of the data. Default = None. Returns ------- None See Also -------- :func:`AegeanTools.fitting.RB_bias` """ bias = RB_bias(data, params, acf=acf) i = 0 for p in params: if 'theta' in p: continue if params[p].vary: params[p].value -= bias[i] i += 1 return
python
def bias_correct(params, data, acf=None): """ Calculate and apply a bias correction to the given fit parameters Parameters ---------- params : lmfit.Parameters The model parameters. These will be modified. data : 2d-array The data which was used in the fitting acf : 2d-array ACF of the data. Default = None. Returns ------- None See Also -------- :func:`AegeanTools.fitting.RB_bias` """ bias = RB_bias(data, params, acf=acf) i = 0 for p in params: if 'theta' in p: continue if params[p].vary: params[p].value -= bias[i] i += 1 return
[ "def", "bias_correct", "(", "params", ",", "data", ",", "acf", "=", "None", ")", ":", "bias", "=", "RB_bias", "(", "data", ",", "params", ",", "acf", "=", "acf", ")", "i", "=", "0", "for", "p", "in", "params", ":", "if", "'theta'", "in", "p", "...
Calculate and apply a bias correction to the given fit parameters Parameters ---------- params : lmfit.Parameters The model parameters. These will be modified. data : 2d-array The data which was used in the fitting acf : 2d-array ACF of the data. Default = None. Returns ------- None See Also -------- :func:`AegeanTools.fitting.RB_bias`
[ "Calculate", "and", "apply", "a", "bias", "correction", "to", "the", "given", "fit", "parameters" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L760-L792
train
PaulHancock/Aegean
AegeanTools/fitting.py
condon_errors
def condon_errors(source, theta_n, psf=None): """ Calculate the parameter errors for a fitted source using the description of Condon'97 All parameters are assigned errors, assuming that all params were fit. If some params were held fixed then these errors are overestimated. Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. theta_n : float or None A measure of the beam sampling. (See Condon'97). psf : :class:`AegeanTools.fits_image.Beam` The psf at the location of the source. Returns ------- None """ # indices for the calculation or rho alphas = {'amp': (3. / 2, 3. / 2), 'major': (5. / 2, 1. / 2), 'xo': (5. / 2, 1. / 2), 'minor': (1. / 2, 5. / 2), 'yo': (1. / 2, 5. / 2), 'pa': (1. / 2, 5. / 2)} major = source.a / 3600. # degrees minor = source.b / 3600. # degrees phi = np.radians(source.pa) # radians if psf is not None: beam = psf.get_beam(source.ra, source.dec) if beam is not None: theta_n = np.hypot(beam.a, beam.b) print(beam, theta_n) if theta_n is None: source.err_a = source.err_b = source.err_peak_flux = source.err_pa = source.err_int_flux = 0.0 return smoothing = major * minor / (theta_n ** 2) factor1 = (1 + (major / theta_n)) factor2 = (1 + (minor / theta_n)) snr = source.peak_flux / source.local_rms # calculation of rho2 depends on the parameter being used so we lambda this into a function rho2 = lambda x: smoothing / 4 * factor1 ** alphas[x][0] * factor2 ** alphas[x][1] * snr ** 2 source.err_peak_flux = source.peak_flux * np.sqrt(2 / rho2('amp')) source.err_a = major * np.sqrt(2 / rho2('major')) * 3600. # arcsec source.err_b = minor * np.sqrt(2 / rho2('minor')) * 3600. # arcsec err_xo2 = 2. / rho2('xo') * major ** 2 / (8 * np.log(2)) # Condon'97 eq 21 err_yo2 = 2. 
/ rho2('yo') * minor ** 2 / (8 * np.log(2)) source.err_ra = np.sqrt(err_xo2 * np.sin(phi)**2 + err_yo2 * np.cos(phi)**2) source.err_dec = np.sqrt(err_xo2 * np.cos(phi)**2 + err_yo2 * np.sin(phi)**2) if (major == 0) or (minor == 0): source.err_pa = ERR_MASK # if major/minor are very similar then we should not be able to figure out what pa is. elif abs(2 * (major-minor) / (major+minor)) < 0.01: source.err_pa = ERR_MASK else: source.err_pa = np.degrees(np.sqrt(4 / rho2('pa')) * (major * minor / (major ** 2 - minor ** 2))) # integrated flux error err2 = (source.err_peak_flux / source.peak_flux) ** 2 err2 += (theta_n ** 2 / (major * minor)) * ((source.err_a / source.a) ** 2 + (source.err_b / source.b) ** 2) source.err_int_flux = source.int_flux * np.sqrt(err2) return
python
def condon_errors(source, theta_n, psf=None): """ Calculate the parameter errors for a fitted source using the description of Condon'97 All parameters are assigned errors, assuming that all params were fit. If some params were held fixed then these errors are overestimated. Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. theta_n : float or None A measure of the beam sampling. (See Condon'97). psf : :class:`AegeanTools.fits_image.Beam` The psf at the location of the source. Returns ------- None """ # indices for the calculation or rho alphas = {'amp': (3. / 2, 3. / 2), 'major': (5. / 2, 1. / 2), 'xo': (5. / 2, 1. / 2), 'minor': (1. / 2, 5. / 2), 'yo': (1. / 2, 5. / 2), 'pa': (1. / 2, 5. / 2)} major = source.a / 3600. # degrees minor = source.b / 3600. # degrees phi = np.radians(source.pa) # radians if psf is not None: beam = psf.get_beam(source.ra, source.dec) if beam is not None: theta_n = np.hypot(beam.a, beam.b) print(beam, theta_n) if theta_n is None: source.err_a = source.err_b = source.err_peak_flux = source.err_pa = source.err_int_flux = 0.0 return smoothing = major * minor / (theta_n ** 2) factor1 = (1 + (major / theta_n)) factor2 = (1 + (minor / theta_n)) snr = source.peak_flux / source.local_rms # calculation of rho2 depends on the parameter being used so we lambda this into a function rho2 = lambda x: smoothing / 4 * factor1 ** alphas[x][0] * factor2 ** alphas[x][1] * snr ** 2 source.err_peak_flux = source.peak_flux * np.sqrt(2 / rho2('amp')) source.err_a = major * np.sqrt(2 / rho2('major')) * 3600. # arcsec source.err_b = minor * np.sqrt(2 / rho2('minor')) * 3600. # arcsec err_xo2 = 2. / rho2('xo') * major ** 2 / (8 * np.log(2)) # Condon'97 eq 21 err_yo2 = 2. 
/ rho2('yo') * minor ** 2 / (8 * np.log(2)) source.err_ra = np.sqrt(err_xo2 * np.sin(phi)**2 + err_yo2 * np.cos(phi)**2) source.err_dec = np.sqrt(err_xo2 * np.cos(phi)**2 + err_yo2 * np.sin(phi)**2) if (major == 0) or (minor == 0): source.err_pa = ERR_MASK # if major/minor are very similar then we should not be able to figure out what pa is. elif abs(2 * (major-minor) / (major+minor)) < 0.01: source.err_pa = ERR_MASK else: source.err_pa = np.degrees(np.sqrt(4 / rho2('pa')) * (major * minor / (major ** 2 - minor ** 2))) # integrated flux error err2 = (source.err_peak_flux / source.peak_flux) ** 2 err2 += (theta_n ** 2 / (major * minor)) * ((source.err_a / source.a) ** 2 + (source.err_b / source.b) ** 2) source.err_int_flux = source.int_flux * np.sqrt(err2) return
[ "def", "condon_errors", "(", "source", ",", "theta_n", ",", "psf", "=", "None", ")", ":", "# indices for the calculation or rho", "alphas", "=", "{", "'amp'", ":", "(", "3.", "/", "2", ",", "3.", "/", "2", ")", ",", "'major'", ":", "(", "5.", "/", "2...
Calculate the parameter errors for a fitted source using the description of Condon'97 All parameters are assigned errors, assuming that all params were fit. If some params were held fixed then these errors are overestimated. Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. theta_n : float or None A measure of the beam sampling. (See Condon'97). psf : :class:`AegeanTools.fits_image.Beam` The psf at the location of the source. Returns ------- None
[ "Calculate", "the", "parameter", "errors", "for", "a", "fitted", "source", "using", "the", "description", "of", "Condon", "97", "All", "parameters", "are", "assigned", "errors", "assuming", "that", "all", "params", "were", "fit", ".", "If", "some", "params", ...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L795-L868
train
PaulHancock/Aegean
AegeanTools/fitting.py
errors
def errors(source, model, wcshelper): """ Convert pixel based errors into sky coord errors Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. model : lmfit.Parameters The model which was fit. wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper` WCS information. Returns ------- source : :class:`AegeanTools.models.SimpleSource` The modified source obejct. """ # if the source wasn't fit then all errors are -1 if source.flags & (flags.NOTFIT | flags.FITERR): source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # copy the errors from the model prefix = "c{0}_".format(source.source) err_amp = model[prefix + 'amp'].stderr xo, yo = model[prefix + 'xo'].value, model[prefix + 'yo'].value err_xo = model[prefix + 'xo'].stderr err_yo = model[prefix + 'yo'].stderr sx, sy = model[prefix + 'sx'].value, model[prefix + 'sy'].value err_sx = model[prefix + 'sx'].stderr err_sy = model[prefix + 'sy'].stderr theta = model[prefix + 'theta'].value err_theta = model[prefix + 'theta'].stderr source.err_peak_flux = err_amp pix_errs = [err_xo, err_yo, err_sx, err_sy, err_theta] log.debug("Pix errs: {0}".format(pix_errs)) ref = wcshelper.pix2sky([xo, yo]) # check to see if the reference position has a valid WCS coordinate # It is possible for this to fail, even if the ra/dec conversion works elsewhere if not all(np.isfinite(ref)): source.flags |= flags.WCSERR source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # position errors if model[prefix + 'xo'].vary and model[prefix + 'yo'].vary \ and all(np.isfinite([err_xo, err_yo])): offset = wcshelper.pix2sky([xo + err_xo, yo + err_yo]) source.err_ra = gcd(ref[0], ref[1], offset[0], ref[1]) source.err_dec = gcd(ref[0], ref[1], ref[0], offset[1]) else: source.err_ra = source.err_dec = -1 if 
model[prefix + 'theta'].vary and np.isfinite(err_theta): # pa error off1 = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) off2 = wcshelper.pix2sky( [xo + sx * np.cos(np.radians(theta + err_theta)), yo + sy * np.sin(np.radians(theta + err_theta))]) source.err_pa = abs(bear(ref[0], ref[1], off1[0], off1[1]) - bear(ref[0], ref[1], off2[0], off2[1])) else: source.err_pa = ERR_MASK if model[prefix + 'sx'].vary and model[prefix + 'sy'].vary \ and all(np.isfinite([err_sx, err_sy])): # major axis error ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) offset = wcshelper.pix2sky( [xo + (sx + err_sx) * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) source.err_a = gcd(ref[0], ref[1], offset[0], offset[1]) * 3600 # minor axis error ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta + 90)), yo + sy * np.sin(np.radians(theta + 90))]) offset = wcshelper.pix2sky( [xo + sx * np.cos(np.radians(theta + 90)), yo + (sy + err_sy) * np.sin(np.radians(theta + 90))]) source.err_b = gcd(ref[0], ref[1], offset[0], offset[1]) * 3600 else: source.err_a = source.err_b = ERR_MASK sqerr = 0 sqerr += (source.err_peak_flux / source.peak_flux) ** 2 if source.err_peak_flux > 0 else 0 sqerr += (source.err_a / source.a) ** 2 if source.err_a > 0 else 0 sqerr += (source.err_b / source.b) ** 2 if source.err_b > 0 else 0 if sqerr == 0: source.err_int_flux = ERR_MASK else: source.err_int_flux = abs(source.int_flux * np.sqrt(sqerr)) return source
python
def errors(source, model, wcshelper): """ Convert pixel based errors into sky coord errors Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. model : lmfit.Parameters The model which was fit. wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper` WCS information. Returns ------- source : :class:`AegeanTools.models.SimpleSource` The modified source obejct. """ # if the source wasn't fit then all errors are -1 if source.flags & (flags.NOTFIT | flags.FITERR): source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # copy the errors from the model prefix = "c{0}_".format(source.source) err_amp = model[prefix + 'amp'].stderr xo, yo = model[prefix + 'xo'].value, model[prefix + 'yo'].value err_xo = model[prefix + 'xo'].stderr err_yo = model[prefix + 'yo'].stderr sx, sy = model[prefix + 'sx'].value, model[prefix + 'sy'].value err_sx = model[prefix + 'sx'].stderr err_sy = model[prefix + 'sy'].stderr theta = model[prefix + 'theta'].value err_theta = model[prefix + 'theta'].stderr source.err_peak_flux = err_amp pix_errs = [err_xo, err_yo, err_sx, err_sy, err_theta] log.debug("Pix errs: {0}".format(pix_errs)) ref = wcshelper.pix2sky([xo, yo]) # check to see if the reference position has a valid WCS coordinate # It is possible for this to fail, even if the ra/dec conversion works elsewhere if not all(np.isfinite(ref)): source.flags |= flags.WCSERR source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # position errors if model[prefix + 'xo'].vary and model[prefix + 'yo'].vary \ and all(np.isfinite([err_xo, err_yo])): offset = wcshelper.pix2sky([xo + err_xo, yo + err_yo]) source.err_ra = gcd(ref[0], ref[1], offset[0], ref[1]) source.err_dec = gcd(ref[0], ref[1], ref[0], offset[1]) else: source.err_ra = source.err_dec = -1 if 
model[prefix + 'theta'].vary and np.isfinite(err_theta): # pa error off1 = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) off2 = wcshelper.pix2sky( [xo + sx * np.cos(np.radians(theta + err_theta)), yo + sy * np.sin(np.radians(theta + err_theta))]) source.err_pa = abs(bear(ref[0], ref[1], off1[0], off1[1]) - bear(ref[0], ref[1], off2[0], off2[1])) else: source.err_pa = ERR_MASK if model[prefix + 'sx'].vary and model[prefix + 'sy'].vary \ and all(np.isfinite([err_sx, err_sy])): # major axis error ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) offset = wcshelper.pix2sky( [xo + (sx + err_sx) * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) source.err_a = gcd(ref[0], ref[1], offset[0], offset[1]) * 3600 # minor axis error ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta + 90)), yo + sy * np.sin(np.radians(theta + 90))]) offset = wcshelper.pix2sky( [xo + sx * np.cos(np.radians(theta + 90)), yo + (sy + err_sy) * np.sin(np.radians(theta + 90))]) source.err_b = gcd(ref[0], ref[1], offset[0], offset[1]) * 3600 else: source.err_a = source.err_b = ERR_MASK sqerr = 0 sqerr += (source.err_peak_flux / source.peak_flux) ** 2 if source.err_peak_flux > 0 else 0 sqerr += (source.err_a / source.a) ** 2 if source.err_a > 0 else 0 sqerr += (source.err_b / source.b) ** 2 if source.err_b > 0 else 0 if sqerr == 0: source.err_int_flux = ERR_MASK else: source.err_int_flux = abs(source.int_flux * np.sqrt(sqerr)) return source
[ "def", "errors", "(", "source", ",", "model", ",", "wcshelper", ")", ":", "# if the source wasn't fit then all errors are -1", "if", "source", ".", "flags", "&", "(", "flags", ".", "NOTFIT", "|", "flags", ".", "FITERR", ")", ":", "source", ".", "err_peak_flux"...
Convert pixel based errors into sky coord errors Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. model : lmfit.Parameters The model which was fit. wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper` WCS information. Returns ------- source : :class:`AegeanTools.models.SimpleSource` The modified source obejct.
[ "Convert", "pixel", "based", "errors", "into", "sky", "coord", "errors" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L871-L969
train
PaulHancock/Aegean
AegeanTools/fitting.py
new_errors
def new_errors(source, model, wcshelper): # pragma: no cover """ Convert pixel based errors into sky coord errors Uses covariance matrix for ra/dec errors and calculus approach to a/b/pa errors Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. model : lmfit.Parameters The model which was fit. wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper` WCS information. Returns ------- source : :class:`AegeanTools.models.SimpleSource` The modified source obejct. """ # if the source wasn't fit then all errors are -1 if source.flags & (flags.NOTFIT | flags.FITERR): source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # copy the errors/values from the model prefix = "c{0}_".format(source.source) err_amp = model[prefix + 'amp'].stderr xo, yo = model[prefix + 'xo'].value, model[prefix + 'yo'].value err_xo = model[prefix + 'xo'].stderr err_yo = model[prefix + 'yo'].stderr sx, sy = model[prefix + 'sx'].value, model[prefix + 'sy'].value err_sx = model[prefix + 'sx'].stderr err_sy = model[prefix + 'sy'].stderr theta = model[prefix + 'theta'].value err_theta = model[prefix + 'theta'].stderr # the peak flux error doesn't need to be converted, just copied source.err_peak_flux = err_amp pix_errs = [err_xo, err_yo, err_sx, err_sy, err_theta] # check for inf/nan errors -> these sources have poor fits. 
if not all(a is not None and np.isfinite(a) for a in pix_errs): source.flags |= flags.FITERR source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # calculate the reference coordinate ref = wcshelper.pix2sky([xo, yo]) # check to see if the reference position has a valid WCS coordinate # It is possible for this to fail, even if the ra/dec conversion works elsewhere if not all(np.isfinite(ref)): source.flags |= flags.WCSERR source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # calculate position errors by transforming the error ellipse if model[prefix + 'xo'].vary and model[prefix + 'yo'].vary: # determine the error ellipse from the Jacobian mat = model.covar[1:3, 1:3] if not(np.all(np.isfinite(mat))): source.err_ra = source.err_dec = ERR_MASK else: (a, b), e = np.linalg.eig(mat) pa = np.degrees(np.arctan2(*e[0])) # transform this ellipse into sky coordinates _, _, major, minor, pa = wcshelper.pix2sky_ellipse([xo, yo], a, b, pa) # now determine the radius of the ellipse along the ra/dec directions. 
source.err_ra = major*minor / np.hypot(major*np.sin(np.radians(pa)), minor*np.cos(np.radians(pa))) source.err_dec = major*minor / np.hypot(major*np.cos(np.radians(pa)), minor*np.sin(np.radians(pa))) else: source.err_ra = source.err_dec = -1 if model[prefix + 'theta'].vary: # pa error off1 = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) # offset by 1 degree off2 = wcshelper.pix2sky( [xo + sx * np.cos(np.radians(theta + 1)), yo + sy * np.sin(np.radians(theta + 1))]) # scale the initial theta error by this amount source.err_pa = abs(bear(ref[0], ref[1], off1[0], off1[1]) - bear(ref[0], ref[1], off2[0], off2[1])) * err_theta else: source.err_pa = ERR_MASK if model[prefix + 'sx'].vary and model[prefix + 'sy'].vary: # major axis error ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) # offset by 0.1 pixels offset = wcshelper.pix2sky( [xo + (sx + 0.1) * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) source.err_a = gcd(ref[0], ref[1], offset[0], offset[1])/0.1 * err_sx * 3600 # minor axis error ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta + 90)), yo + sy * np.sin(np.radians(theta + 90))]) # offset by 0.1 pixels offset = wcshelper.pix2sky( [xo + sx * np.cos(np.radians(theta + 90)), yo + (sy + 0.1) * np.sin(np.radians(theta + 90))]) source.err_b = gcd(ref[0], ref[1], offset[0], offset[1])/0.1*err_sy * 3600 else: source.err_a = source.err_b = ERR_MASK sqerr = 0 sqerr += (source.err_peak_flux / source.peak_flux) ** 2 if source.err_peak_flux > 0 else 0 sqerr += (source.err_a / source.a) ** 2 if source.err_a > 0 else 0 sqerr += (source.err_b / source.b) ** 2 if source.err_b > 0 else 0 source.err_int_flux = abs(source.int_flux * np.sqrt(sqerr)) return source
python
def new_errors(source, model, wcshelper): # pragma: no cover """ Convert pixel based errors into sky coord errors Uses covariance matrix for ra/dec errors and calculus approach to a/b/pa errors Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. model : lmfit.Parameters The model which was fit. wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper` WCS information. Returns ------- source : :class:`AegeanTools.models.SimpleSource` The modified source obejct. """ # if the source wasn't fit then all errors are -1 if source.flags & (flags.NOTFIT | flags.FITERR): source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # copy the errors/values from the model prefix = "c{0}_".format(source.source) err_amp = model[prefix + 'amp'].stderr xo, yo = model[prefix + 'xo'].value, model[prefix + 'yo'].value err_xo = model[prefix + 'xo'].stderr err_yo = model[prefix + 'yo'].stderr sx, sy = model[prefix + 'sx'].value, model[prefix + 'sy'].value err_sx = model[prefix + 'sx'].stderr err_sy = model[prefix + 'sy'].stderr theta = model[prefix + 'theta'].value err_theta = model[prefix + 'theta'].stderr # the peak flux error doesn't need to be converted, just copied source.err_peak_flux = err_amp pix_errs = [err_xo, err_yo, err_sx, err_sy, err_theta] # check for inf/nan errors -> these sources have poor fits. 
if not all(a is not None and np.isfinite(a) for a in pix_errs): source.flags |= flags.FITERR source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # calculate the reference coordinate ref = wcshelper.pix2sky([xo, yo]) # check to see if the reference position has a valid WCS coordinate # It is possible for this to fail, even if the ra/dec conversion works elsewhere if not all(np.isfinite(ref)): source.flags |= flags.WCSERR source.err_peak_flux = source.err_a = source.err_b = source.err_pa = ERR_MASK source.err_ra = source.err_dec = source.err_int_flux = ERR_MASK return source # calculate position errors by transforming the error ellipse if model[prefix + 'xo'].vary and model[prefix + 'yo'].vary: # determine the error ellipse from the Jacobian mat = model.covar[1:3, 1:3] if not(np.all(np.isfinite(mat))): source.err_ra = source.err_dec = ERR_MASK else: (a, b), e = np.linalg.eig(mat) pa = np.degrees(np.arctan2(*e[0])) # transform this ellipse into sky coordinates _, _, major, minor, pa = wcshelper.pix2sky_ellipse([xo, yo], a, b, pa) # now determine the radius of the ellipse along the ra/dec directions. 
source.err_ra = major*minor / np.hypot(major*np.sin(np.radians(pa)), minor*np.cos(np.radians(pa))) source.err_dec = major*minor / np.hypot(major*np.cos(np.radians(pa)), minor*np.sin(np.radians(pa))) else: source.err_ra = source.err_dec = -1 if model[prefix + 'theta'].vary: # pa error off1 = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) # offset by 1 degree off2 = wcshelper.pix2sky( [xo + sx * np.cos(np.radians(theta + 1)), yo + sy * np.sin(np.radians(theta + 1))]) # scale the initial theta error by this amount source.err_pa = abs(bear(ref[0], ref[1], off1[0], off1[1]) - bear(ref[0], ref[1], off2[0], off2[1])) * err_theta else: source.err_pa = ERR_MASK if model[prefix + 'sx'].vary and model[prefix + 'sy'].vary: # major axis error ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) # offset by 0.1 pixels offset = wcshelper.pix2sky( [xo + (sx + 0.1) * np.cos(np.radians(theta)), yo + sy * np.sin(np.radians(theta))]) source.err_a = gcd(ref[0], ref[1], offset[0], offset[1])/0.1 * err_sx * 3600 # minor axis error ref = wcshelper.pix2sky([xo + sx * np.cos(np.radians(theta + 90)), yo + sy * np.sin(np.radians(theta + 90))]) # offset by 0.1 pixels offset = wcshelper.pix2sky( [xo + sx * np.cos(np.radians(theta + 90)), yo + (sy + 0.1) * np.sin(np.radians(theta + 90))]) source.err_b = gcd(ref[0], ref[1], offset[0], offset[1])/0.1*err_sy * 3600 else: source.err_a = source.err_b = ERR_MASK sqerr = 0 sqerr += (source.err_peak_flux / source.peak_flux) ** 2 if source.err_peak_flux > 0 else 0 sqerr += (source.err_a / source.a) ** 2 if source.err_a > 0 else 0 sqerr += (source.err_b / source.b) ** 2 if source.err_b > 0 else 0 source.err_int_flux = abs(source.int_flux * np.sqrt(sqerr)) return source
[ "def", "new_errors", "(", "source", ",", "model", ",", "wcshelper", ")", ":", "# pragma: no cover", "# if the source wasn't fit then all errors are -1", "if", "source", ".", "flags", "&", "(", "flags", ".", "NOTFIT", "|", "flags", ".", "FITERR", ")", ":", "sourc...
Convert pixel based errors into sky coord errors Uses covariance matrix for ra/dec errors and calculus approach to a/b/pa errors Parameters ---------- source : :class:`AegeanTools.models.SimpleSource` The source which was fit. model : lmfit.Parameters The model which was fit. wcshelper : :class:`AegeanTools.wcs_helpers.WCSHelper` WCS information. Returns ------- source : :class:`AegeanTools.models.SimpleSource` The modified source obejct.
[ "Convert", "pixel", "based", "errors", "into", "sky", "coord", "errors", "Uses", "covariance", "matrix", "for", "ra", "/", "dec", "errors", "and", "calculus", "approach", "to", "a", "/", "b", "/", "pa", "errors" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L972-L1088
train
PaulHancock/Aegean
AegeanTools/fitting.py
ntwodgaussian_lmfit
def ntwodgaussian_lmfit(params): """ Convert an lmfit.Parameters object into a function which calculates the model. Parameters ---------- params : lmfit.Parameters Model parameters, can have multiple components. Returns ------- model : func A function f(x,y) that will compute the model. """ def rfunc(x, y): """ Compute the model given by params, at pixel coordinates x,y Parameters ---------- x, y : numpy.ndarray The x/y pixel coordinates at which the model is being evaluated Returns ------- result : numpy.ndarray Model """ result = None for i in range(params['components'].value): prefix = "c{0}_".format(i) # I hope this doesn't kill our run time amp = np.nan_to_num(params[prefix + 'amp'].value) xo = params[prefix + 'xo'].value yo = params[prefix + 'yo'].value sx = params[prefix + 'sx'].value sy = params[prefix + 'sy'].value theta = params[prefix + 'theta'].value if result is not None: result += elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) else: result = elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) return result return rfunc
python
def ntwodgaussian_lmfit(params): """ Convert an lmfit.Parameters object into a function which calculates the model. Parameters ---------- params : lmfit.Parameters Model parameters, can have multiple components. Returns ------- model : func A function f(x,y) that will compute the model. """ def rfunc(x, y): """ Compute the model given by params, at pixel coordinates x,y Parameters ---------- x, y : numpy.ndarray The x/y pixel coordinates at which the model is being evaluated Returns ------- result : numpy.ndarray Model """ result = None for i in range(params['components'].value): prefix = "c{0}_".format(i) # I hope this doesn't kill our run time amp = np.nan_to_num(params[prefix + 'amp'].value) xo = params[prefix + 'xo'].value yo = params[prefix + 'yo'].value sx = params[prefix + 'sx'].value sy = params[prefix + 'sy'].value theta = params[prefix + 'theta'].value if result is not None: result += elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) else: result = elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) return result return rfunc
[ "def", "ntwodgaussian_lmfit", "(", "params", ")", ":", "def", "rfunc", "(", "x", ",", "y", ")", ":", "\"\"\"\n Compute the model given by params, at pixel coordinates x,y\n\n Parameters\n ----------\n x, y : numpy.ndarray\n The x/y pixel coordinates...
Convert an lmfit.Parameters object into a function which calculates the model. Parameters ---------- params : lmfit.Parameters Model parameters, can have multiple components. Returns ------- model : func A function f(x,y) that will compute the model.
[ "Convert", "an", "lmfit", ".", "Parameters", "object", "into", "a", "function", "which", "calculates", "the", "model", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L1091-L1137
train
PaulHancock/Aegean
AegeanTools/fitting.py
do_lmfit
def do_lmfit(data, params, B=None, errs=None, dojac=True): """ Fit the model to the data data may contain 'flagged' or 'masked' data with the value of np.NaN Parameters ---------- data : 2d-array Image data params : lmfit.Parameters Initial model guess. B : 2d-array B matrix to be used in residual calculations. Default = None. errs : 1d-array dojac : bool If true then an analytic jacobian will be passed to the fitting routine. Returns ------- result : ? lmfit.minimize result. params : lmfit.Params Fitted model. See Also -------- :func:`AegeanTools.fitting.lmfit_jacobian` """ # copy the params so as not to change the initial conditions # in case we want to use them elsewhere params = copy.deepcopy(params) data = np.array(data) mask = np.where(np.isfinite(data)) def residual(params, **kwargs): """ The residual function required by lmfit Parameters ---------- params: lmfit.Params The parameters of the model being fit Returns ------- result : numpy.ndarray Model - Data """ f = ntwodgaussian_lmfit(params) # A function describing the model model = f(*mask) # The actual model if B is None: return model - data[mask] else: return (model - data[mask]).dot(B) if dojac: result = lmfit.minimize(residual, params, kws={'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs}, Dfun=lmfit_jacobian) else: result = lmfit.minimize(residual, params, kws={'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs}) # Remake the residual so that it is once again (model - data) if B is not None: result.residual = result.residual.dot(inv(B)) return result, params
python
def do_lmfit(data, params, B=None, errs=None, dojac=True): """ Fit the model to the data data may contain 'flagged' or 'masked' data with the value of np.NaN Parameters ---------- data : 2d-array Image data params : lmfit.Parameters Initial model guess. B : 2d-array B matrix to be used in residual calculations. Default = None. errs : 1d-array dojac : bool If true then an analytic jacobian will be passed to the fitting routine. Returns ------- result : ? lmfit.minimize result. params : lmfit.Params Fitted model. See Also -------- :func:`AegeanTools.fitting.lmfit_jacobian` """ # copy the params so as not to change the initial conditions # in case we want to use them elsewhere params = copy.deepcopy(params) data = np.array(data) mask = np.where(np.isfinite(data)) def residual(params, **kwargs): """ The residual function required by lmfit Parameters ---------- params: lmfit.Params The parameters of the model being fit Returns ------- result : numpy.ndarray Model - Data """ f = ntwodgaussian_lmfit(params) # A function describing the model model = f(*mask) # The actual model if B is None: return model - data[mask] else: return (model - data[mask]).dot(B) if dojac: result = lmfit.minimize(residual, params, kws={'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs}, Dfun=lmfit_jacobian) else: result = lmfit.minimize(residual, params, kws={'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs}) # Remake the residual so that it is once again (model - data) if B is not None: result.residual = result.residual.dot(inv(B)) return result, params
[ "def", "do_lmfit", "(", "data", ",", "params", ",", "B", "=", "None", ",", "errs", "=", "None", ",", "dojac", "=", "True", ")", ":", "# copy the params so as not to change the initial conditions", "# in case we want to use them elsewhere", "params", "=", "copy", "."...
Fit the model to the data data may contain 'flagged' or 'masked' data with the value of np.NaN Parameters ---------- data : 2d-array Image data params : lmfit.Parameters Initial model guess. B : 2d-array B matrix to be used in residual calculations. Default = None. errs : 1d-array dojac : bool If true then an analytic jacobian will be passed to the fitting routine. Returns ------- result : ? lmfit.minimize result. params : lmfit.Params Fitted model. See Also -------- :func:`AegeanTools.fitting.lmfit_jacobian`
[ "Fit", "the", "model", "to", "the", "data", "data", "may", "contain", "flagged", "or", "masked", "data", "with", "the", "value", "of", "np", ".", "NaN" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L1140-L1210
train
PaulHancock/Aegean
AegeanTools/fitting.py
covar_errors
def covar_errors(params, data, errs, B, C=None): """ Take a set of parameters that were fit with lmfit, and replace the errors with the 1\sigma errors calculated using the covariance matrix. Parameters ---------- params : lmfit.Parameters Model data : 2d-array Image data errs : 2d-array ? Image noise. B : 2d-array B matrix. C : 2d-array C matrix. Optional. If supplied then Bmatrix will not be used. Returns ------- params : lmfit.Parameters Modified model. """ mask = np.where(np.isfinite(data)) # calculate the proper parameter errors and copy them across. if C is not None: try: J = lmfit_jacobian(params, mask[0], mask[1], errs=errs) covar = np.transpose(J).dot(inv(C)).dot(J) onesigma = np.sqrt(np.diag(inv(covar))) except (np.linalg.linalg.LinAlgError, ValueError) as _: C = None if C is None: try: J = lmfit_jacobian(params, mask[0], mask[1], B=B, errs=errs) covar = np.transpose(J).dot(J) onesigma = np.sqrt(np.diag(inv(covar))) except (np.linalg.linalg.LinAlgError, ValueError) as _: onesigma = [-2] * len(mask[0]) for i in range(params['components'].value): prefix = "c{0}_".format(i) j = 0 for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']: if params[prefix + p].vary: params[prefix + p].stderr = onesigma[j] j += 1 return params
python
def covar_errors(params, data, errs, B, C=None): """ Take a set of parameters that were fit with lmfit, and replace the errors with the 1\sigma errors calculated using the covariance matrix. Parameters ---------- params : lmfit.Parameters Model data : 2d-array Image data errs : 2d-array ? Image noise. B : 2d-array B matrix. C : 2d-array C matrix. Optional. If supplied then Bmatrix will not be used. Returns ------- params : lmfit.Parameters Modified model. """ mask = np.where(np.isfinite(data)) # calculate the proper parameter errors and copy them across. if C is not None: try: J = lmfit_jacobian(params, mask[0], mask[1], errs=errs) covar = np.transpose(J).dot(inv(C)).dot(J) onesigma = np.sqrt(np.diag(inv(covar))) except (np.linalg.linalg.LinAlgError, ValueError) as _: C = None if C is None: try: J = lmfit_jacobian(params, mask[0], mask[1], B=B, errs=errs) covar = np.transpose(J).dot(J) onesigma = np.sqrt(np.diag(inv(covar))) except (np.linalg.linalg.LinAlgError, ValueError) as _: onesigma = [-2] * len(mask[0]) for i in range(params['components'].value): prefix = "c{0}_".format(i) j = 0 for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']: if params[prefix + p].vary: params[prefix + p].stderr = onesigma[j] j += 1 return params
[ "def", "covar_errors", "(", "params", ",", "data", ",", "errs", ",", "B", ",", "C", "=", "None", ")", ":", "mask", "=", "np", ".", "where", "(", "np", ".", "isfinite", "(", "data", ")", ")", "# calculate the proper parameter errors and copy them across.", ...
Take a set of parameters that were fit with lmfit, and replace the errors with the 1\sigma errors calculated using the covariance matrix. Parameters ---------- params : lmfit.Parameters Model data : 2d-array Image data errs : 2d-array ? Image noise. B : 2d-array B matrix. C : 2d-array C matrix. Optional. If supplied then Bmatrix will not be used. Returns ------- params : lmfit.Parameters Modified model.
[ "Take", "a", "set", "of", "parameters", "that", "were", "fit", "with", "lmfit", "and", "replace", "the", "errors", "with", "the", "1", "\\", "sigma", "errors", "calculated", "using", "the", "covariance", "matrix", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fitting.py#L1213-L1269
train
PaulHancock/Aegean
AegeanTools/BANE.py
barrier
def barrier(events, sid, kind='neighbour'): """ act as a multiprocessing barrier """ events[sid].set() # only wait for the neighbours if kind=='neighbour': if sid > 0: logging.debug("{0} is waiting for {1}".format(sid, sid - 1)) events[sid - 1].wait() if sid < len(bkg_events) - 1: logging.debug("{0} is waiting for {1}".format(sid, sid + 1)) events[sid + 1].wait() # wait for all else: [e.wait() for e in events] return
python
def barrier(events, sid, kind='neighbour'): """ act as a multiprocessing barrier """ events[sid].set() # only wait for the neighbours if kind=='neighbour': if sid > 0: logging.debug("{0} is waiting for {1}".format(sid, sid - 1)) events[sid - 1].wait() if sid < len(bkg_events) - 1: logging.debug("{0} is waiting for {1}".format(sid, sid + 1)) events[sid + 1].wait() # wait for all else: [e.wait() for e in events] return
[ "def", "barrier", "(", "events", ",", "sid", ",", "kind", "=", "'neighbour'", ")", ":", "events", "[", "sid", "]", ".", "set", "(", ")", "# only wait for the neighbours", "if", "kind", "==", "'neighbour'", ":", "if", "sid", ">", "0", ":", "logging", "....
act as a multiprocessing barrier
[ "act", "as", "a", "multiprocessing", "barrier" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L34-L50
train
PaulHancock/Aegean
AegeanTools/BANE.py
sigmaclip
def sigmaclip(arr, lo, hi, reps=3): """ Perform sigma clipping on an array, ignoring non finite values. During each iteration return an array whose elements c obey: mean -std*lo < c < mean + std*hi where mean/std are the mean std of the input array. Parameters ---------- arr : iterable An iterable array of numeric types. lo : float The negative clipping level. hi : float The positive clipping level. reps : int The number of iterations to perform. Default = 3. Returns ------- mean : float The mean of the array, possibly nan std : float The std of the array, possibly nan Notes ----- Scipy v0.16 now contains a comparable method that will ignore nan/inf values. """ clipped = np.array(arr)[np.isfinite(arr)] if len(clipped) < 1: return np.nan, np.nan std = np.std(clipped) mean = np.mean(clipped) for _ in range(int(reps)): clipped = clipped[np.where(clipped > mean-std*lo)] clipped = clipped[np.where(clipped < mean+std*hi)] pstd = std if len(clipped) < 1: break std = np.std(clipped) mean = np.mean(clipped) if 2*abs(pstd-std)/(pstd+std) < 0.2: break return mean, std
python
def sigmaclip(arr, lo, hi, reps=3): """ Perform sigma clipping on an array, ignoring non finite values. During each iteration return an array whose elements c obey: mean -std*lo < c < mean + std*hi where mean/std are the mean std of the input array. Parameters ---------- arr : iterable An iterable array of numeric types. lo : float The negative clipping level. hi : float The positive clipping level. reps : int The number of iterations to perform. Default = 3. Returns ------- mean : float The mean of the array, possibly nan std : float The std of the array, possibly nan Notes ----- Scipy v0.16 now contains a comparable method that will ignore nan/inf values. """ clipped = np.array(arr)[np.isfinite(arr)] if len(clipped) < 1: return np.nan, np.nan std = np.std(clipped) mean = np.mean(clipped) for _ in range(int(reps)): clipped = clipped[np.where(clipped > mean-std*lo)] clipped = clipped[np.where(clipped < mean+std*hi)] pstd = std if len(clipped) < 1: break std = np.std(clipped) mean = np.mean(clipped) if 2*abs(pstd-std)/(pstd+std) < 0.2: break return mean, std
[ "def", "sigmaclip", "(", "arr", ",", "lo", ",", "hi", ",", "reps", "=", "3", ")", ":", "clipped", "=", "np", ".", "array", "(", "arr", ")", "[", "np", ".", "isfinite", "(", "arr", ")", "]", "if", "len", "(", "clipped", ")", "<", "1", ":", "...
Perform sigma clipping on an array, ignoring non finite values. During each iteration return an array whose elements c obey: mean -std*lo < c < mean + std*hi where mean/std are the mean std of the input array. Parameters ---------- arr : iterable An iterable array of numeric types. lo : float The negative clipping level. hi : float The positive clipping level. reps : int The number of iterations to perform. Default = 3. Returns ------- mean : float The mean of the array, possibly nan std : float The std of the array, possibly nan Notes ----- Scipy v0.16 now contains a comparable method that will ignore nan/inf values.
[ "Perform", "sigma", "clipping", "on", "an", "array", "ignoring", "non", "finite", "values", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L53-L102
train
PaulHancock/Aegean
AegeanTools/BANE.py
_sf2
def _sf2(args): """ A shallow wrapper for sigma_filter. Parameters ---------- args : list A list of arguments for sigma_filter Returns ------- None """ # an easier to debug traceback when multiprocessing # thanks to https://stackoverflow.com/a/16618842/1710603 try: return sigma_filter(*args) except: import traceback raise Exception("".join(traceback.format_exception(*sys.exc_info())))
python
def _sf2(args): """ A shallow wrapper for sigma_filter. Parameters ---------- args : list A list of arguments for sigma_filter Returns ------- None """ # an easier to debug traceback when multiprocessing # thanks to https://stackoverflow.com/a/16618842/1710603 try: return sigma_filter(*args) except: import traceback raise Exception("".join(traceback.format_exception(*sys.exc_info())))
[ "def", "_sf2", "(", "args", ")", ":", "# an easier to debug traceback when multiprocessing", "# thanks to https://stackoverflow.com/a/16618842/1710603", "try", ":", "return", "sigma_filter", "(", "*", "args", ")", "except", ":", "import", "traceback", "raise", "Exception", ...
A shallow wrapper for sigma_filter. Parameters ---------- args : list A list of arguments for sigma_filter Returns ------- None
[ "A", "shallow", "wrapper", "for", "sigma_filter", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L105-L124
train
PaulHancock/Aegean
AegeanTools/BANE.py
sigma_filter
def sigma_filter(filename, region, step_size, box_size, shape, domask, sid): """ Calculate the background and rms for a sub region of an image. The results are written to shared memory - irms and ibkg. Parameters ---------- filename : string Fits file to open region : list Region within the fits file that is to be processed. (row_min, row_max). step_size : (int, int) The filtering step size box_size : (int, int) The size of the box over which the filter is applied (each step). shape : tuple The shape of the fits image domask : bool If true then copy the data mask to the output. sid : int The stripe number Returns ------- None """ ymin, ymax = region logging.debug('rows {0}-{1} starting at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime()))) # cut out the region of interest plus 1/2 the box size, but clip to the image size data_row_min = max(0, ymin - box_size[0]//2) data_row_max = min(shape[0], ymax + box_size[0]//2) # Figure out how many axes are in the datafile NAXIS = fits.getheader(filename)["NAXIS"] with fits.open(filename, memmap=True) as a: if NAXIS == 2: data = a[0].section[data_row_min:data_row_max, 0:shape[1]] elif NAXIS == 3: data = a[0].section[0, data_row_min:data_row_max, 0:shape[1]] elif NAXIS == 4: data = a[0].section[0, 0, data_row_min:data_row_max, 0:shape[1]] else: logging.error("Too many NAXIS for me {0}".format(NAXIS)) logging.error("fix your file to be more sane") raise Exception("Too many NAXIS") row_len = shape[1] logging.debug('data size is {0}'.format(data.shape)) def box(r, c): """ calculate the boundaries of the box centered at r,c with size = box_size """ r_min = max(0, r - box_size[0] // 2) r_max = min(data.shape[0] - 1, r + box_size[0] // 2) c_min = max(0, c - box_size[1] // 2) c_max = min(data.shape[1] - 1, c + box_size[1] // 2) return r_min, r_max, c_min, c_max # set up a grid of rows/cols at which we will compute the bkg/rms rows = list(range(ymin-data_row_min, ymax-data_row_min, step_size[0])) 
rows.append(ymax-data_row_min) cols = list(range(0, shape[1], step_size[1])) cols.append(shape[1]) # store the computed bkg/rms in this smaller array vals = np.zeros(shape=(len(rows),len(cols))) for i, row in enumerate(rows): for j, col in enumerate(cols): r_min, r_max, c_min, c_max = box(row, col) new = data[r_min:r_max, c_min:c_max] new = np.ravel(new) bkg, _ = sigmaclip(new, 3, 3) vals[i,j] = bkg # indices of all the pixels within our region gr, gc = np.mgrid[ymin-data_row_min:ymax-data_row_min, 0:shape[1]] logging.debug("Interpolating bkg to sharemem") ifunc = RegularGridInterpolator((rows, cols), vals) for i in range(gr.shape[0]): row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32) start_idx = np.ravel_multi_index((ymin+i, 0), shape) end_idx = start_idx + row_len ibkg[start_idx:end_idx] = row # np.ctypeslib.as_ctypes(row) del ifunc logging.debug(" ... done writing bkg") # signal that the bkg is done for this region, and wait for neighbours barrier(bkg_events, sid) logging.debug("{0} background subtraction".format(sid)) for i in range(data_row_max - data_row_min): start_idx = np.ravel_multi_index((data_row_min + i, 0), shape) end_idx = start_idx + row_len data[i, :] = data[i, :] - ibkg[start_idx:end_idx] # reset/recycle the vals array vals[:] = 0 for i, row in enumerate(rows): for j, col in enumerate(cols): r_min, r_max, c_min, c_max = box(row, col) new = data[r_min:r_max, c_min:c_max] new = np.ravel(new) _ , rms = sigmaclip(new, 3, 3) vals[i,j] = rms logging.debug("Interpolating rm to sharemem rms") ifunc = RegularGridInterpolator((rows, cols), vals) for i in range(gr.shape[0]): row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32) start_idx = np.ravel_multi_index((ymin+i, 0), shape) end_idx = start_idx + row_len irms[start_idx:end_idx] = row # np.ctypeslib.as_ctypes(row) del ifunc logging.debug(" .. 
done writing rms") if domask: barrier(mask_events, sid) logging.debug("applying mask") for i in range(gr.shape[0]): mask = np.where(np.bitwise_not(np.isfinite(data[i + ymin-data_row_min,:])))[0] for j in mask: idx = np.ravel_multi_index((i + ymin,j),shape) ibkg[idx] = np.nan irms[idx] = np.nan logging.debug(" ... done applying mask") logging.debug('rows {0}-{1} finished at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime()))) return
python
def sigma_filter(filename, region, step_size, box_size, shape, domask, sid): """ Calculate the background and rms for a sub region of an image. The results are written to shared memory - irms and ibkg. Parameters ---------- filename : string Fits file to open region : list Region within the fits file that is to be processed. (row_min, row_max). step_size : (int, int) The filtering step size box_size : (int, int) The size of the box over which the filter is applied (each step). shape : tuple The shape of the fits image domask : bool If true then copy the data mask to the output. sid : int The stripe number Returns ------- None """ ymin, ymax = region logging.debug('rows {0}-{1} starting at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime()))) # cut out the region of interest plus 1/2 the box size, but clip to the image size data_row_min = max(0, ymin - box_size[0]//2) data_row_max = min(shape[0], ymax + box_size[0]//2) # Figure out how many axes are in the datafile NAXIS = fits.getheader(filename)["NAXIS"] with fits.open(filename, memmap=True) as a: if NAXIS == 2: data = a[0].section[data_row_min:data_row_max, 0:shape[1]] elif NAXIS == 3: data = a[0].section[0, data_row_min:data_row_max, 0:shape[1]] elif NAXIS == 4: data = a[0].section[0, 0, data_row_min:data_row_max, 0:shape[1]] else: logging.error("Too many NAXIS for me {0}".format(NAXIS)) logging.error("fix your file to be more sane") raise Exception("Too many NAXIS") row_len = shape[1] logging.debug('data size is {0}'.format(data.shape)) def box(r, c): """ calculate the boundaries of the box centered at r,c with size = box_size """ r_min = max(0, r - box_size[0] // 2) r_max = min(data.shape[0] - 1, r + box_size[0] // 2) c_min = max(0, c - box_size[1] // 2) c_max = min(data.shape[1] - 1, c + box_size[1] // 2) return r_min, r_max, c_min, c_max # set up a grid of rows/cols at which we will compute the bkg/rms rows = list(range(ymin-data_row_min, ymax-data_row_min, step_size[0])) 
rows.append(ymax-data_row_min) cols = list(range(0, shape[1], step_size[1])) cols.append(shape[1]) # store the computed bkg/rms in this smaller array vals = np.zeros(shape=(len(rows),len(cols))) for i, row in enumerate(rows): for j, col in enumerate(cols): r_min, r_max, c_min, c_max = box(row, col) new = data[r_min:r_max, c_min:c_max] new = np.ravel(new) bkg, _ = sigmaclip(new, 3, 3) vals[i,j] = bkg # indices of all the pixels within our region gr, gc = np.mgrid[ymin-data_row_min:ymax-data_row_min, 0:shape[1]] logging.debug("Interpolating bkg to sharemem") ifunc = RegularGridInterpolator((rows, cols), vals) for i in range(gr.shape[0]): row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32) start_idx = np.ravel_multi_index((ymin+i, 0), shape) end_idx = start_idx + row_len ibkg[start_idx:end_idx] = row # np.ctypeslib.as_ctypes(row) del ifunc logging.debug(" ... done writing bkg") # signal that the bkg is done for this region, and wait for neighbours barrier(bkg_events, sid) logging.debug("{0} background subtraction".format(sid)) for i in range(data_row_max - data_row_min): start_idx = np.ravel_multi_index((data_row_min + i, 0), shape) end_idx = start_idx + row_len data[i, :] = data[i, :] - ibkg[start_idx:end_idx] # reset/recycle the vals array vals[:] = 0 for i, row in enumerate(rows): for j, col in enumerate(cols): r_min, r_max, c_min, c_max = box(row, col) new = data[r_min:r_max, c_min:c_max] new = np.ravel(new) _ , rms = sigmaclip(new, 3, 3) vals[i,j] = rms logging.debug("Interpolating rm to sharemem rms") ifunc = RegularGridInterpolator((rows, cols), vals) for i in range(gr.shape[0]): row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32) start_idx = np.ravel_multi_index((ymin+i, 0), shape) end_idx = start_idx + row_len irms[start_idx:end_idx] = row # np.ctypeslib.as_ctypes(row) del ifunc logging.debug(" .. 
done writing rms") if domask: barrier(mask_events, sid) logging.debug("applying mask") for i in range(gr.shape[0]): mask = np.where(np.bitwise_not(np.isfinite(data[i + ymin-data_row_min,:])))[0] for j in mask: idx = np.ravel_multi_index((i + ymin,j),shape) ibkg[idx] = np.nan irms[idx] = np.nan logging.debug(" ... done applying mask") logging.debug('rows {0}-{1} finished at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime()))) return
[ "def", "sigma_filter", "(", "filename", ",", "region", ",", "step_size", ",", "box_size", ",", "shape", ",", "domask", ",", "sid", ")", ":", "ymin", ",", "ymax", "=", "region", "logging", ".", "debug", "(", "'rows {0}-{1} starting at {2}'", ".", "format", ...
Calculate the background and rms for a sub region of an image. The results are written to shared memory - irms and ibkg. Parameters ---------- filename : string Fits file to open region : list Region within the fits file that is to be processed. (row_min, row_max). step_size : (int, int) The filtering step size box_size : (int, int) The size of the box over which the filter is applied (each step). shape : tuple The shape of the fits image domask : bool If true then copy the data mask to the output. sid : int The stripe number Returns ------- None
[ "Calculate", "the", "background", "and", "rms", "for", "a", "sub", "region", "of", "an", "image", ".", "The", "results", "are", "written", "to", "shared", "memory", "-", "irms", "and", "ibkg", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L127-L267
train
PaulHancock/Aegean
AegeanTools/BANE.py
filter_mc_sharemem
def filter_mc_sharemem(filename, step_size, box_size, cores, shape, nslice=None, domask=True): """ Calculate the background and noise images corresponding to the input file. The calculation is done via a box-car approach and uses multiple cores and shared memory. Parameters ---------- filename : str Filename to be filtered. step_size : (int, int) Step size for the filter. box_size : (int, int) Box size for the filter. cores : int Number of cores to use. If None then use all available. nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores shape : (int, int) The shape of the image in the given file. domask : bool True(Default) = copy data mask to output. Returns ------- bkg, rms : numpy.ndarray The interpolated background and noise images. """ if cores is None: cores = multiprocessing.cpu_count() if (nslice is None) or (cores==1): nslice = cores img_y, img_x = shape # initialise some shared memory global ibkg # bkg = np.ctypeslib.as_ctypes(np.empty(shape, dtype=np.float32)) # ibkg = multiprocessing.sharedctypes.Array(bkg._type_, bkg, lock=True) ibkg = multiprocessing.Array('f', img_y*img_x) global irms #rms = np.ctypeslib.as_ctypes(np.empty(shape, dtype=np.float32)) #irms = multiprocessing.sharedctypes.Array(rms._type_, rms, lock=True) irms = multiprocessing.Array('f', img_y * img_x) logging.info("using {0} cores".format(cores)) logging.info("using {0} stripes".format(nslice)) if nslice > 1: # box widths should be multiples of the step_size, and not zero width_y = int(max(img_y/nslice/step_size[1], 1) * step_size[1]) # locations of the box edges ymins = list(range(0, img_y, width_y)) ymaxs = list(range(width_y, img_y, width_y)) ymaxs.append(img_y) else: ymins = [0] ymaxs = [img_y] logging.debug("ymins {0}".format(ymins)) logging.debug("ymaxs {0}".format(ymaxs)) # create an event per stripe global bkg_events, mask_events bkg_events = [multiprocessing.Event() for _ in range(len(ymaxs))] mask_events = 
[multiprocessing.Event() for _ in range(len(ymaxs))] args = [] for i, region in enumerate(zip(ymins, ymaxs)): args.append((filename, region, step_size, box_size, shape, domask, i)) # start a new process for each task, hopefully to reduce residual memory use pool = multiprocessing.Pool(processes=cores, maxtasksperchild=1) try: # chunksize=1 ensures that we only send a single task to each process pool.map_async(_sf2, args, chunksize=1).get(timeout=10000000) except KeyboardInterrupt: logging.error("Caught keyboard interrupt") pool.close() sys.exit(1) pool.close() pool.join() bkg = np.reshape(np.array(ibkg[:], dtype=np.float32), shape) rms = np.reshape(np.array(irms[:], dtype=np.float32), shape) del ibkg, irms return bkg, rms
python
def filter_mc_sharemem(filename, step_size, box_size, cores, shape, nslice=None, domask=True): """ Calculate the background and noise images corresponding to the input file. The calculation is done via a box-car approach and uses multiple cores and shared memory. Parameters ---------- filename : str Filename to be filtered. step_size : (int, int) Step size for the filter. box_size : (int, int) Box size for the filter. cores : int Number of cores to use. If None then use all available. nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores shape : (int, int) The shape of the image in the given file. domask : bool True(Default) = copy data mask to output. Returns ------- bkg, rms : numpy.ndarray The interpolated background and noise images. """ if cores is None: cores = multiprocessing.cpu_count() if (nslice is None) or (cores==1): nslice = cores img_y, img_x = shape # initialise some shared memory global ibkg # bkg = np.ctypeslib.as_ctypes(np.empty(shape, dtype=np.float32)) # ibkg = multiprocessing.sharedctypes.Array(bkg._type_, bkg, lock=True) ibkg = multiprocessing.Array('f', img_y*img_x) global irms #rms = np.ctypeslib.as_ctypes(np.empty(shape, dtype=np.float32)) #irms = multiprocessing.sharedctypes.Array(rms._type_, rms, lock=True) irms = multiprocessing.Array('f', img_y * img_x) logging.info("using {0} cores".format(cores)) logging.info("using {0} stripes".format(nslice)) if nslice > 1: # box widths should be multiples of the step_size, and not zero width_y = int(max(img_y/nslice/step_size[1], 1) * step_size[1]) # locations of the box edges ymins = list(range(0, img_y, width_y)) ymaxs = list(range(width_y, img_y, width_y)) ymaxs.append(img_y) else: ymins = [0] ymaxs = [img_y] logging.debug("ymins {0}".format(ymins)) logging.debug("ymaxs {0}".format(ymaxs)) # create an event per stripe global bkg_events, mask_events bkg_events = [multiprocessing.Event() for _ in range(len(ymaxs))] mask_events = 
[multiprocessing.Event() for _ in range(len(ymaxs))] args = [] for i, region in enumerate(zip(ymins, ymaxs)): args.append((filename, region, step_size, box_size, shape, domask, i)) # start a new process for each task, hopefully to reduce residual memory use pool = multiprocessing.Pool(processes=cores, maxtasksperchild=1) try: # chunksize=1 ensures that we only send a single task to each process pool.map_async(_sf2, args, chunksize=1).get(timeout=10000000) except KeyboardInterrupt: logging.error("Caught keyboard interrupt") pool.close() sys.exit(1) pool.close() pool.join() bkg = np.reshape(np.array(ibkg[:], dtype=np.float32), shape) rms = np.reshape(np.array(irms[:], dtype=np.float32), shape) del ibkg, irms return bkg, rms
[ "def", "filter_mc_sharemem", "(", "filename", ",", "step_size", ",", "box_size", ",", "cores", ",", "shape", ",", "nslice", "=", "None", ",", "domask", "=", "True", ")", ":", "if", "cores", "is", "None", ":", "cores", "=", "multiprocessing", ".", "cpu_co...
Calculate the background and noise images corresponding to the input file. The calculation is done via a box-car approach and uses multiple cores and shared memory. Parameters ---------- filename : str Filename to be filtered. step_size : (int, int) Step size for the filter. box_size : (int, int) Box size for the filter. cores : int Number of cores to use. If None then use all available. nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores shape : (int, int) The shape of the image in the given file. domask : bool True(Default) = copy data mask to output. Returns ------- bkg, rms : numpy.ndarray The interpolated background and noise images.
[ "Calculate", "the", "background", "and", "noise", "images", "corresponding", "to", "the", "input", "file", ".", "The", "calculation", "is", "done", "via", "a", "box", "-", "car", "approach", "and", "uses", "multiple", "cores", "and", "shared", "memory", "." ...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L270-L363
train
PaulHancock/Aegean
AegeanTools/BANE.py
filter_image
def filter_image(im_name, out_base, step_size=None, box_size=None, twopass=False, cores=None, mask=True, compressed=False, nslice=None): """ Create a background and noise image from an input image. Resulting images are written to `outbase_bkg.fits` and `outbase_rms.fits` Parameters ---------- im_name : str or HDUList Image to filter. Either a string filename or an astropy.io.fits.HDUList. out_base : str The output filename base. Will be modified to make _bkg and _rms files. step_size : (int,int) Tuple of the x,y step size in pixels box_size : (int,int) The size of the box in piexls twopass : bool Perform a second pass calculation to ensure that the noise is not contaminated by the background. Default = False cores : int Number of CPU corse to use. Default = all available nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores mask : bool Mask the output array to contain np.nna wherever the input array is nan or not finite. Default = true compressed : bool Return a compressed version of the background/noise images. Default = False Returns ------- None """ header = fits.getheader(im_name) shape = (header['NAXIS2'],header['NAXIS1']) if step_size is None: if 'BMAJ' in header and 'BMIN' in header: beam_size = np.sqrt(abs(header['BMAJ']*header['BMIN'])) if 'CDELT1' in header: pix_scale = np.sqrt(abs(header['CDELT1']*header['CDELT2'])) elif 'CD1_1' in header: pix_scale = np.sqrt(abs(header['CD1_1']*header['CD2_2'])) if 'CD1_2' in header and 'CD2_1' in header: if header['CD1_2'] != 0 or header['CD2_1']!=0: logging.warning("CD1_2 and/or CD2_1 are non-zero and I don't know what to do with them") logging.warning("Ingoring them") else: logging.warning("Cannot determine pixel scale, assuming 4 pixels per beam") pix_scale = beam_size/4. 
# default to 4x the synthesized beam width step_size = int(np.ceil(4*beam_size/pix_scale)) else: logging.info("BMAJ and/or BMIN not in fits header.") logging.info("Assuming 4 pix/beam, so we have step_size = 16 pixels") step_size = 16 step_size = (step_size, step_size) if box_size is None: # default to 6x the step size so we have ~ 30beams box_size = (step_size[0]*6, step_size[1]*6) if compressed: if not step_size[0] == step_size[1]: step_size = (min(step_size), min(step_size)) logging.info("Changing grid to be {0} so we can compress the output".format(step_size)) logging.info("using grid_size {0}, box_size {1}".format(step_size,box_size)) logging.info("on data shape {0}".format(shape)) bkg, rms = filter_mc_sharemem(im_name, step_size=step_size, box_size=box_size, cores=cores, shape=shape, nslice=nslice, domask=mask) logging.info("done") bkg_out = '_'.join([os.path.expanduser(out_base), 'bkg.fits']) rms_out = '_'.join([os.path.expanduser(out_base), 'rms.fits']) # add a comment to the fits header header['HISTORY'] = 'BANE {0}-({1})'.format(__version__, __date__) # compress if compressed: hdu = fits.PrimaryHDU(bkg) hdu.header = copy.deepcopy(header) hdulist = fits.HDUList([hdu]) compress(hdulist, step_size[0], bkg_out) hdulist[0].header = copy.deepcopy(header) hdulist[0].data = rms compress(hdulist, step_size[0], rms_out) return write_fits(bkg, header, bkg_out) write_fits(rms, header, rms_out)
python
def filter_image(im_name, out_base, step_size=None, box_size=None, twopass=False, cores=None, mask=True, compressed=False, nslice=None): """ Create a background and noise image from an input image. Resulting images are written to `outbase_bkg.fits` and `outbase_rms.fits` Parameters ---------- im_name : str or HDUList Image to filter. Either a string filename or an astropy.io.fits.HDUList. out_base : str The output filename base. Will be modified to make _bkg and _rms files. step_size : (int,int) Tuple of the x,y step size in pixels box_size : (int,int) The size of the box in piexls twopass : bool Perform a second pass calculation to ensure that the noise is not contaminated by the background. Default = False cores : int Number of CPU corse to use. Default = all available nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores mask : bool Mask the output array to contain np.nna wherever the input array is nan or not finite. Default = true compressed : bool Return a compressed version of the background/noise images. Default = False Returns ------- None """ header = fits.getheader(im_name) shape = (header['NAXIS2'],header['NAXIS1']) if step_size is None: if 'BMAJ' in header and 'BMIN' in header: beam_size = np.sqrt(abs(header['BMAJ']*header['BMIN'])) if 'CDELT1' in header: pix_scale = np.sqrt(abs(header['CDELT1']*header['CDELT2'])) elif 'CD1_1' in header: pix_scale = np.sqrt(abs(header['CD1_1']*header['CD2_2'])) if 'CD1_2' in header and 'CD2_1' in header: if header['CD1_2'] != 0 or header['CD2_1']!=0: logging.warning("CD1_2 and/or CD2_1 are non-zero and I don't know what to do with them") logging.warning("Ingoring them") else: logging.warning("Cannot determine pixel scale, assuming 4 pixels per beam") pix_scale = beam_size/4. 
# default to 4x the synthesized beam width step_size = int(np.ceil(4*beam_size/pix_scale)) else: logging.info("BMAJ and/or BMIN not in fits header.") logging.info("Assuming 4 pix/beam, so we have step_size = 16 pixels") step_size = 16 step_size = (step_size, step_size) if box_size is None: # default to 6x the step size so we have ~ 30beams box_size = (step_size[0]*6, step_size[1]*6) if compressed: if not step_size[0] == step_size[1]: step_size = (min(step_size), min(step_size)) logging.info("Changing grid to be {0} so we can compress the output".format(step_size)) logging.info("using grid_size {0}, box_size {1}".format(step_size,box_size)) logging.info("on data shape {0}".format(shape)) bkg, rms = filter_mc_sharemem(im_name, step_size=step_size, box_size=box_size, cores=cores, shape=shape, nslice=nslice, domask=mask) logging.info("done") bkg_out = '_'.join([os.path.expanduser(out_base), 'bkg.fits']) rms_out = '_'.join([os.path.expanduser(out_base), 'rms.fits']) # add a comment to the fits header header['HISTORY'] = 'BANE {0}-({1})'.format(__version__, __date__) # compress if compressed: hdu = fits.PrimaryHDU(bkg) hdu.header = copy.deepcopy(header) hdulist = fits.HDUList([hdu]) compress(hdulist, step_size[0], bkg_out) hdulist[0].header = copy.deepcopy(header) hdulist[0].data = rms compress(hdulist, step_size[0], rms_out) return write_fits(bkg, header, bkg_out) write_fits(rms, header, rms_out)
[ "def", "filter_image", "(", "im_name", ",", "out_base", ",", "step_size", "=", "None", ",", "box_size", "=", "None", ",", "twopass", "=", "False", ",", "cores", "=", "None", ",", "mask", "=", "True", ",", "compressed", "=", "False", ",", "nslice", "=",...
Create a background and noise image from an input image. Resulting images are written to `outbase_bkg.fits` and `outbase_rms.fits` Parameters ---------- im_name : str or HDUList Image to filter. Either a string filename or an astropy.io.fits.HDUList. out_base : str The output filename base. Will be modified to make _bkg and _rms files. step_size : (int,int) Tuple of the x,y step size in pixels box_size : (int,int) The size of the box in piexls twopass : bool Perform a second pass calculation to ensure that the noise is not contaminated by the background. Default = False cores : int Number of CPU corse to use. Default = all available nslice : int The image will be divided into this many horizontal stripes for processing. Default = None = equal to cores mask : bool Mask the output array to contain np.nna wherever the input array is nan or not finite. Default = true compressed : bool Return a compressed version of the background/noise images. Default = False Returns ------- None
[ "Create", "a", "background", "and", "noise", "image", "from", "an", "input", "image", ".", "Resulting", "images", "are", "written", "to", "outbase_bkg", ".", "fits", "and", "outbase_rms", ".", "fits" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L366-L461
train
PaulHancock/Aegean
AegeanTools/BANE.py
write_fits
def write_fits(data, header, file_name): """ Combine data and a fits header to write a fits file. Parameters ---------- data : numpy.ndarray The data to be written. header : astropy.io.fits.hduheader The header for the fits file. file_name : string The file to write Returns ------- None """ hdu = fits.PrimaryHDU(data) hdu.header = header hdulist = fits.HDUList([hdu]) hdulist.writeto(file_name, overwrite=True) logging.info("Wrote {0}".format(file_name)) return
python
def write_fits(data, header, file_name): """ Combine data and a fits header to write a fits file. Parameters ---------- data : numpy.ndarray The data to be written. header : astropy.io.fits.hduheader The header for the fits file. file_name : string The file to write Returns ------- None """ hdu = fits.PrimaryHDU(data) hdu.header = header hdulist = fits.HDUList([hdu]) hdulist.writeto(file_name, overwrite=True) logging.info("Wrote {0}".format(file_name)) return
[ "def", "write_fits", "(", "data", ",", "header", ",", "file_name", ")", ":", "hdu", "=", "fits", ".", "PrimaryHDU", "(", "data", ")", "hdu", ".", "header", "=", "header", "hdulist", "=", "fits", ".", "HDUList", "(", "[", "hdu", "]", ")", "hdulist", ...
Combine data and a fits header to write a fits file. Parameters ---------- data : numpy.ndarray The data to be written. header : astropy.io.fits.hduheader The header for the fits file. file_name : string The file to write Returns ------- None
[ "Combine", "data", "and", "a", "fits", "header", "to", "write", "a", "fits", "file", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/BANE.py#L467-L491
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
dec2dec
def dec2dec(dec): """ Convert sexegessimal RA string into a float in degrees. Parameters ---------- dec : string A string separated representing the Dec. Expected format is `[+- ]hh:mm[:ss.s]` Colons can be replaced with any whit space character. Returns ------- dec : float The Dec in degrees. """ d = dec.replace(':', ' ').split() if len(d) == 2: d.append(0.0) if d[0].startswith('-') or float(d[0]) < 0: return float(d[0]) - float(d[1]) / 60.0 - float(d[2]) / 3600.0 return float(d[0]) + float(d[1]) / 60.0 + float(d[2]) / 3600.0
python
def dec2dec(dec): """ Convert sexegessimal RA string into a float in degrees. Parameters ---------- dec : string A string separated representing the Dec. Expected format is `[+- ]hh:mm[:ss.s]` Colons can be replaced with any whit space character. Returns ------- dec : float The Dec in degrees. """ d = dec.replace(':', ' ').split() if len(d) == 2: d.append(0.0) if d[0].startswith('-') or float(d[0]) < 0: return float(d[0]) - float(d[1]) / 60.0 - float(d[2]) / 3600.0 return float(d[0]) + float(d[1]) / 60.0 + float(d[2]) / 3600.0
[ "def", "dec2dec", "(", "dec", ")", ":", "d", "=", "dec", ".", "replace", "(", "':'", ",", "' '", ")", ".", "split", "(", ")", "if", "len", "(", "d", ")", "==", "2", ":", "d", ".", "append", "(", "0.0", ")", "if", "d", "[", "0", "]", ".", ...
Convert sexegessimal RA string into a float in degrees. Parameters ---------- dec : string A string separated representing the Dec. Expected format is `[+- ]hh:mm[:ss.s]` Colons can be replaced with any whit space character. Returns ------- dec : float The Dec in degrees.
[ "Convert", "sexegessimal", "RA", "string", "into", "a", "float", "in", "degrees", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L38-L59
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
dec2dms
def dec2dms(x): """ Convert decimal degrees into a sexagessimal string in degrees. Parameters ---------- x : float Angle in degrees Returns ------- dms : string String of format [+-]DD:MM:SS.SS or XX:XX:XX.XX if x is not finite. """ if not np.isfinite(x): return 'XX:XX:XX.XX' if x < 0: sign = '-' else: sign = '+' x = abs(x) d = int(math.floor(x)) m = int(math.floor((x - d) * 60)) s = float(( (x - d) * 60 - m) * 60) return '{0}{1:02d}:{2:02d}:{3:05.2f}'.format(sign, d, m, s)
python
def dec2dms(x): """ Convert decimal degrees into a sexagessimal string in degrees. Parameters ---------- x : float Angle in degrees Returns ------- dms : string String of format [+-]DD:MM:SS.SS or XX:XX:XX.XX if x is not finite. """ if not np.isfinite(x): return 'XX:XX:XX.XX' if x < 0: sign = '-' else: sign = '+' x = abs(x) d = int(math.floor(x)) m = int(math.floor((x - d) * 60)) s = float(( (x - d) * 60 - m) * 60) return '{0}{1:02d}:{2:02d}:{3:05.2f}'.format(sign, d, m, s)
[ "def", "dec2dms", "(", "x", ")", ":", "if", "not", "np", ".", "isfinite", "(", "x", ")", ":", "return", "'XX:XX:XX.XX'", "if", "x", "<", "0", ":", "sign", "=", "'-'", "else", ":", "sign", "=", "'+'", "x", "=", "abs", "(", "x", ")", "d", "=", ...
Convert decimal degrees into a sexagessimal string in degrees. Parameters ---------- x : float Angle in degrees Returns ------- dms : string String of format [+-]DD:MM:SS.SS or XX:XX:XX.XX if x is not finite.
[ "Convert", "decimal", "degrees", "into", "a", "sexagessimal", "string", "in", "degrees", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L62-L87
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
dec2hms
def dec2hms(x): """ Convert decimal degrees into a sexagessimal string in hours. Parameters ---------- x : float Angle in degrees Returns ------- dms : string String of format HH:MM:SS.SS or XX:XX:XX.XX if x is not finite. """ if not np.isfinite(x): return 'XX:XX:XX.XX' # wrap negative RA's if x < 0: x += 360 x /= 15.0 h = int(x) x = (x - h) * 60 m = int(x) s = (x - m) * 60 return '{0:02d}:{1:02d}:{2:05.2f}'.format(h, m, s)
python
def dec2hms(x): """ Convert decimal degrees into a sexagessimal string in hours. Parameters ---------- x : float Angle in degrees Returns ------- dms : string String of format HH:MM:SS.SS or XX:XX:XX.XX if x is not finite. """ if not np.isfinite(x): return 'XX:XX:XX.XX' # wrap negative RA's if x < 0: x += 360 x /= 15.0 h = int(x) x = (x - h) * 60 m = int(x) s = (x - m) * 60 return '{0:02d}:{1:02d}:{2:05.2f}'.format(h, m, s)
[ "def", "dec2hms", "(", "x", ")", ":", "if", "not", "np", ".", "isfinite", "(", "x", ")", ":", "return", "'XX:XX:XX.XX'", "# wrap negative RA's", "if", "x", "<", "0", ":", "x", "+=", "360", "x", "/=", "15.0", "h", "=", "int", "(", "x", ")", "x", ...
Convert decimal degrees into a sexagessimal string in hours. Parameters ---------- x : float Angle in degrees Returns ------- dms : string String of format HH:MM:SS.SS or XX:XX:XX.XX if x is not finite.
[ "Convert", "decimal", "degrees", "into", "a", "sexagessimal", "string", "in", "hours", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L90-L115
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
gcd
def gcd(ra1, dec1, ra2, dec2): """ Calculate the great circle distance between to points using the haversine formula [1]_. Parameters ---------- ra1, dec1, ra2, dec2 : float The coordinates of the two points of interest. Units are in degrees. Returns ------- dist : float The distance between the two points in degrees. Notes ----- This duplicates the functionality of astropy but is faster as there is no creation of SkyCoords objects. .. [1] `Haversine formula <https://en.wikipedia.org/wiki/Haversine_formula>`_ """ # TODO: Vincenty formula see - https://en.wikipedia.org/wiki/Great-circle_distance dlon = ra2 - ra1 dlat = dec2 - dec1 a = np.sin(np.radians(dlat) / 2) ** 2 a += np.cos(np.radians(dec1)) * np.cos(np.radians(dec2)) * np.sin(np.radians(dlon) / 2) ** 2 sep = np.degrees(2 * np.arcsin(np.minimum(1, np.sqrt(a)))) return sep
python
def gcd(ra1, dec1, ra2, dec2): """ Calculate the great circle distance between to points using the haversine formula [1]_. Parameters ---------- ra1, dec1, ra2, dec2 : float The coordinates of the two points of interest. Units are in degrees. Returns ------- dist : float The distance between the two points in degrees. Notes ----- This duplicates the functionality of astropy but is faster as there is no creation of SkyCoords objects. .. [1] `Haversine formula <https://en.wikipedia.org/wiki/Haversine_formula>`_ """ # TODO: Vincenty formula see - https://en.wikipedia.org/wiki/Great-circle_distance dlon = ra2 - ra1 dlat = dec2 - dec1 a = np.sin(np.radians(dlat) / 2) ** 2 a += np.cos(np.radians(dec1)) * np.cos(np.radians(dec2)) * np.sin(np.radians(dlon) / 2) ** 2 sep = np.degrees(2 * np.arcsin(np.minimum(1, np.sqrt(a)))) return sep
[ "def", "gcd", "(", "ra1", ",", "dec1", ",", "ra2", ",", "dec2", ")", ":", "# TODO: Vincenty formula see - https://en.wikipedia.org/wiki/Great-circle_distance", "dlon", "=", "ra2", "-", "ra1", "dlat", "=", "dec2", "-", "dec1", "a", "=", "np", ".", "sin", "(", ...
Calculate the great circle distance between to points using the haversine formula [1]_. Parameters ---------- ra1, dec1, ra2, dec2 : float The coordinates of the two points of interest. Units are in degrees. Returns ------- dist : float The distance between the two points in degrees. Notes ----- This duplicates the functionality of astropy but is faster as there is no creation of SkyCoords objects. .. [1] `Haversine formula <https://en.wikipedia.org/wiki/Haversine_formula>`_
[ "Calculate", "the", "great", "circle", "distance", "between", "to", "points", "using", "the", "haversine", "formula", "[", "1", "]", "_", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L121-L149
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
bear
def bear(ra1, dec1, ra2, dec2): """ Calculate the bearing of point 2 from point 1 along a great circle. The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180] Parameters ---------- ra1, dec1, ra2, dec2 : float The sky coordinates (degrees) of the two points. Returns ------- bear : float The bearing of point 2 from point 1 (degrees). """ rdec1 = np.radians(dec1) rdec2 = np.radians(dec2) rdlon = np.radians(ra2-ra1) y = np.sin(rdlon) * np.cos(rdec2) x = np.cos(rdec1) * np.sin(rdec2) x -= np.sin(rdec1) * np.cos(rdec2) * np.cos(rdlon) return np.degrees(np.arctan2(y, x))
python
def bear(ra1, dec1, ra2, dec2): """ Calculate the bearing of point 2 from point 1 along a great circle. The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180] Parameters ---------- ra1, dec1, ra2, dec2 : float The sky coordinates (degrees) of the two points. Returns ------- bear : float The bearing of point 2 from point 1 (degrees). """ rdec1 = np.radians(dec1) rdec2 = np.radians(dec2) rdlon = np.radians(ra2-ra1) y = np.sin(rdlon) * np.cos(rdec2) x = np.cos(rdec1) * np.sin(rdec2) x -= np.sin(rdec1) * np.cos(rdec2) * np.cos(rdlon) return np.degrees(np.arctan2(y, x))
[ "def", "bear", "(", "ra1", ",", "dec1", ",", "ra2", ",", "dec2", ")", ":", "rdec1", "=", "np", ".", "radians", "(", "dec1", ")", "rdec2", "=", "np", ".", "radians", "(", "dec2", ")", "rdlon", "=", "np", ".", "radians", "(", "ra2", "-", "ra1", ...
Calculate the bearing of point 2 from point 1 along a great circle. The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180] Parameters ---------- ra1, dec1, ra2, dec2 : float The sky coordinates (degrees) of the two points. Returns ------- bear : float The bearing of point 2 from point 1 (degrees).
[ "Calculate", "the", "bearing", "of", "point", "2", "from", "point", "1", "along", "a", "great", "circle", ".", "The", "bearing", "is", "East", "of", "North", "and", "is", "in", "[", "0", "360", ")", "whereas", "position", "angle", "is", "also", "East",...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L152-L173
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
translate
def translate(ra, dec, r, theta): """ Translate a given point a distance r in the (initial) direction theta, along a great circle. Parameters ---------- ra, dec : float The initial point of interest (degrees). r, theta : float The distance and initial direction to translate (degrees). Returns ------- ra, dec : float The translated position (degrees). """ factor = np.sin(np.radians(dec)) * np.cos(np.radians(r)) factor += np.cos(np.radians(dec)) * np.sin(np.radians(r)) * np.cos(np.radians(theta)) dec_out = np.degrees(np.arcsin(factor)) y = np.sin(np.radians(theta)) * np.sin(np.radians(r)) * np.cos(np.radians(dec)) x = np.cos(np.radians(r)) - np.sin(np.radians(dec)) * np.sin(np.radians(dec_out)) ra_out = ra + np.degrees(np.arctan2(y, x)) return ra_out, dec_out
python
def translate(ra, dec, r, theta): """ Translate a given point a distance r in the (initial) direction theta, along a great circle. Parameters ---------- ra, dec : float The initial point of interest (degrees). r, theta : float The distance and initial direction to translate (degrees). Returns ------- ra, dec : float The translated position (degrees). """ factor = np.sin(np.radians(dec)) * np.cos(np.radians(r)) factor += np.cos(np.radians(dec)) * np.sin(np.radians(r)) * np.cos(np.radians(theta)) dec_out = np.degrees(np.arcsin(factor)) y = np.sin(np.radians(theta)) * np.sin(np.radians(r)) * np.cos(np.radians(dec)) x = np.cos(np.radians(r)) - np.sin(np.radians(dec)) * np.sin(np.radians(dec_out)) ra_out = ra + np.degrees(np.arctan2(y, x)) return ra_out, dec_out
[ "def", "translate", "(", "ra", ",", "dec", ",", "r", ",", "theta", ")", ":", "factor", "=", "np", ".", "sin", "(", "np", ".", "radians", "(", "dec", ")", ")", "*", "np", ".", "cos", "(", "np", ".", "radians", "(", "r", ")", ")", "factor", "...
Translate a given point a distance r in the (initial) direction theta, along a great circle. Parameters ---------- ra, dec : float The initial point of interest (degrees). r, theta : float The distance and initial direction to translate (degrees). Returns ------- ra, dec : float The translated position (degrees).
[ "Translate", "a", "given", "point", "a", "distance", "r", "in", "the", "(", "initial", ")", "direction", "theta", "along", "a", "great", "circle", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L176-L200
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
dist_rhumb
def dist_rhumb(ra1, dec1, ra2, dec2): """ Calculate the Rhumb line distance between two points [1]_. A Rhumb line between two points is one which follows a constant bearing. Parameters ---------- ra1, dec1, ra2, dec2 : float The position of the two points (degrees). Returns ------- dist : float The distance between the two points along a line of constant bearing. Notes ----- .. [1] `Rhumb line <https://en.wikipedia.org/wiki/Rhumb_line>`_ """ # verified against website to give correct results phi1 = np.radians(dec1) phi2 = np.radians(dec2) dphi = phi2 - phi1 lambda1 = np.radians(ra1) lambda2 = np.radians(ra2) dpsi = np.log(np.tan(np.pi / 4 + phi2 / 2) / np.tan(np.pi / 4 + phi1 / 2)) if dpsi < 1e-12: q = np.cos(phi1) else: q = dpsi / dphi dlambda = lambda2 - lambda1 if dlambda > np.pi: dlambda -= 2 * np.pi dist = np.hypot(dphi, q * dlambda) return np.degrees(dist)
python
def dist_rhumb(ra1, dec1, ra2, dec2): """ Calculate the Rhumb line distance between two points [1]_. A Rhumb line between two points is one which follows a constant bearing. Parameters ---------- ra1, dec1, ra2, dec2 : float The position of the two points (degrees). Returns ------- dist : float The distance between the two points along a line of constant bearing. Notes ----- .. [1] `Rhumb line <https://en.wikipedia.org/wiki/Rhumb_line>`_ """ # verified against website to give correct results phi1 = np.radians(dec1) phi2 = np.radians(dec2) dphi = phi2 - phi1 lambda1 = np.radians(ra1) lambda2 = np.radians(ra2) dpsi = np.log(np.tan(np.pi / 4 + phi2 / 2) / np.tan(np.pi / 4 + phi1 / 2)) if dpsi < 1e-12: q = np.cos(phi1) else: q = dpsi / dphi dlambda = lambda2 - lambda1 if dlambda > np.pi: dlambda -= 2 * np.pi dist = np.hypot(dphi, q * dlambda) return np.degrees(dist)
[ "def", "dist_rhumb", "(", "ra1", ",", "dec1", ",", "ra2", ",", "dec2", ")", ":", "# verified against website to give correct results", "phi1", "=", "np", ".", "radians", "(", "dec1", ")", "phi2", "=", "np", ".", "radians", "(", "dec2", ")", "dphi", "=", ...
Calculate the Rhumb line distance between two points [1]_. A Rhumb line between two points is one which follows a constant bearing. Parameters ---------- ra1, dec1, ra2, dec2 : float The position of the two points (degrees). Returns ------- dist : float The distance between the two points along a line of constant bearing. Notes ----- .. [1] `Rhumb line <https://en.wikipedia.org/wiki/Rhumb_line>`_
[ "Calculate", "the", "Rhumb", "line", "distance", "between", "two", "points", "[", "1", "]", "_", ".", "A", "Rhumb", "line", "between", "two", "points", "is", "one", "which", "follows", "a", "constant", "bearing", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L203-L237
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
bear_rhumb
def bear_rhumb(ra1, dec1, ra2, dec2): """ Calculate the bearing of point 2 from point 1 along a Rhumb line. The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180] Parameters ---------- ra1, dec1, ra2, dec2 : float The sky coordinates (degrees) of the two points. Returns ------- dist : float The bearing of point 2 from point 1 along a Rhumb line (degrees). """ # verified against website to give correct results phi1 = np.radians(dec1) phi2 = np.radians(dec2) lambda1 = np.radians(ra1) lambda2 = np.radians(ra2) dlambda = lambda2 - lambda1 dpsi = np.log(np.tan(np.pi / 4 + phi2 / 2) / np.tan(np.pi / 4 + phi1 / 2)) theta = np.arctan2(dlambda, dpsi) return np.degrees(theta)
python
def bear_rhumb(ra1, dec1, ra2, dec2): """ Calculate the bearing of point 2 from point 1 along a Rhumb line. The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180] Parameters ---------- ra1, dec1, ra2, dec2 : float The sky coordinates (degrees) of the two points. Returns ------- dist : float The bearing of point 2 from point 1 along a Rhumb line (degrees). """ # verified against website to give correct results phi1 = np.radians(dec1) phi2 = np.radians(dec2) lambda1 = np.radians(ra1) lambda2 = np.radians(ra2) dlambda = lambda2 - lambda1 dpsi = np.log(np.tan(np.pi / 4 + phi2 / 2) / np.tan(np.pi / 4 + phi1 / 2)) theta = np.arctan2(dlambda, dpsi) return np.degrees(theta)
[ "def", "bear_rhumb", "(", "ra1", ",", "dec1", ",", "ra2", ",", "dec2", ")", ":", "# verified against website to give correct results", "phi1", "=", "np", ".", "radians", "(", "dec1", ")", "phi2", "=", "np", ".", "radians", "(", "dec2", ")", "lambda1", "=",...
Calculate the bearing of point 2 from point 1 along a Rhumb line. The bearing is East of North and is in [0, 360), whereas position angle is also East of North but (-180,180] Parameters ---------- ra1, dec1, ra2, dec2 : float The sky coordinates (degrees) of the two points. Returns ------- dist : float The bearing of point 2 from point 1 along a Rhumb line (degrees).
[ "Calculate", "the", "bearing", "of", "point", "2", "from", "point", "1", "along", "a", "Rhumb", "line", ".", "The", "bearing", "is", "East", "of", "North", "and", "is", "in", "[", "0", "360", ")", "whereas", "position", "angle", "is", "also", "East", ...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L240-L265
train
PaulHancock/Aegean
AegeanTools/angle_tools.py
translate_rhumb
def translate_rhumb(ra, dec, r, theta): """ Translate a given point a distance r in the (initial) direction theta, along a Rhumb line. Parameters ---------- ra, dec : float The initial point of interest (degrees). r, theta : float The distance and initial direction to translate (degrees). Returns ------- ra, dec : float The translated position (degrees). """ # verified against website to give correct results # with the help of http://williams.best.vwh.net/avform.htm#Rhumb delta = np.radians(r) phi1 = np.radians(dec) phi2 = phi1 + delta * np.cos(np.radians(theta)) dphi = phi2 - phi1 if abs(dphi) < 1e-9: q = np.cos(phi1) else: dpsi = np.log(np.tan(np.pi / 4 + phi2 / 2) / np.tan(np.pi / 4 + phi1 / 2)) q = dphi / dpsi lambda1 = np.radians(ra) dlambda = delta * np.sin(np.radians(theta)) / q lambda2 = lambda1 + dlambda ra_out = np.degrees(lambda2) dec_out = np.degrees(phi2) return ra_out, dec_out
python
def translate_rhumb(ra, dec, r, theta): """ Translate a given point a distance r in the (initial) direction theta, along a Rhumb line. Parameters ---------- ra, dec : float The initial point of interest (degrees). r, theta : float The distance and initial direction to translate (degrees). Returns ------- ra, dec : float The translated position (degrees). """ # verified against website to give correct results # with the help of http://williams.best.vwh.net/avform.htm#Rhumb delta = np.radians(r) phi1 = np.radians(dec) phi2 = phi1 + delta * np.cos(np.radians(theta)) dphi = phi2 - phi1 if abs(dphi) < 1e-9: q = np.cos(phi1) else: dpsi = np.log(np.tan(np.pi / 4 + phi2 / 2) / np.tan(np.pi / 4 + phi1 / 2)) q = dphi / dpsi lambda1 = np.radians(ra) dlambda = delta * np.sin(np.radians(theta)) / q lambda2 = lambda1 + dlambda ra_out = np.degrees(lambda2) dec_out = np.degrees(phi2) return ra_out, dec_out
[ "def", "translate_rhumb", "(", "ra", ",", "dec", ",", "r", ",", "theta", ")", ":", "# verified against website to give correct results", "# with the help of http://williams.best.vwh.net/avform.htm#Rhumb", "delta", "=", "np", ".", "radians", "(", "r", ")", "phi1", "=", ...
Translate a given point a distance r in the (initial) direction theta, along a Rhumb line. Parameters ---------- ra, dec : float The initial point of interest (degrees). r, theta : float The distance and initial direction to translate (degrees). Returns ------- ra, dec : float The translated position (degrees).
[ "Translate", "a", "given", "point", "a", "distance", "r", "in", "the", "(", "initial", ")", "direction", "theta", "along", "a", "Rhumb", "line", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/angle_tools.py#L268-L303
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
galactic2fk5
def galactic2fk5(l, b): """ Convert galactic l/b to fk5 ra/dec Parameters ---------- l, b : float Galactic coordinates in radians. Returns ------- ra, dec : float FK5 ecliptic coordinates in radians. """ a = SkyCoord(l, b, unit=(u.radian, u.radian), frame='galactic') return a.fk5.ra.radian, a.fk5.dec.radian
python
def galactic2fk5(l, b): """ Convert galactic l/b to fk5 ra/dec Parameters ---------- l, b : float Galactic coordinates in radians. Returns ------- ra, dec : float FK5 ecliptic coordinates in radians. """ a = SkyCoord(l, b, unit=(u.radian, u.radian), frame='galactic') return a.fk5.ra.radian, a.fk5.dec.radian
[ "def", "galactic2fk5", "(", "l", ",", "b", ")", ":", "a", "=", "SkyCoord", "(", "l", ",", "b", ",", "unit", "=", "(", "u", ".", "radian", ",", "u", ".", "radian", ")", ",", "frame", "=", "'galactic'", ")", "return", "a", ".", "fk5", ".", "ra"...
Convert galactic l/b to fk5 ra/dec Parameters ---------- l, b : float Galactic coordinates in radians. Returns ------- ra, dec : float FK5 ecliptic coordinates in radians.
[ "Convert", "galactic", "l", "/", "b", "to", "fk5", "ra", "/", "dec" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L77-L92
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
mask_plane
def mask_plane(data, wcs, region, negate=False): """ Mask a 2d image (data) such that pixels within 'region' are set to nan. Parameters ---------- data : 2d-array Image array. wcs : astropy.wcs.WCS WCS for the image in question. region : :class:`AegeanTools.regions.Region` A region within which the image pixels will be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. Returns ------- masked : 2d-array The original array, but masked as required. """ # create an array but don't set the values (they are random) indexes = np.empty((data.shape[0]*data.shape[1], 2), dtype=int) # since I know exactly what the index array needs to look like i can construct # it faster than list comprehension would allow # we do this only once and then recycle it idx = np.array([(j, 0) for j in range(data.shape[1])]) j = data.shape[1] for i in range(data.shape[0]): idx[:, 1] = i indexes[i*j:(i+1)*j] = idx # put ALL the pixles into our vectorized functions and minimise our overheads ra, dec = wcs.wcs_pix2world(indexes, 1).transpose() bigmask = region.sky_within(ra, dec, degin=True) if not negate: bigmask = np.bitwise_not(bigmask) # rework our 1d list into a 2d array bigmask = bigmask.reshape(data.shape) # and apply the mask data[bigmask] = np.nan return data
python
def mask_plane(data, wcs, region, negate=False): """ Mask a 2d image (data) such that pixels within 'region' are set to nan. Parameters ---------- data : 2d-array Image array. wcs : astropy.wcs.WCS WCS for the image in question. region : :class:`AegeanTools.regions.Region` A region within which the image pixels will be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. Returns ------- masked : 2d-array The original array, but masked as required. """ # create an array but don't set the values (they are random) indexes = np.empty((data.shape[0]*data.shape[1], 2), dtype=int) # since I know exactly what the index array needs to look like i can construct # it faster than list comprehension would allow # we do this only once and then recycle it idx = np.array([(j, 0) for j in range(data.shape[1])]) j = data.shape[1] for i in range(data.shape[0]): idx[:, 1] = i indexes[i*j:(i+1)*j] = idx # put ALL the pixles into our vectorized functions and minimise our overheads ra, dec = wcs.wcs_pix2world(indexes, 1).transpose() bigmask = region.sky_within(ra, dec, degin=True) if not negate: bigmask = np.bitwise_not(bigmask) # rework our 1d list into a 2d array bigmask = bigmask.reshape(data.shape) # and apply the mask data[bigmask] = np.nan return data
[ "def", "mask_plane", "(", "data", ",", "wcs", ",", "region", ",", "negate", "=", "False", ")", ":", "# create an array but don't set the values (they are random)", "indexes", "=", "np", ".", "empty", "(", "(", "data", ".", "shape", "[", "0", "]", "*", "data"...
Mask a 2d image (data) such that pixels within 'region' are set to nan. Parameters ---------- data : 2d-array Image array. wcs : astropy.wcs.WCS WCS for the image in question. region : :class:`AegeanTools.regions.Region` A region within which the image pixels will be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. Returns ------- masked : 2d-array The original array, but masked as required.
[ "Mask", "a", "2d", "image", "(", "data", ")", "such", "that", "pixels", "within", "region", "are", "set", "to", "nan", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L95-L139
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
mask_file
def mask_file(regionfile, infile, outfile, negate=False): """ Created a masked version of file, using a region. Parameters ---------- regionfile : str A file which can be loaded as a :class:`AegeanTools.regions.Region`. The image will be masked according to this region. infile : str Input FITS image. outfile : str Output FITS image. negate : bool If True then pixels *outside* the region are masked. Default = False. See Also -------- :func:`AegeanTools.MIMAS.mask_plane` """ # Check that the input file is accessible and then open it if not os.path.exists(infile): raise AssertionError("Cannot locate fits file {0}".format(infile)) im = pyfits.open(infile) if not os.path.exists(regionfile): raise AssertionError("Cannot locate region file {0}".format(regionfile)) region = Region.load(regionfile) try: wcs = pywcs.WCS(im[0].header, naxis=2) except: # TODO: figure out what error is being thrown wcs = pywcs.WCS(str(im[0].header), naxis=2) if len(im[0].data.shape) > 2: data = np.squeeze(im[0].data) else: data = im[0].data print(data.shape) if len(data.shape) == 3: for plane in range(data.shape[0]): mask_plane(data[plane], wcs, region, negate) else: mask_plane(data, wcs, region, negate) im[0].data = data im.writeto(outfile, overwrite=True) logging.info("Wrote {0}".format(outfile)) return
python
def mask_file(regionfile, infile, outfile, negate=False): """ Created a masked version of file, using a region. Parameters ---------- regionfile : str A file which can be loaded as a :class:`AegeanTools.regions.Region`. The image will be masked according to this region. infile : str Input FITS image. outfile : str Output FITS image. negate : bool If True then pixels *outside* the region are masked. Default = False. See Also -------- :func:`AegeanTools.MIMAS.mask_plane` """ # Check that the input file is accessible and then open it if not os.path.exists(infile): raise AssertionError("Cannot locate fits file {0}".format(infile)) im = pyfits.open(infile) if not os.path.exists(regionfile): raise AssertionError("Cannot locate region file {0}".format(regionfile)) region = Region.load(regionfile) try: wcs = pywcs.WCS(im[0].header, naxis=2) except: # TODO: figure out what error is being thrown wcs = pywcs.WCS(str(im[0].header), naxis=2) if len(im[0].data.shape) > 2: data = np.squeeze(im[0].data) else: data = im[0].data print(data.shape) if len(data.shape) == 3: for plane in range(data.shape[0]): mask_plane(data[plane], wcs, region, negate) else: mask_plane(data, wcs, region, negate) im[0].data = data im.writeto(outfile, overwrite=True) logging.info("Wrote {0}".format(outfile)) return
[ "def", "mask_file", "(", "regionfile", ",", "infile", ",", "outfile", ",", "negate", "=", "False", ")", ":", "# Check that the input file is accessible and then open it", "if", "not", "os", ".", "path", ".", "exists", "(", "infile", ")", ":", "raise", "Assertion...
Created a masked version of file, using a region. Parameters ---------- regionfile : str A file which can be loaded as a :class:`AegeanTools.regions.Region`. The image will be masked according to this region. infile : str Input FITS image. outfile : str Output FITS image. negate : bool If True then pixels *outside* the region are masked. Default = False. See Also -------- :func:`AegeanTools.MIMAS.mask_plane`
[ "Created", "a", "masked", "version", "of", "file", "using", "a", "region", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L142-L191
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
mask_table
def mask_table(region, table, negate=False, racol='ra', deccol='dec'): """ Apply a given mask (region) to the table, removing all the rows with ra/dec inside the region If negate=False then remove the rows with ra/dec outside the region. Parameters ---------- region : :class:`AegeanTools.regions.Region` Region to mask. table : Astropy.table.Table Table to be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. racol, deccol : str The name of the columns in `table` that should be interpreted as ra and dec. Default = 'ra', 'dec' Returns ------- masked : Astropy.table.Table A view of the given table which has been masked. """ inside = region.sky_within(table[racol], table[deccol], degin=True) if not negate: mask = np.bitwise_not(inside) else: mask = inside return table[mask]
python
def mask_table(region, table, negate=False, racol='ra', deccol='dec'): """ Apply a given mask (region) to the table, removing all the rows with ra/dec inside the region If negate=False then remove the rows with ra/dec outside the region. Parameters ---------- region : :class:`AegeanTools.regions.Region` Region to mask. table : Astropy.table.Table Table to be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. racol, deccol : str The name of the columns in `table` that should be interpreted as ra and dec. Default = 'ra', 'dec' Returns ------- masked : Astropy.table.Table A view of the given table which has been masked. """ inside = region.sky_within(table[racol], table[deccol], degin=True) if not negate: mask = np.bitwise_not(inside) else: mask = inside return table[mask]
[ "def", "mask_table", "(", "region", ",", "table", ",", "negate", "=", "False", ",", "racol", "=", "'ra'", ",", "deccol", "=", "'dec'", ")", ":", "inside", "=", "region", ".", "sky_within", "(", "table", "[", "racol", "]", ",", "table", "[", "deccol",...
Apply a given mask (region) to the table, removing all the rows with ra/dec inside the region If negate=False then remove the rows with ra/dec outside the region. Parameters ---------- region : :class:`AegeanTools.regions.Region` Region to mask. table : Astropy.table.Table Table to be masked. negate : bool If True then pixels *outside* the region are masked. Default = False. racol, deccol : str The name of the columns in `table` that should be interpreted as ra and dec. Default = 'ra', 'dec' Returns ------- masked : Astropy.table.Table A view of the given table which has been masked.
[ "Apply", "a", "given", "mask", "(", "region", ")", "to", "the", "table", "removing", "all", "the", "rows", "with", "ra", "/", "dec", "inside", "the", "region", "If", "negate", "=", "False", "then", "remove", "the", "rows", "with", "ra", "/", "dec", "...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L194-L226
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
mask_catalog
def mask_catalog(regionfile, infile, outfile, negate=False, racol='ra', deccol='dec'): """ Apply a region file as a mask to a catalog, removing all the rows with ra/dec inside the region If negate=False then remove the rows with ra/dec outside the region. Parameters ---------- regionfile : str A file which can be loaded as a :class:`AegeanTools.regions.Region`. The catalogue will be masked according to this region. infile : str Input catalogue. outfile : str Output catalogue. negate : bool If True then pixels *outside* the region are masked. Default = False. racol, deccol : str The name of the columns in `table` that should be interpreted as ra and dec. Default = 'ra', 'dec' See Also -------- :func:`AegeanTools.MIMAS.mask_table` :func:`AegeanTools.catalogs.load_table` """ logging.info("Loading region from {0}".format(regionfile)) region = Region.load(regionfile) logging.info("Loading catalog from {0}".format(infile)) table = load_table(infile) masked_table = mask_table(region, table, negate=negate, racol=racol, deccol=deccol) write_table(masked_table, outfile) return
python
def mask_catalog(regionfile, infile, outfile, negate=False, racol='ra', deccol='dec'): """ Apply a region file as a mask to a catalog, removing all the rows with ra/dec inside the region If negate=False then remove the rows with ra/dec outside the region. Parameters ---------- regionfile : str A file which can be loaded as a :class:`AegeanTools.regions.Region`. The catalogue will be masked according to this region. infile : str Input catalogue. outfile : str Output catalogue. negate : bool If True then pixels *outside* the region are masked. Default = False. racol, deccol : str The name of the columns in `table` that should be interpreted as ra and dec. Default = 'ra', 'dec' See Also -------- :func:`AegeanTools.MIMAS.mask_table` :func:`AegeanTools.catalogs.load_table` """ logging.info("Loading region from {0}".format(regionfile)) region = Region.load(regionfile) logging.info("Loading catalog from {0}".format(infile)) table = load_table(infile) masked_table = mask_table(region, table, negate=negate, racol=racol, deccol=deccol) write_table(masked_table, outfile) return
[ "def", "mask_catalog", "(", "regionfile", ",", "infile", ",", "outfile", ",", "negate", "=", "False", ",", "racol", "=", "'ra'", ",", "deccol", "=", "'dec'", ")", ":", "logging", ".", "info", "(", "\"Loading region from {0}\"", ".", "format", "(", "regionf...
Apply a region file as a mask to a catalog, removing all the rows with ra/dec inside the region If negate=False then remove the rows with ra/dec outside the region. Parameters ---------- regionfile : str A file which can be loaded as a :class:`AegeanTools.regions.Region`. The catalogue will be masked according to this region. infile : str Input catalogue. outfile : str Output catalogue. negate : bool If True then pixels *outside* the region are masked. Default = False. racol, deccol : str The name of the columns in `table` that should be interpreted as ra and dec. Default = 'ra', 'dec' See Also -------- :func:`AegeanTools.MIMAS.mask_table` :func:`AegeanTools.catalogs.load_table`
[ "Apply", "a", "region", "file", "as", "a", "mask", "to", "a", "catalog", "removing", "all", "the", "rows", "with", "ra", "/", "dec", "inside", "the", "region", "If", "negate", "=", "False", "then", "remove", "the", "rows", "with", "ra", "/", "dec", "...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L229-L267
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
mim2reg
def mim2reg(mimfile, regfile): """ Convert a MIMAS region (.mim) file into a DS9 region (.reg) file. Parameters ---------- mimfile : str Input file in MIMAS format. regfile : str Output file. """ region = Region.load(mimfile) region.write_reg(regfile) logging.info("Converted {0} -> {1}".format(mimfile, regfile)) return
python
def mim2reg(mimfile, regfile): """ Convert a MIMAS region (.mim) file into a DS9 region (.reg) file. Parameters ---------- mimfile : str Input file in MIMAS format. regfile : str Output file. """ region = Region.load(mimfile) region.write_reg(regfile) logging.info("Converted {0} -> {1}".format(mimfile, regfile)) return
[ "def", "mim2reg", "(", "mimfile", ",", "regfile", ")", ":", "region", "=", "Region", ".", "load", "(", "mimfile", ")", "region", ".", "write_reg", "(", "regfile", ")", "logging", ".", "info", "(", "\"Converted {0} -> {1}\"", ".", "format", "(", "mimfile", ...
Convert a MIMAS region (.mim) file into a DS9 region (.reg) file. Parameters ---------- mimfile : str Input file in MIMAS format. regfile : str Output file.
[ "Convert", "a", "MIMAS", "region", "(", ".", "mim", ")", "file", "into", "a", "DS9", "region", "(", ".", "reg", ")", "file", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L270-L286
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
mim2fits
def mim2fits(mimfile, fitsfile): """ Convert a MIMAS region (.mim) file into a MOC region (.fits) file. Parameters ---------- mimfile : str Input file in MIMAS format. fitsfile : str Output file. """ region = Region.load(mimfile) region.write_fits(fitsfile, moctool='MIMAS {0}-{1}'.format(__version__, __date__)) logging.info("Converted {0} -> {1}".format(mimfile, fitsfile)) return
python
def mim2fits(mimfile, fitsfile): """ Convert a MIMAS region (.mim) file into a MOC region (.fits) file. Parameters ---------- mimfile : str Input file in MIMAS format. fitsfile : str Output file. """ region = Region.load(mimfile) region.write_fits(fitsfile, moctool='MIMAS {0}-{1}'.format(__version__, __date__)) logging.info("Converted {0} -> {1}".format(mimfile, fitsfile)) return
[ "def", "mim2fits", "(", "mimfile", ",", "fitsfile", ")", ":", "region", "=", "Region", ".", "load", "(", "mimfile", ")", "region", ".", "write_fits", "(", "fitsfile", ",", "moctool", "=", "'MIMAS {0}-{1}'", ".", "format", "(", "__version__", ",", "__date__...
Convert a MIMAS region (.mim) file into a MOC region (.fits) file. Parameters ---------- mimfile : str Input file in MIMAS format. fitsfile : str Output file.
[ "Convert", "a", "MIMAS", "region", "(", ".", "mim", ")", "file", "into", "a", "MOC", "region", "(", ".", "fits", ")", "file", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L289-L304
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
box2poly
def box2poly(line): """ Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box Parameters ---------- line : str A string containing a DS9 region command for a box. Returns ------- poly : [ra, dec, ...] The corners of the box in clockwise order from top left. """ words = re.split('[(\s,)]', line) ra = words[1] dec = words[2] width = words[3] height = words[4] if ":" in ra: ra = Angle(ra, unit=u.hour) else: ra = Angle(ra, unit=u.degree) dec = Angle(dec, unit=u.degree) width = Angle(float(width[:-1])/2, unit=u.arcsecond) # strip the " height = Angle(float(height[:-1])/2, unit=u.arcsecond) # strip the " center = SkyCoord(ra, dec) tl = center.ra.degree+width.degree, center.dec.degree+height.degree tr = center.ra.degree-width.degree, center.dec.degree+height.degree bl = center.ra.degree+width.degree, center.dec.degree-height.degree br = center.ra.degree-width.degree, center.dec.degree-height.degree return np.ravel([tl, tr, br, bl]).tolist()
python
def box2poly(line): """ Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box Parameters ---------- line : str A string containing a DS9 region command for a box. Returns ------- poly : [ra, dec, ...] The corners of the box in clockwise order from top left. """ words = re.split('[(\s,)]', line) ra = words[1] dec = words[2] width = words[3] height = words[4] if ":" in ra: ra = Angle(ra, unit=u.hour) else: ra = Angle(ra, unit=u.degree) dec = Angle(dec, unit=u.degree) width = Angle(float(width[:-1])/2, unit=u.arcsecond) # strip the " height = Angle(float(height[:-1])/2, unit=u.arcsecond) # strip the " center = SkyCoord(ra, dec) tl = center.ra.degree+width.degree, center.dec.degree+height.degree tr = center.ra.degree-width.degree, center.dec.degree+height.degree bl = center.ra.degree+width.degree, center.dec.degree-height.degree br = center.ra.degree-width.degree, center.dec.degree-height.degree return np.ravel([tl, tr, br, bl]).tolist()
[ "def", "box2poly", "(", "line", ")", ":", "words", "=", "re", ".", "split", "(", "'[(\\s,)]'", ",", "line", ")", "ra", "=", "words", "[", "1", "]", "dec", "=", "words", "[", "2", "]", "width", "=", "words", "[", "3", "]", "height", "=", "words"...
Convert a string that describes a box in ds9 format, into a polygon that is given by the corners of the box Parameters ---------- line : str A string containing a DS9 region command for a box. Returns ------- poly : [ra, dec, ...] The corners of the box in clockwise order from top left.
[ "Convert", "a", "string", "that", "describes", "a", "box", "in", "ds9", "format", "into", "a", "polygon", "that", "is", "given", "by", "the", "corners", "of", "the", "box" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L307-L338
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
circle2circle
def circle2circle(line): """ Parse a string that describes a circle in ds9 format. Parameters ---------- line : str A string containing a DS9 region command for a circle. Returns ------- circle : [ra, dec, radius] The center and radius of the circle. """ words = re.split('[(,\s)]', line) ra = words[1] dec = words[2] radius = words[3][:-1] # strip the " if ":" in ra: ra = Angle(ra, unit=u.hour) else: ra = Angle(ra, unit=u.degree) dec = Angle(dec, unit=u.degree) radius = Angle(radius, unit=u.arcsecond) return [ra.degree, dec.degree, radius.degree]
python
def circle2circle(line): """ Parse a string that describes a circle in ds9 format. Parameters ---------- line : str A string containing a DS9 region command for a circle. Returns ------- circle : [ra, dec, radius] The center and radius of the circle. """ words = re.split('[(,\s)]', line) ra = words[1] dec = words[2] radius = words[3][:-1] # strip the " if ":" in ra: ra = Angle(ra, unit=u.hour) else: ra = Angle(ra, unit=u.degree) dec = Angle(dec, unit=u.degree) radius = Angle(radius, unit=u.arcsecond) return [ra.degree, dec.degree, radius.degree]
[ "def", "circle2circle", "(", "line", ")", ":", "words", "=", "re", ".", "split", "(", "'[(,\\s)]'", ",", "line", ")", "ra", "=", "words", "[", "1", "]", "dec", "=", "words", "[", "2", "]", "radius", "=", "words", "[", "3", "]", "[", ":", "-", ...
Parse a string that describes a circle in ds9 format. Parameters ---------- line : str A string containing a DS9 region command for a circle. Returns ------- circle : [ra, dec, radius] The center and radius of the circle.
[ "Parse", "a", "string", "that", "describes", "a", "circle", "in", "ds9", "format", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L341-L365
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
poly2poly
def poly2poly(line): """ Parse a string of text containing a DS9 description of a polygon. This function works but is not very robust due to the constraints of healpy. Parameters ---------- line : str A string containing a DS9 region command for a polygon. Returns ------- poly : [ra, dec, ...] The coordinates of the polygon. """ words = re.split('[(\s,)]', line) ras = np.array(words[1::2]) decs = np.array(words[2::2]) coords = [] for ra, dec in zip(ras, decs): if ra.strip() == '' or dec.strip() == '': continue if ":" in ra: pos = SkyCoord(Angle(ra, unit=u.hour), Angle(dec, unit=u.degree)) else: pos = SkyCoord(Angle(ra, unit=u.degree), Angle(dec, unit=u.degree)) # only add this point if it is some distance from the previous one coords.extend([pos.ra.degree, pos.dec.degree]) return coords
python
def poly2poly(line): """ Parse a string of text containing a DS9 description of a polygon. This function works but is not very robust due to the constraints of healpy. Parameters ---------- line : str A string containing a DS9 region command for a polygon. Returns ------- poly : [ra, dec, ...] The coordinates of the polygon. """ words = re.split('[(\s,)]', line) ras = np.array(words[1::2]) decs = np.array(words[2::2]) coords = [] for ra, dec in zip(ras, decs): if ra.strip() == '' or dec.strip() == '': continue if ":" in ra: pos = SkyCoord(Angle(ra, unit=u.hour), Angle(dec, unit=u.degree)) else: pos = SkyCoord(Angle(ra, unit=u.degree), Angle(dec, unit=u.degree)) # only add this point if it is some distance from the previous one coords.extend([pos.ra.degree, pos.dec.degree]) return coords
[ "def", "poly2poly", "(", "line", ")", ":", "words", "=", "re", ".", "split", "(", "'[(\\s,)]'", ",", "line", ")", "ras", "=", "np", ".", "array", "(", "words", "[", "1", ":", ":", "2", "]", ")", "decs", "=", "np", ".", "array", "(", "words", ...
Parse a string of text containing a DS9 description of a polygon. This function works but is not very robust due to the constraints of healpy. Parameters ---------- line : str A string containing a DS9 region command for a polygon. Returns ------- poly : [ra, dec, ...] The coordinates of the polygon.
[ "Parse", "a", "string", "of", "text", "containing", "a", "DS9", "description", "of", "a", "polygon", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L368-L397
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
reg2mim
def reg2mim(regfile, mimfile, maxdepth): """ Parse a DS9 region file and write a MIMAS region (.mim) file. Parameters ---------- regfile : str DS9 region (.reg) file. mimfile : str MIMAS region (.mim) file. maxdepth : str Depth/resolution of the region file. """ logging.info("Reading regions from {0}".format(regfile)) lines = (l for l in open(regfile, 'r') if not l.startswith('#')) poly = [] circles = [] for line in lines: if line.startswith('box'): poly.append(box2poly(line)) elif line.startswith('circle'): circles.append(circle2circle(line)) elif line.startswith('polygon'): logging.warning("Polygons break a lot, but I'll try this one anyway.") poly.append(poly2poly(line)) else: logging.warning("Not sure what to do with {0}".format(line[:-1])) container = Dummy(maxdepth=maxdepth) container.include_circles = circles container.include_polygons = poly region = combine_regions(container) save_region(region, mimfile) return
python
def reg2mim(regfile, mimfile, maxdepth): """ Parse a DS9 region file and write a MIMAS region (.mim) file. Parameters ---------- regfile : str DS9 region (.reg) file. mimfile : str MIMAS region (.mim) file. maxdepth : str Depth/resolution of the region file. """ logging.info("Reading regions from {0}".format(regfile)) lines = (l for l in open(regfile, 'r') if not l.startswith('#')) poly = [] circles = [] for line in lines: if line.startswith('box'): poly.append(box2poly(line)) elif line.startswith('circle'): circles.append(circle2circle(line)) elif line.startswith('polygon'): logging.warning("Polygons break a lot, but I'll try this one anyway.") poly.append(poly2poly(line)) else: logging.warning("Not sure what to do with {0}".format(line[:-1])) container = Dummy(maxdepth=maxdepth) container.include_circles = circles container.include_polygons = poly region = combine_regions(container) save_region(region, mimfile) return
[ "def", "reg2mim", "(", "regfile", ",", "mimfile", ",", "maxdepth", ")", ":", "logging", ".", "info", "(", "\"Reading regions from {0}\"", ".", "format", "(", "regfile", ")", ")", "lines", "=", "(", "l", "for", "l", "in", "open", "(", "regfile", ",", "'...
Parse a DS9 region file and write a MIMAS region (.mim) file. Parameters ---------- regfile : str DS9 region (.reg) file. mimfile : str MIMAS region (.mim) file. maxdepth : str Depth/resolution of the region file.
[ "Parse", "a", "DS9", "region", "file", "and", "write", "a", "MIMAS", "region", "(", ".", "mim", ")", "file", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L400-L436
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
combine_regions
def combine_regions(container): """ Return a region that is the combination of those specified in the container. The container is typically a results instance that comes from argparse. Order of construction is: add regions, subtract regions, add circles, subtract circles, add polygons, subtract polygons. Parameters ---------- container : :class:`AegeanTools.MIMAS.Dummy` The regions to be combined. Returns ------- region : :class:`AegeanTools.regions.Region` The constructed region. """ # create empty region region = Region(container.maxdepth) # add/rem all the regions from files for r in container.add_region: logging.info("adding region from {0}".format(r)) r2 = Region.load(r[0]) region.union(r2) for r in container.rem_region: logging.info("removing region from {0}".format(r)) r2 = Region.load(r[0]) region.without(r2) # add circles if len(container.include_circles) > 0: for c in container.include_circles: circles = np.radians(np.array(c)) if container.galactic: l, b, radii = circles.reshape(3, circles.shape[0]//3) ras, decs = galactic2fk5(l, b) else: ras, decs, radii = circles.reshape(3, circles.shape[0]//3) region.add_circles(ras, decs, radii) # remove circles if len(container.exclude_circles) > 0: for c in container.exclude_circles: r2 = Region(container.maxdepth) circles = np.radians(np.array(c)) if container.galactic: l, b, radii = circles.reshape(3, circles.shape[0]//3) ras, decs = galactic2fk5(l, b) else: ras, decs, radii = circles.reshape(3, circles.shape[0]//3) r2.add_circles(ras, decs, radii) region.without(r2) # add polygons if len(container.include_polygons) > 0: for p in container.include_polygons: poly = np.radians(np.array(p)) poly = poly.reshape((poly.shape[0]//2, 2)) region.add_poly(poly) # remove polygons if len(container.exclude_polygons) > 0: for p in container.include_polygons: poly = np.array(np.radians(p)) r2 = Region(container.maxdepth) r2.add_poly(poly) region.without(r2) return region
python
def combine_regions(container): """ Return a region that is the combination of those specified in the container. The container is typically a results instance that comes from argparse. Order of construction is: add regions, subtract regions, add circles, subtract circles, add polygons, subtract polygons. Parameters ---------- container : :class:`AegeanTools.MIMAS.Dummy` The regions to be combined. Returns ------- region : :class:`AegeanTools.regions.Region` The constructed region. """ # create empty region region = Region(container.maxdepth) # add/rem all the regions from files for r in container.add_region: logging.info("adding region from {0}".format(r)) r2 = Region.load(r[0]) region.union(r2) for r in container.rem_region: logging.info("removing region from {0}".format(r)) r2 = Region.load(r[0]) region.without(r2) # add circles if len(container.include_circles) > 0: for c in container.include_circles: circles = np.radians(np.array(c)) if container.galactic: l, b, radii = circles.reshape(3, circles.shape[0]//3) ras, decs = galactic2fk5(l, b) else: ras, decs, radii = circles.reshape(3, circles.shape[0]//3) region.add_circles(ras, decs, radii) # remove circles if len(container.exclude_circles) > 0: for c in container.exclude_circles: r2 = Region(container.maxdepth) circles = np.radians(np.array(c)) if container.galactic: l, b, radii = circles.reshape(3, circles.shape[0]//3) ras, decs = galactic2fk5(l, b) else: ras, decs, radii = circles.reshape(3, circles.shape[0]//3) r2.add_circles(ras, decs, radii) region.without(r2) # add polygons if len(container.include_polygons) > 0: for p in container.include_polygons: poly = np.radians(np.array(p)) poly = poly.reshape((poly.shape[0]//2, 2)) region.add_poly(poly) # remove polygons if len(container.exclude_polygons) > 0: for p in container.include_polygons: poly = np.array(np.radians(p)) r2 = Region(container.maxdepth) r2.add_poly(poly) region.without(r2) return region
[ "def", "combine_regions", "(", "container", ")", ":", "# create empty region", "region", "=", "Region", "(", "container", ".", "maxdepth", ")", "# add/rem all the regions from files", "for", "r", "in", "container", ".", "add_region", ":", "logging", ".", "info", "...
Return a region that is the combination of those specified in the container. The container is typically a results instance that comes from argparse. Order of construction is: add regions, subtract regions, add circles, subtract circles, add polygons, subtract polygons. Parameters ---------- container : :class:`AegeanTools.MIMAS.Dummy` The regions to be combined. Returns ------- region : :class:`AegeanTools.regions.Region` The constructed region.
[ "Return", "a", "region", "that", "is", "the", "combination", "of", "those", "specified", "in", "the", "container", ".", "The", "container", "is", "typically", "a", "results", "instance", "that", "comes", "from", "argparse", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L439-L511
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
intersect_regions
def intersect_regions(flist): """ Construct a region which is the intersection of all regions described in the given list of file names. Parameters ---------- flist : list A list of region filenames. Returns ------- region : :class:`AegeanTools.regions.Region` The intersection of all regions, possibly empty. """ if len(flist) < 2: raise Exception("Require at least two regions to perform intersection") a = Region.load(flist[0]) for b in [Region.load(f) for f in flist[1:]]: a.intersect(b) return a
python
def intersect_regions(flist): """ Construct a region which is the intersection of all regions described in the given list of file names. Parameters ---------- flist : list A list of region filenames. Returns ------- region : :class:`AegeanTools.regions.Region` The intersection of all regions, possibly empty. """ if len(flist) < 2: raise Exception("Require at least two regions to perform intersection") a = Region.load(flist[0]) for b in [Region.load(f) for f in flist[1:]]: a.intersect(b) return a
[ "def", "intersect_regions", "(", "flist", ")", ":", "if", "len", "(", "flist", ")", "<", "2", ":", "raise", "Exception", "(", "\"Require at least two regions to perform intersection\"", ")", "a", "=", "Region", ".", "load", "(", "flist", "[", "0", "]", ")", ...
Construct a region which is the intersection of all regions described in the given list of file names. Parameters ---------- flist : list A list of region filenames. Returns ------- region : :class:`AegeanTools.regions.Region` The intersection of all regions, possibly empty.
[ "Construct", "a", "region", "which", "is", "the", "intersection", "of", "all", "regions", "described", "in", "the", "given", "list", "of", "file", "names", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L514-L534
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
save_region
def save_region(region, filename): """ Save the given region to a file Parameters ---------- region : :class:`AegeanTools.regions.Region` A region. filename : str Output file name. """ region.save(filename) logging.info("Wrote {0}".format(filename)) return
python
def save_region(region, filename): """ Save the given region to a file Parameters ---------- region : :class:`AegeanTools.regions.Region` A region. filename : str Output file name. """ region.save(filename) logging.info("Wrote {0}".format(filename)) return
[ "def", "save_region", "(", "region", ",", "filename", ")", ":", "region", ".", "save", "(", "filename", ")", "logging", ".", "info", "(", "\"Wrote {0}\"", ".", "format", "(", "filename", ")", ")", "return" ]
Save the given region to a file Parameters ---------- region : :class:`AegeanTools.regions.Region` A region. filename : str Output file name.
[ "Save", "the", "given", "region", "to", "a", "file" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L537-L551
train
PaulHancock/Aegean
AegeanTools/MIMAS.py
save_as_image
def save_as_image(region, filename): """ Convert a MIMAS region (.mim) file into a image (eg .png) Parameters ---------- region : :class:`AegeanTools.regions.Region` Region of interest. filename : str Output filename. """ import healpy as hp pixels = list(region.get_demoted()) order = region.maxdepth m = np.arange(hp.nside2npix(2**order)) m[:] = 0 m[pixels] = 1 hp.write_map(filename, m, nest=True, coord='C') return
python
def save_as_image(region, filename): """ Convert a MIMAS region (.mim) file into a image (eg .png) Parameters ---------- region : :class:`AegeanTools.regions.Region` Region of interest. filename : str Output filename. """ import healpy as hp pixels = list(region.get_demoted()) order = region.maxdepth m = np.arange(hp.nside2npix(2**order)) m[:] = 0 m[pixels] = 1 hp.write_map(filename, m, nest=True, coord='C') return
[ "def", "save_as_image", "(", "region", ",", "filename", ")", ":", "import", "healpy", "as", "hp", "pixels", "=", "list", "(", "region", ".", "get_demoted", "(", ")", ")", "order", "=", "region", ".", "maxdepth", "m", "=", "np", ".", "arange", "(", "h...
Convert a MIMAS region (.mim) file into a image (eg .png) Parameters ---------- region : :class:`AegeanTools.regions.Region` Region of interest. filename : str Output filename.
[ "Convert", "a", "MIMAS", "region", "(", ".", "mim", ")", "file", "into", "a", "image", "(", "eg", ".", "png", ")" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/MIMAS.py#L554-L573
train
PaulHancock/Aegean
AegeanTools/fits_image.py
get_pixinfo
def get_pixinfo(header): """ Return some pixel information based on the given hdu header pixarea - the area of a single pixel in deg2 pixscale - the side lengths of a pixel (assuming they are square) Parameters ---------- header : HDUHeader or dict FITS header information Returns ------- pixarea : float The are of a single pixel at the reference location, in square degrees. pixscale : (float, float) The pixel scale in degrees, at the reference location. Notes ----- The reference location is not always at the image center, and the pixel scale/area may change over the image, depending on the projection. """ if all(a in header for a in ["CDELT1", "CDELT2"]): pixarea = abs(header["CDELT1"]*header["CDELT2"]) pixscale = (header["CDELT1"], header["CDELT2"]) elif all(a in header for a in ["CD1_1", "CD1_2", "CD2_1", "CD2_2"]): pixarea = abs(header["CD1_1"]*header["CD2_2"] - header["CD1_2"]*header["CD2_1"]) pixscale = (header["CD1_1"], header["CD2_2"]) if not (header["CD1_2"] == 0 and header["CD2_1"] == 0): log.warning("Pixels don't appear to be square -> pixscale is wrong") elif all(a in header for a in ["CD1_1", "CD2_2"]): pixarea = abs(header["CD1_1"]*header["CD2_2"]) pixscale = (header["CD1_1"], header["CD2_2"]) else: log.critical("cannot determine pixel area, using zero EVEN THOUGH THIS IS WRONG!") pixarea = 0 pixscale = (0, 0) return pixarea, pixscale
python
def get_pixinfo(header): """ Return some pixel information based on the given hdu header pixarea - the area of a single pixel in deg2 pixscale - the side lengths of a pixel (assuming they are square) Parameters ---------- header : HDUHeader or dict FITS header information Returns ------- pixarea : float The are of a single pixel at the reference location, in square degrees. pixscale : (float, float) The pixel scale in degrees, at the reference location. Notes ----- The reference location is not always at the image center, and the pixel scale/area may change over the image, depending on the projection. """ if all(a in header for a in ["CDELT1", "CDELT2"]): pixarea = abs(header["CDELT1"]*header["CDELT2"]) pixscale = (header["CDELT1"], header["CDELT2"]) elif all(a in header for a in ["CD1_1", "CD1_2", "CD2_1", "CD2_2"]): pixarea = abs(header["CD1_1"]*header["CD2_2"] - header["CD1_2"]*header["CD2_1"]) pixscale = (header["CD1_1"], header["CD2_2"]) if not (header["CD1_2"] == 0 and header["CD2_1"] == 0): log.warning("Pixels don't appear to be square -> pixscale is wrong") elif all(a in header for a in ["CD1_1", "CD2_2"]): pixarea = abs(header["CD1_1"]*header["CD2_2"]) pixscale = (header["CD1_1"], header["CD2_2"]) else: log.critical("cannot determine pixel area, using zero EVEN THOUGH THIS IS WRONG!") pixarea = 0 pixscale = (0, 0) return pixarea, pixscale
[ "def", "get_pixinfo", "(", "header", ")", ":", "if", "all", "(", "a", "in", "header", "for", "a", "in", "[", "\"CDELT1\"", ",", "\"CDELT2\"", "]", ")", ":", "pixarea", "=", "abs", "(", "header", "[", "\"CDELT1\"", "]", "*", "header", "[", "\"CDELT2\"...
Return some pixel information based on the given hdu header pixarea - the area of a single pixel in deg2 pixscale - the side lengths of a pixel (assuming they are square) Parameters ---------- header : HDUHeader or dict FITS header information Returns ------- pixarea : float The are of a single pixel at the reference location, in square degrees. pixscale : (float, float) The pixel scale in degrees, at the reference location. Notes ----- The reference location is not always at the image center, and the pixel scale/area may change over the image, depending on the projection.
[ "Return", "some", "pixel", "information", "based", "on", "the", "given", "hdu", "header", "pixarea", "-", "the", "area", "of", "a", "single", "pixel", "in", "deg2", "pixscale", "-", "the", "side", "lengths", "of", "a", "pixel", "(", "assuming", "they", "...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L20-L60
train
PaulHancock/Aegean
AegeanTools/fits_image.py
get_beam
def get_beam(header): """ Create a :class:`AegeanTools.fits_image.Beam` object from a fits header. BPA may be missing but will be assumed to be zero. if BMAJ or BMIN are missing then return None instead of a beam object. Parameters ---------- header : HDUHeader The fits header. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` Beam object, with a, b, and pa in degrees. """ if "BPA" not in header: log.warning("BPA not present in fits header, using 0") bpa = 0 else: bpa = header["BPA"] if "BMAJ" not in header: log.warning("BMAJ not present in fits header.") bmaj = None else: bmaj = header["BMAJ"] if "BMIN" not in header: log.warning("BMIN not present in fits header.") bmin = None else: bmin = header["BMIN"] if None in [bmaj, bmin, bpa]: return None beam = Beam(bmaj, bmin, bpa) return beam
python
def get_beam(header): """ Create a :class:`AegeanTools.fits_image.Beam` object from a fits header. BPA may be missing but will be assumed to be zero. if BMAJ or BMIN are missing then return None instead of a beam object. Parameters ---------- header : HDUHeader The fits header. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` Beam object, with a, b, and pa in degrees. """ if "BPA" not in header: log.warning("BPA not present in fits header, using 0") bpa = 0 else: bpa = header["BPA"] if "BMAJ" not in header: log.warning("BMAJ not present in fits header.") bmaj = None else: bmaj = header["BMAJ"] if "BMIN" not in header: log.warning("BMIN not present in fits header.") bmin = None else: bmin = header["BMIN"] if None in [bmaj, bmin, bpa]: return None beam = Beam(bmaj, bmin, bpa) return beam
[ "def", "get_beam", "(", "header", ")", ":", "if", "\"BPA\"", "not", "in", "header", ":", "log", ".", "warning", "(", "\"BPA not present in fits header, using 0\"", ")", "bpa", "=", "0", "else", ":", "bpa", "=", "header", "[", "\"BPA\"", "]", "if", "\"BMAJ\...
Create a :class:`AegeanTools.fits_image.Beam` object from a fits header. BPA may be missing but will be assumed to be zero. if BMAJ or BMIN are missing then return None instead of a beam object. Parameters ---------- header : HDUHeader The fits header. Returns ------- beam : :class:`AegeanTools.fits_image.Beam` Beam object, with a, b, and pa in degrees.
[ "Create", "a", ":", "class", ":", "AegeanTools", ".", "fits_image", ".", "Beam", "object", "from", "a", "fits", "header", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L63-L102
train
PaulHancock/Aegean
AegeanTools/fits_image.py
fix_aips_header
def fix_aips_header(header): """ Search through an image header. If the keywords BMAJ/BMIN/BPA are not set, but there are AIPS history cards, then we can populate the BMAJ/BMIN/BPA. Fix the header if possible, otherwise don't. Either way, don't complain. Parameters ---------- header : HDUHeader Fits header which may or may not have AIPS history cards. Returns ------- header : HDUHeader A header which has BMAJ, BMIN, and BPA keys, as well as a new HISTORY card. """ if 'BMAJ' in header and 'BMIN' in header and 'BPA' in header: # The header already has the required keys so there is nothing to do return header aips_hist = [a for a in header['HISTORY'] if a.startswith("AIPS")] if len(aips_hist) == 0: # There are no AIPS history items to process return header for a in aips_hist: if "BMAJ" in a: # this line looks like # 'AIPS CLEAN BMAJ= 1.2500E-02 BMIN= 1.2500E-02 BPA= 0.00' words = a.split() bmaj = float(words[3]) bmin = float(words[5]) bpa = float(words[7]) break else: # there are AIPS cards but there is no BMAJ/BMIN/BPA return header header['BMAJ'] = bmaj header['BMIN'] = bmin header['BPA'] = bpa header['HISTORY'] = 'Beam information AIPS->fits by AegeanTools' return header
python
def fix_aips_header(header): """ Search through an image header. If the keywords BMAJ/BMIN/BPA are not set, but there are AIPS history cards, then we can populate the BMAJ/BMIN/BPA. Fix the header if possible, otherwise don't. Either way, don't complain. Parameters ---------- header : HDUHeader Fits header which may or may not have AIPS history cards. Returns ------- header : HDUHeader A header which has BMAJ, BMIN, and BPA keys, as well as a new HISTORY card. """ if 'BMAJ' in header and 'BMIN' in header and 'BPA' in header: # The header already has the required keys so there is nothing to do return header aips_hist = [a for a in header['HISTORY'] if a.startswith("AIPS")] if len(aips_hist) == 0: # There are no AIPS history items to process return header for a in aips_hist: if "BMAJ" in a: # this line looks like # 'AIPS CLEAN BMAJ= 1.2500E-02 BMIN= 1.2500E-02 BPA= 0.00' words = a.split() bmaj = float(words[3]) bmin = float(words[5]) bpa = float(words[7]) break else: # there are AIPS cards but there is no BMAJ/BMIN/BPA return header header['BMAJ'] = bmaj header['BMIN'] = bmin header['BPA'] = bpa header['HISTORY'] = 'Beam information AIPS->fits by AegeanTools' return header
[ "def", "fix_aips_header", "(", "header", ")", ":", "if", "'BMAJ'", "in", "header", "and", "'BMIN'", "in", "header", "and", "'BPA'", "in", "header", ":", "# The header already has the required keys so there is nothing to do", "return", "header", "aips_hist", "=", "[", ...
Search through an image header. If the keywords BMAJ/BMIN/BPA are not set, but there are AIPS history cards, then we can populate the BMAJ/BMIN/BPA. Fix the header if possible, otherwise don't. Either way, don't complain. Parameters ---------- header : HDUHeader Fits header which may or may not have AIPS history cards. Returns ------- header : HDUHeader A header which has BMAJ, BMIN, and BPA keys, as well as a new HISTORY card.
[ "Search", "through", "an", "image", "header", ".", "If", "the", "keywords", "BMAJ", "/", "BMIN", "/", "BPA", "are", "not", "set", "but", "there", "are", "AIPS", "history", "cards", "then", "we", "can", "populate", "the", "BMAJ", "/", "BMIN", "/", "BPA"...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L105-L145
train
PaulHancock/Aegean
AegeanTools/fits_image.py
FitsImage.set_pixels
def set_pixels(self, pixels): """ Set the image data. Will not work if the new image has a different shape than the current image. Parameters ---------- pixels : numpy.ndarray New image data Returns ------- None """ if not (pixels.shape == self._pixels.shape): raise AssertionError("Shape mismatch between pixels supplied {0} and existing image pixels {1}".format(pixels.shape,self._pixels.shape)) self._pixels = pixels # reset this so that it is calculated next time the function is called self._rms = None return
python
def set_pixels(self, pixels): """ Set the image data. Will not work if the new image has a different shape than the current image. Parameters ---------- pixels : numpy.ndarray New image data Returns ------- None """ if not (pixels.shape == self._pixels.shape): raise AssertionError("Shape mismatch between pixels supplied {0} and existing image pixels {1}".format(pixels.shape,self._pixels.shape)) self._pixels = pixels # reset this so that it is calculated next time the function is called self._rms = None return
[ "def", "set_pixels", "(", "self", ",", "pixels", ")", ":", "if", "not", "(", "pixels", ".", "shape", "==", "self", ".", "_pixels", ".", "shape", ")", ":", "raise", "AssertionError", "(", "\"Shape mismatch between pixels supplied {0} and existing image pixels {1}\"",...
Set the image data. Will not work if the new image has a different shape than the current image. Parameters ---------- pixels : numpy.ndarray New image data Returns ------- None
[ "Set", "the", "image", "data", ".", "Will", "not", "work", "if", "the", "new", "image", "has", "a", "different", "shape", "than", "the", "current", "image", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L235-L254
train
PaulHancock/Aegean
AegeanTools/fits_image.py
FitsImage.get_background_rms
def get_background_rms(self): """ Calculate the rms of the image. The rms is calculated from the interqurtile range (IQR), to reduce bias from source pixels. Returns ------- rms : float The image rms. Notes ----- The rms value is cached after first calculation. """ # TODO: return a proper background RMS ignoring the sources # This is an approximate method suggested by PaulH. # I have no idea where this magic 1.34896 number comes from... if self._rms is None: # Get the pixels values without the NaNs data = numpy.extract(self.hdu.data > -9999999, self.hdu.data) p25 = scipy.stats.scoreatpercentile(data, 25) p75 = scipy.stats.scoreatpercentile(data, 75) iqr = p75 - p25 self._rms = iqr / 1.34896 return self._rms
python
def get_background_rms(self): """ Calculate the rms of the image. The rms is calculated from the interqurtile range (IQR), to reduce bias from source pixels. Returns ------- rms : float The image rms. Notes ----- The rms value is cached after first calculation. """ # TODO: return a proper background RMS ignoring the sources # This is an approximate method suggested by PaulH. # I have no idea where this magic 1.34896 number comes from... if self._rms is None: # Get the pixels values without the NaNs data = numpy.extract(self.hdu.data > -9999999, self.hdu.data) p25 = scipy.stats.scoreatpercentile(data, 25) p75 = scipy.stats.scoreatpercentile(data, 75) iqr = p75 - p25 self._rms = iqr / 1.34896 return self._rms
[ "def", "get_background_rms", "(", "self", ")", ":", "# TODO: return a proper background RMS ignoring the sources", "# This is an approximate method suggested by PaulH.", "# I have no idea where this magic 1.34896 number comes from...", "if", "self", ".", "_rms", "is", "None", ":", "#...
Calculate the rms of the image. The rms is calculated from the interqurtile range (IQR), to reduce bias from source pixels. Returns ------- rms : float The image rms. Notes ----- The rms value is cached after first calculation.
[ "Calculate", "the", "rms", "of", "the", "image", ".", "The", "rms", "is", "calculated", "from", "the", "interqurtile", "range", "(", "IQR", ")", "to", "reduce", "bias", "from", "source", "pixels", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L256-L280
train
PaulHancock/Aegean
AegeanTools/fits_image.py
FitsImage.pix2sky
def pix2sky(self, pixel): """ Get the sky coordinates for a given image pixel. Parameters ---------- pixel : (float, float) Image coordinates. Returns ------- ra,dec : float Sky coordinates (degrees) """ pixbox = numpy.array([pixel, pixel]) skybox = self.wcs.all_pix2world(pixbox, 1) return [float(skybox[0][0]), float(skybox[0][1])]
python
def pix2sky(self, pixel): """ Get the sky coordinates for a given image pixel. Parameters ---------- pixel : (float, float) Image coordinates. Returns ------- ra,dec : float Sky coordinates (degrees) """ pixbox = numpy.array([pixel, pixel]) skybox = self.wcs.all_pix2world(pixbox, 1) return [float(skybox[0][0]), float(skybox[0][1])]
[ "def", "pix2sky", "(", "self", ",", "pixel", ")", ":", "pixbox", "=", "numpy", ".", "array", "(", "[", "pixel", ",", "pixel", "]", ")", "skybox", "=", "self", ".", "wcs", ".", "all_pix2world", "(", "pixbox", ",", "1", ")", "return", "[", "float", ...
Get the sky coordinates for a given image pixel. Parameters ---------- pixel : (float, float) Image coordinates. Returns ------- ra,dec : float Sky coordinates (degrees)
[ "Get", "the", "sky", "coordinates", "for", "a", "given", "image", "pixel", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L282-L299
train
PaulHancock/Aegean
AegeanTools/fits_image.py
FitsImage.sky2pix
def sky2pix(self, skypos): """ Get the pixel coordinates for a given sky position (degrees). Parameters ---------- skypos : (float,float) ra,dec position in degrees. Returns ------- x,y : float Pixel coordinates. """ skybox = [skypos, skypos] pixbox = self.wcs.all_world2pix(skybox, 1) return [float(pixbox[0][0]), float(pixbox[0][1])]
python
def sky2pix(self, skypos): """ Get the pixel coordinates for a given sky position (degrees). Parameters ---------- skypos : (float,float) ra,dec position in degrees. Returns ------- x,y : float Pixel coordinates. """ skybox = [skypos, skypos] pixbox = self.wcs.all_world2pix(skybox, 1) return [float(pixbox[0][0]), float(pixbox[0][1])]
[ "def", "sky2pix", "(", "self", ",", "skypos", ")", ":", "skybox", "=", "[", "skypos", ",", "skypos", "]", "pixbox", "=", "self", ".", "wcs", ".", "all_world2pix", "(", "skybox", ",", "1", ")", "return", "[", "float", "(", "pixbox", "[", "0", "]", ...
Get the pixel coordinates for a given sky position (degrees). Parameters ---------- skypos : (float,float) ra,dec position in degrees. Returns ------- x,y : float Pixel coordinates.
[ "Get", "the", "pixel", "coordinates", "for", "a", "given", "sky", "position", "(", "degrees", ")", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/fits_image.py#L307-L324
train
PaulHancock/Aegean
AegeanTools/AeRes.py
load_sources
def load_sources(filename): """ Open a file, read contents, return a list of all the sources in that file. @param filename: @return: list of OutputSource objects """ catalog = catalogs.table_to_source_list(catalogs.load_table(filename)) logging.info("read {0} sources from {1}".format(len(catalog), filename)) return catalog
python
def load_sources(filename): """ Open a file, read contents, return a list of all the sources in that file. @param filename: @return: list of OutputSource objects """ catalog = catalogs.table_to_source_list(catalogs.load_table(filename)) logging.info("read {0} sources from {1}".format(len(catalog), filename)) return catalog
[ "def", "load_sources", "(", "filename", ")", ":", "catalog", "=", "catalogs", ".", "table_to_source_list", "(", "catalogs", ".", "load_table", "(", "filename", ")", ")", "logging", ".", "info", "(", "\"read {0} sources from {1}\"", ".", "format", "(", "len", "...
Open a file, read contents, return a list of all the sources in that file. @param filename: @return: list of OutputSource objects
[ "Open", "a", "file", "read", "contents", "return", "a", "list", "of", "all", "the", "sources", "in", "that", "file", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/AeRes.py#L19-L27
train
PaulHancock/Aegean
scripts/fix_beam.py
search_beam
def search_beam(hdulist): """ Will search the beam info from the HISTORY :param hdulist: :return: """ header = hdulist[0].header history = header['HISTORY'] history_str = str(history) #AIPS CLEAN BMAJ= 1.2500E-02 BMIN= 1.2500E-02 BPA= 0.00 if 'BMAJ' in history_str: return True else: return False
python
def search_beam(hdulist): """ Will search the beam info from the HISTORY :param hdulist: :return: """ header = hdulist[0].header history = header['HISTORY'] history_str = str(history) #AIPS CLEAN BMAJ= 1.2500E-02 BMIN= 1.2500E-02 BPA= 0.00 if 'BMAJ' in history_str: return True else: return False
[ "def", "search_beam", "(", "hdulist", ")", ":", "header", "=", "hdulist", "[", "0", "]", ".", "header", "history", "=", "header", "[", "'HISTORY'", "]", "history_str", "=", "str", "(", "history", ")", "#AIPS CLEAN BMAJ= 1.2500E-02 BMIN= 1.2500E-02 BPA= 0.00"...
Will search the beam info from the HISTORY :param hdulist: :return:
[ "Will", "search", "the", "beam", "info", "from", "the", "HISTORY", ":", "param", "hdulist", ":", ":", "return", ":" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/scripts/fix_beam.py#L22-L35
train
PaulHancock/Aegean
AegeanTools/source_finder.py
fix_shape
def fix_shape(source): """ Ensure that a>=b for a given source object. If a<b then swap a/b and increment pa by 90. err_a/err_b are also swapped as needed. Parameters ---------- source : object any object with a/b/pa/err_a/err_b properties """ if source.a < source.b: source.a, source.b = source.b, source.a source.err_a, source.err_b = source.err_b, source.err_a source.pa += 90 return
python
def fix_shape(source): """ Ensure that a>=b for a given source object. If a<b then swap a/b and increment pa by 90. err_a/err_b are also swapped as needed. Parameters ---------- source : object any object with a/b/pa/err_a/err_b properties """ if source.a < source.b: source.a, source.b = source.b, source.a source.err_a, source.err_b = source.err_b, source.err_a source.pa += 90 return
[ "def", "fix_shape", "(", "source", ")", ":", "if", "source", ".", "a", "<", "source", ".", "b", ":", "source", ".", "a", ",", "source", ".", "b", "=", "source", ".", "b", ",", "source", ".", "a", "source", ".", "err_a", ",", "source", ".", "err...
Ensure that a>=b for a given source object. If a<b then swap a/b and increment pa by 90. err_a/err_b are also swapped as needed. Parameters ---------- source : object any object with a/b/pa/err_a/err_b properties
[ "Ensure", "that", "a", ">", "=", "b", "for", "a", "given", "source", "object", ".", "If", "a<b", "then", "swap", "a", "/", "b", "and", "increment", "pa", "by", "90", ".", "err_a", "/", "err_b", "are", "also", "swapped", "as", "needed", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1786-L1802
train
PaulHancock/Aegean
AegeanTools/source_finder.py
theta_limit
def theta_limit(theta): """ Angle theta is periodic with period pi. Constrain theta such that -pi/2<theta<=pi/2. Parameters ---------- theta : float Input angle. Returns ------- theta : float Rotate angle. """ while theta <= -1 * np.pi / 2: theta += np.pi while theta > np.pi / 2: theta -= np.pi return theta
python
def theta_limit(theta): """ Angle theta is periodic with period pi. Constrain theta such that -pi/2<theta<=pi/2. Parameters ---------- theta : float Input angle. Returns ------- theta : float Rotate angle. """ while theta <= -1 * np.pi / 2: theta += np.pi while theta > np.pi / 2: theta -= np.pi return theta
[ "def", "theta_limit", "(", "theta", ")", ":", "while", "theta", "<=", "-", "1", "*", "np", ".", "pi", "/", "2", ":", "theta", "+=", "np", ".", "pi", "while", "theta", ">", "np", ".", "pi", "/", "2", ":", "theta", "-=", "np", ".", "pi", "retur...
Angle theta is periodic with period pi. Constrain theta such that -pi/2<theta<=pi/2. Parameters ---------- theta : float Input angle. Returns ------- theta : float Rotate angle.
[ "Angle", "theta", "is", "periodic", "with", "period", "pi", ".", "Constrain", "theta", "such", "that", "-", "pi", "/", "2<theta<", "=", "pi", "/", "2", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1827-L1846
train
PaulHancock/Aegean
AegeanTools/source_finder.py
scope2lat
def scope2lat(telescope): """ Convert a telescope name into a latitude returns None when the telescope is unknown. Parameters ---------- telescope : str Acronym (name) of telescope, eg MWA. Returns ------- lat : float The latitude of the telescope. Notes ----- These values were taken from wikipedia so have varying precision/accuracy """ scopes = {'MWA': -26.703319, "ATCA": -30.3128, "VLA": 34.0790, "LOFAR": 52.9088, "KAT7": -30.721, "MEERKAT": -30.721, "PAPER": -30.7224, "GMRT": 19.096516666667, "OOTY": 11.383404, "ASKAP": -26.7, "MOST": -35.3707, "PARKES": -32.999944, "WSRT": 52.914722, "AMILA": 52.16977, "AMISA": 52.164303, "ATA": 40.817, "CHIME": 49.321, "CARMA": 37.28044, "DRAO": 49.321, "GBT": 38.433056, "LWA": 34.07, "ALMA": -23.019283, "FAST": 25.6525 } if telescope.upper() in scopes: return scopes[telescope.upper()] else: log = logging.getLogger("Aegean") log.warn("Telescope {0} is unknown".format(telescope)) log.warn("integrated fluxes may be incorrect") return None
python
def scope2lat(telescope): """ Convert a telescope name into a latitude returns None when the telescope is unknown. Parameters ---------- telescope : str Acronym (name) of telescope, eg MWA. Returns ------- lat : float The latitude of the telescope. Notes ----- These values were taken from wikipedia so have varying precision/accuracy """ scopes = {'MWA': -26.703319, "ATCA": -30.3128, "VLA": 34.0790, "LOFAR": 52.9088, "KAT7": -30.721, "MEERKAT": -30.721, "PAPER": -30.7224, "GMRT": 19.096516666667, "OOTY": 11.383404, "ASKAP": -26.7, "MOST": -35.3707, "PARKES": -32.999944, "WSRT": 52.914722, "AMILA": 52.16977, "AMISA": 52.164303, "ATA": 40.817, "CHIME": 49.321, "CARMA": 37.28044, "DRAO": 49.321, "GBT": 38.433056, "LWA": 34.07, "ALMA": -23.019283, "FAST": 25.6525 } if telescope.upper() in scopes: return scopes[telescope.upper()] else: log = logging.getLogger("Aegean") log.warn("Telescope {0} is unknown".format(telescope)) log.warn("integrated fluxes may be incorrect") return None
[ "def", "scope2lat", "(", "telescope", ")", ":", "scopes", "=", "{", "'MWA'", ":", "-", "26.703319", ",", "\"ATCA\"", ":", "-", "30.3128", ",", "\"VLA\"", ":", "34.0790", ",", "\"LOFAR\"", ":", "52.9088", ",", "\"KAT7\"", ":", "-", "30.721", ",", "\"MEE...
Convert a telescope name into a latitude returns None when the telescope is unknown. Parameters ---------- telescope : str Acronym (name) of telescope, eg MWA. Returns ------- lat : float The latitude of the telescope. Notes ----- These values were taken from wikipedia so have varying precision/accuracy
[ "Convert", "a", "telescope", "name", "into", "a", "latitude", "returns", "None", "when", "the", "telescope", "is", "unknown", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1849-L1898
train
PaulHancock/Aegean
AegeanTools/source_finder.py
check_cores
def check_cores(cores): """ Determine how many cores we are able to use. Return 1 if we are not able to make a queue via pprocess. Parameters ---------- cores : int The number of cores that are requested. Returns ------- cores : int The number of cores available. """ cores = min(multiprocessing.cpu_count(), cores) if six.PY3: log = logging.getLogger("Aegean") log.info("Multi-cores not supported in python 3+, using one core") return 1 try: queue = pprocess.Queue(limit=cores, reuse=1) except: # TODO: figure out what error is being thrown cores = 1 else: try: _ = queue.manage(pprocess.MakeReusable(fix_shape)) except: cores = 1 return cores
python
def check_cores(cores): """ Determine how many cores we are able to use. Return 1 if we are not able to make a queue via pprocess. Parameters ---------- cores : int The number of cores that are requested. Returns ------- cores : int The number of cores available. """ cores = min(multiprocessing.cpu_count(), cores) if six.PY3: log = logging.getLogger("Aegean") log.info("Multi-cores not supported in python 3+, using one core") return 1 try: queue = pprocess.Queue(limit=cores, reuse=1) except: # TODO: figure out what error is being thrown cores = 1 else: try: _ = queue.manage(pprocess.MakeReusable(fix_shape)) except: cores = 1 return cores
[ "def", "check_cores", "(", "cores", ")", ":", "cores", "=", "min", "(", "multiprocessing", ".", "cpu_count", "(", ")", ",", "cores", ")", "if", "six", ".", "PY3", ":", "log", "=", "logging", ".", "getLogger", "(", "\"Aegean\"", ")", "log", ".", "info...
Determine how many cores we are able to use. Return 1 if we are not able to make a queue via pprocess. Parameters ---------- cores : int The number of cores that are requested. Returns ------- cores : int The number of cores available.
[ "Determine", "how", "many", "cores", "we", "are", "able", "to", "use", ".", "Return", "1", "if", "we", "are", "not", "able", "to", "make", "a", "queue", "via", "pprocess", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1901-L1931
train
PaulHancock/Aegean
AegeanTools/source_finder.py
get_aux_files
def get_aux_files(basename): """ Look for and return all the aux files that are associated witht this filename. Will look for: background (_bkg.fits) rms (_rms.fits) mask (.mim) catalogue (_comp.fits) psf map (_psf.fits) will return filenames if they exist, or None where they do not. Parameters ---------- basename : str The name/path of the input image. Returns ------- aux : dict Dict of filenames or None with keys (bkg, rms, mask, cat, psf) """ base = os.path.splitext(basename)[0] files = {"bkg": base + "_bkg.fits", "rms": base + "_rms.fits", "mask": base + ".mim", "cat": base + "_comp.fits", "psf": base + "_psf.fits"} for k in files.keys(): if not os.path.exists(files[k]): files[k] = None return files
python
def get_aux_files(basename): """ Look for and return all the aux files that are associated witht this filename. Will look for: background (_bkg.fits) rms (_rms.fits) mask (.mim) catalogue (_comp.fits) psf map (_psf.fits) will return filenames if they exist, or None where they do not. Parameters ---------- basename : str The name/path of the input image. Returns ------- aux : dict Dict of filenames or None with keys (bkg, rms, mask, cat, psf) """ base = os.path.splitext(basename)[0] files = {"bkg": base + "_bkg.fits", "rms": base + "_rms.fits", "mask": base + ".mim", "cat": base + "_comp.fits", "psf": base + "_psf.fits"} for k in files.keys(): if not os.path.exists(files[k]): files[k] = None return files
[ "def", "get_aux_files", "(", "basename", ")", ":", "base", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "[", "0", "]", "files", "=", "{", "\"bkg\"", ":", "base", "+", "\"_bkg.fits\"", ",", "\"rms\"", ":", "base", "+", "\"_rms.fits\"",...
Look for and return all the aux files that are associated witht this filename. Will look for: background (_bkg.fits) rms (_rms.fits) mask (.mim) catalogue (_comp.fits) psf map (_psf.fits) will return filenames if they exist, or None where they do not. Parameters ---------- basename : str The name/path of the input image. Returns ------- aux : dict Dict of filenames or None with keys (bkg, rms, mask, cat, psf)
[ "Look", "for", "and", "return", "all", "the", "aux", "files", "that", "are", "associated", "witht", "this", "filename", ".", "Will", "look", "for", ":", "background", "(", "_bkg", ".", "fits", ")", "rms", "(", "_rms", ".", "fits", ")", "mask", "(", "...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1934-L1966
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder._gen_flood_wrap
def _gen_flood_wrap(self, data, rmsimg, innerclip, outerclip=None, domask=False): """ Generator function. Segment an image into islands and return one island at a time. Needs to work for entire image, and also for components within an island. Parameters ---------- data : 2d-array Image array. rmsimg : 2d-array Noise image. innerclip, outerclip :float Seed (inner) and flood (outer) clipping values. domask : bool If True then look for a region mask in globals, only return islands that are within the region. Default = False. Yields ------ data_box : 2d-array A island of sources with subthreshold values masked. xmin, xmax, ymin, ymax : int The corners of the data_box within the initial data array. """ if outerclip is None: outerclip = innerclip # compute SNR image (data has already been background subtracted) snr = abs(data) / rmsimg # mask of pixles that are above the outerclip a = snr >= outerclip # segmentation a la scipy l, n = label(a) f = find_objects(l) if n == 0: self.log.debug("There are no pixels above the clipping limit") return self.log.debug("{1} Found {0} islands total above flood limit".format(n, data.shape)) # Yield values as before, though they are not sorted by flux for i in range(n): xmin, xmax = f[i][0].start, f[i][0].stop ymin, ymax = f[i][1].start, f[i][1].stop if np.any(snr[xmin:xmax, ymin:ymax] > innerclip): # obey inner clip constraint # self.log.info("{1} Island {0} is above the inner clip limit".format(i, data.shape)) data_box = copy.copy(data[xmin:xmax, ymin:ymax]) # copy so that we don't blank the master data data_box[np.where( snr[xmin:xmax, ymin:ymax] < outerclip)] = np.nan # blank pixels that are outside the outerclip data_box[np.where(l[xmin:xmax, ymin:ymax] != i + 1)] = np.nan # blank out other summits # check if there are any pixels left unmasked if not np.any(np.isfinite(data_box)): # self.log.info("{1} Island {0} has no non-masked pixels".format(i,data.shape)) continue if domask and (self.global_data.region is not None): y, x = 
np.where(snr[xmin:xmax, ymin:ymax] >= outerclip) # convert indices of this sub region to indices in the greater image yx = list(zip(y + ymin, x + xmin)) ra, dec = self.global_data.wcshelper.wcs.wcs_pix2world(yx, 1).transpose() mask = self.global_data.region.sky_within(ra, dec, degin=True) # if there are no un-masked pixels within the region then we skip this island. if not np.any(mask): continue self.log.debug("Mask {0}".format(mask)) # self.log.info("{1} Island {0} will be fit".format(i, data.shape)) yield data_box, xmin, xmax, ymin, ymax
python
def _gen_flood_wrap(self, data, rmsimg, innerclip, outerclip=None, domask=False): """ Generator function. Segment an image into islands and return one island at a time. Needs to work for entire image, and also for components within an island. Parameters ---------- data : 2d-array Image array. rmsimg : 2d-array Noise image. innerclip, outerclip :float Seed (inner) and flood (outer) clipping values. domask : bool If True then look for a region mask in globals, only return islands that are within the region. Default = False. Yields ------ data_box : 2d-array A island of sources with subthreshold values masked. xmin, xmax, ymin, ymax : int The corners of the data_box within the initial data array. """ if outerclip is None: outerclip = innerclip # compute SNR image (data has already been background subtracted) snr = abs(data) / rmsimg # mask of pixles that are above the outerclip a = snr >= outerclip # segmentation a la scipy l, n = label(a) f = find_objects(l) if n == 0: self.log.debug("There are no pixels above the clipping limit") return self.log.debug("{1} Found {0} islands total above flood limit".format(n, data.shape)) # Yield values as before, though they are not sorted by flux for i in range(n): xmin, xmax = f[i][0].start, f[i][0].stop ymin, ymax = f[i][1].start, f[i][1].stop if np.any(snr[xmin:xmax, ymin:ymax] > innerclip): # obey inner clip constraint # self.log.info("{1} Island {0} is above the inner clip limit".format(i, data.shape)) data_box = copy.copy(data[xmin:xmax, ymin:ymax]) # copy so that we don't blank the master data data_box[np.where( snr[xmin:xmax, ymin:ymax] < outerclip)] = np.nan # blank pixels that are outside the outerclip data_box[np.where(l[xmin:xmax, ymin:ymax] != i + 1)] = np.nan # blank out other summits # check if there are any pixels left unmasked if not np.any(np.isfinite(data_box)): # self.log.info("{1} Island {0} has no non-masked pixels".format(i,data.shape)) continue if domask and (self.global_data.region is not None): y, x = 
np.where(snr[xmin:xmax, ymin:ymax] >= outerclip) # convert indices of this sub region to indices in the greater image yx = list(zip(y + ymin, x + xmin)) ra, dec = self.global_data.wcshelper.wcs.wcs_pix2world(yx, 1).transpose() mask = self.global_data.region.sky_within(ra, dec, degin=True) # if there are no un-masked pixels within the region then we skip this island. if not np.any(mask): continue self.log.debug("Mask {0}".format(mask)) # self.log.info("{1} Island {0} will be fit".format(i, data.shape)) yield data_box, xmin, xmax, ymin, ymax
[ "def", "_gen_flood_wrap", "(", "self", ",", "data", ",", "rmsimg", ",", "innerclip", ",", "outerclip", "=", "None", ",", "domask", "=", "False", ")", ":", "if", "outerclip", "is", "None", ":", "outerclip", "=", "innerclip", "# compute SNR image (data has alrea...
Generator function. Segment an image into islands and return one island at a time. Needs to work for entire image, and also for components within an island. Parameters ---------- data : 2d-array Image array. rmsimg : 2d-array Noise image. innerclip, outerclip :float Seed (inner) and flood (outer) clipping values. domask : bool If True then look for a region mask in globals, only return islands that are within the region. Default = False. Yields ------ data_box : 2d-array A island of sources with subthreshold values masked. xmin, xmax, ymin, ymax : int The corners of the data_box within the initial data array.
[ "Generator", "function", ".", "Segment", "an", "image", "into", "islands", "and", "return", "one", "island", "at", "a", "time", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L83-L154
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder.estimate_lmfit_parinfo
def estimate_lmfit_parinfo(self, data, rmsimg, curve, beam, innerclip, outerclip=None, offsets=(0, 0), max_summits=None): """ Estimates the number of sources in an island and returns initial parameters for the fit as well as limits on those parameters. Parameters ---------- data : 2d-array (sub) image of flux values. Background should be subtracted. rmsimg : 2d-array Image of 1sigma values curve : 2d-array Image of curvature values [-1,0,+1] beam : :class:`AegeanTools.fits_image.Beam` The beam information for the image. innerclip, outerclip : float Inerr and outer level for clipping (sigmas). offsets : (int, int) The (x,y) offset of data within it's parent image max_summits : int If not None, only this many summits/components will be fit. More components may be present in the island, but subsequent components will not have free parameters. Returns ------- model : lmfit.Parameters The initial estimate of parameters for the components within this island. """ debug_on = self.log.isEnabledFor(logging.DEBUG) is_flag = 0 global_data = self.global_data # check to see if this island is a negative peak since we need to treat such cases slightly differently isnegative = max(data[np.where(np.isfinite(data))]) < 0 if isnegative: self.log.debug("[is a negative island]") if outerclip is None: outerclip = innerclip self.log.debug(" - shape {0}".format(data.shape)) if not data.shape == curve.shape: self.log.error("data and curvature are mismatched") self.log.error("data:{0} curve:{1}".format(data.shape, curve.shape)) raise AssertionError() # For small islands we can't do a 6 param fit # Don't count the NaN values as part of the island non_nan_pix = len(data[np.where(np.isfinite(data))].ravel()) if 4 <= non_nan_pix <= 6: self.log.debug("FIXED2PSF") is_flag |= flags.FIXED2PSF elif non_nan_pix < 4: self.log.debug("FITERRSMALL!") is_flag |= flags.FITERRSMALL else: is_flag = 0 if debug_on: self.log.debug(" - size {0}".format(len(data.ravel()))) if min(data.shape) <= 2 or (is_flag & 
flags.FITERRSMALL) or (is_flag & flags.FIXED2PSF): # 1d islands or small islands only get one source if debug_on: self.log.debug("Tiny summit detected") self.log.debug("{0}".format(data)) summits = [[data, 0, data.shape[0], 0, data.shape[1]]] # and are constrained to be point sources is_flag |= flags.FIXED2PSF else: if isnegative: # the summit should be able to include all pixels within the island not just those above innerclip kappa_sigma = np.where(curve > 0.5, np.where(data + outerclip * rmsimg < 0, data, np.nan), np.nan) else: kappa_sigma = np.where(-1 * curve > 0.5, np.where(data - outerclip * rmsimg > 0, data, np.nan), np.nan) summits = list(self._gen_flood_wrap(kappa_sigma, np.ones(kappa_sigma.shape), 0, domask=False)) params = lmfit.Parameters() i = 0 summits_considered = 0 # This can happen when the image contains regions of nans # the data/noise indicate an island, but the curvature doesn't back it up. if len(summits) < 1: self.log.debug("Island has {0} summits".format(len(summits))) return None # add summits in reverse order of peak SNR - ie brightest first for summit, xmin, xmax, ymin, ymax in sorted(summits, key=lambda x: np.nanmax(-1. 
* abs(x[0]))): summits_considered += 1 summit_flag = is_flag if debug_on: self.log.debug( "Summit({5}) - shape:{0} x:[{1}-{2}] y:[{3}-{4}]".format(summit.shape, ymin, ymax, xmin, xmax, i)) try: if isnegative: amp = np.nanmin(summit) xpeak, ypeak = np.unravel_index(np.nanargmin(summit), summit.shape) else: amp = np.nanmax(summit) xpeak, ypeak = np.unravel_index(np.nanargmax(summit), summit.shape) except ValueError as e: if "All-NaN" in e.message: self.log.warn("Summit of nan's detected - this shouldn't happen") continue else: raise e if debug_on: self.log.debug(" - max is {0:f}".format(amp)) self.log.debug(" - peak at {0},{1}".format(xpeak, ypeak)) yo = ypeak + ymin xo = xpeak + xmin # Summits are allowed to include pixels that are between the outer and inner clip # This means that sometimes we get a summit that has all it's pixels below the inner clip # So we test for that here. snr = np.nanmax(abs(data[xmin:xmax + 1, ymin:ymax + 1] / rmsimg[xmin:xmax + 1, ymin:ymax + 1])) if snr < innerclip: self.log.debug("Summit has SNR {0} < innerclip {1}: skipping".format(snr, innerclip)) continue # allow amp to be 5% or (innerclip) sigma higher # TODO: the 5% should depend on the beam sampling # note: when innerclip is 400 this becomes rather stupid if amp > 0: amp_min, amp_max = 0.95 * min(outerclip * rmsimg[xo, yo], amp), amp * 1.05 + innerclip * rmsimg[xo, yo] else: amp_max, amp_min = 0.95 * max(-outerclip * rmsimg[xo, yo], amp), amp * 1.05 - innerclip * rmsimg[xo, yo] if debug_on: self.log.debug("a_min {0}, a_max {1}".format(amp_min, amp_max)) pixbeam = global_data.psfhelper.get_pixbeam_pixel(yo + offsets[0], xo + offsets[1]) if pixbeam is None: self.log.debug(" Summit has invalid WCS/Beam - Skipping.") continue # set a square limit based on the size of the pixbeam xo_lim = 0.5 * np.hypot(pixbeam.a, pixbeam.b) yo_lim = xo_lim yo_min, yo_max = yo - yo_lim, yo + yo_lim # if yo_min == yo_max: # if we have a 1d summit then allow the position to vary by +/-0.5pix # yo_min, 
yo_max = yo_min - 0.5, yo_max + 0.5 xo_min, xo_max = xo - xo_lim, xo + xo_lim # if xo_min == xo_max: # if we have a 1d summit then allow the position to vary by +/-0.5pix # xo_min, xo_max = xo_min - 0.5, xo_max + 0.5 # the size of the island xsize = data.shape[0] ysize = data.shape[1] # initial shape is the psf sx = pixbeam.a * FWHM2CC sy = pixbeam.b * FWHM2CC # lmfit does silly things if we start with these two parameters being equal sx = max(sx, sy * 1.01) # constraints are based on the shape of the island # sx,sy can become flipped so we set the min/max account for this sx_min, sx_max = sy * 0.8, max((max(xsize, ysize) + 1) * math.sqrt(2) * FWHM2CC, sx * 1.1) sy_min, sy_max = sy * 0.8, max((max(xsize, ysize) + 1) * math.sqrt(2) * FWHM2CC, sx * 1.1) theta = pixbeam.pa # Degrees flag = summit_flag # check to see if we are going to fit this component if max_summits is not None: maxxed = i >= max_summits else: maxxed = False # components that are not fit need appropriate flags if maxxed: summit_flag |= flags.NOTFIT summit_flag |= flags.FIXED2PSF if debug_on: self.log.debug(" - var val min max | min max") self.log.debug(" - amp {0} {1} {2} ".format(amp, amp_min, amp_max)) self.log.debug(" - xo {0} {1} {2} ".format(xo, xo_min, xo_max)) self.log.debug(" - yo {0} {1} {2} ".format(yo, yo_min, yo_max)) self.log.debug(" - sx {0} {1} {2} | {3} {4}".format(sx, sx_min, sx_max, sx_min * CC2FHWM, sx_max * CC2FHWM)) self.log.debug(" - sy {0} {1} {2} | {3} {4}".format(sy, sy_min, sy_max, sy_min * CC2FHWM, sy_max * CC2FHWM)) self.log.debug(" - theta {0} {1} {2}".format(theta, -180, 180)) self.log.debug(" - flags {0}".format(flag)) self.log.debug(" - fit? 
{0}".format(not maxxed)) # TODO: figure out how incorporate the circular constraint on sx/sy prefix = "c{0}_".format(i) params.add(prefix + 'amp', value=amp, min=amp_min, max=amp_max, vary=not maxxed) params.add(prefix + 'xo', value=xo, min=float(xo_min), max=float(xo_max), vary=not maxxed) params.add(prefix + 'yo', value=yo, min=float(yo_min), max=float(yo_max), vary=not maxxed) if summit_flag & flags.FIXED2PSF > 0: psf_vary = False else: psf_vary = not maxxed params.add(prefix + 'sx', value=sx, min=sx_min, max=sx_max, vary=psf_vary) params.add(prefix + 'sy', value=sy, min=sy_min, max=sy_max, vary=psf_vary) params.add(prefix + 'theta', value=theta, vary=psf_vary) params.add(prefix + 'flags', value=summit_flag, vary=False) # starting at zero allows the maj/min axes to be fit better. # if params[prefix + 'theta'].vary: # params[prefix + 'theta'].value = 0 i += 1 if debug_on: self.log.debug("Estimated sources: {0}".format(i)) # remember how many components are fit. params.add('components', value=i, vary=False) # params.components=i if params['components'].value < 1: self.log.debug("Considered {0} summits, accepted {1}".format(summits_considered, i)) return params
python
def estimate_lmfit_parinfo(self, data, rmsimg, curve, beam, innerclip, outerclip=None, offsets=(0, 0), max_summits=None): """ Estimates the number of sources in an island and returns initial parameters for the fit as well as limits on those parameters. Parameters ---------- data : 2d-array (sub) image of flux values. Background should be subtracted. rmsimg : 2d-array Image of 1sigma values curve : 2d-array Image of curvature values [-1,0,+1] beam : :class:`AegeanTools.fits_image.Beam` The beam information for the image. innerclip, outerclip : float Inerr and outer level for clipping (sigmas). offsets : (int, int) The (x,y) offset of data within it's parent image max_summits : int If not None, only this many summits/components will be fit. More components may be present in the island, but subsequent components will not have free parameters. Returns ------- model : lmfit.Parameters The initial estimate of parameters for the components within this island. """ debug_on = self.log.isEnabledFor(logging.DEBUG) is_flag = 0 global_data = self.global_data # check to see if this island is a negative peak since we need to treat such cases slightly differently isnegative = max(data[np.where(np.isfinite(data))]) < 0 if isnegative: self.log.debug("[is a negative island]") if outerclip is None: outerclip = innerclip self.log.debug(" - shape {0}".format(data.shape)) if not data.shape == curve.shape: self.log.error("data and curvature are mismatched") self.log.error("data:{0} curve:{1}".format(data.shape, curve.shape)) raise AssertionError() # For small islands we can't do a 6 param fit # Don't count the NaN values as part of the island non_nan_pix = len(data[np.where(np.isfinite(data))].ravel()) if 4 <= non_nan_pix <= 6: self.log.debug("FIXED2PSF") is_flag |= flags.FIXED2PSF elif non_nan_pix < 4: self.log.debug("FITERRSMALL!") is_flag |= flags.FITERRSMALL else: is_flag = 0 if debug_on: self.log.debug(" - size {0}".format(len(data.ravel()))) if min(data.shape) <= 2 or (is_flag & 
flags.FITERRSMALL) or (is_flag & flags.FIXED2PSF): # 1d islands or small islands only get one source if debug_on: self.log.debug("Tiny summit detected") self.log.debug("{0}".format(data)) summits = [[data, 0, data.shape[0], 0, data.shape[1]]] # and are constrained to be point sources is_flag |= flags.FIXED2PSF else: if isnegative: # the summit should be able to include all pixels within the island not just those above innerclip kappa_sigma = np.where(curve > 0.5, np.where(data + outerclip * rmsimg < 0, data, np.nan), np.nan) else: kappa_sigma = np.where(-1 * curve > 0.5, np.where(data - outerclip * rmsimg > 0, data, np.nan), np.nan) summits = list(self._gen_flood_wrap(kappa_sigma, np.ones(kappa_sigma.shape), 0, domask=False)) params = lmfit.Parameters() i = 0 summits_considered = 0 # This can happen when the image contains regions of nans # the data/noise indicate an island, but the curvature doesn't back it up. if len(summits) < 1: self.log.debug("Island has {0} summits".format(len(summits))) return None # add summits in reverse order of peak SNR - ie brightest first for summit, xmin, xmax, ymin, ymax in sorted(summits, key=lambda x: np.nanmax(-1. 
* abs(x[0]))): summits_considered += 1 summit_flag = is_flag if debug_on: self.log.debug( "Summit({5}) - shape:{0} x:[{1}-{2}] y:[{3}-{4}]".format(summit.shape, ymin, ymax, xmin, xmax, i)) try: if isnegative: amp = np.nanmin(summit) xpeak, ypeak = np.unravel_index(np.nanargmin(summit), summit.shape) else: amp = np.nanmax(summit) xpeak, ypeak = np.unravel_index(np.nanargmax(summit), summit.shape) except ValueError as e: if "All-NaN" in e.message: self.log.warn("Summit of nan's detected - this shouldn't happen") continue else: raise e if debug_on: self.log.debug(" - max is {0:f}".format(amp)) self.log.debug(" - peak at {0},{1}".format(xpeak, ypeak)) yo = ypeak + ymin xo = xpeak + xmin # Summits are allowed to include pixels that are between the outer and inner clip # This means that sometimes we get a summit that has all it's pixels below the inner clip # So we test for that here. snr = np.nanmax(abs(data[xmin:xmax + 1, ymin:ymax + 1] / rmsimg[xmin:xmax + 1, ymin:ymax + 1])) if snr < innerclip: self.log.debug("Summit has SNR {0} < innerclip {1}: skipping".format(snr, innerclip)) continue # allow amp to be 5% or (innerclip) sigma higher # TODO: the 5% should depend on the beam sampling # note: when innerclip is 400 this becomes rather stupid if amp > 0: amp_min, amp_max = 0.95 * min(outerclip * rmsimg[xo, yo], amp), amp * 1.05 + innerclip * rmsimg[xo, yo] else: amp_max, amp_min = 0.95 * max(-outerclip * rmsimg[xo, yo], amp), amp * 1.05 - innerclip * rmsimg[xo, yo] if debug_on: self.log.debug("a_min {0}, a_max {1}".format(amp_min, amp_max)) pixbeam = global_data.psfhelper.get_pixbeam_pixel(yo + offsets[0], xo + offsets[1]) if pixbeam is None: self.log.debug(" Summit has invalid WCS/Beam - Skipping.") continue # set a square limit based on the size of the pixbeam xo_lim = 0.5 * np.hypot(pixbeam.a, pixbeam.b) yo_lim = xo_lim yo_min, yo_max = yo - yo_lim, yo + yo_lim # if yo_min == yo_max: # if we have a 1d summit then allow the position to vary by +/-0.5pix # yo_min, 
yo_max = yo_min - 0.5, yo_max + 0.5 xo_min, xo_max = xo - xo_lim, xo + xo_lim # if xo_min == xo_max: # if we have a 1d summit then allow the position to vary by +/-0.5pix # xo_min, xo_max = xo_min - 0.5, xo_max + 0.5 # the size of the island xsize = data.shape[0] ysize = data.shape[1] # initial shape is the psf sx = pixbeam.a * FWHM2CC sy = pixbeam.b * FWHM2CC # lmfit does silly things if we start with these two parameters being equal sx = max(sx, sy * 1.01) # constraints are based on the shape of the island # sx,sy can become flipped so we set the min/max account for this sx_min, sx_max = sy * 0.8, max((max(xsize, ysize) + 1) * math.sqrt(2) * FWHM2CC, sx * 1.1) sy_min, sy_max = sy * 0.8, max((max(xsize, ysize) + 1) * math.sqrt(2) * FWHM2CC, sx * 1.1) theta = pixbeam.pa # Degrees flag = summit_flag # check to see if we are going to fit this component if max_summits is not None: maxxed = i >= max_summits else: maxxed = False # components that are not fit need appropriate flags if maxxed: summit_flag |= flags.NOTFIT summit_flag |= flags.FIXED2PSF if debug_on: self.log.debug(" - var val min max | min max") self.log.debug(" - amp {0} {1} {2} ".format(amp, amp_min, amp_max)) self.log.debug(" - xo {0} {1} {2} ".format(xo, xo_min, xo_max)) self.log.debug(" - yo {0} {1} {2} ".format(yo, yo_min, yo_max)) self.log.debug(" - sx {0} {1} {2} | {3} {4}".format(sx, sx_min, sx_max, sx_min * CC2FHWM, sx_max * CC2FHWM)) self.log.debug(" - sy {0} {1} {2} | {3} {4}".format(sy, sy_min, sy_max, sy_min * CC2FHWM, sy_max * CC2FHWM)) self.log.debug(" - theta {0} {1} {2}".format(theta, -180, 180)) self.log.debug(" - flags {0}".format(flag)) self.log.debug(" - fit? 
{0}".format(not maxxed)) # TODO: figure out how incorporate the circular constraint on sx/sy prefix = "c{0}_".format(i) params.add(prefix + 'amp', value=amp, min=amp_min, max=amp_max, vary=not maxxed) params.add(prefix + 'xo', value=xo, min=float(xo_min), max=float(xo_max), vary=not maxxed) params.add(prefix + 'yo', value=yo, min=float(yo_min), max=float(yo_max), vary=not maxxed) if summit_flag & flags.FIXED2PSF > 0: psf_vary = False else: psf_vary = not maxxed params.add(prefix + 'sx', value=sx, min=sx_min, max=sx_max, vary=psf_vary) params.add(prefix + 'sy', value=sy, min=sy_min, max=sy_max, vary=psf_vary) params.add(prefix + 'theta', value=theta, vary=psf_vary) params.add(prefix + 'flags', value=summit_flag, vary=False) # starting at zero allows the maj/min axes to be fit better. # if params[prefix + 'theta'].vary: # params[prefix + 'theta'].value = 0 i += 1 if debug_on: self.log.debug("Estimated sources: {0}".format(i)) # remember how many components are fit. params.add('components', value=i, vary=False) # params.components=i if params['components'].value < 1: self.log.debug("Considered {0} summits, accepted {1}".format(summits_considered, i)) return params
[ "def", "estimate_lmfit_parinfo", "(", "self", ",", "data", ",", "rmsimg", ",", "curve", ",", "beam", ",", "innerclip", ",", "outerclip", "=", "None", ",", "offsets", "=", "(", "0", ",", "0", ")", ",", "max_summits", "=", "None", ")", ":", "debug_on", ...
Estimates the number of sources in an island and returns initial parameters for the fit as well as limits on those parameters. Parameters ---------- data : 2d-array (sub) image of flux values. Background should be subtracted. rmsimg : 2d-array Image of 1sigma values curve : 2d-array Image of curvature values [-1,0,+1] beam : :class:`AegeanTools.fits_image.Beam` The beam information for the image. innerclip, outerclip : float Inerr and outer level for clipping (sigmas). offsets : (int, int) The (x,y) offset of data within it's parent image max_summits : int If not None, only this many summits/components will be fit. More components may be present in the island, but subsequent components will not have free parameters. Returns ------- model : lmfit.Parameters The initial estimate of parameters for the components within this island.
[ "Estimates", "the", "number", "of", "sources", "in", "an", "island", "and", "returns", "initial", "parameters", "for", "the", "fit", "as", "well", "as", "limits", "on", "those", "parameters", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L159-L386
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder.result_to_components
def result_to_components(self, result, model, island_data, isflags): """ Convert fitting results into a set of components Parameters ---------- result : lmfit.MinimizerResult The fitting results. model : lmfit.Parameters The model that was fit. island_data : :class:`AegeanTools.models.IslandFittingData` Data about the island that was fit. isflags : int Flags that should be added to this island (in addition to those within the model) Returns ------- sources : list A list of components, and islands if requested. """ global_data = self.global_data # island data isle_num = island_data.isle_num idata = island_data.i xmin, xmax, ymin, ymax = island_data.offsets box = slice(int(xmin), int(xmax)), slice(int(ymin), int(ymax)) rms = global_data.rmsimg[box] bkg = global_data.bkgimg[box] residual = np.median(result.residual), np.std(result.residual) is_flag = isflags sources = [] j = 0 for j in range(model['components'].value): src_flags = is_flag source = OutputSource() source.island = isle_num source.source = j self.log.debug(" component {0}".format(j)) prefix = "c{0}_".format(j) xo = model[prefix + 'xo'].value yo = model[prefix + 'yo'].value sx = model[prefix + 'sx'].value sy = model[prefix + 'sy'].value theta = model[prefix + 'theta'].value amp = model[prefix + 'amp'].value src_flags |= model[prefix + 'flags'].value # these are goodness of fit statistics for the entire island. 
source.residual_mean = residual[0] source.residual_std = residual[1] # set the flags source.flags = src_flags # #pixel pos within island + # island offset within region + # region offset within image + # 1 for luck # (pyfits->fits conversion = luck) x_pix = xo + xmin + 1 y_pix = yo + ymin + 1 # update the source xo/yo so the error calculations can be done correctly # Note that you have to update the max or the value you set will be clipped at the max allowed value model[prefix + 'xo'].set(value=x_pix, max=np.inf) model[prefix + 'yo'].set(value=y_pix, max=np.inf) # ------ extract source parameters ------ # fluxes # the background is taken from background map # Clamp the pixel location to the edge of the background map y = max(min(int(round(y_pix - ymin)), bkg.shape[1] - 1), 0) x = max(min(int(round(x_pix - xmin)), bkg.shape[0] - 1), 0) source.background = bkg[x, y] source.local_rms = rms[x, y] source.peak_flux = amp # all params are in degrees source.ra, source.dec, source.a, source.b, source.pa = global_data.wcshelper.pix2sky_ellipse((x_pix, y_pix), sx * CC2FHWM, sy * CC2FHWM, theta) source.a *= 3600 # arcseconds source.b *= 3600 # force a>=b fix_shape(source) # limit the pa to be in (-90,90] source.pa = pa_limit(source.pa) # if one of these values are nan then there has been some problem with the WCS handling if not all(np.isfinite((source.ra, source.dec, source.a, source.b, source.pa))): src_flags |= flags.WCSERR # negative degrees is valid for RA, but I don't want them. 
if source.ra < 0: source.ra += 360 source.ra_str = dec2hms(source.ra) source.dec_str = dec2dms(source.dec) # calculate integrated flux source.int_flux = source.peak_flux * sx * sy * CC2FHWM ** 2 * np.pi # scale Jy/beam -> Jy using the area of the beam source.int_flux /= global_data.psfhelper.get_beamarea_pix(source.ra, source.dec) # Calculate errors for params that were fit (as well as int_flux) errors(source, model, global_data.wcshelper) source.flags = src_flags # add psf info local_beam = global_data.psfhelper.get_beam(source.ra, source.dec) if local_beam is not None: source.psf_a = local_beam.a * 3600 source.psf_b = local_beam.b * 3600 source.psf_pa = local_beam.pa else: source.psf_a = 0 source.psf_b = 0 source.psf_pa = 0 sources.append(source) self.log.debug(source) if global_data.blank: outerclip = island_data.scalars[1] idx, idy = np.where(abs(idata) - outerclip * rms > 0) idx += xmin idy += ymin self.global_data.img._pixels[[idx, idy]] = np.nan # calculate the integrated island flux if required if island_data.doislandflux: _, outerclip, _ = island_data.scalars self.log.debug("Integrated flux for island {0}".format(isle_num)) kappa_sigma = np.where(abs(idata) - outerclip * rms > 0, idata, np.NaN) self.log.debug("- island shape is {0}".format(kappa_sigma.shape)) source = IslandSource() source.flags = 0 source.island = isle_num source.components = j + 1 source.peak_flux = np.nanmax(kappa_sigma) # check for negative islands if source.peak_flux < 0: source.peak_flux = np.nanmin(kappa_sigma) self.log.debug("- peak flux {0}".format(source.peak_flux)) # positions and background if np.isfinite(source.peak_flux): positions = np.where(kappa_sigma == source.peak_flux) else: # if a component has been refit then it might have flux = np.nan positions = [[kappa_sigma.shape[0] / 2], [kappa_sigma.shape[1] / 2]] xy = positions[0][0] + xmin, positions[1][0] + ymin radec = global_data.wcshelper.pix2sky(xy) source.ra = radec[0] # convert negative ra's to positive ones if 
source.ra < 0: source.ra += 360 source.dec = radec[1] source.ra_str = dec2hms(source.ra) source.dec_str = dec2dms(source.dec) source.background = bkg[positions[0][0], positions[1][0]] source.local_rms = rms[positions[0][0], positions[1][0]] source.x_width, source.y_width = idata.shape source.pixels = int(sum(np.isfinite(kappa_sigma).ravel() * 1.0)) source.extent = [xmin, xmax, ymin, ymax] # TODO: investigate what happens when the sky coords are skewed w.r.t the pixel coords # calculate the area of the island as a fraction of the area of the bounding box bl = global_data.wcshelper.pix2sky([xmax, ymin]) tl = global_data.wcshelper.pix2sky([xmax, ymax]) tr = global_data.wcshelper.pix2sky([xmin, ymax]) height = gcd(tl[0], tl[1], bl[0], bl[1]) width = gcd(tl[0], tl[1], tr[0], tr[1]) area = height * width source.area = area * source.pixels / source.x_width / source.y_width # area is in deg^2 # create contours msq = MarchingSquares(idata) source.contour = [(a[0] + xmin, a[1] + ymin) for a in msq.perimeter] # calculate the maximum angular size of this island, brute force method source.max_angular_size = 0 for i, pos1 in enumerate(source.contour): radec1 = global_data.wcshelper.pix2sky(pos1) for j, pos2 in enumerate(source.contour[i:]): radec2 = global_data.wcshelper.pix2sky(pos2) dist = gcd(radec1[0], radec1[1], radec2[0], radec2[1]) if dist > source.max_angular_size: source.max_angular_size = dist source.pa = bear(radec1[0], radec1[1], radec2[0], radec2[1]) source.max_angular_size_anchors = [pos1[0], pos1[1], pos2[0], pos2[1]] self.log.debug("- peak position {0}, {1} [{2},{3}]".format(source.ra_str, source.dec_str, positions[0][0], positions[1][0])) # integrated flux beam_area = global_data.psfhelper.get_beamarea_deg2(source.ra, source.dec) # beam in deg^2 # get_beamarea_pix(source.ra, source.dec) # beam is in pix^2 isize = source.pixels # number of non zero pixels self.log.debug("- pixels used {0}".format(isize)) source.int_flux = np.nansum(kappa_sigma) # total flux 
Jy/beam self.log.debug("- sum of pixles {0}".format(source.int_flux)) source.int_flux *= beam_area # total flux in Jy self.log.debug("- integrated flux {0}".format(source.int_flux)) eta = erf(np.sqrt(-1 * np.log(abs(source.local_rms * outerclip / source.peak_flux)))) ** 2 self.log.debug("- eta {0}".format(eta)) source.eta = eta source.beam_area = beam_area # I don't know how to calculate this error so we'll set it to nan source.err_int_flux = np.nan sources.append(source) return sources
python
def result_to_components(self, result, model, island_data, isflags): """ Convert fitting results into a set of components Parameters ---------- result : lmfit.MinimizerResult The fitting results. model : lmfit.Parameters The model that was fit. island_data : :class:`AegeanTools.models.IslandFittingData` Data about the island that was fit. isflags : int Flags that should be added to this island (in addition to those within the model) Returns ------- sources : list A list of components, and islands if requested. """ global_data = self.global_data # island data isle_num = island_data.isle_num idata = island_data.i xmin, xmax, ymin, ymax = island_data.offsets box = slice(int(xmin), int(xmax)), slice(int(ymin), int(ymax)) rms = global_data.rmsimg[box] bkg = global_data.bkgimg[box] residual = np.median(result.residual), np.std(result.residual) is_flag = isflags sources = [] j = 0 for j in range(model['components'].value): src_flags = is_flag source = OutputSource() source.island = isle_num source.source = j self.log.debug(" component {0}".format(j)) prefix = "c{0}_".format(j) xo = model[prefix + 'xo'].value yo = model[prefix + 'yo'].value sx = model[prefix + 'sx'].value sy = model[prefix + 'sy'].value theta = model[prefix + 'theta'].value amp = model[prefix + 'amp'].value src_flags |= model[prefix + 'flags'].value # these are goodness of fit statistics for the entire island. 
source.residual_mean = residual[0] source.residual_std = residual[1] # set the flags source.flags = src_flags # #pixel pos within island + # island offset within region + # region offset within image + # 1 for luck # (pyfits->fits conversion = luck) x_pix = xo + xmin + 1 y_pix = yo + ymin + 1 # update the source xo/yo so the error calculations can be done correctly # Note that you have to update the max or the value you set will be clipped at the max allowed value model[prefix + 'xo'].set(value=x_pix, max=np.inf) model[prefix + 'yo'].set(value=y_pix, max=np.inf) # ------ extract source parameters ------ # fluxes # the background is taken from background map # Clamp the pixel location to the edge of the background map y = max(min(int(round(y_pix - ymin)), bkg.shape[1] - 1), 0) x = max(min(int(round(x_pix - xmin)), bkg.shape[0] - 1), 0) source.background = bkg[x, y] source.local_rms = rms[x, y] source.peak_flux = amp # all params are in degrees source.ra, source.dec, source.a, source.b, source.pa = global_data.wcshelper.pix2sky_ellipse((x_pix, y_pix), sx * CC2FHWM, sy * CC2FHWM, theta) source.a *= 3600 # arcseconds source.b *= 3600 # force a>=b fix_shape(source) # limit the pa to be in (-90,90] source.pa = pa_limit(source.pa) # if one of these values are nan then there has been some problem with the WCS handling if not all(np.isfinite((source.ra, source.dec, source.a, source.b, source.pa))): src_flags |= flags.WCSERR # negative degrees is valid for RA, but I don't want them. 
if source.ra < 0: source.ra += 360 source.ra_str = dec2hms(source.ra) source.dec_str = dec2dms(source.dec) # calculate integrated flux source.int_flux = source.peak_flux * sx * sy * CC2FHWM ** 2 * np.pi # scale Jy/beam -> Jy using the area of the beam source.int_flux /= global_data.psfhelper.get_beamarea_pix(source.ra, source.dec) # Calculate errors for params that were fit (as well as int_flux) errors(source, model, global_data.wcshelper) source.flags = src_flags # add psf info local_beam = global_data.psfhelper.get_beam(source.ra, source.dec) if local_beam is not None: source.psf_a = local_beam.a * 3600 source.psf_b = local_beam.b * 3600 source.psf_pa = local_beam.pa else: source.psf_a = 0 source.psf_b = 0 source.psf_pa = 0 sources.append(source) self.log.debug(source) if global_data.blank: outerclip = island_data.scalars[1] idx, idy = np.where(abs(idata) - outerclip * rms > 0) idx += xmin idy += ymin self.global_data.img._pixels[[idx, idy]] = np.nan # calculate the integrated island flux if required if island_data.doislandflux: _, outerclip, _ = island_data.scalars self.log.debug("Integrated flux for island {0}".format(isle_num)) kappa_sigma = np.where(abs(idata) - outerclip * rms > 0, idata, np.NaN) self.log.debug("- island shape is {0}".format(kappa_sigma.shape)) source = IslandSource() source.flags = 0 source.island = isle_num source.components = j + 1 source.peak_flux = np.nanmax(kappa_sigma) # check for negative islands if source.peak_flux < 0: source.peak_flux = np.nanmin(kappa_sigma) self.log.debug("- peak flux {0}".format(source.peak_flux)) # positions and background if np.isfinite(source.peak_flux): positions = np.where(kappa_sigma == source.peak_flux) else: # if a component has been refit then it might have flux = np.nan positions = [[kappa_sigma.shape[0] / 2], [kappa_sigma.shape[1] / 2]] xy = positions[0][0] + xmin, positions[1][0] + ymin radec = global_data.wcshelper.pix2sky(xy) source.ra = radec[0] # convert negative ra's to positive ones if 
source.ra < 0: source.ra += 360 source.dec = radec[1] source.ra_str = dec2hms(source.ra) source.dec_str = dec2dms(source.dec) source.background = bkg[positions[0][0], positions[1][0]] source.local_rms = rms[positions[0][0], positions[1][0]] source.x_width, source.y_width = idata.shape source.pixels = int(sum(np.isfinite(kappa_sigma).ravel() * 1.0)) source.extent = [xmin, xmax, ymin, ymax] # TODO: investigate what happens when the sky coords are skewed w.r.t the pixel coords # calculate the area of the island as a fraction of the area of the bounding box bl = global_data.wcshelper.pix2sky([xmax, ymin]) tl = global_data.wcshelper.pix2sky([xmax, ymax]) tr = global_data.wcshelper.pix2sky([xmin, ymax]) height = gcd(tl[0], tl[1], bl[0], bl[1]) width = gcd(tl[0], tl[1], tr[0], tr[1]) area = height * width source.area = area * source.pixels / source.x_width / source.y_width # area is in deg^2 # create contours msq = MarchingSquares(idata) source.contour = [(a[0] + xmin, a[1] + ymin) for a in msq.perimeter] # calculate the maximum angular size of this island, brute force method source.max_angular_size = 0 for i, pos1 in enumerate(source.contour): radec1 = global_data.wcshelper.pix2sky(pos1) for j, pos2 in enumerate(source.contour[i:]): radec2 = global_data.wcshelper.pix2sky(pos2) dist = gcd(radec1[0], radec1[1], radec2[0], radec2[1]) if dist > source.max_angular_size: source.max_angular_size = dist source.pa = bear(radec1[0], radec1[1], radec2[0], radec2[1]) source.max_angular_size_anchors = [pos1[0], pos1[1], pos2[0], pos2[1]] self.log.debug("- peak position {0}, {1} [{2},{3}]".format(source.ra_str, source.dec_str, positions[0][0], positions[1][0])) # integrated flux beam_area = global_data.psfhelper.get_beamarea_deg2(source.ra, source.dec) # beam in deg^2 # get_beamarea_pix(source.ra, source.dec) # beam is in pix^2 isize = source.pixels # number of non zero pixels self.log.debug("- pixels used {0}".format(isize)) source.int_flux = np.nansum(kappa_sigma) # total flux 
Jy/beam self.log.debug("- sum of pixles {0}".format(source.int_flux)) source.int_flux *= beam_area # total flux in Jy self.log.debug("- integrated flux {0}".format(source.int_flux)) eta = erf(np.sqrt(-1 * np.log(abs(source.local_rms * outerclip / source.peak_flux)))) ** 2 self.log.debug("- eta {0}".format(eta)) source.eta = eta source.beam_area = beam_area # I don't know how to calculate this error so we'll set it to nan source.err_int_flux = np.nan sources.append(source) return sources
[ "def", "result_to_components", "(", "self", ",", "result", ",", "model", ",", "island_data", ",", "isflags", ")", ":", "global_data", "=", "self", ".", "global_data", "# island data", "isle_num", "=", "island_data", ".", "isle_num", "idata", "=", "island_data", ...
Convert fitting results into a set of components Parameters ---------- result : lmfit.MinimizerResult The fitting results. model : lmfit.Parameters The model that was fit. island_data : :class:`AegeanTools.models.IslandFittingData` Data about the island that was fit. isflags : int Flags that should be added to this island (in addition to those within the model) Returns ------- sources : list A list of components, and islands if requested.
[ "Convert", "fitting", "results", "into", "a", "set", "of", "components" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L388-L603
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder.load_globals
def load_globals(self, filename, hdu_index=0, bkgin=None, rmsin=None, beam=None, verb=False, rms=None, bkg=None, cores=1, do_curve=True, mask=None, lat=None, psf=None, blank=False, docov=True, cube_index=None): """ Populate the global_data object by loading or calculating the various components Parameters ---------- filename : str or HDUList Main image which source finding is run on hdu_index : int HDU index of the image within the fits file, default is 0 (first) bkgin, rmsin : str or HDUList background and noise image filename or HDUList beam : :class:`AegeanTools.fits_image.Beam` Beam object representing the synthsized beam. Will replace what is in the FITS header. verb : bool Verbose. Write extra lines to INFO level log. rms, bkg : float A float that represents a constant rms/bkg levels for the entire image. Default = None, which causes the rms/bkg to be loaded or calculated. cores : int Number of cores to use if different from what is autodetected. do_curve : bool If True a curvature map will be created, default=True. mask : str or :class:`AegeanTools.regions.Region` filename or Region object lat : float Latitude of the observing telescope (declination of zenith) psf : str or HDUList Filename or HDUList of a psf image blank : bool True = blank output image where islands are found. Default = False. docov : bool True = use covariance matrix in fitting. Default = True. cube_index : int For an image cube, which slice to use. 
""" # don't reload already loaded data if self.global_data.img is not None: return img = FitsImage(filename, hdu_index=hdu_index, beam=beam, cube_index=cube_index) beam = img.beam debug = logging.getLogger('Aegean').isEnabledFor(logging.DEBUG) if mask is None: self.global_data.region = None else: # allow users to supply and object instead of a filename if isinstance(mask, Region): self.global_data.region = mask elif os.path.exists(mask): self.log.info("Loading mask from {0}".format(mask)) self.global_data.region = Region.load(mask) else: self.log.error("File {0} not found for loading".format(mask)) self.global_data.region = None self.global_data.wcshelper = WCSHelper.from_header(img.get_hdu_header(), beam, lat) self.global_data.psfhelper = PSFHelper(psf, self.global_data.wcshelper) self.global_data.beam = self.global_data.wcshelper.beam self.global_data.img = img self.global_data.data_pix = img.get_pixels() self.global_data.dtype = type(self.global_data.data_pix[0][0]) self.global_data.bkgimg = np.zeros(self.global_data.data_pix.shape, dtype=self.global_data.dtype) self.global_data.rmsimg = np.zeros(self.global_data.data_pix.shape, dtype=self.global_data.dtype) self.global_data.pixarea = img.pixarea self.global_data.dcurve = None if do_curve: self.log.info("Calculating curvature") # calculate curvature but store it as -1,0,+1 dcurve = np.zeros(self.global_data.data_pix.shape, dtype=np.int8) peaks = scipy.ndimage.filters.maximum_filter(self.global_data.data_pix, size=3) troughs = scipy.ndimage.filters.minimum_filter(self.global_data.data_pix, size=3) pmask = np.where(self.global_data.data_pix == peaks) tmask = np.where(self.global_data.data_pix == troughs) dcurve[pmask] = -1 dcurve[tmask] = 1 self.global_data.dcurve = dcurve # if either of rms or bkg images are not supplied then calculate them both if not (rmsin and bkgin): if verb: self.log.info("Calculating background and rms data") self._make_bkg_rms(mesh_size=20, forced_rms=rms, forced_bkg=bkg, cores=cores) # 
replace the calculated images with input versions, if the user has supplied them. if bkgin: if verb: self.log.info("Loading background data from file {0}".format(bkgin)) self.global_data.bkgimg = self._load_aux_image(img, bkgin) if rmsin: if verb: self.log.info("Loading rms data from file {0}".format(rmsin)) self.global_data.rmsimg = self._load_aux_image(img, rmsin) # subtract the background image from the data image and save if verb and debug: self.log.debug("Data max is {0}".format(img.get_pixels()[np.isfinite(img.get_pixels())].max())) self.log.debug("Doing background subtraction") img.set_pixels(img.get_pixels() - self.global_data.bkgimg) self.global_data.data_pix = img.get_pixels() if verb and debug: self.log.debug("Data max is {0}".format(img.get_pixels()[np.isfinite(img.get_pixels())].max())) self.global_data.blank = blank self.global_data.docov = docov # Default to false until I can verify that this is working self.global_data.dobias = False # check if the WCS is galactic if 'lon' in self.global_data.img._header['CTYPE1'].lower(): self.log.info("Galactic coordinates detected and noted") SimpleSource.galactic = True return
python
def load_globals(self, filename, hdu_index=0, bkgin=None, rmsin=None, beam=None, verb=False, rms=None, bkg=None, cores=1, do_curve=True, mask=None, lat=None, psf=None, blank=False, docov=True, cube_index=None): """ Populate the global_data object by loading or calculating the various components Parameters ---------- filename : str or HDUList Main image which source finding is run on hdu_index : int HDU index of the image within the fits file, default is 0 (first) bkgin, rmsin : str or HDUList background and noise image filename or HDUList beam : :class:`AegeanTools.fits_image.Beam` Beam object representing the synthsized beam. Will replace what is in the FITS header. verb : bool Verbose. Write extra lines to INFO level log. rms, bkg : float A float that represents a constant rms/bkg levels for the entire image. Default = None, which causes the rms/bkg to be loaded or calculated. cores : int Number of cores to use if different from what is autodetected. do_curve : bool If True a curvature map will be created, default=True. mask : str or :class:`AegeanTools.regions.Region` filename or Region object lat : float Latitude of the observing telescope (declination of zenith) psf : str or HDUList Filename or HDUList of a psf image blank : bool True = blank output image where islands are found. Default = False. docov : bool True = use covariance matrix in fitting. Default = True. cube_index : int For an image cube, which slice to use. 
""" # don't reload already loaded data if self.global_data.img is not None: return img = FitsImage(filename, hdu_index=hdu_index, beam=beam, cube_index=cube_index) beam = img.beam debug = logging.getLogger('Aegean').isEnabledFor(logging.DEBUG) if mask is None: self.global_data.region = None else: # allow users to supply and object instead of a filename if isinstance(mask, Region): self.global_data.region = mask elif os.path.exists(mask): self.log.info("Loading mask from {0}".format(mask)) self.global_data.region = Region.load(mask) else: self.log.error("File {0} not found for loading".format(mask)) self.global_data.region = None self.global_data.wcshelper = WCSHelper.from_header(img.get_hdu_header(), beam, lat) self.global_data.psfhelper = PSFHelper(psf, self.global_data.wcshelper) self.global_data.beam = self.global_data.wcshelper.beam self.global_data.img = img self.global_data.data_pix = img.get_pixels() self.global_data.dtype = type(self.global_data.data_pix[0][0]) self.global_data.bkgimg = np.zeros(self.global_data.data_pix.shape, dtype=self.global_data.dtype) self.global_data.rmsimg = np.zeros(self.global_data.data_pix.shape, dtype=self.global_data.dtype) self.global_data.pixarea = img.pixarea self.global_data.dcurve = None if do_curve: self.log.info("Calculating curvature") # calculate curvature but store it as -1,0,+1 dcurve = np.zeros(self.global_data.data_pix.shape, dtype=np.int8) peaks = scipy.ndimage.filters.maximum_filter(self.global_data.data_pix, size=3) troughs = scipy.ndimage.filters.minimum_filter(self.global_data.data_pix, size=3) pmask = np.where(self.global_data.data_pix == peaks) tmask = np.where(self.global_data.data_pix == troughs) dcurve[pmask] = -1 dcurve[tmask] = 1 self.global_data.dcurve = dcurve # if either of rms or bkg images are not supplied then calculate them both if not (rmsin and bkgin): if verb: self.log.info("Calculating background and rms data") self._make_bkg_rms(mesh_size=20, forced_rms=rms, forced_bkg=bkg, cores=cores) # 
replace the calculated images with input versions, if the user has supplied them. if bkgin: if verb: self.log.info("Loading background data from file {0}".format(bkgin)) self.global_data.bkgimg = self._load_aux_image(img, bkgin) if rmsin: if verb: self.log.info("Loading rms data from file {0}".format(rmsin)) self.global_data.rmsimg = self._load_aux_image(img, rmsin) # subtract the background image from the data image and save if verb and debug: self.log.debug("Data max is {0}".format(img.get_pixels()[np.isfinite(img.get_pixels())].max())) self.log.debug("Doing background subtraction") img.set_pixels(img.get_pixels() - self.global_data.bkgimg) self.global_data.data_pix = img.get_pixels() if verb and debug: self.log.debug("Data max is {0}".format(img.get_pixels()[np.isfinite(img.get_pixels())].max())) self.global_data.blank = blank self.global_data.docov = docov # Default to false until I can verify that this is working self.global_data.dobias = False # check if the WCS is galactic if 'lon' in self.global_data.img._header['CTYPE1'].lower(): self.log.info("Galactic coordinates detected and noted") SimpleSource.galactic = True return
[ "def", "load_globals", "(", "self", ",", "filename", ",", "hdu_index", "=", "0", ",", "bkgin", "=", "None", ",", "rmsin", "=", "None", ",", "beam", "=", "None", ",", "verb", "=", "False", ",", "rms", "=", "None", ",", "bkg", "=", "None", ",", "co...
Populate the global_data object by loading or calculating the various components Parameters ---------- filename : str or HDUList Main image which source finding is run on hdu_index : int HDU index of the image within the fits file, default is 0 (first) bkgin, rmsin : str or HDUList background and noise image filename or HDUList beam : :class:`AegeanTools.fits_image.Beam` Beam object representing the synthsized beam. Will replace what is in the FITS header. verb : bool Verbose. Write extra lines to INFO level log. rms, bkg : float A float that represents a constant rms/bkg levels for the entire image. Default = None, which causes the rms/bkg to be loaded or calculated. cores : int Number of cores to use if different from what is autodetected. do_curve : bool If True a curvature map will be created, default=True. mask : str or :class:`AegeanTools.regions.Region` filename or Region object lat : float Latitude of the observing telescope (declination of zenith) psf : str or HDUList Filename or HDUList of a psf image blank : bool True = blank output image where islands are found. Default = False. docov : bool True = use covariance matrix in fitting. Default = True. cube_index : int For an image cube, which slice to use.
[ "Populate", "the", "global_data", "object", "by", "loading", "or", "calculating", "the", "various", "components" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L608-L742
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder.save_background_files
def save_background_files(self, image_filename, hdu_index=0, bkgin=None, rmsin=None, beam=None, rms=None, bkg=None, cores=1, outbase=None): """ Generate and save the background and RMS maps as FITS files. They are saved in the current directly as aegean-background.fits and aegean-rms.fits. Parameters ---------- image_filename : str or HDUList Input image. hdu_index : int If fits file has more than one hdu, it can be specified here. Default = 0. bkgin, rmsin : str or HDUList Background and noise image filename or HDUList beam : :class:`AegeanTools.fits_image.Beam` Beam object representing the synthsized beam. Will replace what is in the FITS header. rms, bkg : float A float that represents a constant rms/bkg level for the entire image. Default = None, which causes the rms/bkg to be loaded or calculated. cores : int Number of cores to use if different from what is autodetected. outbase : str Basename for output files. """ self.log.info("Saving background / RMS maps") # load image, and load/create background/rms images self.load_globals(image_filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, verb=True, rms=rms, bkg=bkg, cores=cores, do_curve=True) img = self.global_data.img bkgimg, rmsimg = self.global_data.bkgimg, self.global_data.rmsimg curve = np.array(self.global_data.dcurve, dtype=bkgimg.dtype) # mask these arrays have the same mask the same as the data mask = np.where(np.isnan(self.global_data.data_pix)) bkgimg[mask] = np.NaN rmsimg[mask] = np.NaN curve[mask] = np.NaN # Generate the new FITS files by copying the existing HDU and assigning new data. # This gives the new files the same WCS projection and other header fields. 
new_hdu = img.hdu # Set the ORIGIN to indicate Aegean made this file new_hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__) for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']: if c in new_hdu.header: del new_hdu.header[c] if outbase is None: outbase, _ = os.path.splitext(os.path.basename(image_filename)) noise_out = outbase + '_rms.fits' background_out = outbase + '_bkg.fits' curve_out = outbase + '_crv.fits' snr_out = outbase + '_snr.fits' new_hdu.data = bkgimg new_hdu.writeto(background_out, overwrite=True) self.log.info("Wrote {0}".format(background_out)) new_hdu.data = rmsimg new_hdu.writeto(noise_out, overwrite=True) self.log.info("Wrote {0}".format(noise_out)) new_hdu.data = curve new_hdu.writeto(curve_out, overwrite=True) self.log.info("Wrote {0}".format(curve_out)) new_hdu.data = self.global_data.data_pix / rmsimg new_hdu.writeto(snr_out, overwrite=True) self.log.info("Wrote {0}".format(snr_out)) return
python
def save_background_files(self, image_filename, hdu_index=0, bkgin=None, rmsin=None, beam=None, rms=None, bkg=None, cores=1, outbase=None): """ Generate and save the background and RMS maps as FITS files. They are saved in the current directly as aegean-background.fits and aegean-rms.fits. Parameters ---------- image_filename : str or HDUList Input image. hdu_index : int If fits file has more than one hdu, it can be specified here. Default = 0. bkgin, rmsin : str or HDUList Background and noise image filename or HDUList beam : :class:`AegeanTools.fits_image.Beam` Beam object representing the synthsized beam. Will replace what is in the FITS header. rms, bkg : float A float that represents a constant rms/bkg level for the entire image. Default = None, which causes the rms/bkg to be loaded or calculated. cores : int Number of cores to use if different from what is autodetected. outbase : str Basename for output files. """ self.log.info("Saving background / RMS maps") # load image, and load/create background/rms images self.load_globals(image_filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, verb=True, rms=rms, bkg=bkg, cores=cores, do_curve=True) img = self.global_data.img bkgimg, rmsimg = self.global_data.bkgimg, self.global_data.rmsimg curve = np.array(self.global_data.dcurve, dtype=bkgimg.dtype) # mask these arrays have the same mask the same as the data mask = np.where(np.isnan(self.global_data.data_pix)) bkgimg[mask] = np.NaN rmsimg[mask] = np.NaN curve[mask] = np.NaN # Generate the new FITS files by copying the existing HDU and assigning new data. # This gives the new files the same WCS projection and other header fields. 
new_hdu = img.hdu # Set the ORIGIN to indicate Aegean made this file new_hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__) for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']: if c in new_hdu.header: del new_hdu.header[c] if outbase is None: outbase, _ = os.path.splitext(os.path.basename(image_filename)) noise_out = outbase + '_rms.fits' background_out = outbase + '_bkg.fits' curve_out = outbase + '_crv.fits' snr_out = outbase + '_snr.fits' new_hdu.data = bkgimg new_hdu.writeto(background_out, overwrite=True) self.log.info("Wrote {0}".format(background_out)) new_hdu.data = rmsimg new_hdu.writeto(noise_out, overwrite=True) self.log.info("Wrote {0}".format(noise_out)) new_hdu.data = curve new_hdu.writeto(curve_out, overwrite=True) self.log.info("Wrote {0}".format(curve_out)) new_hdu.data = self.global_data.data_pix / rmsimg new_hdu.writeto(snr_out, overwrite=True) self.log.info("Wrote {0}".format(snr_out)) return
[ "def", "save_background_files", "(", "self", ",", "image_filename", ",", "hdu_index", "=", "0", ",", "bkgin", "=", "None", ",", "rmsin", "=", "None", ",", "beam", "=", "None", ",", "rms", "=", "None", ",", "bkg", "=", "None", ",", "cores", "=", "1", ...
Generate and save the background and RMS maps as FITS files. They are saved in the current directly as aegean-background.fits and aegean-rms.fits. Parameters ---------- image_filename : str or HDUList Input image. hdu_index : int If fits file has more than one hdu, it can be specified here. Default = 0. bkgin, rmsin : str or HDUList Background and noise image filename or HDUList beam : :class:`AegeanTools.fits_image.Beam` Beam object representing the synthsized beam. Will replace what is in the FITS header. rms, bkg : float A float that represents a constant rms/bkg level for the entire image. Default = None, which causes the rms/bkg to be loaded or calculated. cores : int Number of cores to use if different from what is autodetected. outbase : str Basename for output files.
[ "Generate", "and", "save", "the", "background", "and", "RMS", "maps", "as", "FITS", "files", ".", "They", "are", "saved", "in", "the", "current", "directly", "as", "aegean", "-", "background", ".", "fits", "and", "aegean", "-", "rms", ".", "fits", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L744-L822
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder.save_image
def save_image(self, outname): """ Save the image data. This is probably only useful if the image data has been blanked. Parameters ---------- outname : str Name for the output file. """ hdu = self.global_data.img.hdu hdu.data = self.global_data.img._pixels hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__) # delete some axes that we aren't going to need for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']: if c in hdu.header: del hdu.header[c] hdu.writeto(outname, overwrite=True) self.log.info("Wrote {0}".format(outname)) return
python
def save_image(self, outname): """ Save the image data. This is probably only useful if the image data has been blanked. Parameters ---------- outname : str Name for the output file. """ hdu = self.global_data.img.hdu hdu.data = self.global_data.img._pixels hdu.header["ORIGIN"] = "Aegean {0}-({1})".format(__version__, __date__) # delete some axes that we aren't going to need for c in ['CRPIX3', 'CRPIX4', 'CDELT3', 'CDELT4', 'CRVAL3', 'CRVAL4', 'CTYPE3', 'CTYPE4']: if c in hdu.header: del hdu.header[c] hdu.writeto(outname, overwrite=True) self.log.info("Wrote {0}".format(outname)) return
[ "def", "save_image", "(", "self", ",", "outname", ")", ":", "hdu", "=", "self", ".", "global_data", ".", "img", ".", "hdu", "hdu", ".", "data", "=", "self", ".", "global_data", ".", "img", ".", "_pixels", "hdu", ".", "header", "[", "\"ORIGIN\"", "]",...
Save the image data. This is probably only useful if the image data has been blanked. Parameters ---------- outname : str Name for the output file.
[ "Save", "the", "image", "data", ".", "This", "is", "probably", "only", "useful", "if", "the", "image", "data", "has", "been", "blanked", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L824-L843
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder._make_bkg_rms
def _make_bkg_rms(self, mesh_size=20, forced_rms=None, forced_bkg=None, cores=None): """ Calculate an rms image and a bkg image. Parameters ---------- mesh_size : int Number of beams per box default = 20 forced_rms : float The rms of the image. If None: calculate the rms level (default). Otherwise assume a constant rms. forced_bkg : float The background level of the image. If None: calculate the background level (default). Otherwise assume a constant background. cores: int Number of cores to use if different from what is autodetected. """ if (forced_rms is not None): self.log.info("Forcing rms = {0}".format(forced_rms)) self.global_data.rmsimg[:] = forced_rms if (forced_bkg is not None): self.log.info("Forcing bkg = {0}".format(forced_bkg)) self.global_data.bkgimg[:] = forced_bkg # If we known both the rms and the bkg then there is nothing to compute if (forced_rms is not None) and (forced_bkg is not None): return data = self.global_data.data_pix beam = self.global_data.beam img_x, img_y = data.shape xcen = int(img_x / 2) ycen = int(img_y / 2) # calculate a local beam from the center of the data pixbeam = self.global_data.psfhelper.get_pixbeam_pixel(xcen, ycen) if pixbeam is None: self.log.error("Cannot determine the beam shape at the image center") sys.exit(1) width_x = mesh_size * max(abs(math.cos(np.radians(pixbeam.pa)) * pixbeam.a), abs(math.sin(np.radians(pixbeam.pa)) * pixbeam.b)) width_x = int(width_x) width_y = mesh_size * max(abs(math.sin(np.radians(pixbeam.pa)) * pixbeam.a), abs(math.cos(np.radians(pixbeam.pa)) * pixbeam.b)) width_y = int(width_y) self.log.debug("image size x,y:{0},{1}".format(img_x, img_y)) self.log.debug("beam: {0}".format(beam)) self.log.debug("mesh width (pix) x,y: {0},{1}".format(width_x, width_y)) # box centered at image center then tilling outwards xstart = int(xcen - width_x / 2) % width_x # the starting point of the first "full" box ystart = int(ycen - width_y / 2) % width_y xend = img_x - int(img_x - xstart) % width_x # the end 
point of the last "full" box yend = img_y - int(img_y - ystart) % width_y xmins = [0] xmins.extend(list(range(xstart, xend, width_x))) xmins.append(xend) xmaxs = [xstart] xmaxs.extend(list(range(xstart + width_x, xend + 1, width_x))) xmaxs.append(img_x) ymins = [0] ymins.extend(list(range(ystart, yend, width_y))) ymins.append(yend) ymaxs = [ystart] ymaxs.extend(list(range(ystart + width_y, yend + 1, width_y))) ymaxs.append(img_y) # if the image is smaller than our ideal mesh size, just use the whole image instead if width_x >= img_x: xmins = [0] xmaxs = [img_x] if width_y >= img_y: ymins = [0] ymaxs = [img_y] if cores > 1: # set up the queue queue = pprocess.Queue(limit=cores, reuse=1) estimate = queue.manage(pprocess.MakeReusable(self._estimate_bkg_rms)) # populate the queue for xmin, xmax in zip(xmins, xmaxs): for ymin, ymax in zip(ymins, ymaxs): estimate(ymin, ymax, xmin, xmax) else: queue = [] for xmin, xmax in zip(xmins, xmaxs): for ymin, ymax in zip(ymins, ymaxs): queue.append(self._estimate_bkg_rms(xmin, xmax, ymin, ymax)) # only copy across the bkg/rms if they are not already set # queue can only be traversed once so we have to put the if inside the loop for ymin, ymax, xmin, xmax, bkg, rms in queue: if (forced_rms is None): self.global_data.rmsimg[ymin:ymax, xmin:xmax] = rms if (forced_rms is None): self.global_data.bkgimg[ymin:ymax, xmin:xmax] = bkg return
python
def _make_bkg_rms(self, mesh_size=20, forced_rms=None, forced_bkg=None, cores=None): """ Calculate an rms image and a bkg image. Parameters ---------- mesh_size : int Number of beams per box default = 20 forced_rms : float The rms of the image. If None: calculate the rms level (default). Otherwise assume a constant rms. forced_bkg : float The background level of the image. If None: calculate the background level (default). Otherwise assume a constant background. cores: int Number of cores to use if different from what is autodetected. """ if (forced_rms is not None): self.log.info("Forcing rms = {0}".format(forced_rms)) self.global_data.rmsimg[:] = forced_rms if (forced_bkg is not None): self.log.info("Forcing bkg = {0}".format(forced_bkg)) self.global_data.bkgimg[:] = forced_bkg # If we known both the rms and the bkg then there is nothing to compute if (forced_rms is not None) and (forced_bkg is not None): return data = self.global_data.data_pix beam = self.global_data.beam img_x, img_y = data.shape xcen = int(img_x / 2) ycen = int(img_y / 2) # calculate a local beam from the center of the data pixbeam = self.global_data.psfhelper.get_pixbeam_pixel(xcen, ycen) if pixbeam is None: self.log.error("Cannot determine the beam shape at the image center") sys.exit(1) width_x = mesh_size * max(abs(math.cos(np.radians(pixbeam.pa)) * pixbeam.a), abs(math.sin(np.radians(pixbeam.pa)) * pixbeam.b)) width_x = int(width_x) width_y = mesh_size * max(abs(math.sin(np.radians(pixbeam.pa)) * pixbeam.a), abs(math.cos(np.radians(pixbeam.pa)) * pixbeam.b)) width_y = int(width_y) self.log.debug("image size x,y:{0},{1}".format(img_x, img_y)) self.log.debug("beam: {0}".format(beam)) self.log.debug("mesh width (pix) x,y: {0},{1}".format(width_x, width_y)) # box centered at image center then tilling outwards xstart = int(xcen - width_x / 2) % width_x # the starting point of the first "full" box ystart = int(ycen - width_y / 2) % width_y xend = img_x - int(img_x - xstart) % width_x # the end 
point of the last "full" box yend = img_y - int(img_y - ystart) % width_y xmins = [0] xmins.extend(list(range(xstart, xend, width_x))) xmins.append(xend) xmaxs = [xstart] xmaxs.extend(list(range(xstart + width_x, xend + 1, width_x))) xmaxs.append(img_x) ymins = [0] ymins.extend(list(range(ystart, yend, width_y))) ymins.append(yend) ymaxs = [ystart] ymaxs.extend(list(range(ystart + width_y, yend + 1, width_y))) ymaxs.append(img_y) # if the image is smaller than our ideal mesh size, just use the whole image instead if width_x >= img_x: xmins = [0] xmaxs = [img_x] if width_y >= img_y: ymins = [0] ymaxs = [img_y] if cores > 1: # set up the queue queue = pprocess.Queue(limit=cores, reuse=1) estimate = queue.manage(pprocess.MakeReusable(self._estimate_bkg_rms)) # populate the queue for xmin, xmax in zip(xmins, xmaxs): for ymin, ymax in zip(ymins, ymaxs): estimate(ymin, ymax, xmin, xmax) else: queue = [] for xmin, xmax in zip(xmins, xmaxs): for ymin, ymax in zip(ymins, ymaxs): queue.append(self._estimate_bkg_rms(xmin, xmax, ymin, ymax)) # only copy across the bkg/rms if they are not already set # queue can only be traversed once so we have to put the if inside the loop for ymin, ymax, xmin, xmax, bkg, rms in queue: if (forced_rms is None): self.global_data.rmsimg[ymin:ymax, xmin:xmax] = rms if (forced_rms is None): self.global_data.bkgimg[ymin:ymax, xmin:xmax] = bkg return
[ "def", "_make_bkg_rms", "(", "self", ",", "mesh_size", "=", "20", ",", "forced_rms", "=", "None", ",", "forced_bkg", "=", "None", ",", "cores", "=", "None", ")", ":", "if", "(", "forced_rms", "is", "not", "None", ")", ":", "self", ".", "log", ".", ...
Calculate an rms image and a bkg image. Parameters ---------- mesh_size : int Number of beams per box default = 20 forced_rms : float The rms of the image. If None: calculate the rms level (default). Otherwise assume a constant rms. forced_bkg : float The background level of the image. If None: calculate the background level (default). Otherwise assume a constant background. cores: int Number of cores to use if different from what is autodetected.
[ "Calculate", "an", "rms", "image", "and", "a", "bkg", "image", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L845-L956
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder._estimate_bkg_rms
def _estimate_bkg_rms(self, xmin, xmax, ymin, ymax): """ Estimate the background noise mean and RMS. The mean is estimated as the median of data. The RMS is estimated as the IQR of data / 1.34896. Parameters ---------- xmin, xmax, ymin, ymax : int The bounding region over which the bkg/rms will be calculated. Returns ------- ymin, ymax, xmin, xmax : int A copy of the input parameters bkg, rms : float The calculated background and noise. """ data = self.global_data.data_pix[ymin:ymax, xmin:xmax] pixels = np.extract(np.isfinite(data), data).ravel() if len(pixels) < 4: bkg, rms = np.NaN, np.NaN else: pixels.sort() p25 = pixels[int(pixels.size / 4)] p50 = pixels[int(pixels.size / 2)] p75 = pixels[int(pixels.size / 4 * 3)] iqr = p75 - p25 bkg, rms = p50, iqr / 1.34896 # return the input and output data so we know what we are doing # when compiling the results of multiple processes return ymin, ymax, xmin, xmax, bkg, rms
python
def _estimate_bkg_rms(self, xmin, xmax, ymin, ymax): """ Estimate the background noise mean and RMS. The mean is estimated as the median of data. The RMS is estimated as the IQR of data / 1.34896. Parameters ---------- xmin, xmax, ymin, ymax : int The bounding region over which the bkg/rms will be calculated. Returns ------- ymin, ymax, xmin, xmax : int A copy of the input parameters bkg, rms : float The calculated background and noise. """ data = self.global_data.data_pix[ymin:ymax, xmin:xmax] pixels = np.extract(np.isfinite(data), data).ravel() if len(pixels) < 4: bkg, rms = np.NaN, np.NaN else: pixels.sort() p25 = pixels[int(pixels.size / 4)] p50 = pixels[int(pixels.size / 2)] p75 = pixels[int(pixels.size / 4 * 3)] iqr = p75 - p25 bkg, rms = p50, iqr / 1.34896 # return the input and output data so we know what we are doing # when compiling the results of multiple processes return ymin, ymax, xmin, xmax, bkg, rms
[ "def", "_estimate_bkg_rms", "(", "self", ",", "xmin", ",", "xmax", ",", "ymin", ",", "ymax", ")", ":", "data", "=", "self", ".", "global_data", ".", "data_pix", "[", "ymin", ":", "ymax", ",", "xmin", ":", "xmax", "]", "pixels", "=", "np", ".", "ext...
Estimate the background noise mean and RMS. The mean is estimated as the median of data. The RMS is estimated as the IQR of data / 1.34896. Parameters ---------- xmin, xmax, ymin, ymax : int The bounding region over which the bkg/rms will be calculated. Returns ------- ymin, ymax, xmin, xmax : int A copy of the input parameters bkg, rms : float The calculated background and noise.
[ "Estimate", "the", "background", "noise", "mean", "and", "RMS", ".", "The", "mean", "is", "estimated", "as", "the", "median", "of", "data", ".", "The", "RMS", "is", "estimated", "as", "the", "IQR", "of", "data", "/", "1", ".", "34896", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L958-L990
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder._load_aux_image
def _load_aux_image(self, image, auxfile): """ Load a fits file (bkg/rms/curve) and make sure that it is the same shape as the main image. Parameters ---------- image : :class:`AegeanTools.fits_image.FitsImage` The main image that has already been loaded. auxfile : str or HDUList The auxiliary file to be loaded. Returns ------- aux : :class:`AegeanTools.fits_image.FitsImage` The loaded image. """ auximg = FitsImage(auxfile, beam=self.global_data.beam).get_pixels() if auximg.shape != image.get_pixels().shape: self.log.error("file {0} is not the same size as the image map".format(auxfile)) self.log.error("{0}= {1}, image = {2}".format(auxfile, auximg.shape, image.get_pixels().shape)) sys.exit(1) return auximg
python
def _load_aux_image(self, image, auxfile): """ Load a fits file (bkg/rms/curve) and make sure that it is the same shape as the main image. Parameters ---------- image : :class:`AegeanTools.fits_image.FitsImage` The main image that has already been loaded. auxfile : str or HDUList The auxiliary file to be loaded. Returns ------- aux : :class:`AegeanTools.fits_image.FitsImage` The loaded image. """ auximg = FitsImage(auxfile, beam=self.global_data.beam).get_pixels() if auximg.shape != image.get_pixels().shape: self.log.error("file {0} is not the same size as the image map".format(auxfile)) self.log.error("{0}= {1}, image = {2}".format(auxfile, auximg.shape, image.get_pixels().shape)) sys.exit(1) return auximg
[ "def", "_load_aux_image", "(", "self", ",", "image", ",", "auxfile", ")", ":", "auximg", "=", "FitsImage", "(", "auxfile", ",", "beam", "=", "self", ".", "global_data", ".", "beam", ")", ".", "get_pixels", "(", ")", "if", "auximg", ".", "shape", "!=", ...
Load a fits file (bkg/rms/curve) and make sure that it is the same shape as the main image. Parameters ---------- image : :class:`AegeanTools.fits_image.FitsImage` The main image that has already been loaded. auxfile : str or HDUList The auxiliary file to be loaded. Returns ------- aux : :class:`AegeanTools.fits_image.FitsImage` The loaded image.
[ "Load", "a", "fits", "file", "(", "bkg", "/", "rms", "/", "curve", ")", "and", "make", "sure", "that", "it", "is", "the", "same", "shape", "as", "the", "main", "image", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L992-L1015
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder._refit_islands
def _refit_islands(self, group, stage, outerclip=None, istart=0): """ Do island refitting (priorized fitting) on a group of islands. Parameters ---------- group : list A list of components grouped by island. stage : int Refitting stage. outerclip : float Ignored, placed holder for future development. istart : int The starting island number. Returns ------- sources : list List of sources (and islands). """ global_data = self.global_data sources = [] data = global_data.data_pix rmsimg = global_data.rmsimg for inum, isle in enumerate(group, start=istart): self.log.debug("-=-") self.log.debug("input island = {0}, {1} components".format(isle[0].island, len(isle))) # set up the parameters for each of the sources within the island i = 0 params = lmfit.Parameters() shape = data.shape xmin, ymin = shape xmax = ymax = 0 # island_mask = [] src_valid_psf = None # keep track of the sources that are actually being refit # this may be a subset of all sources in the island included_sources = [] for src in isle: pixbeam = global_data.psfhelper.get_pixbeam(src.ra, src.dec) # find the right pixels from the ra/dec source_x, source_y = global_data.wcshelper.sky2pix([src.ra, src.dec]) source_x -= 1 source_y -= 1 x = int(round(source_x)) y = int(round(source_y)) self.log.debug("pixel location ({0:5.2f},{1:5.2f})".format(source_x, source_y)) # reject sources that are outside the image bounds, or which have nan data/rms values if not 0 <= x < shape[0] or not 0 <= y < shape[1] or \ not np.isfinite(data[x, y]) or \ not np.isfinite(rmsimg[x, y]) or \ pixbeam is None: self.log.debug("Source ({0},{1}) not within usable region: skipping".format(src.island, src.source)) continue else: # Keep track of the last source to have a valid psf so that we can use it later on src_valid_psf = src # determine the shape parameters in pixel values _, _, sx, sy, theta = global_data.wcshelper.sky2pix_ellipse([src.ra, src.dec], src.a / 3600, src.b / 3600, src.pa) sx *= FWHM2CC sy *= FWHM2CC self.log.debug("Source 
shape [sky coords] {0:5.2f}x{1:5.2f}@{2:05.2f}".format(src.a, src.b, src.pa)) self.log.debug("Source shape [pixel coords] {0:4.2f}x{1:4.2f}@{2:05.2f}".format(sx, sy, theta)) # choose a region that is 2x the major axis of the source, 4x semimajor axis a width = 4 * sx ywidth = int(round(width)) + 1 xwidth = int(round(width)) + 1 # adjust the size of the island to include this source xmin = min(xmin, max(0, x - xwidth / 2)) ymin = min(ymin, max(0, y - ywidth / 2)) xmax = max(xmax, min(shape[0], x + xwidth / 2 + 1)) ymax = max(ymax, min(shape[1], y + ywidth / 2 + 1)) s_lims = [0.8 * min(sx, pixbeam.b * FWHM2CC), max(sy, sx) * 1.25] # Set up the parameters for the fit, including constraints prefix = "c{0}_".format(i) params.add(prefix + 'amp', value=src.peak_flux, vary=True) # for now the xo/yo are locations within the main image, we correct this later params.add(prefix + 'xo', value=source_x, min=source_x - sx / 2., max=source_x + sx / 2., vary=stage >= 2) params.add(prefix + 'yo', value=source_y, min=source_y - sy / 2., max=source_y + sy / 2., vary=stage >= 2) params.add(prefix + 'sx', value=sx, min=s_lims[0], max=s_lims[1], vary=stage >= 3) params.add(prefix + 'sy', value=sy, min=s_lims[0], max=s_lims[1], vary=stage >= 3) params.add(prefix + 'theta', value=theta, vary=stage >= 3) params.add(prefix + 'flags', value=0, vary=False) # this source is being refit so add it to the list included_sources.append(src) i += 1 # TODO: Allow this mask to be used in conjunction with the FWHM mask that is defined further on # # Use pixels above outerclip sigmas.. 
# if outerclip>=0: # mask = np.where(data[xmin:xmax,ymin:ymax]-outerclip*rmsimg[xmin:xmax,ymin:ymax]>0) # else: # negative outer clip means use all the pixels # mask = np.where(data[xmin:xmax,ymin:ymax]) # # # convert the pixel indices to be pixels within the parent data set # xmask = mask[0] + xmin # ymask = mask[1] + ymin # island_mask.extend(zip(xmask,ymask)) if i == 0: self.log.debug("No sources found in island {0}".format(src.island)) continue params.add('components', value=i, vary=False) # params.components = i self.log.debug(" {0} components being fit".format(i)) # now we correct the xo/yo positions to be relative to the sub-image self.log.debug("xmxxymyx {0} {1} {2} {3}".format(xmin, xmax, ymin, ymax)) for i in range(params['components'].value): prefix = "c{0}_".format(i) params[prefix + 'xo'].value -= xmin params[prefix + 'xo'].min -= xmin params[prefix + 'xo'].max -= xmin params[prefix + 'yo'].value -= ymin params[prefix + 'yo'].min -= ymin params[prefix + 'yo'].max -= ymin # self.log.debug(params) # don't fit if there are no sources if params['components'].value < 1: self.log.info("Island {0} has no components".format(src.island)) continue # this .copy() will stop us from modifying the parent region when we later apply our mask. 
idata = data[int(xmin):int(xmax), int(ymin):int(ymax)].copy() # now convert these back to indices within the idata region # island_mask = np.array([(x-xmin, y-ymin) for x, y in island_mask]) allx, ally = np.indices(idata.shape) # mask to include pixels that are withn the FWHM of the sources being fit mask_params = copy.deepcopy(params) for i in range(mask_params['components'].value): prefix = 'c{0}_'.format(i) mask_params[prefix + 'amp'].value = 1 mask_model = ntwodgaussian_lmfit(mask_params) mask = np.where(mask_model(allx.ravel(), ally.ravel()) <= 0.1) mask = allx.ravel()[mask], ally.ravel()[mask] del mask_params idata[mask] = np.nan mx, my = np.where(np.isfinite(idata)) non_nan_pix = len(mx) total_pix = len(allx.ravel()) self.log.debug("island extracted:") self.log.debug(" x[{0}:{1}] y[{2}:{3}]".format(xmin, xmax, ymin, ymax)) self.log.debug(" max = {0}".format(np.nanmax(idata))) self.log.debug( " total {0}, masked {1}, not masked {2}".format(total_pix, total_pix - non_nan_pix, non_nan_pix)) # Check to see that each component has some data within the central 3x3 pixels of it's location # If not then we don't fit that component for i in range(params['components'].value): prefix = "c{0}_".format(i) # figure out a box around the center of this cx, cy = params[prefix + 'xo'].value, params[prefix + 'yo'].value # central pixel coords self.log.debug(" comp {0}".format(i)) self.log.debug(" x0, y0 {0} {1}".format(cx, cy)) xmx = int(round(np.clip(cx + 2, 0, idata.shape[0]))) xmn = int(round(np.clip(cx - 1, 0, idata.shape[0]))) ymx = int(round(np.clip(cy + 2, 0, idata.shape[1]))) ymn = int(round(np.clip(cy - 1, 0, idata.shape[1]))) square = idata[xmn:xmx, ymn:ymx] # if there are no not-nan pixels in this region then don't vary any parameters if not np.any(np.isfinite(square)): self.log.debug(" not fitting component {0}".format(i)) params[prefix + 'amp'].value = np.nan for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']: params[prefix + p].vary = False params[prefix + 
p].stderr = np.nan # this results in an error of -1 later on params[prefix + 'flags'].value |= flags.NOTFIT # determine the number of free parameters and if we have enough data for a fit nfree = np.count_nonzero([params[p].vary for p in params.keys()]) self.log.debug(params) if nfree < 1: self.log.debug(" Island has no components to fit") result = DummyLM() model = params else: if non_nan_pix < nfree: self.log.debug("More free parameters {0} than available pixels {1}".format(nfree, non_nan_pix)) if non_nan_pix >= params['components'].value: self.log.debug("Fixing all parameters except amplitudes") for p in params.keys(): if 'amp' not in p: params[p].vary = False else: self.log.debug(" no not-masked pixels, skipping") continue # do the fit # if the pixel beam is not valid, then recalculate using the location of the last source to have a valid psf if pixbeam is None: if src_valid_psf is not None: pixbeam = global_data.psfhelper.get_pixbeam(src_valid_psf.ra, src_valid_psf.dec) else: self.log.critical("Cannot determine pixel beam") fac = 1 / np.sqrt(2) if self.global_data.docov: C = Cmatrix(mx, my, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa) B = Bmatrix(C) else: C = B = None errs = np.nanmax(rmsimg[int(xmin):int(xmax), int(ymin):int(ymax)]) result, _ = do_lmfit(idata, params, B=B) model = covar_errors(result.params, idata, errs=errs, B=B, C=C) # convert the results to a source object offsets = (xmin, xmax, ymin, ymax) # TODO allow for island fluxes in the refitting. 
island_data = IslandFittingData(inum, i=idata, offsets=offsets, doislandflux=False, scalars=(4, 4, None)) new_src = self.result_to_components(result, model, island_data, src.flags) for ns, s in zip(new_src, included_sources): # preserve the uuid so we can do exact matching between catalogs ns.uuid = s.uuid # flag the sources as having been priorized ns.flags |= flags.PRIORIZED # if the position wasn't fit then copy the errors from the input catalog if stage < 2: ns.err_ra = s.err_ra ns.err_dec = s.err_dec ns.flags |= flags.FIXED2PSF # if the shape wasn't fit then copy the errors from the input catalog if stage < 3: ns.err_a = s.err_a ns.err_b = s.err_b ns.err_pa = s.err_pa sources.extend(new_src) return sources
python
def _refit_islands(self, group, stage, outerclip=None, istart=0): """ Do island refitting (priorized fitting) on a group of islands. Parameters ---------- group : list A list of components grouped by island. stage : int Refitting stage. outerclip : float Ignored, placed holder for future development. istart : int The starting island number. Returns ------- sources : list List of sources (and islands). """ global_data = self.global_data sources = [] data = global_data.data_pix rmsimg = global_data.rmsimg for inum, isle in enumerate(group, start=istart): self.log.debug("-=-") self.log.debug("input island = {0}, {1} components".format(isle[0].island, len(isle))) # set up the parameters for each of the sources within the island i = 0 params = lmfit.Parameters() shape = data.shape xmin, ymin = shape xmax = ymax = 0 # island_mask = [] src_valid_psf = None # keep track of the sources that are actually being refit # this may be a subset of all sources in the island included_sources = [] for src in isle: pixbeam = global_data.psfhelper.get_pixbeam(src.ra, src.dec) # find the right pixels from the ra/dec source_x, source_y = global_data.wcshelper.sky2pix([src.ra, src.dec]) source_x -= 1 source_y -= 1 x = int(round(source_x)) y = int(round(source_y)) self.log.debug("pixel location ({0:5.2f},{1:5.2f})".format(source_x, source_y)) # reject sources that are outside the image bounds, or which have nan data/rms values if not 0 <= x < shape[0] or not 0 <= y < shape[1] or \ not np.isfinite(data[x, y]) or \ not np.isfinite(rmsimg[x, y]) or \ pixbeam is None: self.log.debug("Source ({0},{1}) not within usable region: skipping".format(src.island, src.source)) continue else: # Keep track of the last source to have a valid psf so that we can use it later on src_valid_psf = src # determine the shape parameters in pixel values _, _, sx, sy, theta = global_data.wcshelper.sky2pix_ellipse([src.ra, src.dec], src.a / 3600, src.b / 3600, src.pa) sx *= FWHM2CC sy *= FWHM2CC self.log.debug("Source 
shape [sky coords] {0:5.2f}x{1:5.2f}@{2:05.2f}".format(src.a, src.b, src.pa)) self.log.debug("Source shape [pixel coords] {0:4.2f}x{1:4.2f}@{2:05.2f}".format(sx, sy, theta)) # choose a region that is 2x the major axis of the source, 4x semimajor axis a width = 4 * sx ywidth = int(round(width)) + 1 xwidth = int(round(width)) + 1 # adjust the size of the island to include this source xmin = min(xmin, max(0, x - xwidth / 2)) ymin = min(ymin, max(0, y - ywidth / 2)) xmax = max(xmax, min(shape[0], x + xwidth / 2 + 1)) ymax = max(ymax, min(shape[1], y + ywidth / 2 + 1)) s_lims = [0.8 * min(sx, pixbeam.b * FWHM2CC), max(sy, sx) * 1.25] # Set up the parameters for the fit, including constraints prefix = "c{0}_".format(i) params.add(prefix + 'amp', value=src.peak_flux, vary=True) # for now the xo/yo are locations within the main image, we correct this later params.add(prefix + 'xo', value=source_x, min=source_x - sx / 2., max=source_x + sx / 2., vary=stage >= 2) params.add(prefix + 'yo', value=source_y, min=source_y - sy / 2., max=source_y + sy / 2., vary=stage >= 2) params.add(prefix + 'sx', value=sx, min=s_lims[0], max=s_lims[1], vary=stage >= 3) params.add(prefix + 'sy', value=sy, min=s_lims[0], max=s_lims[1], vary=stage >= 3) params.add(prefix + 'theta', value=theta, vary=stage >= 3) params.add(prefix + 'flags', value=0, vary=False) # this source is being refit so add it to the list included_sources.append(src) i += 1 # TODO: Allow this mask to be used in conjunction with the FWHM mask that is defined further on # # Use pixels above outerclip sigmas.. 
# if outerclip>=0: # mask = np.where(data[xmin:xmax,ymin:ymax]-outerclip*rmsimg[xmin:xmax,ymin:ymax]>0) # else: # negative outer clip means use all the pixels # mask = np.where(data[xmin:xmax,ymin:ymax]) # # # convert the pixel indices to be pixels within the parent data set # xmask = mask[0] + xmin # ymask = mask[1] + ymin # island_mask.extend(zip(xmask,ymask)) if i == 0: self.log.debug("No sources found in island {0}".format(src.island)) continue params.add('components', value=i, vary=False) # params.components = i self.log.debug(" {0} components being fit".format(i)) # now we correct the xo/yo positions to be relative to the sub-image self.log.debug("xmxxymyx {0} {1} {2} {3}".format(xmin, xmax, ymin, ymax)) for i in range(params['components'].value): prefix = "c{0}_".format(i) params[prefix + 'xo'].value -= xmin params[prefix + 'xo'].min -= xmin params[prefix + 'xo'].max -= xmin params[prefix + 'yo'].value -= ymin params[prefix + 'yo'].min -= ymin params[prefix + 'yo'].max -= ymin # self.log.debug(params) # don't fit if there are no sources if params['components'].value < 1: self.log.info("Island {0} has no components".format(src.island)) continue # this .copy() will stop us from modifying the parent region when we later apply our mask. 
idata = data[int(xmin):int(xmax), int(ymin):int(ymax)].copy() # now convert these back to indices within the idata region # island_mask = np.array([(x-xmin, y-ymin) for x, y in island_mask]) allx, ally = np.indices(idata.shape) # mask to include pixels that are withn the FWHM of the sources being fit mask_params = copy.deepcopy(params) for i in range(mask_params['components'].value): prefix = 'c{0}_'.format(i) mask_params[prefix + 'amp'].value = 1 mask_model = ntwodgaussian_lmfit(mask_params) mask = np.where(mask_model(allx.ravel(), ally.ravel()) <= 0.1) mask = allx.ravel()[mask], ally.ravel()[mask] del mask_params idata[mask] = np.nan mx, my = np.where(np.isfinite(idata)) non_nan_pix = len(mx) total_pix = len(allx.ravel()) self.log.debug("island extracted:") self.log.debug(" x[{0}:{1}] y[{2}:{3}]".format(xmin, xmax, ymin, ymax)) self.log.debug(" max = {0}".format(np.nanmax(idata))) self.log.debug( " total {0}, masked {1}, not masked {2}".format(total_pix, total_pix - non_nan_pix, non_nan_pix)) # Check to see that each component has some data within the central 3x3 pixels of it's location # If not then we don't fit that component for i in range(params['components'].value): prefix = "c{0}_".format(i) # figure out a box around the center of this cx, cy = params[prefix + 'xo'].value, params[prefix + 'yo'].value # central pixel coords self.log.debug(" comp {0}".format(i)) self.log.debug(" x0, y0 {0} {1}".format(cx, cy)) xmx = int(round(np.clip(cx + 2, 0, idata.shape[0]))) xmn = int(round(np.clip(cx - 1, 0, idata.shape[0]))) ymx = int(round(np.clip(cy + 2, 0, idata.shape[1]))) ymn = int(round(np.clip(cy - 1, 0, idata.shape[1]))) square = idata[xmn:xmx, ymn:ymx] # if there are no not-nan pixels in this region then don't vary any parameters if not np.any(np.isfinite(square)): self.log.debug(" not fitting component {0}".format(i)) params[prefix + 'amp'].value = np.nan for p in ['amp', 'xo', 'yo', 'sx', 'sy', 'theta']: params[prefix + p].vary = False params[prefix + 
p].stderr = np.nan # this results in an error of -1 later on params[prefix + 'flags'].value |= flags.NOTFIT # determine the number of free parameters and if we have enough data for a fit nfree = np.count_nonzero([params[p].vary for p in params.keys()]) self.log.debug(params) if nfree < 1: self.log.debug(" Island has no components to fit") result = DummyLM() model = params else: if non_nan_pix < nfree: self.log.debug("More free parameters {0} than available pixels {1}".format(nfree, non_nan_pix)) if non_nan_pix >= params['components'].value: self.log.debug("Fixing all parameters except amplitudes") for p in params.keys(): if 'amp' not in p: params[p].vary = False else: self.log.debug(" no not-masked pixels, skipping") continue # do the fit # if the pixel beam is not valid, then recalculate using the location of the last source to have a valid psf if pixbeam is None: if src_valid_psf is not None: pixbeam = global_data.psfhelper.get_pixbeam(src_valid_psf.ra, src_valid_psf.dec) else: self.log.critical("Cannot determine pixel beam") fac = 1 / np.sqrt(2) if self.global_data.docov: C = Cmatrix(mx, my, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa) B = Bmatrix(C) else: C = B = None errs = np.nanmax(rmsimg[int(xmin):int(xmax), int(ymin):int(ymax)]) result, _ = do_lmfit(idata, params, B=B) model = covar_errors(result.params, idata, errs=errs, B=B, C=C) # convert the results to a source object offsets = (xmin, xmax, ymin, ymax) # TODO allow for island fluxes in the refitting. 
island_data = IslandFittingData(inum, i=idata, offsets=offsets, doislandflux=False, scalars=(4, 4, None)) new_src = self.result_to_components(result, model, island_data, src.flags) for ns, s in zip(new_src, included_sources): # preserve the uuid so we can do exact matching between catalogs ns.uuid = s.uuid # flag the sources as having been priorized ns.flags |= flags.PRIORIZED # if the position wasn't fit then copy the errors from the input catalog if stage < 2: ns.err_ra = s.err_ra ns.err_dec = s.err_dec ns.flags |= flags.FIXED2PSF # if the shape wasn't fit then copy the errors from the input catalog if stage < 3: ns.err_a = s.err_a ns.err_b = s.err_b ns.err_pa = s.err_pa sources.extend(new_src) return sources
[ "def", "_refit_islands", "(", "self", ",", "group", ",", "stage", ",", "outerclip", "=", "None", ",", "istart", "=", "0", ")", ":", "global_data", "=", "self", ".", "global_data", "sources", "=", "[", "]", "data", "=", "global_data", ".", "data_pix", "...
Do island refitting (priorized fitting) on a group of islands. Parameters ---------- group : list A list of components grouped by island. stage : int Refitting stage. outerclip : float Ignored, placed holder for future development. istart : int The starting island number. Returns ------- sources : list List of sources (and islands).
[ "Do", "island", "refitting", "(", "priorized", "fitting", ")", "on", "a", "group", "of", "islands", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1020-L1268
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder._fit_island
def _fit_island(self, island_data): """ Take an Island, do all the parameter estimation and fitting. Parameters ---------- island_data : :class:`AegeanTools.models.IslandFittingData` The island to be fit. Returns ------- sources : list The sources that were fit. """ global_data = self.global_data # global data dcurve = global_data.dcurve rmsimg = global_data.rmsimg # island data isle_num = island_data.isle_num idata = island_data.i innerclip, outerclip, max_summits = island_data.scalars xmin, xmax, ymin, ymax = island_data.offsets # get the beam parameters at the center of this island midra, middec = global_data.wcshelper.pix2sky([0.5 * (xmax + xmin), 0.5 * (ymax + ymin)]) beam = global_data.psfhelper.get_psf_pix(midra, middec) del middec, midra icurve = dcurve[xmin:xmax, ymin:ymax] rms = rmsimg[xmin:xmax, ymin:ymax] is_flag = 0 pixbeam = global_data.psfhelper.get_pixbeam_pixel((xmin + xmax) / 2., (ymin + ymax) / 2.) if pixbeam is None: # This island is not 'on' the sky, ignore it return [] self.log.debug("=====") self.log.debug("Island ({0})".format(isle_num)) params = self.estimate_lmfit_parinfo(idata, rms, icurve, beam, innerclip, outerclip, offsets=[xmin, ymin], max_summits=max_summits) # islands at the edge of a region of nans # result in no components if params is None or params['components'].value < 1: return [] self.log.debug("Rms is {0}".format(np.shape(rms))) self.log.debug("Isle is {0}".format(np.shape(idata))) self.log.debug(" of which {0} are masked".format(sum(np.isnan(idata).ravel() * 1))) # Check that there is enough data to do the fit mx, my = np.where(np.isfinite(idata)) non_blank_pix = len(mx) free_vars = len([1 for a in params.keys() if params[a].vary]) if non_blank_pix < free_vars or free_vars == 0: self.log.debug("Island {0} doesn't have enough pixels to fit the given model".format(isle_num)) self.log.debug("non_blank_pix {0}, free_vars {1}".format(non_blank_pix, free_vars)) result = DummyLM() model = params is_flag |= flags.NOTFIT else: # 
Model is the fitted parameters fac = 1 / np.sqrt(2) if self.global_data.docov: C = Cmatrix(mx, my, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa) B = Bmatrix(C) else: C = B = None self.log.debug( "C({0},{1},{2},{3},{4})".format(len(mx), len(my), pixbeam.a * FWHM2CC, pixbeam.b * FWHM2CC, pixbeam.pa)) errs = np.nanmax(rms) self.log.debug("Initial params") self.log.debug(params) result, _ = do_lmfit(idata, params, B=B) if not result.errorbars: is_flag |= flags.FITERR # get the real (sky) parameter errors model = covar_errors(result.params, idata, errs=errs, B=B, C=C) if self.global_data.dobias and self.global_data.docov: x, y = np.indices(idata.shape) acf = elliptical_gaussian(x, y, 1, 0, 0, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa) bias_correct(model, idata, acf=acf * errs ** 2) if not result.success: is_flag |= flags.FITERR self.log.debug("Final params") self.log.debug(model) # convert the fitting results to a list of sources [and islands] sources = self.result_to_components(result, model, island_data, is_flag) return sources
python
def _fit_island(self, island_data): """ Take an Island, do all the parameter estimation and fitting. Parameters ---------- island_data : :class:`AegeanTools.models.IslandFittingData` The island to be fit. Returns ------- sources : list The sources that were fit. """ global_data = self.global_data # global data dcurve = global_data.dcurve rmsimg = global_data.rmsimg # island data isle_num = island_data.isle_num idata = island_data.i innerclip, outerclip, max_summits = island_data.scalars xmin, xmax, ymin, ymax = island_data.offsets # get the beam parameters at the center of this island midra, middec = global_data.wcshelper.pix2sky([0.5 * (xmax + xmin), 0.5 * (ymax + ymin)]) beam = global_data.psfhelper.get_psf_pix(midra, middec) del middec, midra icurve = dcurve[xmin:xmax, ymin:ymax] rms = rmsimg[xmin:xmax, ymin:ymax] is_flag = 0 pixbeam = global_data.psfhelper.get_pixbeam_pixel((xmin + xmax) / 2., (ymin + ymax) / 2.) if pixbeam is None: # This island is not 'on' the sky, ignore it return [] self.log.debug("=====") self.log.debug("Island ({0})".format(isle_num)) params = self.estimate_lmfit_parinfo(idata, rms, icurve, beam, innerclip, outerclip, offsets=[xmin, ymin], max_summits=max_summits) # islands at the edge of a region of nans # result in no components if params is None or params['components'].value < 1: return [] self.log.debug("Rms is {0}".format(np.shape(rms))) self.log.debug("Isle is {0}".format(np.shape(idata))) self.log.debug(" of which {0} are masked".format(sum(np.isnan(idata).ravel() * 1))) # Check that there is enough data to do the fit mx, my = np.where(np.isfinite(idata)) non_blank_pix = len(mx) free_vars = len([1 for a in params.keys() if params[a].vary]) if non_blank_pix < free_vars or free_vars == 0: self.log.debug("Island {0} doesn't have enough pixels to fit the given model".format(isle_num)) self.log.debug("non_blank_pix {0}, free_vars {1}".format(non_blank_pix, free_vars)) result = DummyLM() model = params is_flag |= flags.NOTFIT else: # 
Model is the fitted parameters fac = 1 / np.sqrt(2) if self.global_data.docov: C = Cmatrix(mx, my, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa) B = Bmatrix(C) else: C = B = None self.log.debug( "C({0},{1},{2},{3},{4})".format(len(mx), len(my), pixbeam.a * FWHM2CC, pixbeam.b * FWHM2CC, pixbeam.pa)) errs = np.nanmax(rms) self.log.debug("Initial params") self.log.debug(params) result, _ = do_lmfit(idata, params, B=B) if not result.errorbars: is_flag |= flags.FITERR # get the real (sky) parameter errors model = covar_errors(result.params, idata, errs=errs, B=B, C=C) if self.global_data.dobias and self.global_data.docov: x, y = np.indices(idata.shape) acf = elliptical_gaussian(x, y, 1, 0, 0, pixbeam.a * FWHM2CC * fac, pixbeam.b * FWHM2CC * fac, pixbeam.pa) bias_correct(model, idata, acf=acf * errs ** 2) if not result.success: is_flag |= flags.FITERR self.log.debug("Final params") self.log.debug(model) # convert the fitting results to a list of sources [and islands] sources = self.result_to_components(result, model, island_data, is_flag) return sources
[ "def", "_fit_island", "(", "self", ",", "island_data", ")", ":", "global_data", "=", "self", ".", "global_data", "# global data", "dcurve", "=", "global_data", ".", "dcurve", "rmsimg", "=", "global_data", ".", "rmsimg", "# island data", "isle_num", "=", "island_...
Take an Island, do all the parameter estimation and fitting. Parameters ---------- island_data : :class:`AegeanTools.models.IslandFittingData` The island to be fit. Returns ------- sources : list The sources that were fit.
[ "Take", "an", "Island", "do", "all", "the", "parameter", "estimation", "and", "fitting", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1270-L1370
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder._fit_islands
def _fit_islands(self, islands): """ Execute fitting on a list of islands This function just wraps around fit_island, so that when we do multiprocesing a single process will fit multiple islands before returning results. Parameters ---------- islands : list of :class:`AegeanTools.models.IslandFittingData` The islands to be fit. Returns ------- sources : list The sources that were fit. """ self.log.debug("Fitting group of {0} islands".format(len(islands))) sources = [] for island in islands: res = self._fit_island(island) sources.extend(res) return sources
python
def _fit_islands(self, islands): """ Execute fitting on a list of islands This function just wraps around fit_island, so that when we do multiprocesing a single process will fit multiple islands before returning results. Parameters ---------- islands : list of :class:`AegeanTools.models.IslandFittingData` The islands to be fit. Returns ------- sources : list The sources that were fit. """ self.log.debug("Fitting group of {0} islands".format(len(islands))) sources = [] for island in islands: res = self._fit_island(island) sources.extend(res) return sources
[ "def", "_fit_islands", "(", "self", ",", "islands", ")", ":", "self", ".", "log", ".", "debug", "(", "\"Fitting group of {0} islands\"", ".", "format", "(", "len", "(", "islands", ")", ")", ")", "sources", "=", "[", "]", "for", "island", "in", "islands",...
Execute fitting on a list of islands This function just wraps around fit_island, so that when we do multiprocesing a single process will fit multiple islands before returning results. Parameters ---------- islands : list of :class:`AegeanTools.models.IslandFittingData` The islands to be fit. Returns ------- sources : list The sources that were fit.
[ "Execute", "fitting", "on", "a", "list", "of", "islands", "This", "function", "just", "wraps", "around", "fit_island", "so", "that", "when", "we", "do", "multiprocesing", "a", "single", "process", "will", "fit", "multiple", "islands", "before", "returning", "r...
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1372-L1394
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder.find_sources_in_image
def find_sources_in_image(self, filename, hdu_index=0, outfile=None, rms=None, bkg=None, max_summits=None, innerclip=5, outerclip=4, cores=None, rmsin=None, bkgin=None, beam=None, doislandflux=False, nopositive=False, nonegative=False, mask=None, lat=None, imgpsf=None, blank=False, docov=True, cube_index=None): """ Run the Aegean source finder. Parameters ---------- filename : str or HDUList Image filename or HDUList. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rms : float Use this rms for the entire image (will also assume that background is 0) max_summits : int Fit up to this many components to each island (extras are included but not fit) innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). cores : int Number of CPU cores to use. None means all cores. rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. doislandflux : bool If True then each island will also be characterized. nopositive, nonegative : bool Whether to return positive or negative sources. Default nopositive=False, nonegative=True. mask : str The filename of a region file created by MIMAS. Islands outside of this region will be ignored. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. blank : bool Cause the output image to be blanked where islands are found. docov : bool If True then include covariance matrix in the fitting process. (default=True) cube_index : int For image cubes, cube_index determines which slice is used. Returns ------- sources : list List of sources found. 
""" # Tell numpy to be quiet np.seterr(invalid='ignore') if cores is not None: if not (cores >= 1): raise AssertionError("cores must be one or more") self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, rms=rms, bkg=bkg, cores=cores, verb=True, mask=mask, lat=lat, psf=imgpsf, blank=blank, docov=docov, cube_index=cube_index) global_data = self.global_data rmsimg = global_data.rmsimg data = global_data.data_pix self.log.info("beam = {0:5.2f}'' x {1:5.2f}'' at {2:5.2f}deg".format( global_data.beam.a * 3600, global_data.beam.b * 3600, global_data.beam.pa)) # stop people from doing silly things. if outerclip > innerclip: outerclip = innerclip self.log.info("seedclip={0}".format(innerclip)) self.log.info("floodclip={0}".format(outerclip)) isle_num = 0 if cores == 1: # single-threaded, no parallel processing queue = [] else: queue = pprocess.Queue(limit=cores, reuse=1) fit_parallel = queue.manage(pprocess.MakeReusable(self._fit_islands)) island_group = [] group_size = 20 for i, xmin, xmax, ymin, ymax in self._gen_flood_wrap(data, rmsimg, innerclip, outerclip, domask=True): # ignore empty islands # This should now be impossible to trigger if np.size(i) < 1: self.log.warn("Empty island detected, this should be imposisble.") continue isle_num += 1 scalars = (innerclip, outerclip, max_summits) offsets = (xmin, xmax, ymin, ymax) island_data = IslandFittingData(isle_num, i, scalars, offsets, doislandflux) # If cores==1 run fitting in main process. Otherwise build up groups of islands # and submit to queue for subprocesses. Passing a group of islands is more # efficient than passing single islands to the subprocesses. 
if cores == 1: res = self._fit_island(island_data) queue.append(res) else: island_group.append(island_data) # If the island group is full queue it for the subprocesses to fit if len(island_group) >= group_size: fit_parallel(island_group) island_group = [] # The last partially-filled island group also needs to be queued for fitting if len(island_group) > 0: fit_parallel(island_group) # Write the output to the output file if outfile: print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile) print(OutputSource.header, file=outfile) sources = [] for srcs in queue: if srcs: # ignore empty lists for src in srcs: # ignore sources that we have been told to ignore if (src.peak_flux > 0 and nopositive) or (src.peak_flux < 0 and nonegative): continue sources.append(src) if outfile: print(str(src), file=outfile) self.sources.extend(sources) return sources
python
def find_sources_in_image(self, filename, hdu_index=0, outfile=None, rms=None, bkg=None, max_summits=None, innerclip=5, outerclip=4, cores=None, rmsin=None, bkgin=None, beam=None, doislandflux=False, nopositive=False, nonegative=False, mask=None, lat=None, imgpsf=None, blank=False, docov=True, cube_index=None): """ Run the Aegean source finder. Parameters ---------- filename : str or HDUList Image filename or HDUList. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rms : float Use this rms for the entire image (will also assume that background is 0) max_summits : int Fit up to this many components to each island (extras are included but not fit) innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). cores : int Number of CPU cores to use. None means all cores. rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. doislandflux : bool If True then each island will also be characterized. nopositive, nonegative : bool Whether to return positive or negative sources. Default nopositive=False, nonegative=True. mask : str The filename of a region file created by MIMAS. Islands outside of this region will be ignored. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. blank : bool Cause the output image to be blanked where islands are found. docov : bool If True then include covariance matrix in the fitting process. (default=True) cube_index : int For image cubes, cube_index determines which slice is used. Returns ------- sources : list List of sources found. 
""" # Tell numpy to be quiet np.seterr(invalid='ignore') if cores is not None: if not (cores >= 1): raise AssertionError("cores must be one or more") self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, beam=beam, rms=rms, bkg=bkg, cores=cores, verb=True, mask=mask, lat=lat, psf=imgpsf, blank=blank, docov=docov, cube_index=cube_index) global_data = self.global_data rmsimg = global_data.rmsimg data = global_data.data_pix self.log.info("beam = {0:5.2f}'' x {1:5.2f}'' at {2:5.2f}deg".format( global_data.beam.a * 3600, global_data.beam.b * 3600, global_data.beam.pa)) # stop people from doing silly things. if outerclip > innerclip: outerclip = innerclip self.log.info("seedclip={0}".format(innerclip)) self.log.info("floodclip={0}".format(outerclip)) isle_num = 0 if cores == 1: # single-threaded, no parallel processing queue = [] else: queue = pprocess.Queue(limit=cores, reuse=1) fit_parallel = queue.manage(pprocess.MakeReusable(self._fit_islands)) island_group = [] group_size = 20 for i, xmin, xmax, ymin, ymax in self._gen_flood_wrap(data, rmsimg, innerclip, outerclip, domask=True): # ignore empty islands # This should now be impossible to trigger if np.size(i) < 1: self.log.warn("Empty island detected, this should be imposisble.") continue isle_num += 1 scalars = (innerclip, outerclip, max_summits) offsets = (xmin, xmax, ymin, ymax) island_data = IslandFittingData(isle_num, i, scalars, offsets, doislandflux) # If cores==1 run fitting in main process. Otherwise build up groups of islands # and submit to queue for subprocesses. Passing a group of islands is more # efficient than passing single islands to the subprocesses. 
if cores == 1: res = self._fit_island(island_data) queue.append(res) else: island_group.append(island_data) # If the island group is full queue it for the subprocesses to fit if len(island_group) >= group_size: fit_parallel(island_group) island_group = [] # The last partially-filled island group also needs to be queued for fitting if len(island_group) > 0: fit_parallel(island_group) # Write the output to the output file if outfile: print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile) print(OutputSource.header, file=outfile) sources = [] for srcs in queue: if srcs: # ignore empty lists for src in srcs: # ignore sources that we have been told to ignore if (src.peak_flux > 0 and nopositive) or (src.peak_flux < 0 and nonegative): continue sources.append(src) if outfile: print(str(src), file=outfile) self.sources.extend(sources) return sources
[ "def", "find_sources_in_image", "(", "self", ",", "filename", ",", "hdu_index", "=", "0", ",", "outfile", "=", "None", ",", "rms", "=", "None", ",", "bkg", "=", "None", ",", "max_summits", "=", "None", ",", "innerclip", "=", "5", ",", "outerclip", "=",...
Run the Aegean source finder. Parameters ---------- filename : str or HDUList Image filename or HDUList. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rms : float Use this rms for the entire image (will also assume that background is 0) max_summits : int Fit up to this many components to each island (extras are included but not fit) innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). cores : int Number of CPU cores to use. None means all cores. rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. doislandflux : bool If True then each island will also be characterized. nopositive, nonegative : bool Whether to return positive or negative sources. Default nopositive=False, nonegative=True. mask : str The filename of a region file created by MIMAS. Islands outside of this region will be ignored. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. blank : bool Cause the output image to be blanked where islands are found. docov : bool If True then include covariance matrix in the fitting process. (default=True) cube_index : int For image cubes, cube_index determines which slice is used. Returns ------- sources : list List of sources found.
[ "Run", "the", "Aegean", "source", "finder", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1396-L1540
train
PaulHancock/Aegean
AegeanTools/source_finder.py
SourceFinder.priorized_fit_islands
def priorized_fit_islands(self, filename, catalogue, hdu_index=0, outfile=None, bkgin=None, rmsin=None, cores=1, rms=None, bkg=None, beam=None, lat=None, imgpsf=None, catpsf=None, stage=3, ratio=None, outerclip=3, doregroup=True, docov=True, cube_index=None): """ Take an input catalog, and image, and optional background/noise images fit the flux and ra/dec for each of the given sources, keeping the morphology fixed if doregroup is true the groups will be recreated based on a matching radius/probability. if doregroup is false then the islands of the input catalog will be preserved. Multiple cores can be specified, and will be used. Parameters ---------- filename : str or HDUList Image filename or HDUList. catalogue : str or list Input catalogue file name or list of OutputSource objects. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. cores : int Number of CPU cores to use. None means all cores. rms : float Use this rms for the entire image (will also assume that background is 0) beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. catpsf : str or HDUList Filename or HDUList for the catalogue psf image. stage : int Refitting stage ratio : float If not None - ratio of image psf to catalog psf, otherwise interpret from catalogue or image if possible innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). docov : bool If True then include covariance matrix in the fitting process. 
(default=True) cube_index : int For image cubes, slice determines which slice is used. Returns ------- sources : list List of sources measured. """ from AegeanTools.cluster import regroup self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, rms=rms, bkg=bkg, cores=cores, verb=True, do_curve=False, beam=beam, lat=lat, psf=imgpsf, docov=docov, cube_index=cube_index) global_data = self.global_data far = 10 * global_data.beam.a # degrees # load the table and convert to an input source list if isinstance(catalogue, six.string_types): input_table = load_table(catalogue) input_sources = np.array(table_to_source_list(input_table)) else: input_sources = np.array(catalogue) if len(input_sources) < 1: self.log.debug("No input sources for priorized fitting") return [] # reject sources with missing params ok = True for param in ['ra', 'dec', 'peak_flux', 'a', 'b', 'pa']: if np.isnan(getattr(input_sources[0], param)): self.log.info("Source 0, is missing param '{0}'".format(param)) ok = False if not ok: self.log.error("Missing parameters! Not fitting.") self.log.error("Maybe your table is missing or mis-labeled columns?") return [] del ok src_mask = np.ones(len(input_sources), dtype=bool) # check to see if the input catalog contains psf information has_psf = getattr(input_sources[0], 'psf_a', None) is not None # the input sources are the initial conditions for our fits. # Expand each source size if needed. 
# If ratio is provided we just the psf by this amount if ratio is not None: self.log.info("Using ratio of {0} to scale input source shapes".format(ratio)) far *= ratio for i, src in enumerate(input_sources): # Sources with an unknown psf are rejected as they are either outside the image # or outside the region covered by the psf skybeam = global_data.psfhelper.get_beam(src.ra, src.dec) if skybeam is None: src_mask[i] = False self.log.info("Excluding source ({0.island},{0.source}) due to lack of psf knowledge".format(src)) continue # the new source size is the previous size, convolved with the expanded psf src.a = np.sqrt(src.a ** 2 + (skybeam.a * 3600) ** 2 * (1 - 1 / ratio ** 2)) src.b = np.sqrt(src.b ** 2 + (skybeam.b * 3600) ** 2 * (1 - 1 / ratio ** 2)) # source with funky a/b are also rejected if not np.all(np.isfinite((src.a, src.b))): self.log.info("Excluding source ({0.island},{0.source}) due to funky psf ({0.a},{0.b},{0.pa})".format(src)) src_mask[i] = False # if we know the psf from the input catalogue (has_psf), or if it was provided via a psf map # then we use that psf. elif catpsf is not None or has_psf: if catpsf is not None: self.log.info("Using catalog PSF from {0}".format(catpsf)) psf_helper = PSFHelper(catpsf, None) # might need to set the WCSHelper to be not None else: self.log.info("Using catalog PSF from input catalog") psf_helper = None for i, src in enumerate(input_sources): if (src.psf_a <=0) or (src.psf_b <=0): src_mask[i] = False self.log.info("Excluding source ({0.island},{0.source}) due to psf_a/b <=0".format(src)) continue if has_psf: catbeam = Beam(src.psf_a / 3600, src.psf_b / 3600, src.psf_pa) else: catbeam = psf_helper.get_beam(src.ra, src.dec) imbeam = global_data.psfhelper.get_beam(src.ra, src.dec) # If either of the above are None then we skip this source. 
if catbeam is None or imbeam is None: src_mask[i] = False self.log.info("Excluding source ({0.island},{0.source}) due to lack of psf knowledge".format(src)) continue # TODO: The following assumes that the various psf's are scaled versions of each other # and makes no account for differing position angles. This needs to be checked and/or addressed. # deconvolve the source shape from the catalogue psf src.a = (src.a / 3600) ** 2 - catbeam.a ** 2 + imbeam.a ** 2 # degrees # clip the minimum source shape to be the image psf if src.a < 0: src.a = imbeam.a * 3600 # arcsec else: src.a = np.sqrt(src.a) * 3600 # arcsec src.b = (src.b / 3600) ** 2 - catbeam.b ** 2 + imbeam.b ** 2 if src.b < 0: src.b = imbeam.b * 3600 # arcsec else: src.b = np.sqrt(src.b) * 3600 # arcsec else: self.log.info("Not scaling input source sizes") self.log.info("{0} sources in catalog".format(len(input_sources))) self.log.info("{0} sources accepted".format(sum(src_mask))) if len(src_mask) < 1: self.log.debug("No sources accepted for priorized fitting") return [] input_sources = input_sources[src_mask] # redo the grouping if required if doregroup: groups = regroup(input_sources, eps=np.sqrt(2), far=far) else: groups = list(island_itergen(input_sources)) if cores == 1: # single-threaded, no parallel processing queue = [] else: queue = pprocess.Queue(limit=cores, reuse=1) fit_parallel = queue.manage(pprocess.MakeReusable(self._refit_islands)) sources = [] island_group = [] group_size = 20 for i, island in enumerate(groups): island_group.append(island) # If the island group is full queue it for the subprocesses to fit if len(island_group) >= group_size: if cores > 1: fit_parallel(island_group, stage, outerclip, istart=i) else: res = self._refit_islands(island_group, stage, outerclip, istart=i) queue.append(res) island_group = [] # The last partially-filled island group also needs to be queued for fitting if len(island_group) > 0: if cores > 1: fit_parallel(island_group, stage, outerclip, istart=i) else: 
res = self._refit_islands(island_group, stage, outerclip, istart=i) queue.append(res) # now unpack the fitting results in to a list of sources for s in queue: sources.extend(s) sources = sorted(sources) # Write the output to the output file if outfile: print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile) print(OutputSource.header, file=outfile) components = 0 for source in sources: if isinstance(source, OutputSource): components += 1 if outfile: print(str(source), file=outfile) self.log.info("fit {0} components".format(components)) self.sources.extend(sources) return sources
python
def priorized_fit_islands(self, filename, catalogue, hdu_index=0, outfile=None, bkgin=None, rmsin=None, cores=1, rms=None, bkg=None, beam=None, lat=None, imgpsf=None, catpsf=None, stage=3, ratio=None, outerclip=3, doregroup=True, docov=True, cube_index=None): """ Take an input catalog, and image, and optional background/noise images fit the flux and ra/dec for each of the given sources, keeping the morphology fixed if doregroup is true the groups will be recreated based on a matching radius/probability. if doregroup is false then the islands of the input catalog will be preserved. Multiple cores can be specified, and will be used. Parameters ---------- filename : str or HDUList Image filename or HDUList. catalogue : str or list Input catalogue file name or list of OutputSource objects. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. cores : int Number of CPU cores to use. None means all cores. rms : float Use this rms for the entire image (will also assume that background is 0) beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. catpsf : str or HDUList Filename or HDUList for the catalogue psf image. stage : int Refitting stage ratio : float If not None - ratio of image psf to catalog psf, otherwise interpret from catalogue or image if possible innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). docov : bool If True then include covariance matrix in the fitting process. 
(default=True) cube_index : int For image cubes, slice determines which slice is used. Returns ------- sources : list List of sources measured. """ from AegeanTools.cluster import regroup self.load_globals(filename, hdu_index=hdu_index, bkgin=bkgin, rmsin=rmsin, rms=rms, bkg=bkg, cores=cores, verb=True, do_curve=False, beam=beam, lat=lat, psf=imgpsf, docov=docov, cube_index=cube_index) global_data = self.global_data far = 10 * global_data.beam.a # degrees # load the table and convert to an input source list if isinstance(catalogue, six.string_types): input_table = load_table(catalogue) input_sources = np.array(table_to_source_list(input_table)) else: input_sources = np.array(catalogue) if len(input_sources) < 1: self.log.debug("No input sources for priorized fitting") return [] # reject sources with missing params ok = True for param in ['ra', 'dec', 'peak_flux', 'a', 'b', 'pa']: if np.isnan(getattr(input_sources[0], param)): self.log.info("Source 0, is missing param '{0}'".format(param)) ok = False if not ok: self.log.error("Missing parameters! Not fitting.") self.log.error("Maybe your table is missing or mis-labeled columns?") return [] del ok src_mask = np.ones(len(input_sources), dtype=bool) # check to see if the input catalog contains psf information has_psf = getattr(input_sources[0], 'psf_a', None) is not None # the input sources are the initial conditions for our fits. # Expand each source size if needed. 
# If ratio is provided we just the psf by this amount if ratio is not None: self.log.info("Using ratio of {0} to scale input source shapes".format(ratio)) far *= ratio for i, src in enumerate(input_sources): # Sources with an unknown psf are rejected as they are either outside the image # or outside the region covered by the psf skybeam = global_data.psfhelper.get_beam(src.ra, src.dec) if skybeam is None: src_mask[i] = False self.log.info("Excluding source ({0.island},{0.source}) due to lack of psf knowledge".format(src)) continue # the new source size is the previous size, convolved with the expanded psf src.a = np.sqrt(src.a ** 2 + (skybeam.a * 3600) ** 2 * (1 - 1 / ratio ** 2)) src.b = np.sqrt(src.b ** 2 + (skybeam.b * 3600) ** 2 * (1 - 1 / ratio ** 2)) # source with funky a/b are also rejected if not np.all(np.isfinite((src.a, src.b))): self.log.info("Excluding source ({0.island},{0.source}) due to funky psf ({0.a},{0.b},{0.pa})".format(src)) src_mask[i] = False # if we know the psf from the input catalogue (has_psf), or if it was provided via a psf map # then we use that psf. elif catpsf is not None or has_psf: if catpsf is not None: self.log.info("Using catalog PSF from {0}".format(catpsf)) psf_helper = PSFHelper(catpsf, None) # might need to set the WCSHelper to be not None else: self.log.info("Using catalog PSF from input catalog") psf_helper = None for i, src in enumerate(input_sources): if (src.psf_a <=0) or (src.psf_b <=0): src_mask[i] = False self.log.info("Excluding source ({0.island},{0.source}) due to psf_a/b <=0".format(src)) continue if has_psf: catbeam = Beam(src.psf_a / 3600, src.psf_b / 3600, src.psf_pa) else: catbeam = psf_helper.get_beam(src.ra, src.dec) imbeam = global_data.psfhelper.get_beam(src.ra, src.dec) # If either of the above are None then we skip this source. 
if catbeam is None or imbeam is None: src_mask[i] = False self.log.info("Excluding source ({0.island},{0.source}) due to lack of psf knowledge".format(src)) continue # TODO: The following assumes that the various psf's are scaled versions of each other # and makes no account for differing position angles. This needs to be checked and/or addressed. # deconvolve the source shape from the catalogue psf src.a = (src.a / 3600) ** 2 - catbeam.a ** 2 + imbeam.a ** 2 # degrees # clip the minimum source shape to be the image psf if src.a < 0: src.a = imbeam.a * 3600 # arcsec else: src.a = np.sqrt(src.a) * 3600 # arcsec src.b = (src.b / 3600) ** 2 - catbeam.b ** 2 + imbeam.b ** 2 if src.b < 0: src.b = imbeam.b * 3600 # arcsec else: src.b = np.sqrt(src.b) * 3600 # arcsec else: self.log.info("Not scaling input source sizes") self.log.info("{0} sources in catalog".format(len(input_sources))) self.log.info("{0} sources accepted".format(sum(src_mask))) if len(src_mask) < 1: self.log.debug("No sources accepted for priorized fitting") return [] input_sources = input_sources[src_mask] # redo the grouping if required if doregroup: groups = regroup(input_sources, eps=np.sqrt(2), far=far) else: groups = list(island_itergen(input_sources)) if cores == 1: # single-threaded, no parallel processing queue = [] else: queue = pprocess.Queue(limit=cores, reuse=1) fit_parallel = queue.manage(pprocess.MakeReusable(self._refit_islands)) sources = [] island_group = [] group_size = 20 for i, island in enumerate(groups): island_group.append(island) # If the island group is full queue it for the subprocesses to fit if len(island_group) >= group_size: if cores > 1: fit_parallel(island_group, stage, outerclip, istart=i) else: res = self._refit_islands(island_group, stage, outerclip, istart=i) queue.append(res) island_group = [] # The last partially-filled island group also needs to be queued for fitting if len(island_group) > 0: if cores > 1: fit_parallel(island_group, stage, outerclip, istart=i) else: 
res = self._refit_islands(island_group, stage, outerclip, istart=i) queue.append(res) # now unpack the fitting results in to a list of sources for s in queue: sources.extend(s) sources = sorted(sources) # Write the output to the output file if outfile: print(header.format("{0}-({1})".format(__version__, __date__), filename), file=outfile) print(OutputSource.header, file=outfile) components = 0 for source in sources: if isinstance(source, OutputSource): components += 1 if outfile: print(str(source), file=outfile) self.log.info("fit {0} components".format(components)) self.sources.extend(sources) return sources
[ "def", "priorized_fit_islands", "(", "self", ",", "filename", ",", "catalogue", ",", "hdu_index", "=", "0", ",", "outfile", "=", "None", ",", "bkgin", "=", "None", ",", "rmsin", "=", "None", ",", "cores", "=", "1", ",", "rms", "=", "None", ",", "bkg"...
Take an input catalog, and image, and optional background/noise images fit the flux and ra/dec for each of the given sources, keeping the morphology fixed if doregroup is true the groups will be recreated based on a matching radius/probability. if doregroup is false then the islands of the input catalog will be preserved. Multiple cores can be specified, and will be used. Parameters ---------- filename : str or HDUList Image filename or HDUList. catalogue : str or list Input catalogue file name or list of OutputSource objects. hdu_index : int The index of the FITS HDU (extension). outfile : str file for printing catalog (NOT a table, just a text file of my own design) rmsin, bkgin : str or HDUList Filename or HDUList for the noise and background images. If either are None, then it will be calculated internally. cores : int Number of CPU cores to use. None means all cores. rms : float Use this rms for the entire image (will also assume that background is 0) beam : (major, minor, pa) Floats representing the synthesised beam (degrees). Replaces whatever is given in the FITS header. If the FITS header has no BMAJ/BMIN then this is required. lat : float The latitude of the telescope (declination of zenith). imgpsf : str or HDUList Filename or HDUList for a psf image. catpsf : str or HDUList Filename or HDUList for the catalogue psf image. stage : int Refitting stage ratio : float If not None - ratio of image psf to catalog psf, otherwise interpret from catalogue or image if possible innerclip, outerclip : float The seed (inner) and flood (outer) clipping level (sigmas). docov : bool If True then include covariance matrix in the fitting process. (default=True) cube_index : int For image cubes, slice determines which slice is used. Returns ------- sources : list List of sources measured.
[ "Take", "an", "input", "catalog", "and", "image", "and", "optional", "background", "/", "noise", "images", "fit", "the", "flux", "and", "ra", "/", "dec", "for", "each", "of", "the", "given", "sources", "keeping", "the", "morphology", "fixed" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/source_finder.py#L1542-L1782
train
PaulHancock/Aegean
AegeanTools/catalogs.py
check_table_formats
def check_table_formats(files): """ Determine whether a list of files are of a recognizable output type. Parameters ---------- files : str A list of file names Returns ------- result : bool True if *all* the file names are supported """ cont = True formats = get_table_formats() for t in files.split(','): _, ext = os.path.splitext(t) ext = ext[1:].lower() if ext not in formats: cont = False log.warn("Format not supported for {0} ({1})".format(t, ext)) if not cont: log.error("Invalid table format specified.") return cont
python
def check_table_formats(files): """ Determine whether a list of files are of a recognizable output type. Parameters ---------- files : str A list of file names Returns ------- result : bool True if *all* the file names are supported """ cont = True formats = get_table_formats() for t in files.split(','): _, ext = os.path.splitext(t) ext = ext[1:].lower() if ext not in formats: cont = False log.warn("Format not supported for {0} ({1})".format(t, ext)) if not cont: log.error("Invalid table format specified.") return cont
[ "def", "check_table_formats", "(", "files", ")", ":", "cont", "=", "True", "formats", "=", "get_table_formats", "(", ")", "for", "t", "in", "files", ".", "split", "(", "','", ")", ":", "_", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", ...
Determine whether a list of files are of a recognizable output type. Parameters ---------- files : str A list of file names Returns ------- result : bool True if *all* the file names are supported
[ "Determine", "whether", "a", "list", "of", "files", "are", "of", "a", "recognizable", "output", "type", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L45-L69
train
PaulHancock/Aegean
AegeanTools/catalogs.py
show_formats
def show_formats(): """ Print a list of all the file formats that are supported for writing. The file formats are determined by their extensions. Returns ------- None """ fmts = { "ann": "Kvis annotation", "reg": "DS9 regions file", "fits": "FITS Binary Table", "csv": "Comma separated values", "tab": "tabe separated values", "tex": "LaTeX table format", "html": "HTML table", "vot": "VO-Table", "xml": "VO-Table", "db": "Sqlite3 database", "sqlite": "Sqlite3 database"} supported = get_table_formats() print("Extension | Description | Supported?") for k in sorted(fmts.keys()): print("{0:10s} {1:24s} {2}".format(k, fmts[k], k in supported)) return
python
def show_formats(): """ Print a list of all the file formats that are supported for writing. The file formats are determined by their extensions. Returns ------- None """ fmts = { "ann": "Kvis annotation", "reg": "DS9 regions file", "fits": "FITS Binary Table", "csv": "Comma separated values", "tab": "tabe separated values", "tex": "LaTeX table format", "html": "HTML table", "vot": "VO-Table", "xml": "VO-Table", "db": "Sqlite3 database", "sqlite": "Sqlite3 database"} supported = get_table_formats() print("Extension | Description | Supported?") for k in sorted(fmts.keys()): print("{0:10s} {1:24s} {2}".format(k, fmts[k], k in supported)) return
[ "def", "show_formats", "(", ")", ":", "fmts", "=", "{", "\"ann\"", ":", "\"Kvis annotation\"", ",", "\"reg\"", ":", "\"DS9 regions file\"", ",", "\"fits\"", ":", "\"FITS Binary Table\"", ",", "\"csv\"", ":", "\"Comma separated values\"", ",", "\"tab\"", ":", "\"ta...
Print a list of all the file formats that are supported for writing. The file formats are determined by their extensions. Returns ------- None
[ "Print", "a", "list", "of", "all", "the", "file", "formats", "that", "are", "supported", "for", "writing", ".", "The", "file", "formats", "are", "determined", "by", "their", "extensions", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L72-L97
train
PaulHancock/Aegean
AegeanTools/catalogs.py
update_meta_data
def update_meta_data(meta=None): """ Modify the metadata dictionary. DATE, PROGRAM, and PROGVER are added/modified. Parameters ---------- meta : dict The dictionary to be modified, default = None (empty) Returns ------- An updated dictionary. """ if meta is None: meta = {} if 'DATE' not in meta: meta['DATE'] = strftime("%Y-%m-%d %H:%M:%S", gmtime()) if 'PROGRAM' not in meta: meta['PROGRAM'] = "AegeanTools.catalogs" meta['PROGVER'] = "{0}-({1})".format(__version__, __date__) return meta
python
def update_meta_data(meta=None): """ Modify the metadata dictionary. DATE, PROGRAM, and PROGVER are added/modified. Parameters ---------- meta : dict The dictionary to be modified, default = None (empty) Returns ------- An updated dictionary. """ if meta is None: meta = {} if 'DATE' not in meta: meta['DATE'] = strftime("%Y-%m-%d %H:%M:%S", gmtime()) if 'PROGRAM' not in meta: meta['PROGRAM'] = "AegeanTools.catalogs" meta['PROGVER'] = "{0}-({1})".format(__version__, __date__) return meta
[ "def", "update_meta_data", "(", "meta", "=", "None", ")", ":", "if", "meta", "is", "None", ":", "meta", "=", "{", "}", "if", "'DATE'", "not", "in", "meta", ":", "meta", "[", "'DATE'", "]", "=", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ",", "gmtime", ...
Modify the metadata dictionary. DATE, PROGRAM, and PROGVER are added/modified. Parameters ---------- meta : dict The dictionary to be modified, default = None (empty) Returns ------- An updated dictionary.
[ "Modify", "the", "metadata", "dictionary", ".", "DATE", "PROGRAM", "and", "PROGVER", "are", "added", "/", "modified", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L121-L142
train
PaulHancock/Aegean
AegeanTools/catalogs.py
save_catalog
def save_catalog(filename, catalog, meta=None, prefix=None): """ Save a catalogue of sources using filename as a model. Meta data can be written to some file types (fits, votable). Each type of source will be in a separate file: - base_comp.ext :class:`AegeanTools.models.OutputSource` - base_isle.ext :class:`AegeanTools.models.IslandSource` - base_simp.ext :class:`AegeanTools.models.SimpleSource` Where filename = `base.ext` Parameters ---------- filename : str Name of file to write, format is determined by extension. catalog : list A list of sources to write. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. prefix : str Prepend each column name with "prefix_". Default is to prepend nothing. meta : dict Meta data to be written to the output file. Support for metadata depends on file type. Returns ------- None """ ascii_table_formats = {'csv': 'csv', 'tab': 'tab', 'tex': 'latex', 'html': 'html'} # .ann and .reg are handled by me meta = update_meta_data(meta) extension = os.path.splitext(filename)[1][1:].lower() if extension in ['ann', 'reg']: writeAnn(filename, catalog, extension) elif extension in ['db', 'sqlite']: writeDB(filename, catalog, meta) elif extension in ['hdf5', 'fits', 'vo', 'vot', 'xml']: write_catalog(filename, catalog, extension, meta, prefix=prefix) elif extension in ascii_table_formats.keys(): write_catalog(filename, catalog, fmt=ascii_table_formats[extension], meta=meta, prefix=prefix) else: log.warning("extension not recognised {0}".format(extension)) log.warning("You get tab format") write_catalog(filename, catalog, fmt='tab', prefix=prefix) return
python
def save_catalog(filename, catalog, meta=None, prefix=None): """ Save a catalogue of sources using filename as a model. Meta data can be written to some file types (fits, votable). Each type of source will be in a separate file: - base_comp.ext :class:`AegeanTools.models.OutputSource` - base_isle.ext :class:`AegeanTools.models.IslandSource` - base_simp.ext :class:`AegeanTools.models.SimpleSource` Where filename = `base.ext` Parameters ---------- filename : str Name of file to write, format is determined by extension. catalog : list A list of sources to write. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. prefix : str Prepend each column name with "prefix_". Default is to prepend nothing. meta : dict Meta data to be written to the output file. Support for metadata depends on file type. Returns ------- None """ ascii_table_formats = {'csv': 'csv', 'tab': 'tab', 'tex': 'latex', 'html': 'html'} # .ann and .reg are handled by me meta = update_meta_data(meta) extension = os.path.splitext(filename)[1][1:].lower() if extension in ['ann', 'reg']: writeAnn(filename, catalog, extension) elif extension in ['db', 'sqlite']: writeDB(filename, catalog, meta) elif extension in ['hdf5', 'fits', 'vo', 'vot', 'xml']: write_catalog(filename, catalog, extension, meta, prefix=prefix) elif extension in ascii_table_formats.keys(): write_catalog(filename, catalog, fmt=ascii_table_formats[extension], meta=meta, prefix=prefix) else: log.warning("extension not recognised {0}".format(extension)) log.warning("You get tab format") write_catalog(filename, catalog, fmt='tab', prefix=prefix) return
[ "def", "save_catalog", "(", "filename", ",", "catalog", ",", "meta", "=", "None", ",", "prefix", "=", "None", ")", ":", "ascii_table_formats", "=", "{", "'csv'", ":", "'csv'", ",", "'tab'", ":", "'tab'", ",", "'tex'", ":", "'latex'", ",", "'html'", ":"...
Save a catalogue of sources using filename as a model. Meta data can be written to some file types (fits, votable). Each type of source will be in a separate file: - base_comp.ext :class:`AegeanTools.models.OutputSource` - base_isle.ext :class:`AegeanTools.models.IslandSource` - base_simp.ext :class:`AegeanTools.models.SimpleSource` Where filename = `base.ext` Parameters ---------- filename : str Name of file to write, format is determined by extension. catalog : list A list of sources to write. Sources must be of type :class:`AegeanTools.models.OutputSource`, :class:`AegeanTools.models.SimpleSource`, or :class:`AegeanTools.models.IslandSource`. prefix : str Prepend each column name with "prefix_". Default is to prepend nothing. meta : dict Meta data to be written to the output file. Support for metadata depends on file type. Returns ------- None
[ "Save", "a", "catalogue", "of", "sources", "using", "filename", "as", "a", "model", ".", "Meta", "data", "can", "be", "written", "to", "some", "file", "types", "(", "fits", "votable", ")", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L145-L194
train
PaulHancock/Aegean
AegeanTools/catalogs.py
load_catalog
def load_catalog(filename): """ Load a catalogue and extract the source positions (only) Parameters ---------- filename : str Filename to read. Supported types are csv, tab, tex, vo, vot, and xml. Returns ------- catalogue : list A list of [ (ra, dec), ...] """ supported = get_table_formats() fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.' if fmt in ['csv', 'tab', 'tex'] and fmt in supported: log.info("Reading file {0}".format(filename)) t = ascii.read(filename) catalog = list(zip(t.columns['ra'], t.columns['dec'])) elif fmt in ['vo', 'vot', 'xml'] and fmt in supported: log.info("Reading file {0}".format(filename)) t = parse_single_table(filename) catalog = list(zip(t.array['ra'].tolist(), t.array['dec'].tolist())) else: log.info("Assuming ascii format, reading first two columns") lines = [a.strip().split() for a in open(filename, 'r').readlines() if not a.startswith('#')] try: catalog = [(float(a[0]), float(a[1])) for a in lines] except: log.error("Expecting two columns of floats but failed to parse") log.error("Catalog file {0} not loaded".format(filename)) raise Exception("Could not determine file format") return catalog
python
def load_catalog(filename): """ Load a catalogue and extract the source positions (only) Parameters ---------- filename : str Filename to read. Supported types are csv, tab, tex, vo, vot, and xml. Returns ------- catalogue : list A list of [ (ra, dec), ...] """ supported = get_table_formats() fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.' if fmt in ['csv', 'tab', 'tex'] and fmt in supported: log.info("Reading file {0}".format(filename)) t = ascii.read(filename) catalog = list(zip(t.columns['ra'], t.columns['dec'])) elif fmt in ['vo', 'vot', 'xml'] and fmt in supported: log.info("Reading file {0}".format(filename)) t = parse_single_table(filename) catalog = list(zip(t.array['ra'].tolist(), t.array['dec'].tolist())) else: log.info("Assuming ascii format, reading first two columns") lines = [a.strip().split() for a in open(filename, 'r').readlines() if not a.startswith('#')] try: catalog = [(float(a[0]), float(a[1])) for a in lines] except: log.error("Expecting two columns of floats but failed to parse") log.error("Catalog file {0} not loaded".format(filename)) raise Exception("Could not determine file format") return catalog
[ "def", "load_catalog", "(", "filename", ")", ":", "supported", "=", "get_table_formats", "(", ")", "fmt", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "-", "1", "]", "[", "1", ":", "]", ".", "lower", "(", ")", "# extension sans...
Load a catalogue and extract the source positions (only) Parameters ---------- filename : str Filename to read. Supported types are csv, tab, tex, vo, vot, and xml. Returns ------- catalogue : list A list of [ (ra, dec), ...]
[ "Load", "a", "catalogue", "and", "extract", "the", "source", "positions", "(", "only", ")" ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L197-L236
train
PaulHancock/Aegean
AegeanTools/catalogs.py
load_table
def load_table(filename): """ Load a table from a given file. Supports csv, tab, tex, vo, vot, xml, fits, and hdf5. Parameters ---------- filename : str File to read Returns ------- table : Table Table of data. """ supported = get_table_formats() fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.' if fmt in ['csv', 'tab', 'tex'] and fmt in supported: log.info("Reading file {0}".format(filename)) t = ascii.read(filename) elif fmt in ['vo', 'vot', 'xml', 'fits', 'hdf5'] and fmt in supported: log.info("Reading file {0}".format(filename)) t = Table.read(filename) else: log.error("Table format not recognized or supported") log.error("{0} [{1}]".format(filename, fmt)) raise Exception("Table format not recognized or supported") return t
python
def load_table(filename): """ Load a table from a given file. Supports csv, tab, tex, vo, vot, xml, fits, and hdf5. Parameters ---------- filename : str File to read Returns ------- table : Table Table of data. """ supported = get_table_formats() fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.' if fmt in ['csv', 'tab', 'tex'] and fmt in supported: log.info("Reading file {0}".format(filename)) t = ascii.read(filename) elif fmt in ['vo', 'vot', 'xml', 'fits', 'hdf5'] and fmt in supported: log.info("Reading file {0}".format(filename)) t = Table.read(filename) else: log.error("Table format not recognized or supported") log.error("{0} [{1}]".format(filename, fmt)) raise Exception("Table format not recognized or supported") return t
[ "def", "load_table", "(", "filename", ")", ":", "supported", "=", "get_table_formats", "(", ")", "fmt", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "-", "1", "]", "[", "1", ":", "]", ".", "lower", "(", ")", "# extension sans '...
Load a table from a given file. Supports csv, tab, tex, vo, vot, xml, fits, and hdf5. Parameters ---------- filename : str File to read Returns ------- table : Table Table of data.
[ "Load", "a", "table", "from", "a", "given", "file", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L239-L269
train
PaulHancock/Aegean
AegeanTools/catalogs.py
write_table
def write_table(table, filename): """ Write a table to a file. Parameters ---------- table : Table Table to be written filename : str Destination for saving table. Returns ------- None """ try: if os.path.exists(filename): os.remove(filename) table.write(filename) log.info("Wrote {0}".format(filename)) except Exception as e: if "Format could not be identified" not in e.message: raise e else: fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.' raise Exception("Cannot auto-determine format for {0}".format(fmt)) return
python
def write_table(table, filename): """ Write a table to a file. Parameters ---------- table : Table Table to be written filename : str Destination for saving table. Returns ------- None """ try: if os.path.exists(filename): os.remove(filename) table.write(filename) log.info("Wrote {0}".format(filename)) except Exception as e: if "Format could not be identified" not in e.message: raise e else: fmt = os.path.splitext(filename)[-1][1:].lower() # extension sans '.' raise Exception("Cannot auto-determine format for {0}".format(fmt)) return
[ "def", "write_table", "(", "table", ",", "filename", ")", ":", "try", ":", "if", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "os", ".", "remove", "(", "filename", ")", "table", ".", "write", "(", "filename", ")", "log", ".", "info"...
Write a table to a file. Parameters ---------- table : Table Table to be written filename : str Destination for saving table. Returns ------- None
[ "Write", "a", "table", "to", "a", "file", "." ]
185d2b4a51b48441a1df747efc9a5271c79399fd
https://github.com/PaulHancock/Aegean/blob/185d2b4a51b48441a1df747efc9a5271c79399fd/AegeanTools/catalogs.py#L272-L299
train