def annotate(self, framedata):
    """Annotates the processed axis with given annotations for the
    provided framedata.

    Args:
        framedata: The current frame number.
    """
    for artist in self.annotation_artists:
        artist.remove()
    self.annotation_artists = []
    for annotation in self.annotations:
        if annotation[2] > framedata:
            return
        if annotation[2] == framedata:
            pos = annotation[0:2]
            shape = self.annotations_default['shape']
            color = self.annotations_default['color']
            size = self.annotations_default['size']
            line = self.annotations_default['line']
            if len(annotation) > 3:
                shape = annotation[3].get('shape', shape)
                color = annotation[3].get('color', color)
                size = annotation[3].get('size', size)
                line = annotation[3].get('line', line)
            if shape == 'CIRC' and hasattr(size, '__len__'):
                size = 30
            if not hasattr(color, '__len__'):
                color = (color,) * 3
            if shape == 'RECT':
                patch = patches.Rectangle((pos[0] - size[0] // 2,
                                           pos[1] - size[1] // 2),
                                          size[0], size[1], fill=False,
                                          lw=line, fc='none', ec=color)
            elif shape == 'CIRC':
                patch = patches.CirclePolygon(pos, radius=size,
                                              fc='none', ec=color, lw=line)
            self.annotation_artists.append(patch)
            self.axes_processed.add_artist(self.annotation_artists[-1])
def _draw_frame(self, framedata):
    """Reads, processes and draws the frames.

    If needed for color maps, conversions to gray scale are performed.
    In case the images are no color images and no custom color maps are
    defined, the colormap `gray` is applied.

    This function is called by TimedAnimation.

    Args:
        framedata: The frame data.
    """
    original = self.read_frame()
    if original is None:
        self.update_info(self.info_string(message='Finished.',
                                          frame=framedata))
        return
    if self.original is not None:
        processed = self.process_frame(original.copy())
        if self.cmap_original is not None:
            original = to_gray(original)
        elif not is_color_image(original):
            self.original.set_cmap('gray')
        self.original.set_data(original)
    else:
        processed = self.process_frame(original)
    if self.cmap_processed is not None:
        processed = to_gray(processed)
    elif not is_color_image(processed):
        self.processed.set_cmap('gray')
    if self.annotations:
        self.annotate(framedata)
    self.processed.set_data(processed)
    self.update_info(self.info_string(frame=framedata))
def update_info(self, custom=None):
    """Updates the figure's suptitle.

    Calls self.info_string() unless custom is provided.

    Args:
        custom: Overwrite it with this string, unless None.
    """
    self.figure.suptitle(self.info_string() if custom is None else custom)
def info_string(self, size=None, message='', frame=-1):
    """Returns information about the stream.

    Generates a string containing size, frame number, and info messages.
    Omits unnecessary information (e.g. empty messages and frame -1).

    This method is primarily used to update the suptitle of the plot
    figure.

    Args:
        size: Optional size tuple; falls back to self.size.
        message: Optional info message to append.
        frame: The current frame number; omitted if negative.

    Returns:
        An info string.
    """
    info = []
    if size is not None:
        info.append('Size: {1}x{0}'.format(*size))
    elif self.size is not None:
        info.append('Size: {1}x{0}'.format(*self.size))
    if frame >= 0:
        info.append('Frame: {}'.format(frame))
    if message != '':
        info.append('{}'.format(message))
    return ' '.join(info)
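For reference, a couple of hypothetical outputs (values invented; note the swapped '{1}x{0}' format prints the second size component first):

# self.info_string(size=(480, 640), frame=12)
#   -> 'Size: 640x480 Frame: 12'
# self.info_string(message='Finished.', frame=42)
#   -> 'Frame: 42 Finished.'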
def main():
    """Sanitizes the loaded *.ipynb."""
    with open(sys.argv[1], 'r') as nbfile:
        notebook = json.load(nbfile)

    # remove kernelspec (venvs)
    try:
        del notebook['metadata']['kernelspec']
    except KeyError:
        pass

    # remove outputs and metadata, set execution counts to None
    for cell in notebook['cells']:
        try:
            if cell['cell_type'] == 'code':
                cell['outputs'] = []
                cell['execution_count'] = None
                cell['metadata'] = {}
        except KeyError:
            pass

    with open(sys.argv[1], 'w') as nbfile:
        json.dump(notebook, nbfile, indent=1)
def create(self, comment, mentions=()):
    """
    create comment
    :param comment: comment text
    :param mentions: list of (code, type) pairs, where type is "USER",
        "GROUP", and so on
    :return:
    """
    data = {
        "app": self.app_id,
        "record": self.record_id,
        "comment": {
            "text": comment,
        }
    }
    if len(mentions) > 0:
        _mentions = []
        for m in mentions:
            if isinstance(m, (list, tuple)):
                if len(m) == 2:
                    _mentions.append({
                        "code": m[0],
                        "type": m[1]
                    })
                else:
                    raise Exception(
                        "mentions must contain a code and a target type, "
                        "e.g. [('user_1', 'USER')]")
            elif isinstance(m, Mention):
                _mentions.append(m.serialize())
        data["comment"]["mentions"] = _mentions
    resp = self._request("POST", self._url, data)
    r = cr.CreateCommentResult(resp)
    return r
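A hypothetical call against this comment API; `comment_api` stands in for an instance of the class defining create() above, and the codes are invented:

result = comment_api.create(
    "Please review this record.",
    mentions=[("user_1", "USER"), ("sales_group", "GROUP")],
)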
def _consume(iterator, n=None):
    """Advance the iterator n-steps ahead. If n is None, consume entirely."""
    # Use functions that consume iterators at C speed.
    if n is None:
        # feed the entire iterator into a zero-length deque
        collections.deque(iterator, maxlen=0)
    else:
        # advance to the empty slice starting at position n
        next(itertools.islice(iterator, n, n), None)
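A self-contained sketch of how this behaves (re-declaring the helper so the snippet runs on its own):

import collections
import itertools

def _consume(iterator, n=None):
    if n is None:
        collections.deque(iterator, maxlen=0)      # drain completely at C speed
    else:
        next(itertools.islice(iterator, n, n), None)  # skip exactly n items

it = iter(range(10))
_consume(it, 3)            # skip 0, 1, 2
print(next(it))            # -> 3
_consume(it)               # drain the rest
print(next(it, 'empty'))   # -> 'empty'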
def _slice_required_len(slice_obj):
    """
    Calculate how many items must be in the collection to satisfy
    this slice.

    Returns `None` for slices that may vary based on the length of the
    underlying collection, such as `lst[-1]` or `lst[::]`.
    """
    if slice_obj.step and slice_obj.step != 1:
        return None

    # (None, None, *) requires the entire list
    if slice_obj.start is None and slice_obj.stop is None:
        return None

    # Negative indexes are hard without knowing the collection length
    if slice_obj.start and slice_obj.start < 0:
        return None
    if slice_obj.stop and slice_obj.stop < 0:
        return None

    if slice_obj.stop:
        if slice_obj.start and slice_obj.start > slice_obj.stop:
            return 0
        return slice_obj.stop

    return slice_obj.start + 1
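A few illustrative calls, assuming `_slice_required_len` above is in scope:

print(_slice_required_len(slice(None, 5, None)))   # 5    -> lst[:5] needs 5 items
print(_slice_required_len(slice(2, None, None)))   # 3    -> lst[2:] needs at least 3
print(_slice_required_len(slice(None, None, 2)))   # None -> step != 1
print(_slice_required_len(slice(-1, None, None)))  # None -> negative index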
def stylize(text, styles, reset=True):
    """Conveniently styles your text and resets ANSI codes at its end."""
    terminator = attr("reset") if reset else ""
    return "{}{}{}".format("".join(styles), text, terminator)
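A quick usage sketch, assuming `stylize` above is importable alongside the `colored` package's `fg` and `attr` helpers, which supply the style strings it joins:

from colored import fg, attr

# Bold red text, reset at the end:
print(stylize("error: disk full", [fg("red"), attr("bold")]))
# No reset: subsequent output keeps the green foreground.
print(stylize("ok", [fg("green")], reset=False))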
def stylize_interactive(text, styles, reset=True):
    """stylize() variant that adds C0 control codes (SOH/STX) for
    readline safety."""
    # problem: readline includes bare ANSI codes in width calculations.
    # solution: wrap nonprinting codes in SOH/STX when necessary.
    # see: https://github.com/dslackw/colored/issues/5
    terminator = _c0wrap(attr("reset")) if reset else ""
    return "{}{}{}".format(_c0wrap(styles), text, terminator)
def attribute(self):
    """Set or reset attributes"""
    paint = {
        "bold": self.ESC + "1" + self.END, 1: self.ESC + "1" + self.END,
        "dim": self.ESC + "2" + self.END, 2: self.ESC + "2" + self.END,
        "underlined": self.ESC + "4" + self.END, 4: self.ESC + "4" + self.END,
        "blink": self.ESC + "5" + self.END, 5: self.ESC + "5" + self.END,
        "reverse": self.ESC + "7" + self.END, 7: self.ESC + "7" + self.END,
        "hidden": self.ESC + "8" + self.END, 8: self.ESC + "8" + self.END,
        "reset": self.ESC + "0" + self.END, 0: self.ESC + "0" + self.END,
        "res_bold": self.ESC + "21" + self.END,
        21: self.ESC + "21" + self.END,
        "res_dim": self.ESC + "22" + self.END,
        22: self.ESC + "22" + self.END,
        "res_underlined": self.ESC + "24" + self.END,
        24: self.ESC + "24" + self.END,
        "res_blink": self.ESC + "25" + self.END,
        25: self.ESC + "25" + self.END,
        "res_reverse": self.ESC + "27" + self.END,
        27: self.ESC + "27" + self.END,
        "res_hidden": self.ESC + "28" + self.END,
        28: self.ESC + "28" + self.END,
    }
    return paint[self.color]
def foreground(self):
    """Return the escape code for one of 256 foreground colors"""
    code = self.ESC + "38;5;"
    if str(self.color).isdigit():
        self.reverse_dict()
        color = self.reserve_paint[str(self.color)]
        return code + self.paint[color] + self.END
    elif self.color.startswith("#"):
        return code + str(self.HEX) + self.END
    else:
        return code + self.paint[self.color] + self.END
def reverse_dict(self):
    """Build the reversed color dictionary (values become keys)."""
    self.reserve_paint = dict(zip(self.paint.values(), self.paint.keys()))
def reset(self, required=False):
    """
    Perform a reset and check for presence pulse.

    :param bool required: require presence pulse
    """
    reset = self._ow.reset()
    if required and reset:
        raise OneWireError("No presence pulse found. Check devices and wiring.")
    return not reset
def readinto(self, buf, *, start=0, end=None):
    """
    Read into ``buf`` from the device. The number of bytes read will be
    the length of ``buf``.

    If ``start`` or ``end`` is provided, then the buffer will be sliced
    as if ``buf[start:end]``. This will not cause an allocation like
    ``buf[start:end]`` will so it saves memory.

    :param bytearray buf: buffer to write into
    :param int start: Index to start writing at
    :param int end: Index to write up to but not include
    """
    if end is None:
        end = len(buf)
    for i in range(start, end):
        buf[i] = self._readbyte()
def write(self, buf, *, start=0, end=None):
    """
    Write the bytes from ``buf`` to the device.

    If ``start`` or ``end`` is provided, then the buffer will be sliced
    as if ``buffer[start:end]``. This will not cause an allocation like
    ``buffer[start:end]`` will so it saves memory.

    :param bytearray buf: buffer containing the bytes to write
    :param int start: Index to start writing from
    :param int end: Index to read up to but not include
    """
    if end is None:
        end = len(buf)
    for i in range(start, end):
        self._writebyte(buf[i])
def scan(self):
    """Scan for devices on the bus and return a list of addresses."""
    devices = []
    diff = 65
    rom = False
    count = 0
    for _ in range(0xff):
        rom, diff = self._search_rom(rom, diff)
        if rom:
            count += 1
            if count > self.maximum_devices:
                raise RuntimeError(
                    "Maximum device count of {} exceeded."
                    .format(self.maximum_devices))
            devices.append(OneWireAddress(rom))
        if diff == 0:
            break
    return devices
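A hypothetical CircuitPython usage sketch; the pin (`board.D5`) and the wiring are assumptions:

import board
from adafruit_onewire.bus import OneWireBus

ow_bus = OneWireBus(board.D5)
for device in ow_bus.scan():
    print("ROM:", [hex(b) for b in device.rom])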
def crc8(data):
    """
    Perform the 1-Wire CRC check on the provided data.

    :param bytearray data: 8 byte array representing 64 bit ROM code
    """
    crc = 0
    for byte in data:
        crc ^= byte
        for _ in range(8):
            if crc & 0x01:
                crc = (crc >> 1) ^ 0x8C
            else:
                crc >>= 1
            crc &= 0xFF
    return crc
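A small sanity check of the CRC property, assuming `crc8` above is in scope: appending the CRC of the first seven ROM bytes makes the CRC over all eight bytes zero (the serial-number bytes here are invented).

rom = bytearray([0x28, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x00])
rom[7] = crc8(rom[:7])   # the last ROM byte stores the CRC of the first seven
print(crc8(rom))          # -> 0 for a valid ROM code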
def _deserialize(cls, json_body, get_value_and_type):
    """
    deserialize json to model
    :param json_body: json data
    :param get_value_and_type: function(f: json_field) ->
        value, field_type_string (see FieldType)
    :return:
    """
    instance = cls()
    is_set = False
    properties = cls._get_property_names(instance)

    def get_property_detail(name):
        p = [p for p in instance._property_details
             if p.name == name or p.field_name == name]
        return None if len(p) == 0 else p[0]

    for k in json_body:
        field = json_body[k]
        pd = get_property_detail(k)
        pn = k if not pd else pd.to_property_name(k)
        if pn in properties:
            v, t = get_value_and_type(field)
            initial_value = getattr(instance, pn)
            value = instance._field_to_property(v, t, pd, initial_value)
            setattr(instance, pn, value)
            is_set = True

    return instance if is_set else None
def _serialize(self, convert_to_key_and_value, ignore_missing=False):
    """
    serialize model object to dictionary
    :param convert_to_key_and_value:
        function(field_name, value, property_detail) -> key, value
    :param ignore_missing: when True, skip empty values
    :return:
    """
    serialized = {}
    properties = self._get_property_names(self)

    def get_property_detail(name):
        p = [p for p in self._property_details if p.name == name]
        return None if len(p) == 0 else p[0]

    for p in properties:
        pd = get_property_detail(p)
        value = self._property_to_field(p, pd)
        field_name = p if not pd else pd.to_field_name()
        if value is None or (ignore_missing and not value) or (pd and pd.unsent):
            continue
        key, value = convert_to_key_and_value(field_name, value, pd)
        if key:
            serialized[key] = value

    return serialized
def readinto(self, buf, *, start=0, end=None):
    """
    Read into ``buf`` from the device. The number of bytes read will be
    the length of ``buf``.

    If ``start`` or ``end`` is provided, then the buffer will be sliced
    as if ``buf[start:end]``. This will not cause an allocation like
    ``buf[start:end]`` will so it saves memory.

    :param bytearray buf: buffer to write into
    :param int start: Index to start writing at
    :param int end: Index to write up to but not include
    """
    self._bus.readinto(buf, start=start, end=end)
    if start == 0 and end is None and len(buf) >= 8:
        if self._bus.crc8(buf):
            raise RuntimeError('CRC error.')
def write(self, buf, *, start=0, end=None):
    """
    Write the bytes from ``buf`` to the device.

    If ``start`` or ``end`` is provided, then the buffer will be sliced
    as if ``buffer[start:end]``. This will not cause an allocation like
    ``buffer[start:end]`` will so it saves memory.

    :param bytearray buf: buffer containing the bytes to write
    :param int start: Index to start writing from
    :param int end: Index to read up to but not include
    """
    return self._bus.write(buf, start=start, end=end)
def preferences_class_prepared(sender, *args, **kwargs):
    """
    Adds various preferences members to preferences.preferences,
    thus enabling easy access from code.
    """
    cls = sender
    if issubclass(cls, Preferences):
        # Add singleton manager to subclasses.
        cls.add_to_class('singleton', SingletonManager())
        # Add property for preferences object to preferences.preferences.
        setattr(preferences.Preferences, cls._meta.object_name,
                property(lambda x: cls.singleton.get()))
def site_cleanup(sender, action, instance, **kwargs):
    """
    Make sure there is only a single preferences object per site.
    So remove sites from pre-existing preferences objects.
    """
    if action == 'post_add':
        if isinstance(instance, Preferences) \
                and hasattr(instance.__class__, 'objects'):
            site_conflicts = instance.__class__.objects.filter(
                sites__in=instance.sites.all()
            ).only('id').distinct()
            for conflict in site_conflicts:
                if conflict.id != instance.id:
                    for site in instance.sites.all():
                        conflict.sites.remove(site)
def get_queryset(self):
    """
    Return the first preferences object for the current site.
    If preferences do not exist create it.
    """
    queryset = super(SingletonManager, self).get_queryset()

    # Get current site
    current_site = None
    if getattr(settings, 'SITE_ID', None) is not None:
        current_site = Site.objects.get_current()

    # If site found limit queryset to site.
    if current_site is not None:
        queryset = queryset.filter(sites=settings.SITE_ID)

    if not queryset.exists():
        # Create object (for current site) if it doesn't exist.
        obj = self.model.objects.create()
        if current_site is not None:
            obj.sites.add(current_site)

    return queryset
def load_iterable(self, iterable, session=None):
    '''Load an ``iterable``.

    By default it returns a list of data loaded via the :meth:`loads`
    method.

    :param iterable: an iterable over data to load.
    :param session: Optional :class:`stdnet.odm.Session`.
    :return: an iterable over decoded data.
    '''
    data = []
    load = self.loads
    for v in iterable:
        data.append(load(v))
    return data
def search_model(self, q, text, lookup=None):
    '''Implements :meth:`stdnet.odm.SearchEngine.search_model`.

    It returns a new :class:`stdnet.odm.QueryElem` instance from the
    input :class:`Query` and the *text* to search.'''
    words = self.words_from_text(text, for_search=True)
    if not words:
        return q
    qs = self._search(words, include=(q.model,), lookup=lookup)
    qs = tuple((q.get_field('object_id') for q in qs))
    return odm.intersect((q,) + qs)
def _search(self, words, include=None, exclude=None, lookup=None):
    '''Full text search. Return a list of queries to intersect.'''
    lookup = lookup or 'contains'
    query = self.router.worditem.query()
    if include:
        query = query.filter(model_type__in=include)
    if exclude:
        query = query.exclude(model_type__in=exclude)
    if not words:
        return [query]
    qs = []
    if lookup == 'in':
        # we are looking for items with at least one word in it
        qs.append(query.filter(word__in=words))
    elif lookup == 'contains':
        # we want to match every single word
        for word in words:
            qs.append(query.filter(word=word))
    else:
        raise ValueError('Unknown lookup "{0}"'.format(lookup))
    return qs
def redis_client(address=None, connection_pool=None, timeout=None,
                 parser=None, **kwargs):
    '''Get a new redis client.

    :param address: a ``host``, ``port`` tuple.
    :param connection_pool: optional connection pool.
    :param timeout: socket timeout.
    :param parser: optional response parser.
    '''
    if not connection_pool:
        if timeout == 0:
            if not async:
                raise ImportError('Asynchronous connection requires async '
                                  'bindings installed.')
            return async.pool.redis(address, **kwargs)
        kwargs['socket_timeout'] = timeout
        return Redis(address[0], address[1], **kwargs)
    return Redis(connection_pool=connection_pool)
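A hypothetical call, assuming a reachable Redis server on localhost and the legacy stdnet bindings above:

client = redis_client(address=('localhost', 6379))
client.set('greeting', 'hello')
print(client.get('greeting'))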
def to_bytes(s, encoding=None, errors='strict'):
    """Returns a bytestring version of 's', encoded as specified in
    'encoding'."""
    encoding = encoding or 'utf-8'
    if isinstance(s, bytes):
        if encoding != 'utf-8':
            return s.decode('utf-8', errors).encode(encoding, errors)
        return s
    if not is_string(s):
        s = string_type(s)
    return s.encode(encoding, errors)
def to_string(s, encoding=None, errors='strict'):
    """Inverse of to_bytes"""
    encoding = encoding or 'utf-8'
    if isinstance(s, bytes):
        return s.decode(encoding, errors)
    if not is_string(s):
        s = string_type(s)
    return s
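A minimal round trip, assuming `to_bytes`/`to_string` above and their `is_string`/`string_type` helpers are in scope:

raw = to_bytes('caffè')
print(raw)             # b'caff\xc3\xa8'
print(to_string(raw))  # 'caffè'
print(to_bytes(42))    # b'42' -- non-strings are stringified first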
def date_decimal_hook(dct):
    '''The default JSON decoder hook. It is the inverse of
    :class:`stdnet.utils.jsontools.JSONDateDecimalEncoder`.'''
    if '__datetime__' in dct:
        return todatetime(dct['__datetime__'])
    elif '__date__' in dct:
        return todatetime(dct['__date__']).date()
    elif '__decimal__' in dct:
        return Decimal(dct['__decimal__'])
    else:
        return dct
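A usage sketch with the standard `json` module's `object_hook` parameter, assuming `date_decimal_hook` (and its `Decimal`/`todatetime` imports) are in scope:

import json

payload = '{"price": {"__decimal__": "9.99"}}'
data = json.loads(payload, object_hook=date_decimal_hook)
print(repr(data['price']))   # Decimal('9.99')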
def flat_to_nested(data, instance=None, attname=None, separator=None,
                   loads=None):
    '''Convert a flat representation of a dictionary to a nested
    representation. Fields in the flat representation are separated by
    the *separator* parameter.

    :parameter data: a flat dictionary of key value pairs.
    :parameter instance: optional instance of a model.
    :parameter attname: optional attribute name of a model.
    :parameter separator: optional separator. Default ``"__"``.
    :parameter loads: optional data unserializer.
    :rtype: a nested dictionary'''
    separator = separator or JSPLITTER
    val = {}
    flat_vals = {}
    for key, value in iteritems(data):
        if value is None:
            continue
        keys = key.split(separator)
        # first key equal to the attribute name
        if attname:
            if keys.pop(0) != attname:
                continue
        if loads:
            value = loads(value)
        # if an instance is available, inject the flat attribute
        if not keys:
            if value is None:
                val = flat_vals = {}
                break
            else:
                continue
        else:
            flat_vals[key] = value
        d = val
        lk = keys[-1]
        for k in keys[:-1]:
            if k not in d:
                nd = {}
                d[k] = nd
            else:
                nd = d[k]
                if not isinstance(nd, dict):
                    nd = {'': nd}
                    d[k] = nd
            d = nd
        if lk not in d:
            d[lk] = value
        else:
            d[lk][''] = value
    if instance and flat_vals:
        for attr, value in iteritems(flat_vals):
            setattr(instance, attr, value)
    return val
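A small illustration, assuming `flat_to_nested` above with its default '__' separator and the `iteritems` compatibility helper in scope:

flat = {'data__a': 1, 'data__b__c': 2}
print(flat_to_nested(flat, attname='data'))
# -> {'a': 1, 'b': {'c': 2}}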
def dict_flat_generator(value, attname=None, splitter=JSPLITTER,
                        dumps=None, prefix=None, error=ValueError,
                        recursive=True):
    '''Convert a nested dictionary into a flat dictionary representation'''
    if not isinstance(value, dict) or not recursive:
        if not prefix:
            raise error('Cannot assign a non dictionary to a JSON field')
        name = '%s%s%s' % (attname, splitter, prefix) if attname else prefix
        yield name, dumps(value) if dumps else value
    else:
        # loop over dictionary
        for field in value:
            val = value[field]
            key = prefix
            if field:
                key = '%s%s%s' % (prefix, splitter, field) if prefix else field
            for k, v2 in dict_flat_generator(val, attname, splitter, dumps,
                                             key, error, field):
                yield k, v2
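The inverse direction of the previous helper, assuming `dict_flat_generator` above with `JSPLITTER == '__'`:

nested = {'a': 1, 'b': {'c': 2}}
print(dict(dict_flat_generator(nested, attname='data')))
# -> {'data__a': 1, 'data__b__c': 2}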
def addmul_number_dicts(series):
    '''Multiply dictionaries by numeric values and add them together.

    :parameter series: a tuple of two-element tuples. Each serie is of
        the form::

            (weight, dictionary)

        where ``weight`` is a number and ``dictionary`` is a dictionary
        with numeric values.

    Only common fields are aggregated. If a field has a non-numeric
    value it is not included either.'''
    if not series:
        return
    vtype = value_type((s[1] for s in series))
    if vtype == 1:
        return sum((weight * float(d) for weight, d in series))
    elif vtype == 3:
        keys = set(series[0][1])
        for serie in series[1:]:
            keys.intersection_update(serie[1])
        results = {}
        for key in keys:
            key_series = tuple((weight, d[key]) for weight, d in series)
            result = addmul_number_dicts(key_series)
            if result is not None:
                results[key] = result
        return results
def Download(campaign=0, queue='build', email=None, walltime=8, **kwargs):
    '''
    Submits a cluster job to the build queue to download all TPFs for a
    given campaign.

    :param int campaign: The `K2` campaign to run
    :param str queue: The name of the queue to submit to. Default `build`
    :param str email: The email to send job status notifications to. \
           Default `None`
    :param int walltime: The number of hours to request. Default `8`
    '''
    # Figure out the subcampaign
    if type(campaign) is int:
        subcampaign = -1
    elif type(campaign) is float:
        x, y = divmod(campaign, 1)
        campaign = int(x)
        subcampaign = round(y * 10)

    # Submit the cluster job
    pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'download.pbs')
    str_w = 'walltime=%d:00:00' % walltime
    str_v = 'EVEREST_DAT=%s,CAMPAIGN=%d,SUBCAMPAIGN=%d' % (
        EVEREST_DAT, campaign, subcampaign)
    if subcampaign == -1:
        str_name = 'download_c%02d' % campaign
    else:
        str_name = 'download_c%02d.%d' % (campaign, subcampaign)
    str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log')
    qsub_args = ['qsub', pbsfile,
                 '-q', queue,
                 '-v', str_v,
                 '-o', str_out,
                 '-j', 'oe',
                 '-N', str_name,
                 '-l', str_w]
    if email is not None:
        # Extend (not append) so qsub sees flat string arguments.
        qsub_args += ['-M', email, '-m', 'ae']

    # Now we submit the job
    print("Submitting the job...")
    subprocess.call(qsub_args)
def _Download(campaign, subcampaign):
    '''
    Download all stars from a given campaign. This is called from
    ``missions/k2/download.pbs``
    '''
    # Are we doing a subcampaign?
    if subcampaign != -1:
        campaign = campaign + 0.1 * subcampaign

    # Get all star IDs for this campaign
    stars = [s[0] for s in GetK2Campaign(campaign)]
    nstars = len(stars)

    # Download the TPF data for each one
    for i, EPIC in enumerate(stars):
        print("Downloading data for EPIC %d (%d/%d)..." %
              (EPIC, i + 1, nstars))
        if not os.path.exists(
                os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign),
                             ('%09d' % EPIC)[:4] + '00000',
                             ('%09d' % EPIC)[4:], 'data.npz')):
            try:
                GetData(EPIC, season=campaign, download_only=True)
            except KeyboardInterrupt:
                sys.exit()
            except:
                # Some targets could be corrupted...
                print("ERROR downloading EPIC %d." % EPIC)
                exctype, value, tb = sys.exc_info()
                for line in traceback.format_exception_only(exctype, value):
                    ln = line.replace('\n', '')
                    print(ln)
                continue
def Run(campaign=0, EPIC=None, nodes=5, ppn=12, walltime=100,
        mpn=None, email=None, queue=None, **kwargs):
    '''
    Submits a cluster job to compute and plot data for all targets in a
    given campaign.

    :param campaign: The K2 campaign number. If this is an :py:class:`int`, \
           returns all targets in that campaign. If a :py:class:`float` \
           in the form `X.Y`, runs the `Y^th` decile of campaign `X`.
    :param str queue: The queue to submit to. Default `None` (default queue)
    :param str email: The email to send job status notifications to. \
           Default `None`
    :param int walltime: The number of hours to request. Default `100`
    :param int nodes: The number of nodes to request. Default `5`
    :param int ppn: The number of processors per node to request. Default `12`
    :param int mpn: Memory per node in gb to request. Default no setting.
    '''
    # Figure out the subcampaign
    if type(campaign) is int:
        subcampaign = -1
    elif type(campaign) is float:
        x, y = divmod(campaign, 1)
        campaign = int(x)
        subcampaign = round(y * 10)

    # DEV hack: limit backfill jobs to 10 hours
    if EVEREST_DEV and (queue == 'bf'):
        walltime = min(10, walltime)

    # Convert kwargs to string. This is really hacky. Pickle creates an
    # array of bytes, which we must convert into a regular string to pass
    # to the pbs script and then back into python. Decoding the bytes
    # isn't enough, since we have pesky escaped characters such as
    # newlines that don't behave well when passing this string around. My
    # braindead hack is to replace newlines with '%%%', then undo the
    # replacement when reading the kwargs. This works for most cases, but
    # sometimes pickle creates a byte array that can't be decoded into
    # utf-8; this happens when trying to pass numpy arrays around, for
    # instance. This needs to be fixed in the future, but for now we'll
    # restrict the kwargs to be ints, floats, lists, and strings.
    try:
        strkwargs = pickle.dumps(kwargs, 0).decode(
            'utf-8').replace('\n', '%%%')
    except UnicodeDecodeError:
        raise ValueError('Unable to pickle `kwargs`. Currently the '
                         '`kwargs` values may only be `int`s, `float`s, '
                         '`string`s, `bool`s, or lists of these.')

    # Submit the cluster job
    pbsfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'run.pbs')
    if mpn is not None:
        str_n = 'nodes=%d:ppn=%d,feature=%dcore,mem=%dgb' % (
            nodes, ppn, ppn, mpn * nodes)
    else:
        str_n = 'nodes=%d:ppn=%d,feature=%dcore' % (nodes, ppn, ppn)
    str_w = 'walltime=%d:00:00' % walltime
    str_v = "EVEREST_DAT=%s,NODES=%d," % (EVEREST_DAT, nodes) + \
            "EPIC=%d," % (0 if EPIC is None else EPIC) + \
            "CAMPAIGN=%d,SUBCAMPAIGN=%d,STRKWARGS='%s'" % \
            (campaign, subcampaign, strkwargs)
    if EPIC is None:
        if subcampaign == -1:
            str_name = 'c%02d' % campaign
        else:
            str_name = 'c%02d.%d' % (campaign, subcampaign)
    else:
        str_name = 'EPIC%d' % EPIC
    str_out = os.path.join(EVEREST_DAT, 'k2', str_name + '.log')
    qsub_args = ['qsub', pbsfile,
                 '-v', str_v,
                 '-o', str_out,
                 '-j', 'oe',
                 '-N', str_name,
                 '-l', str_n,
                 '-l', str_w]
    if email is not None:
        # Extend (not append) so qsub sees flat string arguments.
        qsub_args += ['-M', email, '-m', 'ae']
    if queue is not None:
        qsub_args += ['-q', queue]

    # Now we submit the job
    print("Submitting the job...")
    subprocess.call(qsub_args)
def _Publish(campaign, subcampaign, strkwargs):
    '''
    The actual function that publishes a given campaign; this must be
    called from ``missions/k2/publish.pbs``.
    '''
    # Get kwargs from string
    kwargs = pickle.loads(strkwargs.replace('%%%', '\n').encode('utf-8'))

    # Check the cadence
    cadence = kwargs.get('cadence', 'lc')

    # Model wrapper
    m = FunctionWrapper(EverestModel, season=campaign, publish=True, **kwargs)

    # Set up our custom exception handler
    sys.excepthook = ExceptionHook

    # Initialize our multiprocessing pool
    with Pool() as pool:

        # Are we doing a subcampaign?
        if subcampaign != -1:
            campaign = campaign + 0.1 * subcampaign

        # Get all the stars
        stars = GetK2Campaign(campaign, epics_only=True, cadence=cadence)

        # Run
        pool.map(m, stars)
def Status(season=range(18), model='nPLD', purge=False, injection=False,
           cadence='lc', **kwargs):
    '''
    Shows the progress of the de-trending runs for the specified
    campaign(s).
    '''
    # Mission compatibility
    campaign = season

    # Injection?
    if injection:
        return InjectionStatus(campaign=campaign, model=model, purge=purge,
                               **kwargs)

    # Cadence
    if cadence == 'sc':
        model = '%s.sc' % model

    if not hasattr(campaign, '__len__'):
        if type(campaign) is int:
            # Return the subcampaigns
            all_stars = [s for s in GetK2Campaign(
                campaign, split=True, epics_only=True, cadence=cadence)]
            campaign = [campaign + 0.1 * n for n in range(10)]
        else:
            all_stars = [[s for s in GetK2Campaign(
                campaign, epics_only=True, cadence=cadence)]]
            campaign = [campaign]
    else:
        all_stars = [[s for s in GetK2Campaign(
            c, epics_only=True, cadence=cadence)] for c in campaign]

    print("CAMP TOTAL DOWNLOADED PROCESSED FITS ERRORS")
    print("---- ----- ---------- --------- ---- ------")
    for c, stars in zip(campaign, all_stars):
        if len(stars) == 0:
            continue
        down = 0
        proc = 0
        err = 0
        fits = 0
        bad = []
        remain = []
        total = len(stars)
        if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c)):
            path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c)
            for folder in [f for f in os.listdir(path)
                           if f.endswith('00000')]:
                for subfolder in os.listdir(os.path.join(path, folder)):
                    ID = int(folder[:4] + subfolder)
                    if ID in stars:
                        if os.path.exists(
                                os.path.join(EVEREST_DAT, 'k2',
                                             'c%02d' % c, folder,
                                             subfolder, 'data.npz')):
                            down += 1
                        if os.path.exists(
                                os.path.join(EVEREST_DAT, 'k2',
                                             'c%02d' % c, folder, subfolder,
                                             FITSFile(ID, c,
                                                      cadence=cadence))):
                            fits += 1
                        if os.path.exists(
                                os.path.join(EVEREST_DAT, 'k2',
                                             'c%02d' % c, folder,
                                             subfolder, model + '.npz')):
                            proc += 1
                        elif os.path.exists(
                                os.path.join(EVEREST_DAT, 'k2',
                                             'c%02d' % c, folder,
                                             subfolder, model + '.err')):
                            err += 1
                            bad.append(folder[:4] + subfolder)
                            if purge:
                                os.remove(os.path.join(
                                    EVEREST_DAT, 'k2', 'c%02d' % c,
                                    folder, subfolder, model + '.err'))
                        else:
                            remain.append(folder[:4] + subfolder)
        if proc == total:
            cc = ct = cp = ce = GREEN
            cd = BLACK if down < total else GREEN
        else:
            cc = BLACK
            ct = BLACK
            cd = BLACK if down < total else BLUE
            cp = BLACK if proc < down or proc == 0 else BLUE
            ce = RED if err > 0 else BLACK
        cf = BLACK if fits < total else GREEN
        if type(c) is int:
            print(("%s{:>4d} \033[0m%s{:>8d}\033[0m%s{:>16d}\033[0m"
                   "%s{:>13d}\033[0m%s{:>10d}\033[0m%s{:>10d}\033[0m")
                  .format(c, total, down, proc, fits, err)
                  % (cc, ct, cd, cp, cf, ce))
        else:
            print(("%s{:>4.1f} \033[0m%s{:>8d}\033[0m%s{:>16d}\033[0m"
                   "%s{:>13d}\033[0m%s{:>10d}\033[0m%s{:>10d}\033[0m")
                  .format(c, total, down, proc, fits, err)
                  % (cc, ct, cd, cp, cf, ce))
        if len(remain) <= 25 and len(remain) > 0 and len(campaign) == 1:
            remain.extend([" "] * (4 - (len(remain) % 4)))
            print()
            for A, B, C, D in zip(remain[::4], remain[1::4],
                                  remain[2::4], remain[3::4]):
                if A == remain[0]:
                    print("REMAIN: %s %s %s %s" % (A, B, C, D))
                else:
                    print("        %s %s %s %s" % (A, B, C, D))
            print()
        if len(bad) and len(campaign) == 1:
            bad.extend([" "] * (4 - (len(bad) % 4)))
            print()
            for A, B, C, D in zip(bad[::4], bad[1::4],
                                  bad[2::4], bad[3::4]):
                if A == bad[0]:
                    print("ERRORS: %s %s %s %s" % (A, B, C, D))
                else:
                    print("        %s %s %s %s" % (A, B, C, D))
            print()
def InjectionStatus(campaign=range(18), model='nPLD', purge=False,
                    depths=[0.01, 0.001, 0.0001], **kwargs):
    '''
    Shows the progress of the injection de-trending runs for the
    specified campaign(s).
    '''
    if not hasattr(campaign, '__len__'):
        if type(campaign) is int:
            # Return the subcampaigns
            all_stars = [s for s in GetK2Campaign(
                campaign, split=True, epics_only=True)]
            campaign = [campaign + 0.1 * n for n in range(10)]
        else:
            all_stars = [[s for s in GetK2Campaign(campaign,
                                                   epics_only=True)]]
            campaign = [campaign]
    else:
        all_stars = [[s for s in GetK2Campaign(
            c, epics_only=True)] for c in campaign]
    print("CAMP MASK DEPTH TOTAL DONE ERRORS")
    print("---- ---- ----- ----- ---- ------")
    for c, stars in zip(campaign, all_stars):
        if len(stars) == 0:
            continue
        done = [[0 for d in depths], [0 for d in depths]]
        err = [[0 for d in depths], [0 for d in depths]]
        total = len(stars)
        if os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c)):
            path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % c)
            for folder in os.listdir(path):
                for subfolder in os.listdir(os.path.join(path, folder)):
                    ID = int(folder[:4] + subfolder)
                    for m, mask in enumerate(['U', 'M']):
                        for d, depth in enumerate(depths):
                            if os.path.exists(os.path.join(
                                    EVEREST_DAT, 'k2', 'c%02d' % c,
                                    folder, subfolder,
                                    '%s_Inject_%s%g.npz'
                                    % (model, mask, depth))):
                                done[m][d] += 1
                            elif os.path.exists(os.path.join(
                                    EVEREST_DAT, 'k2', 'c%02d' % c,
                                    folder, subfolder,
                                    '%s_Inject_%s%g.err'
                                    % (model, mask, depth))):
                                err[m][d] += 1
        for d, depth in enumerate(depths):
            for m, mask in enumerate(['F', 'T']):
                if done[m][d] == total:
                    color = GREEN
                else:
                    color = BLACK
                if err[m][d] > 0:
                    errcolor = RED
                else:
                    errcolor = ''
                if type(c) is int:
                    print("%s{:>4d}{:>8s}{:>14g}{:>10d}{:>10d}%s{:>9d}\033[0m"
                          .format(c, mask, depth, total, done[m][d],
                                  err[m][d]) % (color, errcolor))
                else:
                    print(("%s{:>4.1f}{:>8s}{:>14g}{:>10d}{:>10d}"
                           "%s{:>9d}\033[0m")
                          .format(c, mask, depth, total, done[m][d],
                                  err[m][d]) % (color, errcolor))
def EverestModel(ID, model='nPLD', publish=False, csv=False, **kwargs):
    '''
    A wrapper around an :py:obj:`everest` model for PBS runs.
    '''
    if model != 'Inject':
        from ... import detrender

        # HACK: We need to explicitly mask short cadence planets
        if kwargs.get('cadence', 'lc') == 'sc':
            EPIC, t0, period, duration = \
                np.loadtxt(os.path.join(EVEREST_SRC, 'missions', 'k2',
                                        'tables', 'scmasks.tsv'),
                           unpack=True)
            if ID in EPIC and kwargs.get('planets', None) is None:
                ii = np.where(EPIC == ID)[0]
                planets = []
                for i in ii:
                    planets.append([t0[i], period[i], 1.25 * duration[i]])
                kwargs.update({'planets': planets})

        # Run the model
        m = getattr(detrender, model)(ID, **kwargs)

        # Publish?
        if publish:
            if csv:
                m.publish_csv()
            else:
                m.publish()
    else:
        from ...inject import Inject
        Inject(ID, **kwargs)
    return True
def PrimaryHDU(model):
    '''
    Construct the primary HDU file containing basic header info.
    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=0)
    if 'KEPMAG' not in [c[0] for c in cards]:
        cards.append(('KEPMAG', model.mag, 'Kepler magnitude'))

    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))

    # Create the HDU
    header = pyfits.Header(cards=cards)
    hdu = pyfits.PrimaryHDU(header=header)
    return hdu
def LightcurveHDU(model):
    '''
    Construct the data HDU file containing the arrays and the
    observing info.
    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=1)

    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))
    cards.append(('MODEL', model.name, 'Name of EVEREST model used'))
    cards.append(('APNAME', model.aperture_name, 'Name of aperture used'))
    cards.append(('BPAD', model.bpad, 'Chunk overlap in cadences'))
    for c in range(len(model.breakpoints)):
        cards.append(('BRKPT%02d' % (c + 1), model.breakpoints[c],
                      'Light curve breakpoint'))
    cards.append(('CBVNUM', model.cbv_num,
                  'Number of CBV signals to recover'))
    cards.append(('CBVNITER', model.cbv_niter,
                  'Number of CBV SysRem iterations'))
    cards.append(('CBVWIN', model.cbv_win, 'Window size for smoothing CBVs'))
    cards.append(('CBVORD', model.cbv_order, 'Order when smoothing CBVs'))
    cards.append(('CDIVS', model.cdivs, 'Cross-validation subdivisions'))
    cards.append(('CDPP', model.cdpp, 'Average de-trended CDPP'))
    cards.append(('CDPPR', model.cdppr, 'Raw CDPP'))
    cards.append(('CDPPV', model.cdppv, 'Average validation CDPP'))
    cards.append(('CDPPG', model.cdppg, 'Average GP-de-trended CDPP'))
    for i in range(99):
        try:
            cards.append(('CDPP%02d' % (i + 1),
                          model.cdpp_arr[i]
                          if not np.isnan(model.cdpp_arr[i]) else 0,
                          'Chunk de-trended CDPP'))
            cards.append(('CDPPR%02d' % (i + 1),
                          model.cdppr_arr[i]
                          if not np.isnan(model.cdppr_arr[i]) else 0,
                          'Chunk raw CDPP'))
            cards.append(('CDPPV%02d' % (i + 1),
                          model.cdppv_arr[i]
                          if not np.isnan(model.cdppv_arr[i]) else 0,
                          'Chunk validation CDPP'))
        except IndexError:
            break
    cards.append(('CVMIN', model.cv_min,
                  'Cross-validation objective function'))
    cards.append(('GITER', model.giter,
                  'Number of GP optimization iterations'))
    cards.append(('GMAXF', model.giter,
                  'Max number of GP function evaluations'))
    cards.append(('GPFACTOR', model.gp_factor,
                  'GP amplitude initialization factor'))
    cards.append(('KERNEL', model.kernel, 'GP kernel name'))
    if model.kernel == 'Basic':
        cards.append(('GPWHITE', model.kernel_params[0],
                      'GP white noise amplitude (e-/s)'))
        cards.append(('GPRED', model.kernel_params[1],
                      'GP red noise amplitude (e-/s)'))
        cards.append(('GPTAU', model.kernel_params[2],
                      'GP red noise timescale (days)'))
    elif model.kernel == 'QuasiPeriodic':
        cards.append(('GPWHITE', model.kernel_params[0],
                      'GP white noise amplitude (e-/s)'))
        cards.append(('GPRED', model.kernel_params[1],
                      'GP red noise amplitude (e-/s)'))
        cards.append(('GPGAMMA', model.kernel_params[2], 'GP scale factor'))
        cards.append(('GPPER', model.kernel_params[3], 'GP period (days)'))
    for c in range(len(model.breakpoints)):
        for o in range(model.pld_order):
            cards.append(('LAMB%02d%02d' % (c + 1, o + 1),
                          model.lam[c][o], 'Cross-validation parameter'))
            if model.name == 'iPLD':
                cards.append(('RECL%02d%02d' % (c + 1, o + 1),
                              model.reclam[c][o],
                              'Cross-validation parameter'))
    cards.append(('LEPS', model.leps, 'Cross-validation tolerance'))
    cards.append(('MAXPIX', model.max_pixels,
                  'Maximum size of TPF aperture'))
    for i, source in enumerate(model.nearby[:99]):
        cards.append(('NRBY%02dID' % (i + 1), source['ID'],
                      'Nearby source ID'))
        cards.append(('NRBY%02dX' % (i + 1), source['x'],
                      'Nearby source X position'))
        cards.append(('NRBY%02dY' % (i + 1), source['y'],
                      'Nearby source Y position'))
        cards.append(('NRBY%02dM' % (i + 1), source['mag'],
                      'Nearby source magnitude'))
        cards.append(('NRBY%02dX0' % (i + 1), source['x0'],
                      'Nearby source reference X'))
        cards.append(('NRBY%02dY0' % (i + 1), source['y0'],
                      'Nearby source reference Y'))
    for i, n in enumerate(model.neighbors):
        cards.append(('NEIGH%02d' % i, model.neighbors[i],
                      'Neighboring star used to de-trend'))
    cards.append(('OITER', model.oiter,
                  'Number of outlier search iterations'))
    cards.append(('OPTGP', model.optimize_gp, 'GP optimization performed?'))
    cards.append(('OSIGMA', model.osigma,
                  'Outlier tolerance (standard deviations)'))
    for i, planet in enumerate(model.planets):
        cards.append(('P%02dT0' % (i + 1), planet[0],
                      'Planet transit time (days)'))
        cards.append(('P%02dPER' % (i + 1), planet[1],
                      'Planet transit period (days)'))
        cards.append(('P%02dDUR' % (i + 1), planet[2],
                      'Planet transit duration (days)'))
    cards.append(('PLDORDER', model.pld_order, 'PLD de-trending order'))
    cards.append(('SATUR', model.saturated, 'Is target saturated?'))
    cards.append(('SATTOL', model.saturation_tolerance,
                  'Fractional saturation tolerance'))

    # Add the EVEREST quality flags to the QUALITY array
    quality = np.array(model.quality)
    quality[np.array(model.badmask, dtype=int)] += 2 ** (QUALITY_BAD - 1)
    quality[np.array(model.nanmask, dtype=int)] += 2 ** (QUALITY_NAN - 1)
    quality[np.array(model.outmask, dtype=int)] += 2 ** (QUALITY_OUT - 1)
    quality[np.array(model.recmask, dtype=int)] += 2 ** (QUALITY_REC - 1)
    quality[np.array(model.transitmask, dtype=int)] += 2 ** (QUALITY_TRN - 1)

    # When de-trending, we interpolated to fill in NaN fluxes. Here
    # we insert the NaNs back in, since there's no actual physical
    # information at those cadences.
    flux = np.array(model.flux)
    flux[model.nanmask] = np.nan

    # Create the arrays list
    arrays = [pyfits.Column(name='CADN', format='D', array=model.cadn),
              pyfits.Column(name='FLUX', format='D', array=flux,
                            unit='e-/s'),
              pyfits.Column(name='FRAW', format='D', array=model.fraw,
                            unit='e-/s'),
              pyfits.Column(name='FRAW_ERR', format='D',
                            array=model.fraw_err, unit='e-/s'),
              pyfits.Column(name='QUALITY', format='J', array=quality),
              pyfits.Column(name='TIME', format='D', array=model.time,
                            unit='BJD - 2454833')]

    # Add the CBVs
    if model.fcor is not None:
        arrays += [pyfits.Column(name='FCOR', format='D', array=model.fcor,
                                 unit='e-/s')]
        for n in range(model.XCBV.shape[1]):
            arrays += [pyfits.Column(name='CBV%02d' % (n + 1), format='D',
                                     array=model.XCBV[:, n])]

    # Did we subtract a background term?
    if hasattr(model.bkg, '__len__'):
        arrays.append(pyfits.Column(name='BKG', format='D',
                                    array=model.bkg, unit='e-/s'))

    # Create the HDU
    header = pyfits.Header(cards=cards)
    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='ARRAYS')

    return hdu
def PixelsHDU(model):
    '''
    Construct the HDU containing the pixel-level light curve.
    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=2)

    # Add EVEREST info
    cards = []
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))

    # Create the HDU
    header = pyfits.Header(cards=cards)

    # The pixel timeseries
    arrays = [pyfits.Column(name='FPIX',
                            format='%dD' % model.fpix.shape[1],
                            array=model.fpix)]

    # The first order PLD vectors for all the neighbors
    # (npixels, ncadences)
    X1N = model.X1N
    if X1N is not None:
        arrays.append(pyfits.Column(name='X1N',
                                    format='%dD' % X1N.shape[1],
                                    array=X1N))

    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(cols, header=header, name='PIXELS')
    return hdu
def ApertureHDU(model):
    '''
    Construct the HDU containing the aperture used to de-trend.
    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=3)

    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))

    # Create the HDU
    header = pyfits.Header(cards=cards)
    hdu = pyfits.ImageHDU(data=model.aperture, header=header,
                          name='APERTURE MASK')
    return hdu
def ImagesHDU(model):
    '''
    Construct the HDU containing sample postage stamp images of the target.
    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=4)

    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))

    # The images
    format = '%dD' % model.pixel_images[0].shape[1]
    arrays = [pyfits.Column(name='STAMP1', format=format,
                            array=model.pixel_images[0]),
              pyfits.Column(name='STAMP2', format=format,
                            array=model.pixel_images[1]),
              pyfits.Column(name='STAMP3', format=format,
                            array=model.pixel_images[2])]

    # Create the HDU
    header = pyfits.Header(cards=cards)
    cols = pyfits.ColDefs(arrays)
    hdu = pyfits.BinTableHDU.from_columns(cols, header=header,
                                          name='POSTAGE STAMPS')
    return hdu
def HiResHDU(model):
    '''
    Construct the HDU containing the hi res image of the target.
    '''
    # Get mission cards
    cards = model._mission.HDUCards(model.meta, hdu=5)

    # Add EVEREST info
    cards.append(('COMMENT', '************************'))
    cards.append(('COMMENT', '* EVEREST INFO *'))
    cards.append(('COMMENT', '************************'))
    cards.append(('MISSION', model.mission, 'Mission name'))
    cards.append(('VERSION', EVEREST_MAJOR_MINOR, 'EVEREST pipeline version'))
    cards.append(('SUBVER', EVEREST_VERSION, 'EVEREST pipeline subversion'))
    cards.append(('DATE', strftime('%Y-%m-%d'),
                  'EVEREST file creation date (YYYY-MM-DD)'))

    # Create the HDU
    header = pyfits.Header(cards=cards)
    if model.hires is not None:
        hdu = pyfits.ImageHDU(data=model.hires, header=header,
                              name='HI RES IMAGE')
    else:
        hdu = pyfits.ImageHDU(data=np.empty((0, 0), dtype=float),
                              header=header, name='HI RES IMAGE')
    return hdu
def MakeFITS(model, fitsfile=None):
    '''
    Generate a FITS file for a given :py:mod:`everest` run.

    :param model: An :py:mod:`everest` model instance
    '''
    # Get the fits file name
    if fitsfile is None:
        outfile = os.path.join(model.dir, model._mission.FITSFile(
            model.ID, model.season, model.cadence))
    else:
        outfile = os.path.join(model.dir, fitsfile)
    if os.path.exists(outfile) and not model.clobber:
        return
    elif os.path.exists(outfile):
        os.remove(outfile)

    log.info('Generating FITS file...')

    # Create the HDUs
    primary = PrimaryHDU(model)
    lightcurve = LightcurveHDU(model)
    pixels = PixelsHDU(model)
    aperture = ApertureHDU(model)
    images = ImagesHDU(model)
    hires = HiResHDU(model)

    # Combine to get the HDUList
    hdulist = pyfits.HDUList(
        [primary, lightcurve, pixels, aperture, images, hires])

    # Output to the FITS file
    hdulist.writeto(outfile)
    return
def get_serializer(name, **options):
    '''Retrieve a serializer registered as *name*. If the serializer is
    not available a ``ValueError`` exception is raised.

    A common usage pattern::

        qs = MyModel.objects.query().sort_by('id')
        s = odm.get_serializer('json')
        s.dump(qs)
    '''
    if name in _serializers:
        serializer = _serializers[name]
        return serializer(**options)
    else:
        raise ValueError('Unknown serializer {0}.'.format(name))
def register_serializer(name, serializer):
    '''\
    Register a new serializer to the library.

    :parameter name: serializer name (it can override existing
        serializers).
    :parameter serializer: an instance or a derived class of a
        :class:`stdnet.odm.Serializer` class or a callable.
    '''
    if not isclass(serializer):
        serializer = serializer.__class__
    _serializers[name] = serializer
def MaskSolve(A, b, w=5, progress=True, niter=None):
    '''
    Finds the solution `x` to the linear problem

        A x = b

    for all contiguous `w`-sized masks applied to the rows and columns
    of `A` and to the entries of `b`.

    Returns an array `X` of shape `(N - w + 1, N - w)`, where the `nth`
    row is the solution to the equation

        A[![n,n+w)] x = b[![n,n+w)]

    where ![n,n+w) indicates that indices in the range [n,n+w) have
    been masked.
    '''
    # Ensure we have choldate installed
    if cholupdate is None:
        log.info("Running the slow version of `MaskSolve`.")
        log.info("Install the `choldate` package for better performance.")
        log.info("https://github.com/rodluger/choldate")
        return MaskSolveSlow(A, b, w=w, progress=progress, niter=niter)

    # Number of data points
    N = b.shape[0]

    # How many iterations? Default is to go through
    # the entire dataset
    if niter is None:
        niter = N - w + 1

    # Our result matrix
    X = np.empty((niter, N - w))

    # Solve the first two steps explicitly.
    for n in range(2):
        mask = np.arange(n, w + n)
        A_ = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        b_ = np.delete(b, mask)
        U = cholesky(A_)
        X[n] = cho_solve((U, False), b_)

    # Iterate!
    for n in prange(1, niter - 1):

        # Update the data vector.
        b_[n] = b[n]

        # Remove a row.
        S33 = U[n + 1:, n + 1:]
        S23 = U[n, n + 1:]
        cholupdate(S33, S23)

        # Add a row.
        A12 = A[:n, n]
        A22 = A[n, n]
        A23 = A[n, n + w + 1:]
        S11 = U[:n, :n]
        S12 = solve_triangular(S11.T, A12, lower=True,
                               check_finite=False, trans=0,
                               overwrite_b=True)
        S22 = np.sqrt(A22 - np.dot(S12.T, S12))
        S13 = U[:n, n + 1:]
        S23 = (A23 - np.dot(S12.T, S13)) / S22
        choldowndate(S33, np.array(S23))
        U[:n, n] = S12
        U[n, n] = S22
        U[n, n + 1:] = S23
        U[n + 1:, n + 1:] = S33

        # Now we can solve our linear equation
        X[n + 1] = cho_solve((U, False), b_)

    # Return the matrix
    return X
def MaskSolveSlow(A, b, w=5, progress=True, niter=None):
    '''
    Identical to `MaskSolve`, but computes the solution the
    brute-force way.
    '''
    # Number of data points
    N = b.shape[0]

    # How many iterations? Default is to go through
    # the entire dataset
    if niter is None:
        niter = N - w + 1

    # Our result matrix
    X = np.empty((niter, N - w))

    # Iterate! The mask at step `n` goes from
    # data index `n` to data index `n+w-1` (inclusive).
    for n in prange(niter):
        mask = np.arange(n, n + w)
        An = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        Un = cholesky(An)
        bn = np.delete(b, mask)
        X[n] = cho_solve((Un, False), bn)
    return X
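A hypothetical self-check of the brute-force solver, assuming `MaskSolveSlow` and its helpers (`cholesky`, `cho_solve`, `prange`) are in scope:

import numpy as np

N, w = 12, 3
rng = np.random.RandomState(0)
M = rng.randn(N, N)
A = M.dot(M.T) + N * np.eye(N)    # symmetric positive definite
b = rng.randn(N)
X = MaskSolveSlow(A, b, w=w)
# Row 0 solves the system with rows/cols [0, w) deleted:
mask = np.arange(0, w)
A0 = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
b0 = np.delete(b, mask)
print(np.allclose(X[0], np.linalg.solve(A0, b0)))   # True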
def unmasked(self, depth=0.01):
    """Return the unmasked overfitting metric for a given transit depth."""
    return 1 - (np.hstack(self._O2) +
                np.hstack(self._O3) / depth) / np.hstack(self._O1)
def show(self):
    """Show the overfitting PDF summary."""
    try:
        if platform.system().lower().startswith('darwin'):
            subprocess.call(['open', self.pdf])
        elif os.name == 'nt':
            os.startfile(self.pdf)
        elif os.name == 'posix':
            subprocess.call(['xdg-open', self.pdf])
        else:
            raise IOError("")
    except IOError:
        log.info("Unable to open the pdf. Try opening it manually:")
        log.info(self.pdf)
def season(self):
    """
    Return the current observing season.

    For *K2*, this is the observing campaign, while for *Kepler*,
    it is the current quarter.
    """
    try:
        self._season
    except AttributeError:
        self._season = self._mission.Season(self.ID)
        if hasattr(self._season, '__len__'):
            raise AttributeError(
                "Please choose a campaign/season for this target: %s."
                % self._season)
    return self._season
def fcor(self):
    '''
    The CBV-corrected de-trended flux.
    '''
    if self.XCBV is None:
        return None
    else:
        return self.flux - self._mission.FitCBVs(self)
def mask(self):
    '''
    The array of indices to be masked. This is the union of the sets of
    outliers, bad (flagged) cadences, transit cadences, and
    :py:obj:`NaN` cadences.
    '''
    return np.array(list(set(np.concatenate([self.outmask, self.badmask,
                                             self.transitmask,
                                             self.nanmask]))), dtype=int)
Computes the design matrix at the given *PLD* order and the given indices. The columns are the *PLD* vectors for the target at the corresponding order, computed as the product of the fractional pixel fluxes over all sets of :py:obj:`n` pixels, where :py:obj:`n` is the *PLD* order.

def X(self, i, j=slice(None, None, None)):
    '''
    Computes the design matrix at the given *PLD* order and the given
    indices. The columns are the *PLD* vectors for the target at the
    corresponding order, computed as the product of the fractional pixel
    fluxes over all sets of :py:obj:`n` pixels, where :py:obj:`n` is the
    *PLD* order.

    '''

    X1 = self.fpix[j] / self.norm[j].reshape(-1, 1)
    # Note: `np.product` is a deprecated alias removed in NumPy 2.0;
    # use `np.prod` instead
    X = np.prod(list(multichoose(X1.T, i + 1)), axis=1).T
    if self.X1N is not None:
        return np.hstack([X, self.X1N[j] ** (i + 1)])
    else:
        return X
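To make the combinatorics concrete, here is a minimal sketch of how the second-order PLD regressors are built, assuming `multichoose` is `itertools.combinations_with_replacement` (as in the :py:obj:`everest` source) and using random stand-in pixel fluxes:

import numpy as np
from itertools import combinations_with_replacement as multichoose

fpix = np.random.rand(100, 4)         # 100 cadences, 4 pixels (stand-in)
norm = np.sum(fpix, axis=1)           # SAP flux used for normalization
X1 = fpix / norm.reshape(-1, 1)       # first-order fractional pixel fluxes
# Second order: products over all pixel pairs, with replacement
X2 = np.prod(list(multichoose(X1.T, 2)), axis=1).T
print(X1.shape, X2.shape)             # (100, 4), (100, 10); C(5, 2) = 10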
Plots miscellaneous de-trending information on the data validation summary figure. :param dvs: A :py:class:`dvs.DVS` figure instance def plot_info(self, dvs): ''' Plots miscellaneous de-trending information on the data validation summary figure. :param dvs: A :py:class:`dvs.DVS` figure instance ''' axl, axc, axr = dvs.title() axc.annotate("%s %d" % (self._mission.IDSTRING, self.ID), xy=(0.5, 0.5), xycoords='axes fraction', ha='center', va='center', fontsize=18) axc.annotate(r"%.2f ppm $\rightarrow$ %.2f ppm" % (self.cdppr, self.cdpp), xy=(0.5, 0.2), xycoords='axes fraction', ha='center', va='center', fontsize=8, color='k', fontstyle='italic') axl.annotate("%s %s%02d: %s" % (self.mission.upper(), self._mission.SEASONCHAR, self.season, self.name), xy=(0.5, 0.5), xycoords='axes fraction', ha='center', va='center', fontsize=12, color='k') axl.annotate(self.aperture_name if len(self.neighbors) == 0 else "%s, %d neighbors" % (self.aperture_name, len(self.neighbors)), xy=(0.5, 0.2), xycoords='axes fraction', ha='center', va='center', fontsize=8, color='k', fontstyle='italic') axr.annotate("%s %.3f" % (self._mission.MAGSTRING, self.mag), xy=(0.5, 0.5), xycoords='axes fraction', ha='center', va='center', fontsize=12, color='k') if not np.isnan(self.cdppg) and self.cdppg > 0: axr.annotate(r"GP %.3f ppm" % (self.cdppg), xy=(0.5, 0.2), xycoords='axes fraction', ha='center', va='center', fontsize=8, color='k', fontstyle='italic')
Compute the model for the current value of lambda. def compute(self): ''' Compute the model for the current value of lambda. ''' # Is there a transit model? if self.transit_model is not None: return self.compute_joint() log.info('Computing the model...') # Loop over all chunks model = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b) c = self.get_chunk(b) # This block of the masked covariance matrix mK = GetCovariance(self.kernel, self.kernel_params, self.time[m], self.fraw_err[m]) # Get median med = np.nanmedian(self.fraw[m]) # Normalize the flux f = self.fraw[m] - med # The X^2 matrices A = np.zeros((len(m), len(m))) B = np.zeros((len(c), len(m))) # Loop over all orders for n in range(self.pld_order): # Only compute up to the current PLD order if (self.lam_idx >= n) and (self.lam[b][n] is not None): XM = self.X(n, m) XC = self.X(n, c) A += self.lam[b][n] * np.dot(XM, XM.T) B += self.lam[b][n] * np.dot(XC, XM.T) del XM, XC # Compute the model W = np.linalg.solve(mK + A, f) model[b] = np.dot(B, W) # Free up some memory del A, B, W # Join the chunks after applying the correct offset if len(model) > 1: # First chunk self.model = model[0][:-self.bpad] # Center chunks for m in model[1:-1]: # Join the chunks at the first non-outlier cadence i = 1 while len(self.model) - i in self.mask: i += 1 offset = self.model[-i] - m[self.bpad - i] self.model = np.concatenate( [self.model, m[self.bpad:-self.bpad] + offset]) # Last chunk i = 1 while len(self.model) - i in self.mask: i += 1 offset = self.model[-i] - model[-1][self.bpad - i] self.model = np.concatenate( [self.model, model[-1][self.bpad:] + offset]) else: self.model = model[0] # Subtract the global median self.model -= np.nanmedian(self.model) # Get the CDPP and reset the weights self.cdpp_arr = self.get_cdpp_arr() self.cdpp = self.get_cdpp() self._weights = None
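For a single chunk and a single PLD order, the linear-algebra core above reduces to predicting the systematics as model = B (K + A)^-1 f, with A the regularized X Lambda X^T block on the masked cadences and B the corresponding unmasked-by-masked block. A minimal numpy sketch with stand-in matrices; with no cadences masked, B coincides with A:

import numpy as np

lam = 1e10                           # PLD prior amplitude (stand-in)
K = np.eye(50)                       # stand-in GP covariance of the data
X = np.random.randn(50, 10)          # stand-in PLD design matrix
f = np.random.randn(50)              # median-subtracted flux (stand-in)
A = lam * np.dot(X, X.T)             # X Lambda X^T on the masked cadences
W = np.linalg.solve(K + A, f)
model = np.dot(A, W)                 # PLD prediction of the systematics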
Compute the model in a single step, allowing for a light curve-wide transit model. This is a bit more expensive to compute. def compute_joint(self): ''' Compute the model in a single step, allowing for a light curve-wide transit model. This is a bit more expensive to compute. ''' # Init log.info('Computing the joint model...') A = [None for b in self.breakpoints] B = [None for b in self.breakpoints] # We need to make sure that we're not masking the transits we are # trying to fit! # NOTE: If there happens to be an index that *SHOULD* be masked during # a transit (cosmic ray, detector anomaly), update `self.badmask` # to include that index. # Bad data points are *never* used in the regression. if self.transit_model is not None: outmask = np.array(self.outmask) transitmask = np.array(self.transitmask) transit_inds = np.where( np.sum([tm(self.time) for tm in self.transit_model], axis=0) < 0)[0] self.outmask = np.array( [i for i in self.outmask if i not in transit_inds]) self.transitmask = np.array( [i for i in self.transitmask if i not in transit_inds]) # Loop over all chunks for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b, pad=False) c = self.get_chunk(b, pad=False) # The X^2 matrices A[b] = np.zeros((len(m), len(m))) B[b] = np.zeros((len(c), len(m))) # Loop over all orders for n in range(self.pld_order): # Only compute up to the current PLD order if (self.lam_idx >= n) and (self.lam[b][n] is not None): XM = self.X(n, m) XC = self.X(n, c) A[b] += self.lam[b][n] * np.dot(XM, XM.T) B[b] += self.lam[b][n] * np.dot(XC, XM.T) del XM, XC # Merge chunks. BIGA and BIGB are sparse, but unfortunately # scipy.sparse doesn't handle sparse matrix inversion all that # well when the *result* is not itself sparse. So we're sticking # with regular np.linalg. BIGA = block_diag(*A) del A BIGB = block_diag(*B) del B # Compute the full covariance matrix mK = GetCovariance(self.kernel, self.kernel_params, self.apply_mask( self.time), self.apply_mask(self.fraw_err)) # The normalized, masked flux array f = self.apply_mask(self.fraw) med = np.nanmedian(f) f -= med # Are we computing a joint transit model? 
if self.transit_model is not None: # Get the unmasked indices m = self.apply_mask() # Subtract off the mean total transit model mean_transit_model = med * \ np.sum([tm.depth * tm(self.time[m]) for tm in self.transit_model], axis=0) f -= mean_transit_model # Now add each transit model to the matrix of regressors for tm in self.transit_model: XM = tm(self.time[m]).reshape(-1, 1) XC = tm(self.time).reshape(-1, 1) BIGA += med ** 2 * tm.var_depth * np.dot(XM, XM.T) BIGB += med ** 2 * tm.var_depth * np.dot(XC, XM.T) del XM, XC # Dot the inverse of the covariance matrix W = np.linalg.solve(mK + BIGA, f) self.model = np.dot(BIGB, W) # Compute the transit weights and maximum likelihood transit model w_trn = med ** 2 * np.concatenate([tm.var_depth * np.dot( tm(self.time[m]).reshape(1, -1), W) for tm in self.transit_model]) self.transit_depth = np.array( [med * tm.depth + w_trn[i] for i, tm in enumerate(self.transit_model)]) / med # Remove the transit prediction from the model self.model -= np.dot(np.hstack([tm(self.time).reshape(-1, 1) for tm in self.transit_model]), w_trn) else: # No transit model to worry about W = np.linalg.solve(mK + BIGA, f) self.model = np.dot(BIGB, W) # Subtract the global median self.model -= np.nanmedian(self.model) # Restore the mask if self.transit_model is not None: self.outmask = outmask self.transitmask = transitmask # Get the CDPP and reset the weights self.cdpp_arr = self.get_cdpp_arr() self.cdpp = self.get_cdpp() self._weights = None
Returns the outlier mask, an array of indices corresponding to the non-outliers. :param numpy.ndarray x: If specified, returns the masked version of \ :py:obj:`x` instead. Default :py:obj:`None` def apply_mask(self, x=None): ''' Returns the outlier mask, an array of indices corresponding to the non-outliers. :param numpy.ndarray x: If specified, returns the masked version of \ :py:obj:`x` instead. Default :py:obj:`None` ''' if x is None: return np.delete(np.arange(len(self.time)), self.mask) else: return np.delete(x, self.mask, axis=0)
Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return :param numpy.ndarray x: If specified, applies the mask to array \ :py:obj:`x`. Default :py:obj:`None` def get_chunk(self, b, x=None, pad=True): ''' Returns the indices corresponding to a given light curve chunk. :param int b: The index of the chunk to return :param numpy.ndarray x: If specified, applies the mask to array \ :py:obj:`x`. Default :py:obj:`None` ''' M = np.arange(len(self.time)) if b > 0: res = M[(M > self.breakpoints[b - 1] - int(pad) * self.bpad) & (M <= self.breakpoints[b] + int(pad) * self.bpad)] else: res = M[M <= self.breakpoints[b] + int(pad) * self.bpad] if x is None: return res else: return x[res]
Computes the PLD weights vector :py:obj:`w`.

.. warning:: Deprecated and not thoroughly tested.

def get_weights(self):
    '''
    Computes the PLD weights vector :py:obj:`w`.

    .. warning:: Deprecated and not thoroughly tested.

    '''

    log.info("Computing PLD weights...")

    # Loop over all chunks
    weights = [None for i in range(len(self.breakpoints))]
    for b, brkpt in enumerate(self.breakpoints):

        # Masks for current chunk
        m = self.get_masked_chunk(b)
        c = self.get_chunk(b)

        # This block of the masked covariance matrix
        _mK = GetCovariance(self.kernel, self.kernel_params,
                            self.time[m], self.fraw_err[m])

        # This chunk of the normalized flux
        f = self.fraw[m] - np.nanmedian(self.fraw)

        # Loop over all orders
        _A = [None for i in range(self.pld_order)]
        for n in range(self.pld_order):
            if self.lam_idx >= n:
                X = self.X(n, m)
                _A[n] = np.dot(X, X.T)
                del X

        # Compute the weights
        A = np.sum([l * a for l, a in zip(self.lam[b], _A)
                    if l is not None], axis=0)
        W = np.linalg.solve(_mK + A, f)
        weights[b] = [l * np.dot(self.X(n, m).T, W)
                      for n, l in enumerate(self.lam[b]) if l is not None]

    self._weights = weights
Returns the CDPP value in *ppm* for each of the chunks in the light curve. def get_cdpp_arr(self, flux=None): ''' Returns the CDPP value in *ppm* for each of the chunks in the light curve. ''' if flux is None: flux = self.flux return np.array([self._mission.CDPP(flux[self.get_masked_chunk(b)], cadence=self.cadence) for b, _ in enumerate(self.breakpoints)])
Returns the scalar CDPP for the light curve. def get_cdpp(self, flux=None): ''' Returns the scalar CDPP for the light curve. ''' if flux is None: flux = self.flux return self._mission.CDPP(self.apply_mask(flux), cadence=self.cadence)
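Hypothetical usage of the two CDPP accessors, assuming `star` is a de-trended :py:obj:`everest` model instance; the attribute names follow the source above:

# Per-chunk and global 6-hr CDPP of the de-trended flux, in ppm
print(star.get_cdpp_arr())
print(star.get_cdpp())
# CDPP of the raw flux, for comparison
print(star.get_cdpp(star.fraw))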
Plots the aperture and the pixel images at the beginning, middle, and end of the time series. Also plots a high resolution image of the target, if available. def plot_aperture(self, axes, labelsize=8): ''' Plots the aperture and the pixel images at the beginning, middle, and end of the time series. Also plots a high resolution image of the target, if available. ''' log.info('Plotting the aperture...') # Get colormap plasma = pl.get_cmap('plasma') plasma.set_bad(alpha=0) # Get aperture contour def PadWithZeros(vector, pad_width, iaxis, kwargs): vector[:pad_width[0]] = 0 vector[-pad_width[1]:] = 0 return vector ny, nx = self.pixel_images[0].shape contour = np.zeros((ny, nx)) contour[np.where(self.aperture)] = 1 contour = np.lib.pad(contour, 1, PadWithZeros) highres = zoom(contour, 100, order=0, mode='nearest') extent = np.array([-1, nx, -1, ny]) # Plot first, mid, and last TPF image title = ['start', 'mid', 'end'] for i, image in enumerate(self.pixel_images): ax = axes[i] ax.imshow(image, aspect='auto', interpolation='nearest', cmap=plasma) ax.contour(highres, levels=[0.5], extent=extent, origin='lower', colors='r', linewidths=1) # Check for saturated columns for x in range(self.aperture.shape[0]): for y in range(self.aperture.shape[1]): if self.aperture[x][y] == AP_SATURATED_PIXEL: ax.fill([y - 0.5, y + 0.5, y + 0.5, y - 0.5], [x - 0.5, x - 0.5, x + 0.5, x + 0.5], fill=False, hatch='xxxxx', color='r', lw=0) ax.axis('off') ax.set_xlim(-0.7, nx - 0.3) ax.set_ylim(-0.7, ny - 0.3) ax.annotate(title[i], xy=(0.5, 0.975), xycoords='axes fraction', ha='center', va='top', size=labelsize, color='w') if i == 1: for source in self.nearby: ax.annotate('%.1f' % source['mag'], xy=(source['x'] - source['x0'], source['y'] - source['y0']), ha='center', va='center', size=labelsize - 2, color='w', fontweight='bold') # Plot hi res image if self.hires is not None: ax = axes[-1] ax.imshow(self.hires, aspect='auto', extent=(-0.5, nx - 0.5, -0.5, ny - 0.5), interpolation='bicubic', cmap=plasma) ax.contour(highres, levels=[0.5], extent=extent, origin='lower', colors='r', linewidths=1) ax.axis('off') ax.set_xlim(-0.7, nx - 0.3) ax.set_ylim(-0.7, ny - 0.3) ax.annotate('hires', xy=(0.5, 0.975), xycoords='axes fraction', ha='center', va='top', size=labelsize, color='w') else: ax = axes[-1] ax.axis('off')
r"""
Compute the masked & unmasked overfitting metrics for the light curve.

This routine injects a transit model given by `tau` at every cadence
in the light curve and recovers the transit depth when (1) leaving
the transit unmasked and (2) masking the transit prior to performing
regression.

:param tau: A function or callable that accepts two arguments, \
    `time` and `t0`, and returns an array corresponding to a \
    zero-mean, unit depth transit model centered at \
    `t0` and evaluated at `time`. \
    The easiest way to provide this is to use an instance of \
    :py:class:`everest.transit.TransitShape`. Default is \
    :py:class:`everest.transit.TransitShape(dur=0.1)`, a transit \
    with solar-like limb darkening and a duration of 0.1 days.
:param bool plot: Plot the results as a PDF? Default :py:obj:`True`
:param bool clobber: Overwrite the results if present? Default \
    :py:obj:`False`
:param int w: The size of the masking window in cadences for \
    computing the masked overfitting metric. Default `9` \
    (about 4.5 hours for `K2` long cadence).
:returns: An instance of `everest.basecamp.Overfitting`.

def overfit(self, tau=None, plot=True, clobber=False, w=9, **kwargs):
    r"""
    Compute the masked & unmasked overfitting metrics for the light curve.

    This routine injects a transit model given by `tau` at every cadence
    in the light curve and recovers the transit depth when (1) leaving
    the transit unmasked and (2) masking the transit prior to performing
    regression.

    :param tau: A function or callable that accepts two arguments, \
        `time` and `t0`, and returns an array corresponding to a \
        zero-mean, unit depth transit model centered at \
        `t0` and evaluated at `time`. \
        The easiest way to provide this is to use an instance of \
        :py:class:`everest.transit.TransitShape`. Default is \
        :py:class:`everest.transit.TransitShape(dur=0.1)`, a transit \
        with solar-like limb darkening and a duration of 0.1 days.
    :param bool plot: Plot the results as a PDF? Default :py:obj:`True`
    :param bool clobber: Overwrite the results if present? Default \
        :py:obj:`False`
    :param int w: The size of the masking window in cadences for \
        computing the masked overfitting metric. Default `9` \
        (about 4.5 hours for `K2` long cadence).
    :returns: An instance of `everest.basecamp.Overfitting`.
""" fname = os.path.join(self.dir, self.name + '_overfit.npz') figname = os.path.join(self.dir, self.name) # Compute if not os.path.exists(fname) or clobber: # Baseline med = np.nanmedian(self.fraw) # Default transit model if tau is None: tau = TransitShape(dur=0.1) # The overfitting metrics O1 = [None for brkpt in self.breakpoints] O2 = [None for brkpt in self.breakpoints] O3 = [None for brkpt in self.breakpoints] O4 = [None for brkpt in self.breakpoints] O5 = [None for brkpt in self.breakpoints] # Loop over all chunks for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b, pad=False) time = self.time[m] ferr = self.fraw_err[m] / med y = self.fraw[m] / med - 1 # The metrics we're computing here O1[b] = np.zeros(len(y)) * np.nan O2[b] = np.zeros(len(y)) * np.nan O3[b] = np.zeros(len(y)) * np.nan O4[b] = np.zeros(len(y)) * np.nan O5[b] = np.zeros(len(y)) * np.nan # Compute the astrophysical covariance and its inverse log.info("Computing the covariance...") if self.kernel == 'Basic': wh, am, ta = self.kernel_params wh /= med am /= med kernel_params = [wh, am, ta] elif self.kernel == 'QuasiPeriodic': wh, am, ga, pe = self.kernel_params wh /= med am /= med kernel_params = [wh, am, ga, pe] K = GetCovariance(self.kernel, kernel_params, time, ferr) Kinv = cho_solve((cholesky(K), False), np.eye(len(time))) # Loop over all orders log.info("Computing some large matrices...") X = [None for n in range(self.pld_order)] XL = [None for n in range(self.pld_order)] XLX = [None for n in range(self.pld_order)] for n in range(self.pld_order): if (self.lam_idx >= n) and (self.lam[b][n] is not None): X[n] = self.X(n, m, **kwargs) XL[n] = (self.lam[b][n] / med ** 2) * X[n] XLX[n] = np.dot(XL[n], X[n].T) X = np.hstack(X) XL = np.hstack(XL) XLX = np.sum(XLX, axis=0) # The full covariance C = XLX + K # The unmasked linear problem log.info("Solving the unmasked linear problem...") m = np.dot(XLX, np.linalg.solve(C, y)) m -= np.nanmedian(m) f = y - m R = np.linalg.solve(C, XLX.T).T # The masked linear problem log.info("Solving the masked linear problem...") A = MaskSolve(C, y, w=w) # Now loop through and compute the metric log.info("Computing the overfitting metrics...") for n in prange(len(y)): # # *** Unmasked overfitting metric *** # # Evaluate the sparse transit model TAU = tau(time, t0=time[n]) i = np.where(TAU < 0)[0] TAU = TAU.reshape(-1, 1) # Fast sparse algebra AA = np.dot(np.dot(TAU[i].T, Kinv[i, :][:, i]), TAU[i]) BB = np.dot(TAU[i].T, Kinv[i, :]) CC = TAU - np.dot(R[:, i], TAU[i]) O1[b][n] = AA O2[b][n] = np.dot(BB, CC) O3[b][n] = np.dot(BB, f) O4[b][n] = np.dot(BB, y) # # *** Masked overfitting metric *** # # The current mask and mask centerpoint mask = np.arange(n, n + w) j = n + (w + 1) // 2 - 1 if j >= len(y) - w: continue # The regularized design matrix # This is the same as # XLmX[:, n - 1] = \ # np.dot(XL, np.delete(X, mask, axis=0).T)[:, n - 1] if n == 0: XLmX = np.dot(XL, np.delete(X, mask, axis=0).T) else: XLmX[:, n - 1] = np.dot(XL, X[n - 1, :].T) # The linear solution to this step m = np.dot(XLmX, A[n]) # Evaluate the sparse transit model TAU = tau(time, t0=time[j]) i = np.where(TAU < 0)[0] TAU = TAU[i].reshape(-1, 1) # Dot the transit model in den = np.dot(np.dot(TAU.T, Kinv[i, :][:, i]), TAU) num = np.dot(TAU.T, Kinv[i, :]) # Compute the overfitting metric # Divide this number by a depth # to get the overfitting for that # particular depth. O5[b][j] = -np.dot(num, y - m) / den # Save! 
np.savez(fname, O1=O1, O2=O2, O3=O3, O4=O4, O5=O5) else: data = np.load(fname) O1 = data['O1'] O2 = data['O2'] O3 = data['O3'] O4 = data['O4'] O5 = data['O5'] # Plot if plot and (clobber or not os.path.exists(figname + '_overfit.pdf')): log.info("Plotting the overfitting metrics...") # Masked time array time = self.apply_mask(self.time) # Plot the final corrected light curve ovr = OVERFIT() self.plot_info(ovr) # Loop over the two metrics for kind, axes, axesh in zip(['unmasked', 'masked'], [ovr.axes1, ovr.axes2], [ovr.axes1h, ovr.axes2h]): # Loop over three depths for depth, ax, axh in zip([0.01, 0.001, 0.0001], axes, axesh): # Get the metric if kind == 'unmasked': metric = 1 - (np.hstack(O2) + np.hstack(O3) / depth) / np.hstack(O1) color = 'r' elif kind == 'masked': metric = np.hstack(O5) / depth color = 'b' else: raise ValueError("Invalid metric.") # Median and median absolute deviation med = np.nanmedian(metric) mad = np.nanmedian(np.abs(metric - med)) # Plot the metric as a function of time ax.plot(time, metric, 'k.', alpha=0.5, ms=2) ax.plot(time, metric, 'k-', alpha=0.1, lw=0.5) ylim = (-0.2, 1.0) ax.margins(0, None) ax.axhline(0, color='k', lw=1, alpha=0.5) ax.set_ylim(*ylim) if kind == 'masked' and depth == 0.0001: ax.set_xlabel('Time (days)', fontsize=14) else: ax.set_xticklabels([]) # Plot the histogram rng = (max(ylim[0], np.nanmin(metric)), min(ylim[1], np.nanmax(metric))) axh.hist(metric, bins=30, range=rng, orientation="horizontal", histtype="step", fill=False, color='k') axh.axhline(med, color=color, ls='-', lw=1) axh.axhspan(med - mad, med + mad, color=color, alpha=0.1) axh.axhline(0, color='k', lw=1, alpha=0.5) axh.yaxis.tick_right() axh.set_ylim(*ax.get_ylim()) axh.set_xticklabels([]) bbox = dict(fc="w", ec="1", alpha=0.5) info = r"$\mathrm{med}=%.3f$" % med + \ "\n" + r"$\mathrm{mad}=%.3f$" % mad axh.annotate(info, xy=(0.1, 0.925), xycoords='axes fraction', ha="left", va="top", bbox=bbox, color=color) bbox = dict(fc="w", ec="1", alpha=0.95) ax.annotate("%s overfitting metric" % kind, xy=(1-0.035, 0.92), xycoords='axes fraction', ha='right', va='top', bbox=bbox, color=color) pl.figtext(0.025, 0.77, "depth = 0.01", rotation=90, ha='left', va='center', fontsize=18) pl.figtext(0.025, 0.48, "depth = 0.001", rotation=90, ha='left', va='center', fontsize=18) pl.figtext(0.025, 0.19, "depth = 0.0001", rotation=90, ha='left', va='center', fontsize=18) ovr.fig.savefig(figname + '_overfit.pdf') log.info("Saved plot to %s_overfit.pdf" % figname) pl.close() return Overfitting(O1, O2, O3, O4, O5, figname + '_overfit.pdf')
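Hypothetical usage, tying this routine to the :py:class:`Overfitting` accessors defined earlier; `star` stands in for an :py:obj:`everest` model instance and the depth value is illustrative:

ovr = star.overfit(w=9, plot=True)
metric = ovr.unmasked(depth=0.001)   # unmasked metric at 0.1% depth
ovr.show()                           # open the PDF summary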
r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`. def lnlike(self, model, refactor=False, pos_tol=2.5, neg_tol=50., full_output=False): r""" Return the likelihood of the astrophysical model `model`. Returns the likelihood of `model` marginalized over the PLD model. :param ndarray model: A vector of the same shape as `self.time` \ corresponding to the astrophysical model. :param bool refactor: Re-compute the Cholesky decomposition? This \ typically does not need to be done, except when the PLD \ model changes. Default :py:obj:`False`. :param float pos_tol: the positive (i.e., above the median) \ outlier tolerance in standard deviations. :param float neg_tol: the negative (i.e., below the median) \ outlier tolerance in standard deviations. :param bool full_output: If :py:obj:`True`, returns the maximum \ likelihood model amplitude and the variance on the amplitude \ in addition to the log-likelihood. In the case of a transit \ model, these are the transit depth and depth variance. Default \ :py:obj:`False`. """ lnl = 0 # Re-factorize the Cholesky decomposition? 
try: self._ll_info except AttributeError: refactor = True if refactor: # Smooth the light curve and reset the outlier mask t = np.delete(self.time, np.concatenate([self.nanmask, self.badmask])) f = np.delete(self.flux, np.concatenate([self.nanmask, self.badmask])) f = SavGol(f) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) pos_inds = np.where((f > med + pos_tol * MAD))[0] pos_inds = np.array([np.argmax(self.time == t[i]) for i in pos_inds]) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) neg_inds = np.where((f < med - neg_tol * MAD))[0] neg_inds = np.array([np.argmax(self.time == t[i]) for i in neg_inds]) outmask = np.array(self.outmask) transitmask = np.array(self.transitmask) self.outmask = np.concatenate([neg_inds, pos_inds]) self.transitmask = np.array([], dtype=int) # Now re-factorize the Cholesky decomposition self._ll_info = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Masks for current chunk m = self.get_masked_chunk(b, pad=False) # This block of the masked covariance matrix K = GetCovariance(self.kernel, self.kernel_params, self.time[m], self.fraw_err[m]) # The masked X.L.X^T term A = np.zeros((len(m), len(m))) for n in range(self.pld_order): XM = self.X(n, m) A += self.lam[b][n] * np.dot(XM, XM.T) K += A self._ll_info[b] = [cho_factor(K), m] # Reset the outlier masks self.outmask = outmask self.transitmask = transitmask # Compute the likelihood for each chunk amp = [None for b in self.breakpoints] var = [None for b in self.breakpoints] for b, brkpt in enumerate(self.breakpoints): # Get the inverse covariance and the mask CDK = self._ll_info[b][0] m = self._ll_info[b][1] # Compute the maximum likelihood model amplitude # (for transits, this is the transit depth) var[b] = 1. / np.dot(model[m], cho_solve(CDK, model[m])) amp[b] = var[b] * np.dot(model[m], cho_solve(CDK, self.fraw[m])) # Compute the residual r = self.fraw[m] - amp[b] * model[m] # Finally, compute the likelihood lnl += -0.5 * np.dot(r, cho_solve(CDK, r)) if full_output: # We need to multiply the Gaussians for all chunks to get the # amplitude and amplitude variance for the entire dataset vari = var[0] ampi = amp[0] for v, a in zip(var[1:], amp[1:]): ampi = (ampi * v + a * vari) / (vari + v) vari = vari * v / (vari + v) med = np.nanmedian(self.fraw) return lnl, ampi / med, vari / med ** 2 else: return lnl
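Hypothetical usage of the marginal likelihood, assuming `star` is a de-trended model and :py:class:`TransitShape` behaves as described in :py:meth:`overfit`; the epoch is illustrative only:

import numpy as np

trn = TransitShape(dur=0.1)
model = trn(star.time, t0=2455.0)    # unit-depth transit at a trial epoch
lnl, depth, var = star.lnlike(model, full_output=True)
print(lnl, depth, np.sqrt(var))      # log-likelihood, depth, 1-sigma error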
Run one of the :py:obj:`everest` models with injected transits and attempt to recover the transit depth at the end with a simple linear regression with a polynomial baseline. The depth is stored in the :py:obj:`inject` attribute of the model (a dictionary) as :py:obj:`rec_depth`. A control injection is also performed, in which the transits are injected into the de-trended data; the recovered depth in the control run is stored in :py:obj:`inject` as :py:obj:`rec_depth_control`.

:param int ID: The target id
:param str inj_model: The name of the :py:obj:`everest` model to run. \
    Default `"nPLD"`
:param float t0: The transit ephemeris in days. Default is to draw from \
    the uniform distribution [0., :py:obj:`per`)
:param float per: The injected planet period in days. Default is to draw \
    from the uniform distribution [3, 10]
:param float dur: The transit duration in days. Must be in the range \
    [0.05, 0.5]. Default 0.1
:param float depth: The fractional transit depth. Default 0.001
:param bool mask: Explicitly mask the in-transit cadences when computing \
    the PLD model? Default :py:obj:`False`
:param float trn_win: The size of the transit window in units of the \
    transit duration
:param int poly_order: The order of the polynomial used to fit the \
    continuum

def Inject(ID, inj_model='nPLD', t0=None, per=None, dur=0.1, depth=0.001,
           mask=False, trn_win=5, poly_order=3, make_fits=False, **kwargs):
    '''
    Run one of the :py:obj:`everest` models with injected transits and
    attempt to recover the transit depth at the end with a simple linear
    regression with a polynomial baseline. The depth is stored in the
    :py:obj:`inject` attribute of the model (a dictionary) as
    :py:obj:`rec_depth`. A control injection is also performed, in which
    the transits are injected into the de-trended data; the recovered
    depth in the control run is stored in :py:obj:`inject` as
    :py:obj:`rec_depth_control`.

    :param int ID: The target id
    :param str inj_model: The name of the :py:obj:`everest` model to run. \
        Default `"nPLD"`
    :param float t0: The transit ephemeris in days. Default is to draw from \
        the uniform distribution [0., :py:obj:`per`)
    :param float per: The injected planet period in days. Default is to draw \
        from the uniform distribution [3, 10]
    :param float dur: The transit duration in days. Must be in the range \
        [0.05, 0.5]. Default 0.1
    :param float depth: The fractional transit depth. Default 0.001
    :param bool mask: Explicitly mask the in-transit cadences when computing \
        the PLD model? Default :py:obj:`False`
    :param float trn_win: The size of the transit window in units of the \
        transit duration
    :param int poly_order: The order of the polynomial used to fit the \
        continuum

    '''

    # Randomize the planet params
    if per is None:
        a = 3.
        b = 10.
        per = a + (b - a) * np.random.random()
    if t0 is None:
        t0 = per * np.random.random()

    # Get the actual class
    _model = eval(inj_model)
    inject = {'t0': t0, 'per': per, 'dur': dur, 'depth': depth,
              'mask': mask, 'poly_order': poly_order, 'trn_win': trn_win}

    # Define the injection class
    class Injection(_model):
        '''
        The :py:obj:`Injection` class is a special subclass of a
        user-selected :py:obj:`everest` model.
        See :py:func:`Inject` for more details.
''' def __init__(self, *args, **kwargs): ''' ''' self.inject = kwargs.pop('inject', None) self.parent_class = kwargs.pop('parent_class', None) self.kwargs = kwargs super(Injection, self).__init__(*args, **kwargs) @property def name(self): ''' ''' if self.inject['mask']: maskchar = 'M' else: maskchar = 'U' return '%s_Inject_%s%g' % (self.parent_class, maskchar, self.inject['depth']) def load_tpf(self): ''' Loads the target pixel files and injects transits at the pixel level. ''' # Load the TPF super(Injection, self).load_tpf() log.info("Injecting transits...") # Inject the transits into the regular data transit_model = Transit( self.time, t0=self.inject['t0'], per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['depth']) for i in range(self.fpix.shape[1]): self.fpix[:, i] *= transit_model self.fraw = np.sum(self.fpix, axis=1) if self.inject['mask']: self.transitmask = np.array(list(set(np.concatenate( [self.transitmask, np.where(transit_model < 1.)[0]]))), dtype=int) # Update the PLD normalization self.get_norm() def recover_depth(self): ''' Recovers the injected transit depth from the long cadence data with a simple LLS solver. The results are all stored in the :py:obj:`inject` attribute of the model. ''' # Control run transit_model = Transit( self.time, t0=self.inject['t0'], per=self.inject['per'], dur=self.inject['dur'], depth=self.inject['depth']) kwargs = dict(self.kwargs) kwargs.update({'clobber': False}) control = eval(self.parent_class)( self.ID, is_parent=True, **kwargs) control.fraw *= transit_model # Get params log.info("Recovering transit depth...") t0 = self.inject['t0'] per = self.inject['per'] dur = self.inject['dur'] depth = self.inject['depth'] trn_win = self.inject['trn_win'] poly_order = self.inject['poly_order'] for run, tag in zip([self, control], ['', '_control']): # Compute the model mask = np.array( list(set(np.concatenate([run.badmask, run.nanmask]))), dtype=int) flux = np.delete(run.flux / np.nanmedian(run.flux), mask) time = np.delete(run.time, mask) transit_model = (Transit(time, t0=t0, per=per, dur=dur, depth=depth) - 1) / depth # Count the transits t0 += np.ceil((time[0] - dur - t0) / per) * per ttimes0 = np.arange(t0, time[-1] + dur, per) tinds = [] for tt in ttimes0: # Get indices for this chunk inds = np.where(np.abs(time - tt) < trn_win * dur / 2.)[0] # Ensure there's a transit in this chunk, and that # there are enough points for the polynomial fit if np.any(transit_model[inds] < 0.) 
and \
                        len(inds) > poly_order:
                    tinds.append(inds)

            # Our design matrix
            sz = (poly_order + 1) * len(tinds)
            X = np.empty((0, 1 + sz), dtype=float)
            Y = np.array([], dtype=float)
            T = np.array([], dtype=float)

            # Loop over all transits
            for i, inds in enumerate(tinds):
                # Get the transit model
                trnvec = transit_model[inds].reshape(-1, 1)
                # Normalize the time array
                t = time[inds]
                t = (t - t[0]) / (t[-1] - t[0])
                # Cumulative arrays
                T = np.append(T, time[inds])
                Y = np.append(Y, flux[inds])
                # Polynomial vector
                polyvec = np.array(
                    [t ** o for o in range(0, poly_order + 1)]).T
                # Update the design matrix with this chunk
                lzeros = np.zeros((len(t), i * (poly_order + 1)))
                rzeros = np.zeros(
                    (len(t), sz - (i + 1) * (poly_order + 1)))
                chunk = np.hstack((trnvec, lzeros, polyvec, rzeros))
                X = np.vstack((X, chunk))

            # Get the relative depth
            A = np.dot(X.T, X)
            B = np.dot(X.T, Y)
            C = np.linalg.solve(A, B)
            rec_depth = C[0]

            # Get the uncertainties
            sig = 1.4826 * \
                np.nanmedian(np.abs(flux - np.nanmedian(flux))
                             ) / np.nanmedian(flux)
            cov = sig ** 2 * np.linalg.solve(A, np.eye(A.shape[0]))
            err = np.sqrt(np.diag(cov))
            rec_depth_err = err[0]

            # Store the results
            self.inject.update(
                {'rec_depth%s' % tag: rec_depth,
                 'rec_depth_err%s' % tag: rec_depth_err})

            # Store the detrended, folded data
            D = (Y - np.dot(C[1:], X[:, 1:].T) +
                 np.nanmedian(Y)) / np.nanmedian(Y)
            T = (T - t0 - per / 2.) % per - per / 2.
            self.inject.update(
                {'fold_time%s' % tag: T, 'fold_flux%s' % tag: D})

    def plot_final(self, ax):
        '''
        Plots the injection recovery results.

        '''
        # `mpl_toolkits.axes_grid` was removed from Matplotlib;
        # `axes_grid1` provides the same `inset_axes`
        from mpl_toolkits.axes_grid1.inset_locator import inset_axes
        ax.axis('off')
        ax1 = inset_axes(ax, width="47%", height="100%", loc=6)
        ax2 = inset_axes(ax, width="47%", height="100%", loc=7)

        # Plot the recovered folded transits
        ax1.plot(self.inject['fold_time'], self.inject['fold_flux'],
                 'k.', alpha=0.3)
        x = np.linspace(np.min(self.inject['fold_time']),
                        np.max(self.inject['fold_time']), 500)
        try:
            y = Transit(
                x, t0=0., per=self.inject['per'],
                dur=self.inject['dur'], depth=self.inject['rec_depth'])
        except Exception:
            # Log the error, and carry on
            exctype, value, tb = sys.exc_info()
            for line in traceback.format_exception_only(exctype, value):
                l = line.replace('\n', '')
                log.error(l)
            y = np.ones_like(x) * np.nan
        ax1.plot(x, y, 'r-')
        ax1.annotate('INJECTED', xy=(0.98, 0.025),
                     xycoords='axes fraction',
                     ha='right', va='bottom', fontsize=10,
                     alpha=0.5, fontweight='bold')
        ax1.annotate('True depth:\nRecovered depth:',
                     xy=(0.02, 0.025), xycoords='axes fraction',
                     ha='left', va='bottom', fontsize=6, color='r')
        ax1.annotate('%.6f\n%.6f' % (self.inject['depth'],
                                     self.inject['rec_depth']),
                     xy=(0.4, 0.025), xycoords='axes fraction',
                     ha='left', va='bottom', fontsize=6, color='r')
        ax1.margins(0, None)
        ax1.ticklabel_format(useOffset=False)

        # Plot the recovered folded transits (control)
        ax2.plot(self.inject['fold_time_control'],
                 self.inject['fold_flux_control'], 'k.', alpha=0.3)
        x = np.linspace(np.min(self.inject['fold_time_control']),
                        np.max(self.inject['fold_time_control']), 500)
        try:
            y = Transit(
                x, t0=0., per=self.inject['per'],
                dur=self.inject['dur'],
                depth=self.inject['rec_depth_control'])
        except Exception:
            # Log the error, and carry on
            exctype, value, tb = sys.exc_info()
            for line in traceback.format_exception_only(exctype, value):
                l = line.replace('\n', '')
                log.error(l)
            y = np.ones_like(x) * np.nan
        ax2.plot(x, y, 'r-')
        ax2.annotate('CONTROL', xy=(0.98, 0.025),
                     xycoords='axes fraction',
                     ha='right', va='bottom', fontsize=10,
                     alpha=0.5, fontweight='bold')
        ax2.annotate('True depth:\nRecovered depth:', xy=(0.02, 0.025),
xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax2.annotate('%.6f\n%.6f' % (self.inject['depth'], self.inject['rec_depth_control']), xy=(0.4, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=6, color='r') ax2.margins(0, None) ax2.ticklabel_format(useOffset=False) N = int(0.995 * len(self.inject['fold_flux_control'])) hi, lo = self.inject['fold_flux_control'][np.argsort( self.inject['fold_flux_control'])][[N, -N]] fsort = self.inject['fold_flux_control'][np.argsort( self.inject['fold_flux_control'])] pad = (hi - lo) * 0.2 ylim = (lo - 2 * pad, hi + pad) ax2.set_ylim(ylim) ax1.set_ylim(ylim) ax2.set_yticklabels([]) for tick in ax1.get_xticklabels() + ax1.get_yticklabels() + \ ax2.get_xticklabels(): tick.set_fontsize(5) def finalize(self): ''' Calls the depth recovery routine at the end of the de-trending step. ''' super(Injection, self).finalize() self.recover_depth() return Injection(ID, inject=inject, parent_class=inj_model, make_fits=make_fits, **kwargs)
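Hypothetical usage of the injection machinery; the EPIC ID is illustrative, and the recovered quantities are available once the de-trending run completes (the depth recovery is triggered in `finalize`):

star = Inject(201367065, inj_model='nPLD', depth=0.001, mask=True)
print(star.inject['rec_depth'], star.inject['rec_depth_err'])
print(star.inject['rec_depth_control'])   # control run, for comparison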
Instance of :attr:`model_type` with id :attr:`object_id`. def object(self, session): '''Instance of :attr:`model_type` with id :attr:`object_id`.''' if not hasattr(self, '_object'): pkname = self.model_type._meta.pkname() query = session.query(self.model_type).filter(**{pkname: self.object_id}) return query.items(callback=self.__set_object) else: return self._object
Returns a :py:obj:`DataContainer` instance with the raw data for the target.

:param int ID: The target ID number
:param int season: The observing season. Default :py:obj:`None`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param bool delete_raw: Delete the FITS TPF after processing it? Default :py:obj:`False`
:param str aperture_name: The name of the aperture to use. Select `custom` to call \
    :py:func:`GetCustomAperture`. Default :py:obj:`None`
:param str saturated_aperture_name: The name of the aperture to use if the target is \
    saturated. Default :py:obj:`None`
:param int max_pixels: Maximum number of pixels in the TPF. Default :py:obj:`None`
:param bool download_only: Download raw TPF and return? Default :py:obj:`False`
:param float saturation_tolerance: Target is considered saturated if flux is within \
    this fraction of the pixel well depth. Default :py:obj:`None`
:param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider outliers when \
    computing the model. Default :py:obj:`None`

def GetData(ID, season=None, cadence='lc', clobber=False, delete_raw=False,
            aperture_name=None, saturated_aperture_name=None,
            max_pixels=None, download_only=False, saturation_tolerance=None,
            bad_bits=None, **kwargs):
    '''
    Returns a :py:obj:`DataContainer` instance with the raw data
    for the target.

    :param int ID: The target ID number
    :param int season: The observing season. Default :py:obj:`None`
    :param str cadence: The light curve cadence. Default `lc`
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param bool delete_raw: Delete the FITS TPF after processing it? Default :py:obj:`False`
    :param str aperture_name: The name of the aperture to use. Select `custom` to call \
        :py:func:`GetCustomAperture`. Default :py:obj:`None`
    :param str saturated_aperture_name: The name of the aperture to use if the target is \
        saturated. Default :py:obj:`None`
    :param int max_pixels: Maximum number of pixels in the TPF. Default :py:obj:`None`
    :param bool download_only: Download raw TPF and return? Default :py:obj:`False`
    :param float saturation_tolerance: Target is considered saturated if flux is within \
        this fraction of the pixel well depth. Default :py:obj:`None`
    :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider outliers when \
        computing the model. Default :py:obj:`None`

    '''

    raise NotImplementedError('This mission is not yet supported.')
Return `neighbors` random bright stars on the same module as `EPIC`. :param int ID: The target ID number :param str model: The :py:obj:`everest` model name. Only used when imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default None :param str aperture_name: The name of the aperture to use. Select `custom` to call \ :py:func:`GetCustomAperture`. Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. Default :py:obj:`None` :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. Default :py:obj:`None` def GetNeighbors(ID, model = None, neighbors = None, mag_range = None, cdpp_range = None, aperture_name = None, cadence = 'lc', **kwargs): ''' Return `neighbors` random bright stars on the same module as `EPIC`. :param int ID: The target ID number :param str model: The :py:obj:`everest` model name. Only used when imposing CDPP bounds. Default :py:obj:`None` :param int neighbors: Number of neighbors to return. Default None :param str aperture_name: The name of the aperture to use. Select `custom` to call \ :py:func:`GetCustomAperture`. Default :py:obj:`None` :param str cadence: The light curve cadence. Default `lc` :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. Default :py:obj:`None` :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. Default :py:obj:`None` ''' raise NotImplementedError('This mission is not yet supported.')
Returns the `time` and `flux` for a given EPIC `ID` and a given `pipeline` name. def get(ID, pipeline='everest2', campaign=None): ''' Returns the `time` and `flux` for a given EPIC `ID` and a given `pipeline` name. ''' log.info('Downloading %s light curve for %d...' % (pipeline, ID)) # Dev version hack if EVEREST_DEV: if pipeline.lower() == 'everest2' or pipeline.lower() == 'k2sff': from . import Season, TargetDirectory, FITSFile if campaign is None: campaign = Season(ID) fits = os.path.join(TargetDirectory( ID, campaign), FITSFile(ID, campaign)) newdir = os.path.join(KPLR_ROOT, "data", "everest", str(ID)) if not os.path.exists(newdir): os.makedirs(newdir) if os.path.exists(fits): shutil.copy(fits, newdir) if pipeline.lower() == 'everest2': s = k2plr.EVEREST(ID, version=2, sci_campaign=campaign) time = s.time flux = s.flux elif pipeline.lower() == 'everest1': s = k2plr.EVEREST(ID, version=1, sci_campaign=campaign) time = s.time flux = s.flux elif pipeline.lower() == 'k2sff': s = k2plr.K2SFF(ID, sci_campaign=campaign) time = s.time flux = s.fcor # Normalize to the median flux s = k2plr.EVEREST(ID, version=2, sci_campaign=campaign) flux *= np.nanmedian(s.flux) elif pipeline.lower() == 'k2sc': s = k2plr.K2SC(ID, sci_campaign=campaign) time = s.time flux = s.pdcflux elif pipeline.lower() == 'raw': s = k2plr.EVEREST(ID, version=2, raw=True, sci_campaign=campaign) time = s.time flux = s.flux else: raise ValueError('Invalid pipeline: `%s`.' % pipeline) return time, flux
Plots the de-trended flux for the given EPIC `ID` and for the specified `pipeline`.

def plot(ID, pipeline='everest2', show=True, campaign=None):
    '''
    Plots the de-trended flux for the given EPIC `ID` and for the
    specified `pipeline`.

    '''

    # Get the data
    time, flux = get(ID, pipeline=pipeline, campaign=campaign)

    # Remove nans
    mask = np.where(np.isnan(flux))[0]
    time = np.delete(time, mask)
    flux = np.delete(flux, mask)

    # Plot it
    fig, ax = pl.subplots(1, figsize=(10, 4))
    fig.subplots_adjust(bottom=0.15)
    ax.plot(time, flux, "k.", markersize=3, alpha=0.5)

    # Axis limits
    N = int(0.995 * len(flux))
    hi, lo = flux[np.argsort(flux)][[N, -N]]
    pad = (hi - lo) * 0.1
    ylim = (lo - pad, hi + pad)
    ax.set_ylim(ylim)

    # Show the CDPP
    from .k2 import CDPP
    ax.annotate('%.2f ppm' % CDPP(flux),
                xy=(0.98, 0.975), xycoords='axes fraction',
                ha='right', va='top', fontsize=12, color='r', zorder=99)

    # Appearance
    ax.margins(0, None)
    ax.set_xlabel("Time (BJD - 2454833)", fontsize=16)
    ax.set_ylabel("%s Flux" % pipeline.upper(), fontsize=16)
    # `Figure.canvas.set_window_title` was removed in Matplotlib 3.6;
    # go through the canvas manager instead
    fig.canvas.manager.set_window_title("%s: EPIC %d"
                                        % (pipeline.upper(), ID))

    if show:
        pl.show()
        pl.close()
    else:
        return fig, ax
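Hypothetical usage of the two functions above; the EPIC ID and campaign are illustrative only:

time, flux = get(201367065, pipeline='everest2', campaign=1)
plot(201367065, pipeline='k2sff', campaign=1)   # compare another pipeline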
Computes the CDPP for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". def get_cdpp(campaign, pipeline='everest2'): ''' Computes the CDPP for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/". ''' # Imports from .k2 import CDPP from .utils import GetK2Campaign # Check pipeline assert pipeline.lower() in Pipelines, 'Invalid pipeline: `%s`.' % pipeline # Create file if it doesn't exist file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'c%02d_%s.cdpp' % (int(campaign), pipeline)) if not os.path.exists(file): open(file, 'a').close() # Get all EPIC stars stars = GetK2Campaign(campaign, epics_only=True) nstars = len(stars) # Remove ones we've done with warnings.catch_warnings(): warnings.simplefilter("ignore") done = np.loadtxt(file, dtype=float) if len(done): done = [int(s) for s in done[:, 0]] stars = list(set(stars) - set(done)) n = len(done) + 1 # Open the output file with open(file, 'a', 1) as outfile: # Loop over all to get the CDPP for EPIC in stars: # Progress sys.stdout.write('\rRunning target %d/%d...' % (n, nstars)) sys.stdout.flush() n += 1 # Get the CDPP try: _, flux = get(EPIC, pipeline=pipeline, campaign=campaign) mask = np.where(np.isnan(flux))[0] flux = np.delete(flux, mask) cdpp = CDPP(flux) except (urllib.error.HTTPError, urllib.error.URLError, TypeError, ValueError, IndexError): print("{:>09d} {:>15.3f}".format(EPIC, 0), file=outfile) continue # Log to file print("{:>09d} {:>15.3f}".format(EPIC, cdpp), file=outfile)
Computes the number of outliers for a given `campaign` and a given `pipeline`. Stores the results in a file under "/missions/k2/tables/".

:param int sigma: The sigma level at which to clip outliers. Default 5

def get_outliers(campaign, pipeline='everest2', sigma=5):
    '''
    Computes the number of outliers for a given `campaign` and a given
    `pipeline`. Stores the results in a file under
    "/missions/k2/tables/".

    :param int sigma: The sigma level at which to clip outliers. Default 5

    '''

    # Imports
    from .utils import GetK2Campaign
    client = k2plr.API()

    # Check pipeline
    assert pipeline.lower() in Pipelines, \
        'Invalid pipeline: `%s`.' % pipeline

    # Create file if it doesn't exist
    file = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                        'c%02d_%s.out' % (int(campaign), pipeline))
    if not os.path.exists(file):
        open(file, 'a').close()

    # Get all EPIC stars
    stars = GetK2Campaign(campaign, epics_only=True)
    nstars = len(stars)

    # Remove ones we've done
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        done = np.loadtxt(file, dtype=float)
    if len(done):
        done = [int(s) for s in done[:, 0]]
    stars = list(set(stars) - set(done))
    n = len(done) + 1

    # Open the output file
    with open(file, 'a', 1) as outfile:

        # Loop over all to get the CDPP
        for EPIC in stars:

            # Progress
            sys.stdout.write('\rRunning target %d/%d...' % (n, nstars))
            sys.stdout.flush()
            n += 1

            # Get the number of outliers
            try:

                time, flux = get(EPIC, pipeline=pipeline,
                                 campaign=campaign)

                # Get the raw K2 data
                tpf = os.path.join(KPLR_ROOT, "data", "k2",
                                   "target_pixel_files", "%09d" % EPIC,
                                   "ktwo%09d-c%02d_lpd-targ.fits.gz"
                                   % (EPIC, campaign))
                if not os.path.exists(tpf):
                    client.k2_star(EPIC).get_target_pixel_files(fetch=True)
                with pyfits.open(tpf) as f:
                    k2_qual = np.array(f[1].data.field('QUALITY'),
                                       dtype=int)
                    k2_time = np.array(f[1].data.field('TIME'),
                                       dtype='float64')
                mask = []
                for b in [1, 2, 3, 4, 5, 6, 7, 8, 9,
                          11, 12, 13, 14, 16, 17]:
                    mask += list(np.where(k2_qual & 2 ** (b - 1))[0])
                mask = np.array(sorted(list(set(mask))))

                # Fill in missing cadences, if any
                tol = 0.005
                if not ((len(time) == len(k2_time)) and
                        (np.abs(time[0] - k2_time[0]) < tol) and
                        (np.abs(time[-1] - k2_time[-1]) < tol)):
                    ftmp = np.zeros_like(k2_time) * np.nan
                    j = 0
                    for i, t in enumerate(k2_time):
                        if np.abs(time[j] - t) < tol:
                            ftmp[i] = flux[j]
                            j += 1
                            if j == len(time) - 1:
                                break
                    flux = ftmp

                # Remove flagged cadences
                flux = np.delete(flux, mask)

                # Remove nans
                nanmask = np.where(np.isnan(flux))[0]
                flux = np.delete(flux, nanmask)

                # Iterative sigma clipping. The loop runs until no new
                # outliers are found; clipping indices are computed on
                # the reduced array, so they are projected back onto
                # the full array before being accumulated.
                inds = np.array([], dtype=int)
                m = -1
                while len(inds) > m:
                    m = len(inds)
                    keep = np.delete(np.arange(len(flux)), inds)
                    f = SavGol(flux[keep])
                    med = np.nanmedian(f)
                    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
                    bad = np.where((f > med + sigma * MAD) |
                                   (f < med - sigma * MAD))[0]
                    inds = np.append(inds, keep[bad])
                nout = len(inds)
                ntot = len(flux)

            except (urllib.error.HTTPError, urllib.error.URLError,
                    TypeError, ValueError, IndexError):
                print("{:>09d} {:>5d} {:>5d}".format(EPIC, -1, -1),
                      file=outfile)
                continue

            # Log to file
            print("{:>09d} {:>5d} {:>5d}".format(EPIC, nout, ntot),
                  file=outfile)
Returns the name of the current :py:class:`Detrender` subclass. def name(self): ''' Returns the name of the current :py:class:`Detrender` subclass. ''' if self.cadence == 'lc': return self.__class__.__name__ else: return '%s.sc' % self.__class__.__name__
Pre-compute the matrices :py:obj:`A` and :py:obj:`B` (cross-validation step only) for chunk :py:obj:`b`. def cv_precompute(self, mask, b): ''' Pre-compute the matrices :py:obj:`A` and :py:obj:`B` (cross-validation step only) for chunk :py:obj:`b`. ''' # Get current chunk and mask outliers m1 = self.get_masked_chunk(b) flux = self.fraw[m1] K = GetCovariance(self.kernel, self.kernel_params, self.time[m1], self.fraw_err[m1]) med = np.nanmedian(flux) # Now mask the validation set M = lambda x, axis = 0: np.delete(x, mask, axis=axis) m2 = M(m1) mK = M(M(K, axis=0), axis=1) f = M(flux) - med # Pre-compute the matrices A = [None for i in range(self.pld_order)] B = [None for i in range(self.pld_order)] for n in range(self.pld_order): # Only compute up to the current PLD order if self.lam_idx >= n: X2 = self.X(n, m2) X1 = self.X(n, m1) A[n] = np.dot(X2, X2.T) B[n] = np.dot(X1, X2.T) del X1, X2 if self.transit_model is None: C = 0 else: C = np.zeros((len(m2), len(m2))) mean_transit_model = med * \ np.sum([tm.depth * tm(self.time[m2]) for tm in self.transit_model], axis=0) f -= mean_transit_model for tm in self.transit_model: X2 = tm(self.time[m2]).reshape(-1, 1) C += tm.var_depth * np.dot(X2, X2.T) del X2 return A, B, C, mK, f, m1, m2
Compute the model (cross-validation step only) for chunk :py:obj:`b`. def cv_compute(self, b, A, B, C, mK, f, m1, m2): ''' Compute the model (cross-validation step only) for chunk :py:obj:`b`. ''' A = np.sum([l * a for l, a in zip(self.lam[b], A) if l is not None], axis=0) B = np.sum([l * b for l, b in zip(self.lam[b], B) if l is not None], axis=0) W = np.linalg.solve(mK + A + C, f) if self.transit_model is None: model = np.dot(B, W) else: w_pld = np.concatenate([l * np.dot(self.X(n, m2).T, W) for n, l in enumerate(self.lam[b]) if l is not None]) model = np.dot(np.hstack( [self.X(n, m1) for n, l in enumerate(self.lam[b]) if l is not None]), w_pld) model -= np.nanmedian(model) return model
Performs iterative sigma clipping to get outliers. def get_outliers(self): ''' Performs iterative sigma clipping to get outliers. ''' log.info("Clipping outliers...") log.info('Iter %d/%d: %d outliers' % (0, self.oiter, len(self.outmask))) def M(x): return np.delete(x, np.concatenate( [self.nanmask, self.badmask, self.transitmask]), axis=0) t = M(self.time) outmask = [np.array([-1]), np.array(self.outmask)] # Loop as long as the last two outlier arrays aren't equal while not np.array_equal(outmask[-2], outmask[-1]): # Check if we've done this too many times if len(outmask) - 1 > self.oiter: log.error('Maximum number of iterations in ' + '``get_outliers()`` exceeded. Skipping...') break # Check if we're going in circles if np.any([np.array_equal(outmask[-1], i) for i in outmask[:-1]]): log.error('Function ``get_outliers()`` ' + 'is going in circles. Skipping...') break # Compute the model to get the flux self.compute() # Get the outliers f = SavGol(M(self.flux)) med = np.nanmedian(f) MAD = 1.4826 * np.nanmedian(np.abs(f - med)) inds = np.where((f > med + self.osigma * MAD) | (f < med - self.osigma * MAD))[0] # Project onto unmasked time array inds = np.array([np.argmax(self.time == t[i]) for i in inds]) self.outmask = np.array(inds, dtype=int) # Add them to the running list outmask.append(np.array(inds)) # Log log.info('Iter %d/%d: %d outliers' % (len(outmask) - 2, self.oiter, len(self.outmask)))
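A minimal standalone sketch of a single clipping pass like the one above, assuming :py:func:`SavGol` subtracts a second-order Savitzky-Golay baseline and restores the median (as in the :py:obj:`everest` source); the window length is illustrative and must be odd and no longer than the flux array:

import numpy as np
from scipy.signal import savgol_filter

def clip_once(flux, sigma=5., win=49):
    # Detrend with a Savitzky-Golay baseline, then flag points more
    # than `sigma` MADs from the median of the detrended flux
    f = flux - savgol_filter(flux, win, 2) + np.nanmedian(flux)
    med = np.nanmedian(f)
    mad = 1.4826 * np.nanmedian(np.abs(f - med))
    return np.where((f > med + sigma * mad) |
                    (f < med - sigma * mad))[0]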
Returns the index of :py:attr:`self.lambda_arr` that minimizes the validation scatter, preferring the lowest value of :py:obj:`lambda` that is still within a fractional tolerance :py:attr:`self.leps` of each segment's minimum.

:param numpy.ndarray validation: The scatter in the validation set \
    as a function of :py:obj:`lambda`

def optimize_lambda(self, validation):
    '''
    Returns the index of :py:attr:`self.lambda_arr` that minimizes the
    validation scatter, preferring the lowest value of :py:obj:`lambda`
    that is still within a fractional tolerance :py:attr:`self.leps` of
    each segment's minimum.

    :param numpy.ndarray validation: The scatter in the validation set \
        as a function of :py:obj:`lambda`

    '''

    maxm = 0
    minr = len(validation)
    for n in range(validation.shape[1]):
        # The index that minimizes the scatter for this segment
        m = np.nanargmin(validation[:, n])
        if m > maxm:
            # The largest of the `m`s.
            maxm = m
        # The largest index with validation scatter within
        # `self.leps` of the minimum for this segment
        r = np.where((validation[:, n] - validation[m, n]) /
                     validation[m, n] <= self.leps)[0][-1]
        if r < minr:
            # The smallest of the `r`s
            minr = r
    return min(maxm, minr)
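A worked toy example of the selection rule, using a standalone mirror of the method for illustration only, with an assumed tolerance `leps = 0.05`. Segment 0 attains its minimum at index 1 but stays within 5% of it through index 2; segment 1 attains its minimum at index 2. Hence `maxm = 2`, `minr = 2`, and the chosen index is 2.

import numpy as np

def pick_lambda_index(validation, leps=0.05):
    # Standalone mirror of `optimize_lambda`, for illustration
    maxm, minr = 0, len(validation)
    for n in range(validation.shape[1]):
        m = np.nanargmin(validation[:, n])
        maxm = max(maxm, m)
        r = np.where((validation[:, n] - validation[m, n]) /
                     validation[m, n] <= leps)[0][-1]
        minr = min(minr, r)
    return min(maxm, minr)

validation = np.array([[3.00, 5.0],
                       [1.00, 3.0],
                       [1.02, 1.0],
                       [2.00, 4.0]])
print(pick_lambda_index(validation))   # -> 2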
Cross-validate to find the optimal value of :py:obj:`lambda`. :param ax: The current :py:obj:`matplotlib.pyplot` axis instance to \ plot the cross-validation results. :param str info: The label to show in the bottom right-hand corner \ of the plot. Default `''` def cross_validate(self, ax, info=''): ''' Cross-validate to find the optimal value of :py:obj:`lambda`. :param ax: The current :py:obj:`matplotlib.pyplot` axis instance to \ plot the cross-validation results. :param str info: The label to show in the bottom right-hand corner \ of the plot. Default `''` ''' # Loop over all chunks ax = np.atleast_1d(ax) for b, brkpt in enumerate(self.breakpoints): log.info("Cross-validating chunk %d/%d..." % (b + 1, len(self.breakpoints))) med_training = np.zeros_like(self.lambda_arr) med_validation = np.zeros_like(self.lambda_arr) # Mask for current chunk m = self.get_masked_chunk(b) # Check that we have enough data if len(m) < 3 * self.cdivs: self.cdppv_arr[b] = np.nan self.lam[b][self.lam_idx] = 0. log.info( "Insufficient data to run cross-validation on this chunk.") continue # Mask transits and outliers time = self.time[m] flux = self.fraw[m] ferr = self.fraw_err[m] med = np.nanmedian(flux) # The precision in the validation set validation = [[] for k, _ in enumerate(self.lambda_arr)] # The precision in the training set training = [[] for k, _ in enumerate(self.lambda_arr)] # Setup the GP gp = GP(self.kernel, self.kernel_params, white=False) gp.compute(time, ferr) # The masks masks = list(Chunks(np.arange(0, len(time)), len(time) // self.cdivs)) # Loop over the different masks for i, mask in enumerate(masks): log.info("Section %d/%d..." % (i + 1, len(masks))) # Pre-compute (training set) pre_t = self.cv_precompute([], b) # Pre-compute (validation set) pre_v = self.cv_precompute(mask, b) # Iterate over lambda for k, lam in enumerate(self.lambda_arr): # Update the lambda matrix self.lam[b][self.lam_idx] = lam # Training set model = self.cv_compute(b, *pre_t) training[k].append( self.fobj(flux - model, med, time, gp, mask)) # Validation set model = self.cv_compute(b, *pre_v) validation[k].append( self.fobj(flux - model, med, time, gp, mask)) # Finalize training = np.array(training) validation = np.array(validation) for k, _ in enumerate(self.lambda_arr): # Take the mean med_validation[k] = np.nanmean(validation[k]) med_training[k] = np.nanmean(training[k]) # Compute best model i = self.optimize_lambda(validation) v_best = med_validation[i] t_best = med_training[i] self.cdppv_arr[b] = v_best / t_best self.lam[b][self.lam_idx] = self.lambda_arr[i] log.info("Found optimum solution at log(lambda) = %.1f." % np.log10(self.lam[b][self.lam_idx])) # Plotting: There's not enough space in the DVS to show the # cross-val results for more than three light curve segments. 
if len(self.breakpoints) <= 3: # Plotting hack: first x tick will be -infty lambda_arr = np.array(self.lambda_arr) lambda_arr[0] = 10 ** (np.log10(lambda_arr[1]) - 3) # Plot cross-val for n in range(len(masks)): ax[b].plot(np.log10(lambda_arr), validation[:, n], 'r-', alpha=0.3) ax[b].plot(np.log10(lambda_arr), med_training, 'b-', lw=1., alpha=1) ax[b].plot(np.log10(lambda_arr), med_validation, 'r-', lw=1., alpha=1) ax[b].axvline(np.log10(self.lam[b][self.lam_idx]), color='k', ls='--', lw=0.75, alpha=0.75) ax[b].axhline(v_best, color='k', ls='--', lw=0.75, alpha=0.75) ax[b].set_ylabel(r'Scatter (ppm)', fontsize=5) hi = np.max(validation[0]) lo = np.min(training) rng = (hi - lo) ax[b].set_ylim(lo - 0.15 * rng, hi + 0.15 * rng) if rng > 2: ax[b].get_yaxis().set_major_formatter(Formatter.CDPP) ax[b].get_yaxis().set_major_locator( MaxNLocator(4, integer=True)) elif rng > 0.2: ax[b].get_yaxis().set_major_formatter(Formatter.CDPP1F) ax[b].get_yaxis().set_major_locator(MaxNLocator(4)) else: ax[b].get_yaxis().set_major_formatter(Formatter.CDPP2F) ax[b].get_yaxis().set_major_locator(MaxNLocator(4)) # Fix the x ticks xticks = [np.log10(lambda_arr[0])] + list(np.linspace( np.log10(lambda_arr[1]), np.log10(lambda_arr[-1]), 6)) ax[b].set_xticks(xticks) ax[b].set_xticklabels(['' for x in xticks]) pad = 0.01 * \ (np.log10(lambda_arr[-1]) - np.log10(lambda_arr[0])) ax[b].set_xlim(np.log10(lambda_arr[0]) - pad, np.log10(lambda_arr[-1]) + pad) ax[b].annotate('%s.%d' % (info, b), xy=(0.02, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=7, alpha=0.25, fontweight='bold') # Finally, compute the model self.compute() # Tidy up if len(ax) == 2: ax[0].xaxis.set_ticks_position('top') for axis in ax[1:]: axis.spines['top'].set_visible(False) axis.xaxis.set_ticks_position('bottom') if len(self.breakpoints) <= 3: # A hack to mark the first xtick as -infty labels = ['%.1f' % x for x in xticks] labels[0] = r'$-\infty$' ax[-1].set_xticklabels(labels) ax[-1].set_xlabel(r'Log $\Lambda$', fontsize=5) else: # We're just going to plot lambda as a function of chunk number bs = np.arange(len(self.breakpoints)) ax[0].plot(bs + 1, [np.log10(self.lam[b][self.lam_idx]) for b in bs], 'r.') ax[0].plot(bs + 1, [np.log10(self.lam[b][self.lam_idx]) for b in bs], 'r-', alpha=0.25) ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5) ax[0].margins(0.1, 0.1) ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1)) ax[0].set_xticklabels([]) # Now plot the CDPP and approximate validation CDPP cdpp_arr = self.get_cdpp_arr() cdppv_arr = self.cdppv_arr * cdpp_arr ax[1].plot(bs + 1, cdpp_arr, 'b.') ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25) ax[1].plot(bs + 1, cdppv_arr, 'r.') ax[1].plot(bs + 1, cdppv_arr, 'r-', alpha=0.25) ax[1].margins(0.1, 0.1) ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5) ax[1].set_xlabel(r'Chunk', fontsize=5) if len(self.breakpoints) < 15: ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1)) else: ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1, 2))
Computes the ideal y-axis limits for the light curve plot. Attempts to set the limits equal to those of the raw light curve, but if more than 1% of the flux lies either above or below these limits, auto-expands to include those points. At the end, adds 5% padding to both the top and the bottom. def get_ylim(self): ''' Computes the ideal y-axis limits for the light curve plot. Attempts to set the limits equal to those of the raw light curve, but if more than 1% of the flux lies either above or below these limits, auto-expands to include those points. At the end, adds 5% padding to both the top and the bottom. ''' bn = np.array( list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int) fraw = np.delete(self.fraw, bn) lo, hi = fraw[np.argsort(fraw)][[3, -3]] flux = np.delete(self.flux, bn) fsort = flux[np.argsort(flux)] if fsort[int(0.01 * len(fsort))] < lo: lo = fsort[int(0.01 * len(fsort))] if fsort[int(0.99 * len(fsort))] > hi: hi = fsort[int(0.99 * len(fsort))] pad = (hi - lo) * 0.05 ylim = (lo - pad, hi + pad) return ylim
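The same limit logic in isolation: start from the raw-flux extremes (dropping the three most extreme points at either end), expand if more than 1% of the de-trended flux falls outside, then pad by 5%. A toy version on hypothetical arrays:

import numpy as np

fraw = np.random.randn(1000)                  # stand-in raw flux
flux = fraw + 0.5 * np.random.randn(1000)     # stand-in de-trended flux
lo, hi = np.sort(fraw)[[3, -3]]               # raw limits, 3 extrema dropped
fsort = np.sort(flux)
lo = min(lo, fsort[int(0.01 * len(fsort))])   # expand down if >1% lies below
hi = max(hi, fsort[int(0.99 * len(fsort))])   # expand up if >1% lies above
pad = 0.05 * (hi - lo)
ylim = (lo - pad, hi + pad)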
Plots the current light curve. This is called at several stages to plot the de-trending progress as a function of the different *PLD* orders. :param ax: The current :py:obj:`matplotlib.pyplot` axis instance :param str info_left: Information to display at the left of the \ plot. Default `''` :param str info_right: Information to display at the right of the \ plot. Default `''` :param str color: The color of the data points. Default `'b'` def plot_lc(self, ax, info_left='', info_right='', color='b'): ''' Plots the current light curve. This is called at several stages to plot the de-trending progress as a function of the different *PLD* orders. :param ax: The current :py:obj:`matplotlib.pyplot` axis instance :param str info_left: Information to display at the left of the \ plot. Default `''` :param str info_right: Information to display at the right of the \ plot. Default `''` :param str color: The color of the data points. Default `'b'` ''' # Plot if (self.cadence == 'lc') or (len(self.time) < 4000): ax.plot(self.apply_mask(self.time), self.apply_mask(self.flux), ls='none', marker='.', color=color, markersize=2, alpha=0.5) ax.plot(self.time[self.transitmask], self.flux[self.transitmask], ls='none', marker='.', color=color, markersize=2, alpha=0.5) else: ax.plot(self.apply_mask(self.time), self.apply_mask( self.flux), ls='none', marker='.', color=color, markersize=2, alpha=0.03, zorder=-1) ax.plot(self.time[self.transitmask], self.flux[self.transitmask], ls='none', marker='.', color=color, markersize=2, alpha=0.03, zorder=-1) ax.set_rasterization_zorder(0) ylim = self.get_ylim() # Plot the outliers, but not the NaNs badmask = [i for i in self.badmask if i not in self.nanmask] def O1(x): return x[self.outmask] def O2(x): return x[badmask] if self.cadence == 'lc': ax.plot(O1(self.time), O1(self.flux), ls='none', color="#777777", marker='.', markersize=2, alpha=0.5) ax.plot(O2(self.time), O2(self.flux), 'r.', markersize=2, alpha=0.25) else: ax.plot(O1(self.time), O1(self.flux), ls='none', color="#777777", marker='.', markersize=2, alpha=0.25, zorder=-1) ax.plot(O2(self.time), O2(self.flux), 'r.', markersize=2, alpha=0.125, zorder=-1) for i in np.where(self.flux < ylim[0])[0]: if i in badmask: color = "#ffcccc" elif i in self.outmask: color = "#cccccc" elif i in self.nanmask: continue else: color = "#ccccff" ax.annotate('', xy=(self.time[i], ylim[0]), xycoords='data', xytext=(0, 15), textcoords='offset points', arrowprops=dict(arrowstyle="-|>", color=color)) for i in np.where(self.flux > ylim[1])[0]: if i in badmask: color = "#ffcccc" elif i in self.outmask: color = "#cccccc" elif i in self.nanmask: continue else: color = "#ccccff" ax.annotate('', xy=(self.time[i], ylim[1]), xycoords='data', xytext=(0, -15), textcoords='offset points', arrowprops=dict(arrowstyle="-|>", color=color)) # Plot the breakpoints for brkpt in self.breakpoints[:-1]: if len(self.breakpoints) <= 5: ax.axvline(self.time[brkpt], color='r', ls='--', alpha=0.5) else: ax.axvline(self.time[brkpt], color='r', ls='-', alpha=0.025) # Appearance if len(self.cdpp_arr) == 2: ax.annotate('%.2f ppm' % self.cdpp_arr[0], xy=(0.02, 0.975), xycoords='axes fraction', ha='left', va='top', fontsize=10) ax.annotate('%.2f ppm' % self.cdpp_arr[1], xy=(0.98, 0.975), xycoords='axes fraction', ha='right', va='top', fontsize=10) elif len(self.cdpp_arr) < 6: for n in range(len(self.cdpp_arr)): if n > 0: x = (self.time[self.breakpoints[n - 1]] - self.time[0] ) / (self.time[-1] - self.time[0]) + 0.02 else: x = 0.02 ax.annotate('%.2f ppm' % 
self.cdpp_arr[n], xy=(x, 0.975), xycoords='axes fraction', ha='left', va='top', fontsize=8) else: ax.annotate('%.2f ppm' % self.cdpp, xy=(0.02, 0.975), xycoords='axes fraction', ha='left', va='top', fontsize=10) ax.annotate(info_right, xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax.annotate(info_left, xy=(0.02, 0.025), xycoords='axes fraction', ha='left', va='bottom', fontsize=8) ax.set_xlabel(r'Time (%s)' % self._mission.TIMEUNITS, fontsize=5) ax.margins(0.01, 0.1) ax.set_ylim(*ylim) ax.get_yaxis().set_major_formatter(Formatter.Flux)
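Points pushed off-scale by the clipped y limits are flagged with small arrows at the axis edge rather than being allowed to distort the plot. A minimal standalone version of that trick on hypothetical data:

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 10, 500)
f = np.random.randn(500)
f[42] = 9.0                                   # one point far off-scale
ylim = (-4, 4)
fig, ax = plt.subplots()
ax.plot(t, f, 'k.', markersize=2)
ax.set_ylim(*ylim)
for i in np.where(f > ylim[1])[0]:            # arrows for clipped points
    ax.annotate('', xy=(t[i], ylim[1]), xycoords='data',
                xytext=(0, -15), textcoords='offset points',
                arrowprops=dict(arrowstyle="-|>", color="#ccccff"))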
Plots the final de-trended light curve. def plot_final(self, ax): ''' Plots the final de-trended light curve. ''' # Plot the light curve bnmask = np.array( list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int) def M(x): return np.delete(x, bnmask) if (self.cadence == 'lc') or (len(self.time) < 4000): ax.plot(M(self.time), M(self.flux), ls='none', marker='.', color='k', markersize=2, alpha=0.3) else: ax.plot(M(self.time), M(self.flux), ls='none', marker='.', color='k', markersize=2, alpha=0.03, zorder=-1) ax.set_rasterization_zorder(0) # Hack: Plot invisible first and last points to ensure # the x axis limits are the # same in the other plots, where we also plot outliers! ax.plot(self.time[0], np.nanmedian(M(self.flux)), marker='.', alpha=0) ax.plot(self.time[-1], np.nanmedian(M(self.flux)), marker='.', alpha=0) # Plot the GP (long cadence only) if self.cadence == 'lc': gp = GP(self.kernel, self.kernel_params, white=False) gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err)) med = np.nanmedian(self.apply_mask(self.flux)) y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time) y += med ax.plot(M(self.time), M(y), 'r-', lw=0.5, alpha=0.5) # Compute the CDPP of the GP-detrended flux self.cdppg = self._mission.CDPP(self.apply_mask( self.flux - y + med), cadence=self.cadence) else: # We're not going to calculate this self.cdppg = 0. # Appearance ax.annotate('Final', xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax.margins(0.01, 0.1) # Get y lims that bound 99% of the flux flux = np.delete(self.flux, bnmask) N = int(0.995 * len(flux)) hi, lo = flux[np.argsort(flux)][[N, -N]] fsort = flux[np.argsort(flux)] pad = (hi - lo) * 0.1 ylim = (lo - pad, hi + pad) ax.set_ylim(ylim) ax.get_yaxis().set_major_formatter(Formatter.Flux)
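The red overlay is a Gaussian process conditioned on the masked, median-subtracted flux and predicted at every cadence. A rough equivalent with george (version >= 0.3 and a Matern-3/2 kernel are assumptions here; in the class the kernel and its parameters come from self.kernel and self.kernel_params):

import numpy as np
import george
from george.kernels import Matern32Kernel

def gp_overlay(time, flux, ferr, amp=1.0, tau=30.0):
    # Condition a GP on the median-subtracted flux, predict everywhere
    med = np.nanmedian(flux)
    gp = george.GP(amp ** 2 * Matern32Kernel(tau ** 2))
    gp.compute(time, ferr)                    # factorize K + diag(ferr^2)
    mu = gp.predict(flux - med, time, return_cov=False)
    return mu + med                           # prediction in flux units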
Plots the final CBV-corrected light curve. def plot_cbv(self, ax, flux, info, show_cbv=False): ''' Plots the final CBV-corrected light curve. ''' # Plot the light curve bnmask = np.array( list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int) def M(x): return np.delete(x, bnmask) if self.cadence == 'lc': ax.plot(M(self.time), M(flux), ls='none', marker='.', color='k', markersize=2, alpha=0.45) else: ax.plot(M(self.time), M(flux), ls='none', marker='.', color='k', markersize=2, alpha=0.03, zorder=-1) ax.set_rasterization_zorder(0) # Hack: Plot invisible first and last points to ensure # the x axis limits are the # same in the other plots, where we also plot outliers! ax.plot(self.time[0], np.nanmedian(M(flux)), marker='.', alpha=0) ax.plot(self.time[-1], np.nanmedian(M(flux)), marker='.', alpha=0) # Show CBV fit? if show_cbv: ax.plot(self.time, self._mission.FitCBVs( self) + np.nanmedian(flux), 'r-', alpha=0.2) # Appearance ax.annotate(info, xy=(0.98, 0.025), xycoords='axes fraction', ha='right', va='bottom', fontsize=10, alpha=0.5, fontweight='bold') ax.margins(0.01, 0.1) # Get y lims that bound 99% of the flux flux = np.delete(flux, bnmask) N = int(0.995 * len(flux)) hi, lo = flux[np.argsort(flux)][[N, -N]] fsort = flux[np.argsort(flux)] pad = (hi - lo) * 0.2 ylim = (lo - pad, hi + pad) ax.set_ylim(ylim) ax.get_yaxis().set_major_formatter(Formatter.Flux) ax.set_xlabel(r'Time (%s)' % self._mission.TIMEUNITS, fontsize=9) for tick in ax.get_xticklabels() + ax.get_yticklabels(): tick.set_fontsize(7)
Loads the target pixel file. def load_tpf(self): ''' Loads the target pixel file. ''' if not self.loaded: if self._data is not None: data = self._data else: data = self._mission.GetData( self.ID, season=self.season, cadence=self.cadence, clobber=self.clobber_tpf, aperture_name=self.aperture_name, saturated_aperture_name=self.saturated_aperture_name, max_pixels=self.max_pixels, saturation_tolerance=self.saturation_tolerance, get_hires=self.get_hires, get_nearby=self.get_nearby) if data is None: raise Exception("Unable to retrieve target data.") self.cadn = data.cadn self.time = data.time self.model = np.zeros_like(self.time) self.fpix = data.fpix self.fraw = np.sum(self.fpix, axis=1) self.fpix_err = data.fpix_err self.fraw_err = np.sqrt(np.sum(self.fpix_err ** 2, axis=1)) self.nanmask = data.nanmask self.badmask = data.badmask self.transitmask = np.array([], dtype=int) self.outmask = np.array([], dtype=int) self.aperture = data.aperture self.aperture_name = data.aperture_name self.apertures = data.apertures self.quality = data.quality self.Xpos = data.Xpos self.Ypos = data.Ypos self.mag = data.mag self.pixel_images = data.pixel_images self.nearby = data.nearby self.hires = data.hires self.saturated = data.saturated self.meta = data.meta self.bkg = data.bkg # Update the last breakpoint to the correct value self.breakpoints[-1] = len(self.time) - 1 # Get PLD normalization self.get_norm() self.loaded = True
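The raw SAP flux and its uncertainty follow directly from the pixel-level arrays: the flux is the sum over pixels at each cadence, and the per-pixel errors add in quadrature. In isolation, with stand-in data:

import numpy as np

fpix = np.random.rand(100, 25)                # (cadences, pixels) stand-in
fpix_err = 0.01 * np.sqrt(fpix)
fraw = np.sum(fpix, axis=1)                   # SAP flux: sum over pixels
fraw_err = np.sqrt(np.sum(fpix_err ** 2, axis=1))  # quadrature sum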
Loads a saved version of the model. def load_model(self, name=None): ''' Loads a saved version of the model. ''' if self.clobber: return False if name is None: name = self.name file = os.path.join(self.dir, '%s.npz' % name) if os.path.exists(file): if not self.is_parent: log.info("Loading '%s.npz'..." % name) try: data = np.load(file) for key in data.keys(): try: setattr(self, key, data[key][()]) except NotImplementedError: pass # HACK: Backwards compatibility. Previous version stored # the CDPP in the `cdpp6` # and `cdpp6_arr` attributes. Let's move them over. if hasattr(self, 'cdpp6'): self.cdpp = self.cdpp6 del self.cdpp6 if hasattr(self, 'cdpp6_arr'): self.cdpp_arr = np.array(self.cdpp6_arr) del self.cdpp6_arr if hasattr(self, 'gppp'): self.cdppg = self.gppp del self.gppp # HACK: At one point we were saving the figure instances, # so loading the .npz # opened a plotting window. I don't think this is the case # any more, so this # next line should be removed in the future... pl.close() return True except: log.warn("Error loading '%s.npz'." % name) exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): ln = line.replace('\n', '') log.warn(ln) os.rename(file, file + '.bad') if self.is_parent: raise Exception( 'Unable to load `%s` model for target %d.' % (self.name, self.ID)) return False
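The persistence scheme is a plain .npz round trip: every attribute is stored by np.savez and restored with setattr, with the [()] indexing unwrapping zero-dimensional object arrays back into Python scalars and dicts. A self-contained sketch (file name hypothetical):

import numpy as np

class Model:
    pass

m = Model()
m.cdpp, m.flux = 23.4, np.arange(5.0)
np.savez('model.npz', **m.__dict__)           # one array per attribute

m2 = Model()
with np.load('model.npz', allow_pickle=True) as data:
    for key in data.keys():
        # [()] unwraps 0-d arrays back into scalars/dicts; allow_pickle
        # is needed when object-typed attributes (e.g. dicts) were saved
        setattr(m2, key, data[key][()])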
Saves all of the de-trending information to disk in an `npz` file and saves the DVS as a `pdf`. def save_model(self): ''' Saves all of the de-trending information to disk in an `npz` file and saves the DVS as a `pdf`. ''' # Save the data log.info("Saving data to '%s.npz'..." % self.name) d = dict(self.__dict__) d.pop('_weights', None) d.pop('_A', None) d.pop('_B', None) d.pop('_f', None) d.pop('_mK', None) d.pop('K', None) d.pop('dvs', None) d.pop('clobber', None) d.pop('clobber_tpf', None) d.pop('_mission', None) d.pop('debug', None) d.pop('transit_model', None) d.pop('_transit_model', None) np.savez(os.path.join(self.dir, self.name + '.npz'), **d) # Save the DVS pdf = PdfPages(os.path.join(self.dir, self.name + '.pdf')) pdf.savefig(self.dvs.fig) pl.close(self.dvs.fig) d = pdf.infodict() d['Title'] = 'EVEREST: %s de-trending of %s %d' % ( self.name, self._mission.IDSTRING, self.ID) d['Author'] = 'Rodrigo Luger' pdf.close()
A custom exception handler. :param pdb: If :py:obj:`True`, enters PDB post-mortem \ mode for debugging. def exception_handler(self, pdb): ''' A custom exception handler. :param pdb: If :py:obj:`True`, enters PDB post-mortem \ mode for debugging. ''' # Grab the exception exctype, value, tb = sys.exc_info() # Log the error and create a .err file errfile = os.path.join(self.dir, self.name + '.err') with open(errfile, 'w') as f: for line in traceback.format_exception_only(exctype, value): ln = line.replace('\n', '') log.error(ln) print(ln, file=f) for line in traceback.format_tb(tb): ln = line.replace('\n', '') log.error(ln) print(ln, file=f) # Re-raise? if pdb: raise
Calls :py:func:`gp.GetKernelParams` to optimize the GP and obtain the covariance matrix for the regression. def update_gp(self): ''' Calls :py:func:`gp.GetKernelParams` to optimize the GP and obtain the covariance matrix for the regression. ''' self.kernel_params = GetKernelParams(self.time, self.flux, self.fraw_err, mask=self.mask, guess=self.kernel_params, kernel=self.kernel, giter=self.giter, gmaxf=self.gmaxf)
Initializes the covariance matrix with a guess at the GP kernel parameters. def init_kernel(self): ''' Initializes the covariance matrix with a guess at the GP kernel parameters. ''' if self.kernel_params is None: X = self.apply_mask(self.fpix / self.flux.reshape(-1, 1)) y = self.apply_mask(self.flux) - np.dot(X, np.linalg.solve( np.dot(X.T, X), np.dot(X.T, self.apply_mask(self.flux)))) white = np.nanmedian([np.nanstd(c) for c in Chunks(y, 13)]) amp = self.gp_factor * np.nanstd(y) tau = 30.0 if self.kernel == 'Basic': self.kernel_params = [white, amp, tau] elif self.kernel == 'QuasiPeriodic': self.kernel_params = [white, amp, 1., 20.]
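Stripped of the class, the guess amounts to regressing the flux onto the normalized pixel fluxes (a first-order PLD fit via the normal equations) and reading the noise terms off the residuals. In the sketch below, np.array_split stands in for the package's Chunks helper and the gp_factor default is a placeholder:

import numpy as np

def guess_kernel_params(fpix, flux, gp_factor=100.0):
    # Residuals of a first-order PLD fit via the normal equations
    X = fpix / flux.reshape(-1, 1)
    beta = np.linalg.solve(X.T @ X, X.T @ flux)
    resid = flux - X @ beta
    # White noise: median scatter over short (~13-point) chunks
    chunks = np.array_split(resid, max(1, len(resid) // 13))
    white = np.nanmedian([np.nanstd(c) for c in chunks])
    amp = gp_factor * np.nanstd(resid)        # amplitude guess
    return [white, amp, 30.0]                 # [white, amp, timescale]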
Runs the de-trending step. def run(self): ''' Runs the de-trending step. ''' try: # Load raw data log.info("Loading target data...") self.load_tpf() self.mask_planets() self.plot_aperture([self.dvs.top_right() for i in range(4)]) self.init_kernel() M = self.apply_mask(np.arange(len(self.time))) self.cdppr_arr = self.get_cdpp_arr() self.cdpp_arr = np.array(self.cdppr_arr) self.cdppv_arr = np.array(self.cdppr_arr) self.cdppr = self.get_cdpp() self.cdpp = self.cdppr self.cdppv = self.cdppr log.info("%s (Raw): CDPP = %s" % (self.name, self.cdpps)) self.plot_lc(self.dvs.left(), info_right='Raw', color='k') # Loop for n in range(self.pld_order): self.lam_idx += 1 self.get_outliers() if n > 0 and self.optimize_gp: self.update_gp() self.cross_validate(self.dvs.right(), info='CV%d' % n) self.cdpp_arr = self.get_cdpp_arr() self.cdppv_arr *= self.cdpp_arr self.cdpp = self.get_cdpp() self.cdppv = np.nanmean(self.cdppv_arr) log.info("%s (%d/%d): CDPP = %s" % (self.name, n + 1, self.pld_order, self.cdpps)) self.plot_lc(self.dvs.left(), info_right='LC%d' % ( n + 1), info_left='%d outliers' % len(self.outmask)) # Save self.finalize() self.plot_final(self.dvs.top_left()) self.plot_info(self.dvs) self.save_model() except: self.exception_handler(self.debug)
Correct the light curve with the CBVs, generate a cover page for the DVS figure, and produce a FITS file for publication. def publish(self, **kwargs): ''' Correct the light curve with the CBVs, generate a cover page for the DVS figure, and produce a FITS file for publication. ''' try: # HACK: Force these params for publication self.cbv_win = 999 self.cbv_order = 3 self.cbv_num = 1 # Get the CBVs self._mission.GetTargetCBVs(self) # Plot the final corrected light curve cbv = CBV() self.plot_info(cbv) self.plot_cbv(cbv.body(), self.fcor, 'Corrected') self.plot_cbv(cbv.body(), self.flux, 'De-trended', show_cbv=True) self.plot_cbv(cbv.body(), self.fraw, 'Raw') # Save the CBV pdf pdf = PdfPages(os.path.join(self.dir, 'cbv.pdf')) pdf.savefig(cbv.fig) pl.close(cbv.fig) d = pdf.infodict() d['Title'] = 'EVEREST: %s de-trending of %s %d' % ( self.name, self._mission.IDSTRING, self.ID) d['Author'] = 'Rodrigo Luger' pdf.close() # Now merge the two PDFs assert os.path.exists(os.path.join( self.dir, self.name + '.pdf')), \ "Unable to locate %s.pdf." % self.name output = PdfFileWriter() pdfOne = PdfFileReader(os.path.join(self.dir, 'cbv.pdf')) pdfTwo = PdfFileReader(os.path.join(self.dir, self.name + '.pdf')) # Add the CBV page output.addPage(pdfOne.getPage(0)) # Add the original DVS page output.addPage(pdfTwo.getPage(pdfTwo.numPages - 1)) # Write the final PDF outputStream = open(os.path.join(self.dir, self._mission.DVSFile( self.ID, self.season, self.cadence)), "wb") output.write(outputStream) outputStream.close() os.remove(os.path.join(self.dir, 'cbv.pdf')) # Make the FITS file MakeFITS(self) except: self.exception_handler(self.debug)
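The PDF merge uses the legacy PyPDF2 names (PdfFileReader, addPage, numPages), which were removed in PyPDF2 3.x. Under the modern pypdf API the same two-page merge would look roughly like this (file names hypothetical):

from pypdf import PdfReader, PdfWriter        # 'PyPDF2' in older installs

writer = PdfWriter()
cbv = PdfReader('cbv.pdf')
dvs = PdfReader('target.pdf')
writer.add_page(cbv.pages[0])                 # CBV cover page first
writer.add_page(dvs.pages[-1])                # then the final DVS page
with open('merged.pdf', 'wb') as out:
    writer.write(out)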
This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param tuple cdpp_range: If :py:obj:`parent_model` is set, \ neighbors are selected only if \ their de-trended CDPPs fall within this range. Default `None` :param tuple mag_range: Only select neighbors whose magnitudes are \ within this range. Default (11., 13.) :param int neighbors: The number of neighboring stars to use in \ the de-trending. The higher this number, the more signals \ there are and hence the more de-trending information there is. \ However, the neighboring star signals are regularized together \ with the target's signals, so adding too many neighbors will \ inevitably reduce the contribution of the target's own \ signals, which may reduce performance. Default `10` :param str parent_model: By default, :py:class:`nPLD` is run in \ stand-alone mode. The neighbor signals are computed directly \ from their TPFs, so there is no need to have run *PLD* on them \ beforehand. However, if :py:obj:`parent_model` \ is set, :py:class:`nPLD` will use information from the \ :py:obj:`parent_model` model of each neighboring star when \ de-trending. This is particularly useful for identifying \ outliers in the neighbor signals and preventing them from \ polluting the current target. Setting :py:obj:`parent_model` \ to :py:class:`rPLD`, for instance, will use the \ outlier information in the :py:class:`rPLD` model of the \ neighbors (this must have been run ahead of time). \ Note, however, that tests with *K2* data show that including \ outliers in the neighbor signals actually \ *improves* the performance, since many of these outliers \ are associated with events such as thruster firings and are \ present in all light curves, and therefore *help* in the \ de-trending. Default `None` .. note:: Optionally, the :py:obj:`neighbors` may be specified \ directly as a list of target IDs to use. \ In this case, users may also provide a list of \ :py:class:`everest.utils.DataContainer` instances \ corresponding to each of the neighbors in the \ :py:obj:`neighbors_data` kwarg. def setup(self, **kwargs): ''' This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param tuple cdpp_range: If :py:obj:`parent_model` is set, \ neighbors are selected only if \ their de-trended CDPPs fall within this range. Default `None` :param tuple mag_range: Only select neighbors whose magnitudes are \ within this range. Default (11., 13.) :param int neighbors: The number of neighboring stars to use in \ the de-trending. The higher this number, the more signals \ there are and hence the more de-trending information there is. \ However, the neighboring star signals are regularized together \ with the target's signals, so adding too many neighbors will \ inevitably reduce the contribution of the target's own \ signals, which may reduce performance. Default `10` :param str parent_model: By default, :py:class:`nPLD` is run in \ stand-alone mode. The neighbor signals are computed directly \ from their TPFs, so there is no need to have run *PLD* on them \ beforehand. However, if :py:obj:`parent_model` \ is set, :py:class:`nPLD` will use information from the \ :py:obj:`parent_model` model of each neighboring star when \ de-trending. This is particularly useful for identifying \ outliers in the neighbor signals and preventing them from \ polluting the current target. Setting :py:obj:`parent_model` \ to :py:class:`rPLD`, for instance, will use the \ outlier information in the :py:class:`rPLD` model of the \ neighbors (this must have been run ahead of time). \ Note, however, that tests with *K2* data show that including \ outliers in the neighbor signals actually \ *improves* the performance, since many of these outliers \ are associated with events such as thruster firings and are \ present in all light curves, and therefore *help* in the \ de-trending. Default `None` .. note:: Optionally, the :py:obj:`neighbors` may be specified \ directly as a list of target IDs to use. \ In this case, users may also provide a list of \ :py:class:`everest.utils.DataContainer` instances \ corresponding to each of the neighbors in the \ :py:obj:`neighbors_data` kwarg. ''' # Get neighbors self.parent_model = kwargs.get('parent_model', None) neighbors = kwargs.get('neighbors', 10) neighbors_data = kwargs.get('neighbors_data', None) if hasattr(neighbors, '__len__'): self.neighbors = neighbors else: num_neighbors = neighbors self.neighbors = \ self._mission.GetNeighbors(self.ID, season=self.season, cadence=self.cadence, model=self.parent_model, neighbors=num_neighbors, mag_range=kwargs.get( 'mag_range', (11., 13.)), cdpp_range=kwargs.get( 'cdpp_range', None), aperture_name=self.aperture_name) if len(self.neighbors): if len(self.neighbors) < num_neighbors: log.warn("%d neighbors requested, but only %d found." % (num_neighbors, len(self.neighbors))) elif num_neighbors > 0: log.warn("No neighbors found! Running standard PLD...") for n, neighbor in enumerate(self.neighbors): log.info("Loading data for neighboring target %d..." % neighbor) if neighbors_data is not None: data = neighbors_data[n] data.mask = np.array( list(set(np.concatenate([data.badmask, data.nanmask]))), dtype=int) data.fraw = np.sum(data.fpix, axis=1) elif self.parent_model is not None and self.cadence == 'lc': # We load the `parent` model. The advantage here is # that outliers have properly been identified and masked. # I haven't tested this on short # cadence data, so I'm going to just forbid it... data = eval(self.parent_model)( neighbor, mission=self.mission, is_parent=True) else: # We load the data straight from the TPF. Much quicker, # since no model must be run in advance. Downside is we # don't know where the outliers are. But based # on tests with K2 data, the de-trending is actually # *better* if the outliers are # included! These are mostly thruster fire events and other # artifacts common to # all the stars, so it makes sense that we might want # to keep them in the design matrix. data = self._mission.GetData(neighbor, season=self.season, clobber=self.clobber_tpf, cadence=self.cadence, aperture_name=self.aperture_name, saturated_aperture_name=self.saturated_aperture_name, max_pixels=self.max_pixels, saturation_tolerance=self.saturation_tolerance, get_hires=False, get_nearby=False) if data is None: raise Exception( "Unable to retrieve data for neighboring target.") data.mask = np.array( list(set(np.concatenate([data.badmask, data.nanmask]))), dtype=int) data.fraw = np.sum(data.fpix, axis=1) # Compute the linear PLD vectors and interpolate over # outliers, NaNs and bad timestamps X1 = data.fpix / data.fraw.reshape(-1, 1) X1 = Interpolate(data.time, data.mask, X1) if self.X1N is None: self.X1N = np.array(X1) else: self.X1N = np.hstack([self.X1N, X1]) del X1 del data
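Each neighbor contributes its own block of first-order PLD regressors, stacked column-wise into self.X1N. The stacking step in isolation, with stand-in pixel data (in the class, Interpolate() first fills outliers and NaNs):

import numpy as np

# Hypothetical stand-in for two neighbors' pixel-level data
neighbor_fpix = [np.random.rand(100, 9) for _ in range(2)]
X1N = None
for fpix in neighbor_fpix:
    fraw = np.sum(fpix, axis=1)
    X1 = fpix / fraw.reshape(-1, 1)           # normalized pixel fluxes
    X1N = X1 if X1N is None else np.hstack([X1N, X1])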
This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param str parent_model: The name of the model to operate on. \ Default `nPLD` def setup(self, **kwargs): ''' This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param str parent_model: The name of the model to operate on. \ Default `nPLD` ''' # Load the parent model self.parent_model = kwargs.get('parent_model', 'nPLD') if not self.load_model(self.parent_model): raise Exception('Unable to load parent model.') # Save static copies of the de-trended flux, # the outlier mask and the lambda array self._norm = np.array(self.flux) self.recmask = np.array(self.mask) self.reclam = np.array(self.lam) # Now reset the model params self.optimize_gp = False nseg = len(self.breakpoints) self.lam_idx = -1 self.lam = [ [1e5] + [None for i in range(self.pld_order - 1)] for b in range(nseg)] self.cdpp_arr = np.array([np.nan for b in range(nseg)]) self.cdppr_arr = np.array([np.nan for b in range(nseg)]) self.cdppv_arr = np.array([np.nan for b in range(nseg)]) self.cdpp = np.nan self.cdppr = np.nan self.cdppv = np.nan self.cdppg = np.nan self.model = np.zeros_like(self.time) self.loaded = True
This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param int piter: The number of iterations in the minimizer. \ Default 3 :param int pmaxf: The maximum number of function evaluations per \ iteration. Default 300 :param float ppert: The fractional amplitude of the perturbation on \ the initial guess. Default 0.1 def setup(self, **kwargs): ''' This is called during production de-trending, prior to calling the :py:obj:`Detrender.run()` method. :param int piter: The number of iterations in the minimizer. \ Default 3 :param int pmaxf: The maximum number of function evaluations per \ iteration. Default 300 :param float ppert: The fractional amplitude of the perturbation on \ the initial guess. Default 0.1 ''' # Check for saved model clobber = self.clobber self.clobber = False if not self.load_model('nPLD'): raise Exception("Can't find `nPLD` model for target.") self.clobber = clobber # Powell iterations self.piter = kwargs.get('piter', 3) self.pmaxf = kwargs.get('pmaxf', 300) self.ppert = kwargs.get('ppert', 0.1)
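These knobs map onto a restarted Powell minimization: run the minimizer piter times, each start perturbed from the current best by a fractional amount ppert, capping each run at pmaxf function evaluations. A hedged sketch with scipy (the actual objective lives elsewhere in the class):

import numpy as np
from scipy.optimize import fmin_powell

def restarted_powell(fobj, x0, piter=3, pmaxf=300, ppert=0.1):
    best_x = np.atleast_1d(np.array(x0, dtype=float))
    best_f = np.inf
    for _ in range(piter):
        # Perturb the current best guess by a fractional amount
        start = best_x * (1 + ppert * np.random.randn(len(best_x)))
        res = fmin_powell(fobj, start, maxfun=pmaxf,
                          full_output=True, disp=False)
        x, f = np.atleast_1d(res[0]), res[1]
        if f < best_f:
            best_x, best_f = x, f
    return best_x, best_f

x, f = restarted_powell(lambda p: np.sum((p - 3.0) ** 2), [0.0, 0.0])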
Runs the de-trending. def run(self): ''' Runs the de-trending. ''' try: # Plot original self.plot_aperture([self.dvs.top_right() for i in range(4)]) self.plot_lc(self.dvs.left(), info_right='nPLD', color='k') # Cross-validate self.cross_validate(self.dvs.right()) self.compute() self.cdpp_arr = self.get_cdpp_arr() self.cdpp = self.get_cdpp() # Plot new self.plot_lc(self.dvs.left(), info_right='Powell', color='k') # Save self.plot_final(self.dvs.top_left()) self.plot_info(self.dvs) self.save_model() except: self.exception_handler(self.debug)