text
stringlengths
81
112k
def cross_validate(self, ax):
    '''
    Performs the cross-validation step.

    For each light-curve chunk, perturbs and re-optimizes the PLD
    regularization parameters (``lambda``) with Powell's method, keeping
    the solution only when it improves the chunk's CDPP, then plots
    ``log(lambda)`` and the CDPP per chunk on the two supplied axes.

    :param ax: a pair of matplotlib axes; ``ax[0]`` receives the \
        lambda-vs-chunk plot and ``ax[1]`` the CDPP-vs-chunk plot.
    '''
    # The CDPP to beat
    cdpp_opt = self.get_cdpp_arr()
    # Loop over all chunks
    for b, brkpt in enumerate(self.breakpoints):
        log.info("Cross-validating chunk %d/%d..." %
                 (b + 1, len(self.breakpoints)))
        # Mask for current chunk
        m = self.get_masked_chunk(b)
        # Mask transits and outliers
        time = self.time[m]
        flux = self.fraw[m]
        ferr = self.fraw_err[m]
        med = np.nanmedian(self.fraw)
        # Setup the GP
        gp = GP(self.kernel, self.kernel_params, white=False)
        gp.compute(time, ferr)
        # The masks: split the chunk into `cdivs` validation sets
        masks = list(Chunks(np.arange(0, len(time)),
                            len(time) // self.cdivs))
        # The pre-computed matrices, one per validation mask
        pre_v = [self.cv_precompute(mask, b) for mask in masks]
        # Initialize with the nPLD solution
        log_lam_opt = np.log10(self.lam[b])
        scatter_opt = self.validation_scatter(
            log_lam_opt, b, masks, pre_v, gp, flux, time, med)
        log.info("Iter 0/%d: " % (self.piter) +
                 "logL = (%s), s = %.3f" %
                 (", ".join(["%.3f" % l for l in log_lam_opt]),
                  scatter_opt))
        # Do `piter` iterations
        for p in range(self.piter):
            # Perturb the initial condition a bit
            log_lam = np.array(
                np.log10(self.lam[b])) * \
                (1 + self.ppert * np.random.randn(len(self.lam[b])))
            scatter = self.validation_scatter(
                log_lam, b, masks, pre_v, gp, flux, time, med)
            log.info("Initializing at: " +
                     "logL = (%s), s = %.3f" %
                     (", ".join(["%.3f" % l for l in log_lam]), scatter))
            # Call the minimizer
            log_lam, scatter, _, _, _, _ = \
                fmin_powell(self.validation_scatter, log_lam,
                            args=(b, masks, pre_v, gp, flux, time, med),
                            maxfun=self.pmaxf, disp=False,
                            full_output=True)
            # Did it improve the CDPP? Temporarily apply the candidate
            # lambda, recompute the model, then restore the original.
            tmp = np.array(self.lam[b])
            self.lam[b] = 10 ** log_lam
            self.compute()
            cdpp = self.get_cdpp_arr()[b]
            self.lam[b] = tmp
            if cdpp < cdpp_opt[b]:
                cdpp_opt[b] = cdpp
                log_lam_opt = log_lam
            # Log it
            log.info("Iter %d/%d: " % (p + 1, self.piter) +
                     "logL = (%s), s = %.3f" %
                     (", ".join(["%.3f" % l for l in log_lam]), scatter))
        # The best solution for this chunk
        log.info("Found minimum: logL = (%s), s = %.3f" %
                 (", ".join(["%.3f" % l for l in log_lam_opt]),
                  scatter_opt))
        self.lam[b] = 10 ** log_lam_opt
    # We're just going to plot lambda as a function of chunk number.
    # NOTE(review): `color` has only 5 entries — assumes pld_order <= 5;
    # confirm upstream.
    bs = np.arange(len(self.breakpoints))
    color = ['k', 'b', 'r', 'g', 'y']
    for n in range(self.pld_order):
        ax[0].plot(bs + 1, [np.log10(self.lam[b][n]) for b in bs],
                   '.', color=color[n])
        ax[0].plot(bs + 1, [np.log10(self.lam[b][n]) for b in bs],
                   '-', color=color[n], alpha=0.25)
    ax[0].set_ylabel(r'$\log\Lambda$', fontsize=5)
    ax[0].margins(0.1, 0.1)
    ax[0].set_xticks(np.arange(1, len(self.breakpoints) + 1))
    ax[0].set_xticklabels([])
    # Now plot the CDPP
    cdpp_arr = self.get_cdpp_arr()
    ax[1].plot(bs + 1, cdpp_arr, 'b.')
    ax[1].plot(bs + 1, cdpp_arr, 'b-', alpha=0.25)
    ax[1].margins(0.1, 0.1)
    ax[1].set_ylabel(r'Scatter (ppm)', fontsize=5)
    ax[1].set_xlabel(r'Chunk', fontsize=5)
    ax[1].set_xticks(np.arange(1, len(self.breakpoints) + 1))
def validation_scatter(self, log_lam, b, masks, pre_v, gp, flux, time, med):
    '''
    Computes the scatter in the validation set.

    Applies ``10 ** log_lam`` as the regularization for chunk ``b``,
    predicts each validation subset with the GP-corrected PLD model,
    and returns the worst (maximum) per-subset scatter in ppm.

    :param log_lam: log10 of the candidate lambda vector for chunk ``b``
    :param b: chunk index
    :param masks: list of index arrays, one validation subset each
    :param pre_v: pre-computed matrices, one per entry of ``masks``
    :param gp: the (already-computed) GP used for prediction
    :param flux: raw flux of the chunk
    :param time: time array of the chunk
    :param med: median of the full raw flux (normalization)
    '''
    # Update the lambda matrix
    self.lam[b] = 10 ** log_lam
    # Validation set scatter
    scatter = [None for i in range(len(masks))]
    for i in range(len(masks)):
        model = self.cv_compute(b, *pre_v[i])
        try:
            gpm, _ = gp.predict(flux - model - med, time[masks[i]])
        except ValueError:
            # Sometimes the model can have NaNs if
            # `lambda` is a crazy value
            return 1.e30
        fdet = (flux - model)[masks[i]] - gpm
        # 1.4826 * MAD estimates the standard deviation; scaled to ppm
        # and by 1/sqrt(N) to approximate scatter of the mean
        scatter[i] = 1.e6 * (1.4826 *
                             np.nanmedian(np.abs(fdet / med -
                                                 np.nanmedian(fdet / med))) /
                             np.sqrt(len(masks[i])))
    return np.max(scatter)
Utility function for populating lists with random data. Useful for populating database with data for fuzzy testing. Supported data-types * *string* For example:: populate('string',100, min_len=3, max_len=10) create a 100 elements list with random strings with random length between 3 and 10 * *date* For example:: from datetime import date populate('date',200, start = date(1997,1,1), end = date.today()) create a 200 elements list with random datetime.date objects between *start* and *end* * *integer* For example:: populate('integer',200, start = 0, end = 1000) create a 200 elements list with random int between *start* and *end* * *float* For example:: populate('float', 200, start = 0, end = 10) create a 200 elements list with random floats between *start* and *end* * *choice* (elements of an iterable) For example:: populate('choice', 200, choice_from = ['pippo','pluto','blob']) create a 200 elements list with random elements from *choice_from*. def populate(datatype='string', size=10, start=None, end=None, converter=None, choice_from=None, **kwargs): '''Utility function for populating lists with random data. Useful for populating database with data for fuzzy testing. 
Supported data-types * *string* For example:: populate('string',100, min_len=3, max_len=10) create a 100 elements list with random strings with random length between 3 and 10 * *date* For example:: from datetime import date populate('date',200, start = date(1997,1,1), end = date.today()) create a 200 elements list with random datetime.date objects between *start* and *end* * *integer* For example:: populate('integer',200, start = 0, end = 1000) create a 200 elements list with random int between *start* and *end* * *float* For example:: populate('float', 200, start = 0, end = 10) create a 200 elements list with random floats between *start* and *end* * *choice* (elements of an iterable) For example:: populate('choice', 200, choice_from = ['pippo','pluto','blob']) create a 200 elements list with random elements from *choice_from*. ''' data = [] converter = converter or def_converter if datatype == 'date': date_end = end or date.today() date_start = start or date(1990, 1, 1) delta = date_end - date_start for s in range(size): data.append(converter(random_date(date_start, delta.days))) elif datatype == 'integer': start = start or 0 end = end or 1000000 for s in range(size): data.append(converter(randint(start, end))) elif datatype == 'float': start = start or 0 end = end or 10 for s in range(size): data.append(converter(uniform(start, end))) elif datatype == 'choice' and choice_from: for s in range(size): data.append(choice(list(choice_from))) else: for s in range(size): data.append(converter(random_string(**kwargs))) return data
def Search(star, pos_tol=2.5, neg_tol=50., **ps_kwargs):
    '''
    Rolls a single-transit model across every cadence of ``star`` and
    returns the per-cadence transit depth, its variance, and the
    delta-chi-squared relative to a no-transit baseline.

    NOTE: `pos_tol` is the positive (i.e., above the median) outlier
    tolerance in standard deviations.

    NOTE: `neg_tol` is the negative (i.e., below the median) outlier
    tolerance in standard deviations.

    Side effects: overwrites ``star.outmask`` with the detected outliers
    and resets ``star.transitmask`` to an empty array.

    :returns: tuple ``(TIME, DEPTH, VARDEPTH, DELCHISQ)`` of \
        concatenated per-chunk arrays.
    '''
    # Smooth the light curve
    t = np.delete(star.time, np.concatenate([star.nanmask, star.badmask]))
    f = np.delete(star.flux, np.concatenate([star.nanmask, star.badmask]))
    f = SavGol(f)
    med = np.nanmedian(f)
    # Kill positive outliers (1.4826 * MAD approximates sigma)
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    pos_inds = np.where((f > med + pos_tol * MAD))[0]
    # Map indices in the cleaned array back to indices in star.time
    pos_inds = np.array([np.argmax(star.time == t[i]) for i in pos_inds])
    # Kill negative outliers
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    neg_inds = np.where((f < med - neg_tol * MAD))[0]
    neg_inds = np.array([np.argmax(star.time == t[i]) for i in neg_inds])
    # Replace the star.outmask array
    star.outmask = np.concatenate([neg_inds, pos_inds])
    star.transitmask = np.array([], dtype=int)
    # Delta chi squared accumulators
    TIME = np.array([])
    DEPTH = np.array([])
    VARDEPTH = np.array([])
    DELCHISQ = np.array([])
    for b, brkpt in enumerate(star.breakpoints):
        # Log
        log.info('Running chunk %d/%d...'
                 % (b + 1, len(star.breakpoints)))
        # Masks for current chunk
        m = star.get_masked_chunk(b, pad=False)
        # This block of the masked covariance matrix
        K = GetCovariance(star.kernel, star.kernel_params,
                          star.time[m], star.fraw_err[m])
        # The masked X.L.X^T term
        A = np.zeros((len(m), len(m)))
        for n in range(star.pld_order):
            XM = star.X(n, m)
            A += star.lam[b][n] * np.dot(XM, XM.T)
        K += A
        # Cholesky factorization, reused for every solve below
        CDK = cho_factor(K)
        # Baseline (no-transit) log-likelihood
        med = np.nanmedian(star.fraw[m])
        lnL0 = -0.5 * np.dot(star.fraw[m], cho_solve(CDK, star.fraw[m]))
        dt = np.median(np.diff(star.time[m]))
        # Create a uniform time array and get indices of missing cadences
        tol = np.nanmedian(np.diff(star.time[m])) / 5.
        tunif = np.arange(star.time[m][0], star.time[m][-1] + tol, dt)
        tnogaps = np.array(tunif)
        gaps = []
        j = 0
        for i, t in enumerate(tunif):
            if np.abs(star.time[m][j] - t) < tol:
                # Snap the uniform grid point to the real cadence
                tnogaps[i] = star.time[m][j]
                j += 1
                if j == len(star.time[m]):
                    break
            else:
                gaps.append(i)
        gaps = np.array(gaps, dtype=int)
        # Compute the normalized transit model for a single transit
        transit_model = TransitShape(**ps_kwargs)
        # Now roll the transit model across each cadence
        dchisq = np.zeros(len(tnogaps))
        d = np.zeros(len(tnogaps))
        vard = np.zeros(len(tnogaps))
        for i in prange(len(tnogaps)):
            trn = transit_model(tnogaps, tnogaps[i])
            trn = np.delete(trn, gaps)
            trn *= med
            # Depth variance from the generalized least-squares solution
            vard[i] = 1. / np.dot(trn, cho_solve(CDK, trn))
            if not np.isfinite(vard[i]):
                vard[i] = np.nan
                d[i] = np.nan
                dchisq[i] = np.nan
                continue
            # Best-fit depth and resulting delta chi squared
            d[i] = vard[i] * np.dot(trn, cho_solve(CDK, star.fraw[m]))
            r = star.fraw[m] - trn * d[i]
            lnL = -0.5 * np.dot(r, cho_solve(CDK, r))
            dchisq[i] = -2 * (lnL0 - lnL)
        TIME = np.append(TIME, tnogaps)
        DEPTH = np.append(DEPTH, d)
        VARDEPTH = np.append(VARDEPTH, vard)
        DELCHISQ = np.append(DELCHISQ, dchisq)
    return TIME, DEPTH, VARDEPTH, DELCHISQ
def iterdirty(self):
    '''Ordered iterator over dirty elements: new instances first,
    followed by modified ones.'''
    fresh = itervalues(self._new)
    changed = itervalues(self._modified)
    return iter(chain(fresh, changed))
def add(self, instance, modified=True, persistent=None, force_update=False):
    '''Add a new instance to this :class:`SessionModel`.

    :param modified: Optional flag indicating if the ``instance`` has
        been modified. By default its value is ``True``.
    :param persistent: Optional override of the instance's own
        persistent flag; ``None`` means use the instance state.
    :param force_update: if ``instance`` is persistent, it forces an
        update of the data rather than a full replacement. This is
        used by the :meth:`insert_update_replace` method.
    :rtype: The instance added to the session'''
    # Structures take a dedicated code path
    if instance._meta.type == 'structure':
        return self._add_structure(instance)
    state = instance.get_state()
    if state.deleted:
        raise ValueError('State is deleted. Cannot add.')
    # Drop any previously tracked copy of this instance
    self.pop(state.iid)
    pers = persistent if persistent is not None else state.persistent
    pkname = instance._meta.pkname()
    if not pers:
        instance._dbdata.pop(pkname, None)  # to make sure it is add action
        state = instance.get_state(iid=None)
    elif persistent:
        # Caller explicitly marked it persistent: key on the pk value
        instance._dbdata[pkname] = instance.pkvalue()
        state = instance.get_state(iid=instance.pkvalue())
    else:
        # Instance is persistent by its own state; optionally force an
        # update action instead of a replacement
        action = 'update' if force_update else None
        state = instance.get_state(action=action, iid=state.iid)
    iid = state.iid
    if state.persistent:
        if modified:
            self._modified[iid] = instance
    else:
        self._new[iid] = instance
    return instance
def delete(self, instance, session):
    '''Delete an *instance*.

    Persistent instances are moved to the deleted bucket (the actual
    backend removal happens at commit); non-persistent ones are simply
    detached from the session.

    :param instance: the instance (or id) to delete.
    :param session: the owning :class:`Session`.
    :rtype: the instance marked for deletion (or detached).
    '''
    # Structures take a dedicated code path
    if instance._meta.type == 'structure':
        return self._delete_structure(instance)
    # Prefer the copy already tracked by this session, if any
    inst = self.pop(instance)
    instance = inst if inst is not None else instance
    if instance is not None:
        state = instance.get_state()
        if state.persistent:
            state.deleted = True
            self._deleted[state.iid] = instance
            instance.session = session
        else:
            # Never stored: nothing to delete on the backend
            instance.session = None
        return instance
def pop(self, instance):
    '''Remove ``instance`` from the :class:`SessionModel`. Instance
    could be a :class:`Model` or an id.

    :parameter instance: a :class:`Model` or an ``id``.
    :rtype: the :class:`Model` removed from session or ``None`` if
        it was not in the session.
    '''
    # Accept either a model instance or a bare id
    if isinstance(instance, self.model):
        iid = instance.get_state().iid
        found = instance
    else:
        iid = instance
        found = None
    # The same id may live in several buckets; they must all hold the
    # very same object
    for bucket in (self._new, self._modified, self._deleted):
        if iid not in bucket:
            continue
        candidate = bucket.pop(iid)
        if found is None:
            found = candidate
        elif candidate is not found:
            raise ValueError('Critical error: %s is duplicated' % iid)
    return found
def expunge(self, instance):
    '''Remove *instance* from the :class:`Session`. Instance
    could be a :class:`Model` or an id.

    :parameter instance: a :class:`Model` or an *id*
    :rtype: the :class:`Model` removed from session or ``None`` if
        it was not in the session.
    '''
    instance = self.pop(instance)
    # BUGFIX: pop returns None when the instance was not tracked; the
    # unconditional attribute assignment used to raise AttributeError
    # instead of returning None as documented.
    if instance is not None:
        instance.session = None
    return instance
def post_commit(self, results):
    '''\
    Process results after a commit.

    :parameter results: iterator over :class:`stdnet.instance_session_result`
        items.
    :rtype: a three elements tuple containing a list of instances saved,
        a list of ids of instances deleted, and a list of errors.'''
    tpy = self._meta.pk_to_python
    instances = []
    deleted = []
    errors = []
    # The length of results must be the same as the length of
    # all committed instances
    for result in results:
        if isinstance(result, Exception):
            # Wrap backend exceptions with model context and keep going
            errors.append(result.__class__('Exception while committing %s.'
                                           ' %s' % (self._meta, result)))
            continue
        instance = self.pop(result.iid)
        # Convert the backend id into its python representation
        id = tpy(result.id, self.backend)
        if result.deleted:
            deleted.append(id)
        else:
            if instance is None:
                raise InvalidTransaction('{0} session received id "{1}"\
 which is not in the session.'.format(self, result.iid))
            # Bind the freshly assigned primary key and re-track the
            # instance as unmodified/persistent
            setattr(instance, instance._meta.pkname(), id)
            instance = self.add(instance, modified=False,
                                persistent=result.persistent)
            instance.get_state().score = result.score
            if instance.get_state().persistent:
                instances.append(instance)
    return instances, deleted, errors
def commit(self, callback=None):
    '''Close the transaction and commit session to the backend.

    :param callback: optional callable forwarded to the commit routine.
    :rtype: the commit result, also stored on :attr:`on_result`.
    '''
    # A transaction may only run once
    if self.executed:
        raise InvalidTransaction('Invalid operation. '
                                 'Transaction already executed.')
    # Detach the session before handing it to the commit routine
    session, self.session = self.session, None
    self.on_result = self._commit(session, callback)
    return self.on_result
def dirty(self):
    '''The set of instances in this :class:`Session` which have been
    modified.'''
    per_model_dirty = (sm.dirty for sm in itervalues(self._models))
    return frozenset(chain.from_iterable(per_model_dirty))
def begin(self, **options):
    '''Begin a new :class:`Transaction`. If this :class:`Session`
    is already in a :ref:`transactional state <transactional-state>`,
    an error will occur. It returns the :attr:`transaction` attribute.

    This method is mostly used within a ``with`` statement block::

        with session.begin() as t:
            t.add(...)
            ...

    which is equivalent to::

        t = session.begin()
        t.add(...)
        ...
        session.commit()

    ``options`` parameters are passed to the :class:`Transaction`
    constructor.
    '''
    # Guard clause: refuse nested transactions
    if self.transaction is not None:
        raise InvalidTransaction("A transaction is already begun.")
    self.transaction = Transaction(self, **options)
    return self.transaction
def query(self, model, **kwargs):
    '''Create a new :class:`Query` for *model*.

    The manager's custom query class is used when available, falling
    back to the default :class:`Query`.
    '''
    sm = self.model(model)
    factory = sm.manager.query_class
    if not factory:
        factory = Query
    return factory(sm._meta, self, **kwargs)
def update_or_create(self, model, **kwargs):
    '''Update or create a new instance of ``model``.

    This method can raise an exception if the ``kwargs`` dictionary
    contains field data that does not validate.

    :param model: a :class:`StdModel`
    :param kwargs: dictionary of parameters.
    :returns: A two elements tuple containing the instance and a
        boolean indicating if the instance was created or not.
    '''
    # Resolve the backend first, then delegate the actual work
    backend = self.model(model).backend
    pending = self._update_or_create(model, **kwargs)
    return backend.execute(pending)
def add(self, instance, modified=True, **params):
    '''Add an ``instance`` to the session. If the session is not in
    a :ref:`transactional state <transactional-state>`, this operation
    commits changes to the back-end server immediately.

    :parameter instance: a :class:`Model` instance. It must be
        registered with the :attr:`router` which created this
        :class:`Session`.
    :parameter modified: a boolean flag indicating if the instance
        was modified.
    :return: the ``instance``.

    If the instance is persistent (it is already stored in the
    database), an updated will be performed, otherwise a new entry
    will be created once the :meth:`commit` method is invoked.
    '''
    sm = self.model(instance)
    # Bind the instance to this session before tracking it
    instance.session = self
    o = sm.add(instance, modified=modified, **params)
    if modified and not self.transaction:
        # Autocommit: no explicit transaction, so open one and commit
        # immediately, resolving to the tracked instance
        transaction = self.begin()
        return transaction.commit(lambda: o)
    else:
        return o
def delete(self, instance_or_query):
    '''Delete an ``instance`` or a ``query``.

    Adds ``instance_or_query`` to this :class:`Session` list of data to
    be deleted. If the session is not in a :ref:`transactional state
    <transactional-state>`, this operation commits changes to the
    backend server immediately.

    :parameter instance_or_query: a :class:`Model` instance or
        a :class:`Query`.
    '''
    sm = self.model(instance_or_query)
    # not an instance of a Model. Assume it is a query.
    if is_query(instance_or_query):
        if instance_or_query.session is not self:
            raise ValueError('Adding a query generated by another session')
        sm._delete_query.append(instance_or_query)
    else:
        instance_or_query = sm.delete(instance_or_query, self)
    if not self.transaction:
        # Autocommit: resolve to whatever was deleted for this model
        transaction = self.begin()
        return transaction.commit(
            lambda: transaction.deleted.get(sm._meta))
    else:
        return instance_or_query
def model(self, model, create=True):
    '''Returns the :class:`SessionModel` for ``model`` which can be
    :class:`Model`, or a :class:`MetaClass`, or an instance of
    :class:`Model`.

    :param create: when ``True`` (default) a missing
        :class:`SessionModel` is created and cached.
    '''
    manager = self.manager(model)
    sm = self._models.get(manager)
    # Cache hit, or caller asked not to create
    if sm is not None or not create:
        return sm
    sm = SessionModel(manager)
    self._models[manager] = sm
    return sm
def expunge(self, instance=None):
    '''Remove ``instance`` from this :class:`Session`. If ``instance``
    is not given, it removes all instances from this :class:`Session`.'''
    if instance is None:
        # No target: drop every tracked model
        self._models.clear()
        return None
    sm = self._models.get(instance._meta)
    if sm:
        return sm.expunge(instance)
def manager(self, model):
    '''Retrieve the :class:`Manager` for ``model`` which can be any of
    the values valid for the :meth:`model` method.

    :raises InvalidTransaction: when no manager can be resolved for
        ``model`` in this session.
    '''
    try:
        return self.router[model]
    except KeyError:
        # Not directly registered; fall back for structure types
        meta = getattr(model, '_meta', model)
        if meta.type == 'structure':
            # this is a structure
            if hasattr(model, 'model'):
                # A structure bound to a model: resolve via that model
                structure_model = model.model
                if structure_model:
                    return self.manager(structure_model)
            else:
                manager = self.router.structure(model)
                if manager:
                    return manager
        raise InvalidTransaction('"%s" not valid in this session' % meta)
def new(self, *args, **kwargs):
    '''Create a new instance of :attr:`model` and commit it to the
    backend server. This a shortcut method for the more verbose::

        instance = manager.session().add(MyModel(**kwargs))
    '''
    session = self.session()
    instance = self.model(*args, **kwargs)
    return session.add(instance)
def query(self, session=None):
    '''Returns a new :class:`Query` for :attr:`Manager.model`.

    A fresh session is created when none is given or when the given
    session belongs to a different router.
    '''
    foreign = session is None or session.router is not self.router
    if foreign:
        session = self.session()
    return session.query(self.model)
def search(self, text, lookup=None):
    '''Returns a new :class:`Query` for :attr:`Manager.model` with a
    full text search value.'''
    base_query = self.query()
    return base_query.search(text, lookup=lookup)
def pairs_to_dict(response, encoding):
    "Create a dict given a flat list of alternating key/value pairs; keys are decoded with *encoding*, values are kept as-is."
    # zip the same iterator with itself to pair consecutive elements
    it = iter(response)
    return {key.decode(encoding): value for key, value in zip(it, it)}
def load_related(self, meta, fname, data, fields, encoding):
    '''Parse data for related objects.

    :param meta: model metadata owning the related field ``fname``.
    :param fname: name of the related field.
    :param data: raw backend payload, ``(id, fdata)`` pairs for
        multifields or model rows otherwise.
    :param fields: the fields selected for the related model.
    :param encoding: byte decoding used for ids/keys.
    :returns: a lazy generator of ``(id, data)`` pairs for multifields,
        otherwise the built model instances.
    '''
    field = meta.dfields[fname]
    if field in meta.multifields:
        fmeta = field.structure_class()._meta
        if fmeta.name in ('hashtable', 'zset'):
            # key/value structures: convert the flat pair list to a dict
            return ((native_str(id, encoding),
                     pairs_to_dict(fdata, encoding))
                    for id, fdata in data)
        else:
            return ((native_str(id, encoding), fdata)
                    for id, fdata in data)
    else:
        # this is data for stdmodel instances
        return self.build(data, meta, fields, fields, encoding)
def _execute_query(self):
    '''Execute the query without fetching data. Returns the number of
    elements in the query.

    NOTE(review): this is a generator-based coroutine — the pipeline is
    executed via ``yield`` and the final ``yield`` produces the
    cardinality (last pipeline result).
    '''
    pipe = self.pipe
    if not self.card:
        # Pick sorted-set or plain-set commands depending on ordering
        if self.meta.ordering:
            self.ismember = getattr(self.backend.client, 'zrank')
            self.card = getattr(pipe, 'zcard')
            self._check_member = self.zism
        else:
            self.ismember = getattr(self.backend.client, 'sismember')
            self.card = getattr(pipe, 'scard')
            self._check_member = self.sism
    else:
        self.ismember = None
    # Queue the cardinality command on the pipeline
    self.card(self.query_key)
    result = yield pipe.execute()
    # Cardinality is the result of the last queued command
    yield result[-1]
def order(self, last):
    '''Perform ordering with respect model fields.

    Walks the chain of nested ordering clauses, collecting the backend
    base-keys for joined models, and returns a sort specification dict.
    '''
    descending = last.desc
    sort_field = last.name
    join_args = []
    # Follow the nested clauses down to the innermost one
    node = last.nested
    while node:
        node_meta = node.model._meta
        join_args.extend((self.backend.basekey(node_meta), node.name))
        last = node
        node = node.nested
    # Text fields require Redis' ALPHA (lexicographic) sort
    is_text = last.field.internal_type == 'text'
    sort_method = 'ALPHA' if is_text else ''
    # Sorting on the primary key is expressed as an empty field
    if sort_field == last.model._meta.pkname():
        sort_field = ''
    return {'field': sort_field, 'method': sort_method,
            'desc': descending, 'nested': join_args}
def related_lua_args(self):
    '''Generator of load_related arguments.

    Yields ``(field_name, data)`` pairs describing each select-related
    field for the lua loading script.
    '''
    related = self.queryelem.select_related
    if related:
        meta = self.meta
        for rel in related:
            field = meta.dfields[rel]
            relmodel = field.relmodel
            # Base key of the related model, empty when unbound
            bk = self.backend.basekey(relmodel._meta) if relmodel else ''
            fields = list(related[rel])
            if meta.pkname() in fields:
                # The pk is implicit; drop it and keep a '' placeholder
                # when it was the only selected field
                fields.remove(meta.pkname())
                if not fields:
                    fields.append('')
            ftype = field.type if field in meta.multifields else ''
            data = {'field': field.attname, 'type': ftype,
                    'bk': bk, 'fields': fields}
            yield field.name, data
def ipop_range(self, start, stop=None, withscores=True, **options):
    '''Remove and return a range from the ordered set by rank (index).

    :param start: first rank of the range.
    :param stop: optional last rank.
    :param withscores: when ``True`` scores are returned alongside
        the members.
    '''
    # Queue the pop command, then post-process with self._range
    command = self.client.zpopbyrank(self.id, start, stop,
                                     withscores=withscores, **options)
    callback = partial(self._range, withscores)
    return self.backend.execute(command, callback)
def pop_range(self, start, stop=None, withscores=True, **options):
    '''Remove and return a range from the ordered set by score.

    :param start: lower score bound.
    :param stop: optional upper score bound.
    :param withscores: when ``True`` scores are returned alongside
        the members.
    '''
    # Queue the pop-by-score command, then post-process with self._range
    command = self.client.zpopbyscore(self.id, start, stop,
                                      withscores=withscores, **options)
    callback = partial(self._range, withscores)
    return self.backend.execute(command, callback)
def meta(self, meta):
    '''Extract model metadata for lua script stdnet/lib/lua/odm.lua.

    Returns the metadata dict augmented with the backend namespace.
    '''
    payload = meta.as_dict()
    payload['namespace'] = self.basekey(meta)
    return payload
def execute_session(self, session_data):
    '''Execute a session in redis.

    Builds a single pipeline covering every model session: structure
    flushes, accumulated deletes, and a lua ``commit`` call carrying the
    flattened data of all dirty instances.

    :param session_data: iterable of per-model session objects.
    :returns: the pipeline execution result.
    '''
    pipe = self.client.pipeline()
    for sm in session_data:  # loop through model sessions
        meta = sm.meta
        if sm.structures:
            self.flush_structure(sm, pipe)
        delquery = None
        if sm.deletes is not None:
            delquery = sm.deletes.backend_query(pipe=pipe)
            self.accumulate_delete(pipe, delquery)
        if sm.dirty:
            meta_info = json.dumps(self.meta(meta))
            # lua_data layout: [N, (action, prev_id, id, score,
            # len(data), *data) * N]
            lua_data = [len(sm.dirty)]
            processed = []
            for instance in sm.dirty:
                state = instance.get_state()
                if not meta.is_valid(instance):
                    raise FieldValueError(
                        json.dumps(instance._dbdata['errors']))
                score = MIN_FLOAT
                if meta.ordering:
                    if meta.ordering.auto:
                        # auto-incremented sort score
                        score = meta.ordering.name.incrby
                    else:
                        v = getattr(instance, meta.ordering.name, None)
                        if v is not None:
                            score = meta.ordering.field.scorefun(v)
                data = instance._dbdata['cleaned_data']
                action = state.action
                # previous id only meaningful for persistent instances
                prev_id = state.iid if state.persistent else ''
                id = instance.pkvalue() or ''
                data = flat_mapping(data)
                lua_data.extend((action, prev_id, id, score, len(data)))
                lua_data.extend(data)
                processed.append(state.iid)
            self.odmrun(pipe, 'commit', meta, (), meta_info,
                        *lua_data, iids=processed)
    return pipe.execute()
def flush(self, meta=None):
    '''Flush all model keys from the database.

    When ``meta`` is given only that model's keys are removed,
    otherwise everything under this backend's namespace is deleted.
    '''
    if meta:
        prefix = self.basekey(meta)
    else:
        prefix = self.namespace
    return self.client.delpattern('%s*' % prefix)
def GetCovariance(kernel, kernel_params, time, errors):
    '''
    Returns the covariance matrix for a given light curve segment.

    :param array_like kernel_params: A list of kernel parameters \
        (white noise amplitude, red noise amplitude, and red noise \
        timescale)
    :param array_like time: The time array (*N*)
    :param array_like errors: The data error array (*N*)

    :returns: The covariance matrix :py:obj:`K` (*N*,*N*)
    '''
    # NOTE: We purposefully compute the covariance matrix
    # *without* the GP white noise term; the diagonal comes from the
    # measurement errors instead.
    white_term = np.diag(errors ** 2)
    red_term = GP(kernel, kernel_params, white=False).get_matrix(time)
    return white_term + red_term
def GetKernelParams(time, flux, errors, kernel='Basic', mask=[],
                    giter=3, gmaxf=200, guess=None):
    '''
    Optimizes the GP by training it on the current de-trended light
    curve. Returns the white noise amplitude, red noise amplitude, and
    red noise timescale.

    :param array_like time: The time array
    :param array_like flux: The flux array
    :param array_like errors: The flux errors array
    :param str kernel: ``'Basic'`` or ``'QuasiPeriodic'``
    :param array_like mask: The indices to be masked when training the \
        GP. Default `[]` (NOTE: safe as a mutable default — it is never \
        mutated, only reassigned below)
    :param int giter: The number of iterations. Default 3
    :param int gmaxf: The maximum number of function evaluations. \
        Default 200
    :param tuple guess: The guess to initialize the minimization with. \
        Default :py:obj:`None`
    '''
    log.info("Optimizing the GP...")
    # Save a copy of time and errors for later
    time_copy = np.array(time)
    errors_copy = np.array(errors)
    # Apply the mask
    time = np.delete(time, mask)
    flux = np.delete(flux, mask)
    errors = np.delete(errors, mask)
    # Remove 5-sigma outliers to be safe
    f = flux - savgol_filter(flux, 49, 2) + np.nanmedian(flux)
    med = np.nanmedian(f)
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    mask = np.where((f > med + 5 * MAD) | (f < med - 5 * MAD))[0]
    time = np.delete(time, mask)
    flux = np.delete(flux, mask)
    errors = np.delete(errors, mask)
    # Initial guesses and bounds
    white = np.nanmedian([np.nanstd(c) for c in Chunks(flux, 13)])
    amp = np.nanstd(flux)
    tau = 30.0
    if kernel == 'Basic':
        if guess is None:
            guess = [white, amp, tau]
        bounds = [[0.1 * white, 10. * white],
                  [1., 10000. * amp],
                  [0.5, 100.]]
    elif kernel == 'QuasiPeriodic':
        if guess is None:
            guess = [white, amp, tau, 1., 20.]
        # NOTE(review): `guess` has 5 entries here but `bounds` only 4,
        # so the 5th parameter is never randomized below and gets no
        # bound in the optimizer — confirm against the QuasiPeriodic
        # kernel's expected parameter count.
        bounds = [[0.1 * white, 10. * white],
                  [1., 10000. * amp],
                  [1e-5, 1e2],
                  [0.02, 100.]]
    else:
        raise ValueError('Invalid value for `kernel`.')
    # Loop
    llbest = -np.inf
    xbest = np.array(guess)
    for i in range(giter):
        # Randomize an initial guess within the bounds
        iguess = [np.inf for g in guess]
        for j, b in enumerate(bounds):
            tries = 0
            while (iguess[j] < b[0]) or (iguess[j] > b[1]):
                iguess[j] = (1 + 0.5 * np.random.randn()) * guess[j]
                tries += 1
                if tries > 100:
                    # Give up perturbing; draw uniformly in the bound
                    iguess[j] = b[0] + np.random.random() * (b[1] - b[0])
                    break
        # Optimize
        x = fmin_l_bfgs_b(NegLnLike, iguess, approx_grad=False,
                          bounds=bounds,
                          args=(time, flux, errors, kernel),
                          maxfun=gmaxf)
        log.info('Iteration #%d/%d:' % (i + 1, giter))
        log.info(' ' + x[2]['task'].decode('utf-8'))
        log.info(' ' + 'Function calls: %d' % x[2]['funcalls'])
        log.info(' ' + 'Log-likelihood: %.3e' % -x[1])
        if kernel == 'Basic':
            log.info(' ' + 'White noise : %.3e (%.1f x error bars)'
                     % (x[0][0], x[0][0] / np.nanmedian(errors)))
            log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)'
                     % (x[0][1], x[0][1] / np.nanstd(flux)))
            log.info(' ' + 'Red timescale : %.2f days' % x[0][2])
        elif kernel == 'QuasiPeriodic':
            log.info(' ' + 'White noise : %.3e (%.1f x error bars)'
                     % (x[0][0], x[0][0] / np.nanmedian(errors)))
            log.info(' ' + 'Red amplitude : %.3e (%.1f x stand dev)'
                     % (x[0][1], x[0][1] / np.nanstd(flux)))
            log.info(' ' + 'Gamma : %.3e' % x[0][2])
            log.info(' ' + 'Period : %.2f days' % x[0][3])
        # Keep the best solution across restarts
        if -x[1] > llbest:
            llbest = -x[1]
            xbest = np.array(x[0])
    return xbest
def NegLnLike(x, time, flux, errors, kernel):
    '''
    Returns the negative log-likelihood function and its gradient.

    :param x: the kernel hyperparameter vector being optimized.
    :param time: time array the GP is computed on.
    :param flux: data the likelihood is evaluated against.
    :param errors: per-point uncertainties.
    :param kernel: kernel identifier forwarded to :class:`GP`.
    :returns: tuple ``(nll, ngr)`` — negative log-likelihood and its \
        gradient with respect to ``x``.
    '''
    gp = GP(kernel, x, white=True)
    gp.compute(time, errors)
    if OLDGEORGE:
        # Old george API
        nll = -gp.lnlikelihood(flux)
        # NOTE: There was a bug on this next line! Used to be
        #
        #   ngr = -gp.grad_lnlikelihood(flux) / gp.kernel.pars
        #
        # But I think we want
        #
        #   dlogL/dx = dlogL/dlogx^2 * dlogx^2/dx^2 * dx^2/dx
        #            = gp.grad_lnlikelihood() * 1/x^2 * 2x
        #            = 2 * gp.grad_lnlikelihood() / x
        #            = 2 * gp.grad_lnlikelihood() / np.sqrt(x^2)
        #            = 2 * gp.grad_lnlikelihood() / np.sqrt(gp.kernel.pars)
        #
        # (with a negative sign out front for the negative gradient).
        # So we probably weren't optimizing the GP correctly! This
        # affects all campaigns through C13. It's not a *huge* deal,
        # since the sign of the gradient was correct and the model isn't
        # that sensitive to the value of the hyperparameters, but it may
        # have contributed to the poor performance on super variable
        # stars. In most cases it means the solver takes longer to
        # converge and isn't as good at finding the minimum.
        ngr = -2 * gp.grad_lnlikelihood(flux) / np.sqrt(gp.kernel.pars)
    else:
        # Modern george API
        nll = -gp.log_likelihood(flux)
        ngr = -2 * gp.grad_log_likelihood(flux) / \
            np.sqrt(np.exp(gp.get_parameter_vector()))
    return nll, ngr
def missing_intervals(startdate, enddate, start, end,
                      dateconverter=None,
                      parseinterval=None,
                      intervals=None):
    '''Given a ``startdate`` and an ``enddate``, evaluate the date
    intervals for which data is not available. It returns a list of
    two-dimensional tuples containing start and end date for each
    interval. The list can contain 0, 1 or 2 tuples.

    :parameter start: first date for which data IS available (or falsy
        when there is no history at all).
    :parameter end: last date for which data IS available.
    :parameter dateconverter: optional callable converting a parsed value
        to a date; defaults to :func:`todate`.
    :parameter parseinterval: optional callable ``(value, offset)``;
        defaults to :func:`default_parse_interval`. The offset presumably
        shifts the value by one step (-1/+1) -- confirm against the
        default implementation.
    :parameter intervals: optional pre-existing :class:`Intervals`
        (or an iterable convertible to one) merged into the result.
    '''
    parseinterval = parseinterval or default_parse_interval
    dateconverter = dateconverter or todate
    startdate = dateconverter(parseinterval(startdate, 0))
    # Guarantee enddate >= startdate.
    enddate = max(startdate, dateconverter(parseinterval(enddate, 0)))

    if intervals is not None and not isinstance(intervals, Intervals):
        intervals = Intervals(intervals)

    calc_intervals = Intervals()
    # we have some history already
    if start:
        # the startdate not available: gap before the existing history
        if startdate < start:
            calc_start = startdate
            calc_end = parseinterval(start, -1)
            if calc_end >= calc_start:
                calc_intervals.append(Interval(calc_start, calc_end))
        # gap after the existing history
        if enddate > end:
            calc_start = parseinterval(end, 1)
            calc_end = enddate
            if calc_end >= calc_start:
                calc_intervals.append(Interval(calc_start, calc_end))
    else:
        # no history at all: the whole requested range is missing
        start = startdate
        end = enddate
        calc_intervals.append(Interval(startdate, enddate))

    # Merge with any caller-supplied intervals.
    if calc_intervals:
        if intervals:
            calc_intervals.extend(intervals)
    elif intervals:
        calc_intervals = intervals
    return calc_intervals
def dategenerator(start, end, step=1, desc=False):
    '''Generates dates between *start* and *end*, inclusive.

    :param start: first date of the range.
    :param end: last date of the range; clamped so that ``end >= start``,
        guaranteeing at least one yield.
    :param int step: stride in days; its absolute value is used.
    :param bool desc: if True, yield from *end* down to *start*.
    :raises ValueError: if *step* is 0 (a zero stride would loop forever).
    '''
    # BUGFIX: step=0 used to produce timedelta(0) and an infinite loop.
    if not step:
        raise ValueError('step must be non-zero')
    delta = timedelta(abs(step))
    end = max(start, end)
    if desc:
        dt = end
        while dt >= start:
            yield dt
            dt -= delta
    else:
        dt = start
        while dt <= end:
            yield dt
            dt += delta
def InitLog(file_name=None, log_level=logging.DEBUG,
            screen_level=logging.CRITICAL, pdb=False):
    '''
    A little routine to initialize the logging functionality.

    :param str file_name: The name of the file to log to. \
           Default :py:obj:`None` (set internally by :py:mod:`everest`)
    :param int log_level: The file logging level (0-50). Default 10 (debug)
    :param int screen_level: The screen logging level (0-50). \
           Default 50 (critical)
    :param bool pdb: If :py:obj:`True`, log everything to the screen and \
           drop into :py:obj:`pdb` post-mortem on uncaught exceptions.
    '''
    # Initialize the logging; reset handlers so repeated calls don't stack
    root = logging.getLogger()
    root.handlers = []
    root.setLevel(logging.DEBUG)

    # File handler
    if file_name is not None:
        # BUGFIX: a bare filename has an empty dirname, and
        # os.makedirs('') raises -- only create the directory if any.
        dir_name = os.path.dirname(file_name)
        if dir_name and not os.path.exists(dir_name):
            os.makedirs(dir_name)
        fh = logging.FileHandler(file_name)
        fh.setLevel(log_level)
        fh_formatter = logging.Formatter(
            "%(asctime)s %(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s",
            datefmt="%m/%d/%y %H:%M:%S")
        fh.setFormatter(fh_formatter)
        # Suppress noisy PIL records (filter defined elsewhere in the file)
        fh.addFilter(NoPILFilter())
        root.addHandler(fh)

    # Screen handler
    sh = logging.StreamHandler(sys.stdout)
    if pdb:
        sh.setLevel(logging.DEBUG)
    else:
        sh.setLevel(screen_level)
    sh_formatter = logging.Formatter(
        "%(levelname)-5s [%(name)s.%(funcName)s()]: %(message)s")
    sh.setFormatter(sh_formatter)
    sh.addFilter(NoPILFilter())
    root.addHandler(sh)

    # Set exception hook so uncaught exceptions are logged
    if pdb:
        sys.excepthook = ExceptionHookPDB
    else:
        sys.excepthook = ExceptionHook
def ExceptionHook(exctype, value, tb):
    '''
    A custom exception handler that logs errors to file.
    '''
    # Log the exception summary first, then the traceback frames,
    # stripping newlines so each entry becomes a single log record.
    entries = traceback.format_exception_only(exctype, value) + \
        traceback.format_tb(tb)
    for entry in entries:
        log.error(entry.replace('\n', ''))
    # Defer to the default hook for the usual stderr output.
    sys.__excepthook__(exctype, value, tb)
def ExceptionHookPDB(exctype, value, tb):
    '''
    A custom exception handler, with :py:obj:`pdb` post-mortem for
    debugging.
    '''
    # Log the exception summary and the traceback frames as single-line
    # records, then chain to the default hook before dropping into pdb.
    entries = traceback.format_exception_only(exctype, value) + \
        traceback.format_tb(tb)
    for entry in entries:
        log.error(entry.replace('\n', ''))
    sys.__excepthook__(exctype, value, tb)
    # Post-mortem debugger on the traceback we just handled.
    pdb.pm()
def sort_like(l, col1, col2):
    '''
    Sorts the list :py:obj:`l` by comparing :py:obj:`col2` to
    :py:obj:`col1`. Specifically, finds the indices :py:obj:`i` such that
    ``col2[i] = col1`` and returns ``l[i]``. This is useful when comparing
    the CDPP values of catalogs generated by different pipelines: the
    target IDs are all the same, but won't necessarily be in the same
    order, so this lets :py:obj:`everest` sort the CDPP arrays so that the
    targets match.

    :param array_like l: The list or array to sort
    :param array_like col1: A list or array (same length as :py:obj:`l`)
    :param array_like col2: A second list or array containing the same \
        elements as :py:obj:`col1` but in a different order
    '''
    # Entries of `col1` with no match in `col2` stay NaN.
    result = np.zeros_like(col1) * np.nan
    for i, target in enumerate(col1):
        j = np.argmax(col2 == target)
        # `argmax` returns 0 both for a match at index 0 and for no match
        # at all, so the j == 0 case needs an explicit equality check.
        if j or col2[0] == target:
            result[i] = l[j]
    return result
def prange(*x):
    '''
    Progress bar range with `tqdm`
    '''
    # Show a progress bar only when something would actually be printed
    # to the screen: either no logging is configured at all, or there is
    # a non-critical StreamHandler attached to the root logger.
    try:
        handlers = logging.getLogger().handlers
        if not handlers:
            from tqdm import tqdm
            return tqdm(range(*x))
        for handler in handlers:
            if (type(handler) is logging.StreamHandler) and \
                    (handler.level != logging.CRITICAL):
                from tqdm import tqdm
                return tqdm(range(*x))
        return range(*x)
    except ImportError:
        # `tqdm` not installed: fall back to a plain range.
        return range(*x)
def front(self, *fields):
    '''Return the front pair of the structure'''
    # A single-element range at index 0; its start timestamp and value
    # form the pair. Falsy (empty/None) ranges yield None.
    rng = self.irange(0, 0, fields=fields)
    if not rng:
        return None
    return rng.start(), rng[0]
def back(self, *fields):
    '''Return the back pair of the structure'''
    # A single-element range at index -1; its end timestamp and value
    # form the pair. Falsy (empty/None) ranges yield None.
    rng = self.irange(-1, -1, fields=fields)
    if not rng:
        return None
    return rng.end(), rng[0]
def parse_backend(backend):
    """Converts the "backend" into the database connection parameters.
    It returns a (scheme, host, params) tuple."""
    bits = urlparse.urlsplit(backend)
    scheme, host = bits.scheme, bits.netloc
    path, query = bits.path, bits.query
    # A bare path with no query string is treated as the query.
    if path and not query:
        query, path = path, ''
    if query:
        # find() is falsy only when '?' is the very first character;
        # in that case strip it, otherwise use the query as-is.
        if query.find('?'):
            path = query
        else:
            query = query[1:]
    params = dict(urlparse.parse_qsl(query)) if query else {}
    return scheme, host, params
def getdb(backend=None, **kwargs):
    '''get a :class:`BackendDataServer`.'''
    # Already-constructed servers pass straight through.
    if isinstance(backend, BackendDataServer):
        return backend
    if not backend:
        backend = settings.DEFAULT_BACKEND
        if not backend:
            return None
    # Split the connection string and merge in keyword overrides.
    scheme, address, params = parse_backend(backend)
    params.update(kwargs)
    # Connection strings carry the timeout as text; coerce it.
    if 'timeout' in params:
        params['timeout'] = int(params['timeout'])
    return _getdb(scheme, address, params)
def basekey(self, meta, *args):
    """Calculate the key to access model data.

    :parameter meta: a :class:`stdnet.odm.Metaclass`.
    :parameter args: optional list of strings to prepend to the basekey.
    :rtype: a native string
    """
    # The base key is the backend namespace followed by the model key.
    base = '{0}{1}'.format(self.namespace, meta.modelkey)
    # Join the non-None arguments; an empty postfix leaves the base as-is.
    postfix = ':'.join(str(arg) for arg in args if arg is not None)
    return '{0}:{1}'.format(base, postfix) if postfix else base
def make_objects(self, meta, data, related_fields=None):
    '''Generator of :class:`stdnet.odm.StdModel` instances with data
    from database.

    :parameter meta: instance of model :class:`stdnet.odm.Metaclass`.
    :parameter data: iterator over instances data.
    :parameter related_fields: optional mapping of field name ->
        pre-fetched related data, used to populate caches/attributes on
        each yielded instance.
    '''
    make_object = meta.make_object
    # Pre-process the related data once, before looping over instances.
    related_data = []
    if related_fields:
        for fname, fdata in iteritems(related_fields):
            field = meta.dfields[fname]
            if field in meta.multifields:
                # Multi-field: fdata is already (id, value) pairs.
                related = dict(fdata)
                multi = True
            else:
                # Scalar relation: recursively build the related model
                # instances and index them by id.
                multi = False
                relmodel = field.relmodel
                related = dict(((obj.id, obj) for obj in
                                self.make_objects(relmodel._meta, fdata)))
            related_data.append((field, related, multi))
    for state in data:
        instance = make_object(state, self)
        for field, rdata, multi in related_data:
            if multi:
                # Multi-field caches are keyed by the string instance id.
                field.set_cache(instance, rdata.get(str(instance.id)))
            else:
                # Resolve the foreign id stored on the instance, if any.
                rid = getattr(instance, field.attname, None)
                if rid is not None:
                    value = rdata.get(rid)
                    setattr(instance, field.name, value)
        yield instance
def structure(self, instance, client=None):
    '''Create a backend :class:`stdnet.odm.Structure` handler.

    :param instance: a :class:`stdnet.odm.Structure`
    :param client: Optional client handler.
    '''
    # Look up the handler factory registered for this structure type.
    factory = self.struct_map.get(instance._meta.name)
    if factory is None:
        raise ModelNotAvailable('"%s" is not available for backend '
                                '"%s"' % (instance._meta.name, self))
    if client is None:
        client = self.client
    return factory(instance, self, client)
def Search(ID, mission='k2'):
    """Why is my target not in the EVEREST database?

    Runs a series of checks (catalog season, object type, raw pixel
    availability, K2SFF data/aperture, saturation) and prints the first
    reason found. Purely diagnostic: prints and returns None.
    """
    # Only K2 supported for now
    assert mission == 'k2', "Only the K2 mission is supported for now."
    print("Searching for target %d..." % ID)

    # First check if it is in the database
    season = missions.k2.Season(ID)
    if season in [91, 92, [91, 92]]:
        print("Campaign 9 is currently not part of the EVEREST catalog.")
        return
    elif season == 101:
        print("The first half of campaign 10 is not currently part of " +
              "the EVEREST catalog.")
        return
    elif season is not None:
        print("Target is in campaign %d of the EVEREST catalog." % season)
        return

    # Get the kplr object
    star = k2plr_client.k2_star(ID)

    # First check if this is a star
    if star.objtype.lower() != "star":
        print("Target is of type %s, not STAR, " % star.objtype +
              "and is therefore not included in the EVEREST catalog.")
        return

    # Let's try to download the pixel data and see what happens
    try:
        tpf = star.get_target_pixel_files()
    except Exception:
        print("Unable to download the raw pixel files for this target.")
        return
    if len(tpf) == 0:
        print("Raw pixel files are not available for this target. "
              "Looks like data may not have been collected for it.")
        return

    # Perhaps it's in a campaign we haven't gotten to yet
    if tpf[0].sci_campaign not in missions.k2.SEASONS:
        print("Targets for campaign %d are not yet available."
              % tpf[0].sci_campaign)
        return

    # Let's try to download the K2SFF data
    try:
        k2sff = k2plr.K2SFF(ID)
    except Exception:
        print("Error downloading the K2SFF light curve for this target. " +
              "Currently, EVEREST uses the K2SFF apertures to perform " +
              "photometry. This is likely to change in the next version.")
        return

    # Let's try to get the aperture
    try:
        assert np.count_nonzero(k2sff.apertures[15]), "Invalid aperture."
    except Exception:
        print("Unable to retrieve the K2SFF aperture for this target. " +
              "Currently, EVEREST uses the K2SFF apertures to perform " +
              "photometry. This is likely to change in the next version.")
        return

    # Perhaps the star is *super* saturated and we didn't bother
    # de-trending it?
    if star.kp < 8:
        # BUGFIX: the format argument was never applied, so the literal
        # string "%.1f" used to be printed instead of the magnitude.
        print("Target has Kp = %.1f and is too saturated "
              "for proper de-trending with EVEREST." % star.kp)
        return

    # I'm out of ideas
    # (also fixed a missing space between the two sentences below)
    print("I'm not sure why this target isn't in the EVEREST catalog. " +
          "You can try de-trending it yourself:")
    print("http://faculty.washington.edu/rodluger/everest/pipeline.html")
    return
def DownloadFile(ID, season=None, mission='k2', cadence='lc',
                 filename=None, clobber=False):
    '''
    Download a given :py:mod:`everest` file from MAST.

    :param int ID: The target ID.
    :param season: The observing season; resolved from the mission \
        module when :py:obj:`None`.
    :param str mission: The mission name. Default `k2`
    :param str cadence: The light curve cadence. Default `lc`
    :param str filename: The name of the file to download. Default \
        :py:obj:`None`, in which case the default FITS file is retrieved.
    :param bool clobber: If :py:obj:`True`, download and overwrite \
        existing files. Default :py:obj:`False`

    :returns: The local path of the downloaded (or cached) file.
    :raises AttributeError: if the target spans multiple seasons and \
        none was specified.
    :raises ValueError: if the target cannot be resolved at all.
    '''
    # Get season
    if season is None:
        season = getattr(missions, mission).Season(ID)
        if hasattr(season, '__len__'):
            # Ambiguous: the target appears in more than one season.
            raise AttributeError(
                "Please choose a `season` for this target: %s." % season)
    if season is None:
        if getattr(missions, mission).ISTARGET(ID):
            raise ValueError('Target not found in local database. ' +
                             'Run `everest.Search(%d)` for more information.'
                             % ID)
        else:
            raise ValueError('Invalid target ID.')
    path = getattr(missions, mission).TargetDirectory(ID, season)
    relpath = getattr(missions, mission).TargetDirectory(
        ID, season, relative=True)
    if filename is None:
        filename = getattr(missions, mission).FITSFile(ID, season, cadence)

    # Check if file exists; a cached copy short-circuits the download.
    if not os.path.exists(path):
        os.makedirs(path)
    elif os.path.exists(os.path.join(path, filename)) and not clobber:
        log.info('Found cached file.')
        return os.path.join(path, filename)

    # Get file URL
    log.info('Downloading the file...')
    fitsurl = getattr(missions, mission).FITSUrl(ID, season)
    if not fitsurl.endswith('/'):
        fitsurl += '/'

    # Download the data
    r = urllib.request.Request(fitsurl + filename)
    try:
        handler = urllib.request.urlopen(r)
        code = handler.getcode()
    except (urllib.error.HTTPError, urllib.error.URLError):
        code = 0
    if int(code) == 200:
        # Read the data
        data = handler.read()
        # Atomically save to disk: write to a temp file, fsync, then move
        # into place so a partial download never masquerades as complete.
        f = NamedTemporaryFile("wb", delete=False)
        f.write(data)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, os.path.join(path, filename))
    else:
        # Something went wrong!
        log.error("Error code {0} for URL '{1}'".format(
            code, fitsurl + filename))
        # If the files can be accessed by `ssh`, let's try that
        # (development version only!)
        if EVEREST_FITS is None:
            raise Exception("Unable to locate the file.")
        # Get the url
        inpath = os.path.join(EVEREST_FITS, relpath, filename)
        outpath = os.path.join(path, filename)
        # Download the data over scp as a fallback.
        log.info("Accessing file via `scp`...")
        subprocess.call(['scp', inpath, outpath])

    # Success?
    if os.path.exists(os.path.join(path, filename)):
        return os.path.join(path, filename)
    else:
        # NOTE(review): message lacks a space between the two sentences.
        raise Exception("Unable to download the file." +
                        "Run `everest.Search(%d)` to troubleshoot." % ID)
def DVS(ID, season=None, mission='k2', clobber=False,
        cadence='lc', model='nPLD'):
    '''
    Show the data validation summary (DVS) for a given target.

    Downloads the DVS PDF (if not cached) and opens it with the
    platform's default viewer.

    :param int ID: The target ID.
    :param season: The observing season; resolved from the mission \
        module when :py:obj:`None`.
    :param str mission: The mission name. Default `k2`
    :param str cadence: The light curve cadence. Default `lc`
    :param bool clobber: If :py:obj:`True`, download and overwrite \
        existing files. Default :py:obj:`False`
    :param str model: The de-trending model whose DVS to show. \
        Default `nPLD`.
    '''
    # Get season
    if season is None:
        season = getattr(missions, mission).Season(ID)
        if hasattr(season, '__len__'):
            raise AttributeError(
                "Please choose a `season` for this target: %s." % season)

    # Get file name; non-default models use a "<model>[.sc].pdf" name.
    if model == 'nPLD':
        filename = getattr(missions, mission).DVSFile(ID, season, cadence)
    else:
        if cadence == 'sc':
            filename = model + '.sc.pdf'
        else:
            filename = model + '.pdf'

    file = DownloadFile(ID, season=season, mission=mission,
                        filename=filename, clobber=clobber)

    # Open with the platform's default PDF viewer.
    try:
        if platform.system().lower().startswith('darwin'):
            subprocess.call(['open', file])
        elif os.name == 'nt':
            os.startfile(file)
        elif os.name == 'posix':
            subprocess.call(['xdg-open', file])
        else:
            # Unknown platform: force the fallback message below.
            raise Exception("")
    except:
        log.info("Unable to open the pdf. Try opening it manually:")
        log.info(file)
def compute(self):
    '''
    Re-compute the :py:mod:`everest` model for the given
    value of :py:obj:`lambda`.
    For long cadence `k2` light curves, this should take several
    seconds. For short cadence `k2` light curves, it may take a
    few minutes.
    Note that this is a simple wrapper around
    :py:func:`everest.Basecamp.compute`.
    '''
    # If we're doing iterative PLD, get the normalization first
    if self.model_name == 'iPLD':
        self._get_norm()

    # Compute as usual (delegates to the base-class implementation)
    super(Everest, self).compute()

    # Make NaN cadences NaNs in the de-trended flux as well
    self.flux[self.nanmask] = np.nan
def _get_norm(self):
    '''
    Computes the PLD flux normalization array.

    ..note :: `iPLD` model **only**.

    Builds a PLD model per light-curve chunk from the previously
    recovered weights (`self.reclam`), stitches the chunks together
    with additive offsets, and stores the raw flux minus that model
    in ``self._norm``.
    '''
    log.info('Computing the PLD normalization...')

    # Loop over all chunks
    mod = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):

        # Unmasked chunk
        c = self.get_chunk(b)

        # Masked chunk (original mask plus user transit mask)
        inds = np.array(
            list(set(np.concatenate([self.transitmask, self.recmask]))),
            dtype=int)
        M = np.delete(np.arange(len(self.time)), inds, axis=0)
        # Pad the chunk boundaries by `bpad` cadences (except the
        # leading edge of the first chunk).
        if b > 0:
            m = M[(M > self.breakpoints[b - 1] - self.bpad) &
                  (M <= self.breakpoints[b] + self.bpad)]
        else:
            m = M[M <= self.breakpoints[b] + self.bpad]

        # This block of the masked covariance matrix
        mK = GetCovariance(self.kernel, self.kernel_params,
                           self.time[m], self.fraw_err[m])

        # Get median
        med = np.nanmedian(self.fraw[m])

        # Normalize the flux
        f = self.fraw[m] - med

        # The X^2 matrices (regularized design-matrix products)
        A = np.zeros((len(m), len(m)))
        B = np.zeros((len(c), len(m)))

        # Loop over all PLD orders, accumulating each order's
        # contribution weighted by its recovered lambda.
        for n in range(self.pld_order):
            XM = self.X(n, m)
            XC = self.X(n, c)
            A += self.reclam[b][n] * np.dot(XM, XM.T)
            B += self.reclam[b][n] * np.dot(XC, XM.T)
            del XM, XC

        # Solve the GP-regularized linear system and predict the model
        # on the full (unmasked) chunk.
        W = np.linalg.solve(mK + A, f)
        mod[b] = np.dot(B, W)
        del A, B, W

    # Join the chunks after applying the correct offset so the model is
    # continuous across breakpoints.
    if len(mod) > 1:
        # First chunk
        model = mod[0][:-self.bpad]
        # Center chunks
        for m in mod[1:-1]:
            offset = model[-1] - m[self.bpad - 1]
            model = np.concatenate(
                [model, m[self.bpad:-self.bpad] + offset])
        # Last chunk
        offset = model[-1] - mod[-1][self.bpad - 1]
        model = np.concatenate([model, mod[-1][self.bpad:] + offset])
    else:
        model = mod[0]

    # Subtract the global median
    model -= np.nanmedian(model)

    # Save the norm
    self._norm = self.fraw - model
def load_fits(self):
    '''
    Load the FITS file from disk and populate the class instance
    with its data.

    Reads the de-trending parameters and arrays from the FITS
    extensions (1: light curve, 2: pixels, 3: aperture, 4: postage
    stamps, 5: hi-res image) and reconstructs the masks from the
    QUALITY bit flags.
    '''
    log.info("Loading FITS file for %d." % (self.ID))
    with pyfits.open(self.fitsfile) as f:

        # Params and long cadence data
        self.loaded = True
        self.is_parent = False
        try:
            self.X1N = f[2].data['X1N']
        except KeyError:
            self.X1N = None
        self.aperture = f[3].data
        self.aperture_name = f[1].header['APNAME']
        try:
            self.bkg = f[1].data['BKG']
        except KeyError:
            # No background column stored; treat as zero background.
            self.bkg = 0.
        self.bpad = f[1].header['BPAD']
        self.cbv_minstars = []
        self.cbv_num = f[1].header.get('CBVNUM', 1)
        self.cbv_niter = f[1].header['CBVNITER']
        self.cbv_win = f[1].header['CBVWIN']
        self.cbv_order = f[1].header['CBVORD']
        self.cadn = f[1].data['CADN']
        self.cdivs = f[1].header['CDIVS']
        self.cdpp = f[1].header['CDPP']
        self.cdppr = f[1].header['CDPPR']
        self.cdppv = f[1].header['CDPPV']
        self.cdppg = f[1].header['CDPPG']
        self.cv_min = f[1].header['CVMIN']
        self.fpix = f[2].data['FPIX']
        self.pixel_images = [f[4].data['STAMP1'],
                             f[4].data['STAMP2'],
                             f[4].data['STAMP3']]
        self.fraw = f[1].data['FRAW']
        self.fraw_err = f[1].data['FRAW_ERR']
        self.giter = f[1].header['GITER']
        self.gmaxf = f[1].header.get('GMAXF', 200)
        self.gp_factor = f[1].header['GPFACTOR']
        try:
            self.hires = f[5].data
        except:
            # Hi-res image extension may be missing entirely.
            self.hires = None
        self.kernel_params = np.array([f[1].header['GPWHITE'],
                                       f[1].header['GPRED'],
                                       f[1].header['GPTAU']])
        try:
            # Quasi-periodic kernels store two extra hyperparameters.
            self.kernel = f[1].header['KERNEL']
            self.kernel_params = np.append(
                self.kernel_params,
                [f[1].header['GPGAMMA'], f[1].header['GPPER']])
        except KeyError:
            self.kernel = 'Basic'
        self.pld_order = f[1].header['PLDORDER']
        self.lam_idx = self.pld_order
        self.leps = f[1].header['LEPS']
        self.mag = f[0].header['KEPMAG']
        self.max_pixels = f[1].header['MAXPIX']
        # The stored FLUX is de-trended, so the model is raw - flux.
        self.model = self.fraw - f[1].data['FLUX']
        # Nearby sources: numbered header keywords until the first gap.
        self.nearby = []
        for i in range(99):
            try:
                ID = f[1].header['NRBY%02dID' % (i + 1)]
                x = f[1].header['NRBY%02dX' % (i + 1)]
                y = f[1].header['NRBY%02dY' % (i + 1)]
                mag = f[1].header['NRBY%02dM' % (i + 1)]
                x0 = f[1].header['NRBY%02dX0' % (i + 1)]
                y0 = f[1].header['NRBY%02dY0' % (i + 1)]
                self.nearby.append(
                    {'ID': ID, 'x': x, 'y': y,
                     'mag': mag, 'x0': x0, 'y0': y0})
            except KeyError:
                break
        self.neighbors = []
        for c in range(99):
            try:
                self.neighbors.append(f[1].header['NEIGH%02d' % (c + 1)])
            except KeyError:
                break
        self.oiter = f[1].header['OITER']
        self.optimize_gp = f[1].header['OPTGP']
        self.osigma = f[1].header['OSIGMA']
        # Masked planets: (t0, period, duration) triples.
        self.planets = []
        for i in range(99):
            try:
                t0 = f[1].header['P%02dT0' % (i + 1)]
                per = f[1].header['P%02dPER' % (i + 1)]
                dur = f[1].header['P%02dDUR' % (i + 1)]
                self.planets.append((t0, per, dur))
            except KeyError:
                break
        self.quality = f[1].data['QUALITY']
        self.saturated = f[1].header['SATUR']
        self.saturation_tolerance = f[1].header['SATTOL']
        self.time = f[1].data['TIME']
        self._norm = np.array(self.fraw)

        # Chunk arrays: one entry per light-curve segment.
        self.breakpoints = []
        self.cdpp_arr = []
        self.cdppv_arr = []
        self.cdppr_arr = []
        for c in range(99):
            try:
                self.breakpoints.append(f[1].header['BRKPT%02d' % (c + 1)])
                self.cdpp_arr.append(f[1].header['CDPP%02d' % (c + 1)])
                self.cdppr_arr.append(f[1].header['CDPPR%02d' % (c + 1)])
                self.cdppv_arr.append(f[1].header['CDPPV%02d' % (c + 1)])
            except KeyError:
                break
        # PLD regularization: one lambda per (chunk, order).
        self.lam = [[f[1].header['LAMB%02d%02d' % (c + 1, o + 1)]
                     for o in range(self.pld_order)]
                    for c in range(len(self.breakpoints))]
        if self.model_name == 'iPLD':
            self.reclam = [[f[1].header['RECL%02d%02d' % (c + 1, o + 1)]
                            for o in range(self.pld_order)]
                           for c in range(len(self.breakpoints))]

        # Masks: decoded from the per-cadence QUALITY bit flags.
        self.badmask = np.where(self.quality & 2 ** (QUALITY_BAD - 1))[0]
        self.nanmask = np.where(self.quality & 2 ** (QUALITY_NAN - 1))[0]
        self.outmask = np.where(self.quality & 2 ** (QUALITY_OUT - 1))[0]
        self.recmask = np.where(self.quality & 2 ** (QUALITY_REC - 1))[0]
        self.transitmask = np.where(
            self.quality & 2 ** (QUALITY_TRN - 1))[0]

        # CBVs: stack the stored co-trending basis vectors as columns.
        self.XCBV = np.empty((len(self.time), 0))
        for i in range(99):
            try:
                self.XCBV = np.hstack(
                    [self.XCBV,
                     f[1].data['CBV%02d' % (i + 1)].reshape(-1, 1)])
            except KeyError:
                break

    # These are not stored in the fits file; we don't need them
    self.saturated_aperture_name = None
    self.apertures = None
    self.Xpos = None
    self.Ypos = None
    self.fpix_err = None
    self.parent_model = None
    self.lambda_arr = None
    self.meta = None
    self._transit_model = None
    self.transit_depth = None
def plot_aperture(self, show=True):
    '''
    Plot sample postage stamps for the target with the aperture
    outline marked, as well as a high-res target image (if available).

    :param bool show: Show the plot or return the `(fig, ax)` instance? \
           Default :py:obj:`True`

    :returns: :py:obj:`None` when `show` is True, otherwise the \
        `(fig, ax)` tuple.
    '''
    # Set up the axes: a 2x2 grid, flattened for the base-class plotter.
    fig, ax = pl.subplots(2, 2, figsize=(6, 8))
    fig.subplots_adjust(top=0.975, bottom=0.025, left=0.05,
                        right=0.95, hspace=0.05, wspace=0.05)
    ax = ax.flatten()
    fig.canvas.set_window_title(
        '%s %d' % (self._mission.IDSTRING, self.ID))
    # The base class does the actual drawing into the provided axes.
    super(Everest, self).plot_aperture(ax, labelsize=12)

    if show:
        pl.show()
        pl.close()
    else:
        return fig, ax
def plot(self, show=True, plot_raw=True, plot_gp=True, plot_bad=True,
         plot_out=True, plot_cbv=True, simple=False):
    '''
    Plots the final de-trended light curve.

    :param bool show: Show the plot or return the `(fig, ax)` instance? \
           Default :py:obj:`True`
    :param bool plot_raw: Show the raw light curve? Default :py:obj:`True`
    :param bool plot_gp: Show the GP model prediction? \
           Default :py:obj:`True`
    :param bool plot_bad: Show and indicate the bad data points? \
           Default :py:obj:`True`
    :param bool plot_out: Show and indicate the outliers? \
           Default :py:obj:`True`
    :param bool plot_cbv: Plot the CBV-corrected light curve? \
           Default :py:obj:`True`. If :py:obj:`False`, plots the \
           de-trended but uncorrected light curve.
    :param bool simple: Unused here; accepted for interface \
        compatibility.
    '''
    log.info('Plotting the light curve...')

    # Set up axes. With plot_raw, two stacked panels: de-trended on the
    # bottom (axes[0]) and raw on top (axes[1]).
    if plot_raw:
        fig, axes = pl.subplots(2, figsize=(13, 9), sharex=True)
        fig.subplots_adjust(hspace=0.1)
        axes = [axes[1], axes[0]]
        if plot_cbv:
            fluxes = [self.fcor, self.fraw]
        else:
            fluxes = [self.flux, self.fraw]
        labels = ['EVEREST Flux', 'Raw Flux']
    else:
        fig, axes = pl.subplots(1, figsize=(13, 6))
        axes = [axes]
        if plot_cbv:
            fluxes = [self.fcor]
        else:
            fluxes = [self.flux]
        labels = ['EVEREST Flux']
    fig.canvas.set_window_title('EVEREST Light curve')

    # Set up some stuff
    time = self.time
    badmask = self.badmask
    nanmask = self.nanmask
    outmask = self.outmask
    transitmask = self.transitmask
    fraw_err = self.fraw_err
    breakpoints = self.breakpoints
    # Smaller markers for the denser short-cadence data.
    if self.cadence == 'sc':
        ms = 2
    else:
        ms = 4

    # Get the cdpps for the de-trended and raw curves.
    cdpps = [[self.get_cdpp(self.flux), self.get_cdpp_arr(self.flux)],
             [self.get_cdpp(self.fraw), self.get_cdpp_arr(self.fraw)]]
    self.cdpp = cdpps[0][0]
    self.cdpp_arr = cdpps[0][1]

    for n, ax, flux, label, c in zip([0, 1], axes, fluxes, labels, cdpps):

        # Initialize CDPP
        cdpp = c[0]
        cdpp_arr = c[1]

        # Plot the good data points
        ax.plot(self.apply_mask(time), self.apply_mask(flux),
                ls='none', marker='.', color='k', markersize=ms, alpha=0.5)

        # Plot the outliers
        bnmask = np.array(
            list(set(np.concatenate([badmask, nanmask]))), dtype=int)
        bmask = [i for i in self.badmask if i not in self.nanmask]

        # Small index helpers for the three outlier categories.
        def O1(x):
            return x[outmask]

        def O2(x):
            return x[bmask]

        def O3(x):
            return x[transitmask]
        if plot_out:
            ax.plot(O1(time), O1(flux), ls='none', color="#777777",
                    marker='.', markersize=ms, alpha=0.5)
        if plot_bad:
            ax.plot(O2(time), O2(flux), 'r.', markersize=ms, alpha=0.25)
        ax.plot(O3(time), O3(flux), 'b.', markersize=ms, alpha=0.25)

        # Plot the GP (de-trended panel only; too slow for short cadence)
        if n == 0 and plot_gp and self.cadence != 'sc':
            gp = GP(self.kernel, self.kernel_params)
            gp.compute(self.apply_mask(time), self.apply_mask(fraw_err))
            med = np.nanmedian(self.apply_mask(flux))
            y, _ = gp.predict(self.apply_mask(flux) - med, time)
            y += med
            ax.plot(self.apply_mask(time), self.apply_mask(y),
                    'r-', lw=0.5, alpha=0.5)

        # Appearance
        if n == 0:
            ax.set_xlabel('Time (%s)' % self._mission.TIMEUNITS,
                          fontsize=18)
        ax.set_ylabel(label, fontsize=18)
        for brkpt in breakpoints[:-1]:
            ax.axvline(time[brkpt], color='r', ls='--', alpha=0.25)
        # Annotate per-chunk CDPPs; placement depends on chunk count.
        if len(cdpp_arr) == 2:
            ax.annotate('%.2f ppm' % cdpp_arr[0], xy=(0.02, 0.975),
                        xycoords='axes fraction',
                        ha='left', va='top', fontsize=12,
                        color='r', zorder=99)
            ax.annotate('%.2f ppm' % cdpp_arr[1], xy=(0.98, 0.975),
                        xycoords='axes fraction',
                        ha='right', va='top', fontsize=12,
                        color='r', zorder=99)
        elif len(cdpp_arr) < 6:
            # NOTE(review): the loop variable `n` below shadows the outer
            # zip variable `n`; harmless here because `n` is not used
            # again before the outer loop reassigns it.
            for n in range(len(cdpp_arr)):
                if n > 0:
                    x = (self.time[self.breakpoints[n - 1]] - self.time[0]
                         ) / (self.time[-1] - self.time[0]) + 0.02
                else:
                    x = 0.02
                ax.annotate('%.2f ppm' % cdpp_arr[n], xy=(x, 0.975),
                            xycoords='axes fraction',
                            ha='left', va='top', fontsize=10,
                            zorder=99, color='r')
        else:
            ax.annotate('%.2f ppm' % cdpp, xy=(0.02, 0.975),
                        xycoords='axes fraction',
                        ha='left', va='top', fontsize=12,
                        color='r', zorder=99)
        ax.margins(0.01, 0.1)

        # Get y lims that bound 99% of the flux
        f = np.concatenate([np.delete(f, bnmask) for f in fluxes])
        N = int(0.995 * len(f))
        hi, lo = f[np.argsort(f)][[N, -N]]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)
        ax.get_yaxis().set_major_formatter(Formatter.Flux)

        # Indicate off-axis outliers with small colored arrows.
        for i in np.where(flux < ylim[0])[0]:
            if i in bmask:
                color = "#ffcccc"
                if not plot_bad:
                    continue
            elif i in outmask:
                color = "#cccccc"
                if not plot_out:
                    continue
            elif i in nanmask:
                continue
            else:
                color = "#ccccff"
            ax.annotate('', xy=(time[i], ylim[0]), xycoords='data',
                        xytext=(0, 15), textcoords='offset points',
                        arrowprops=dict(arrowstyle="-|>", color=color))
        for i in np.where(flux > ylim[1])[0]:
            if i in bmask:
                color = "#ffcccc"
                if not plot_bad:
                    continue
            elif i in outmask:
                color = "#cccccc"
                if not plot_out:
                    continue
            elif i in nanmask:
                continue
            else:
                color = "#ccccff"
            ax.annotate('', xy=(time[i], ylim[1]), xycoords='data',
                        xytext=(0, -15), textcoords='offset points',
                        arrowprops=dict(arrowstyle="-|>", color=color))

    # Show total CDPP improvement
    pl.figtext(0.5, 0.94, '%s %d' % (self._mission.IDSTRING, self.ID),
               fontsize=18, ha='center', va='bottom')
    pl.figtext(0.5, 0.905,
               r'$%.2f\ \mathrm{ppm} \rightarrow %.2f\ \mathrm{ppm}$'
               % (self.cdppr, self.cdpp),
               fontsize=14, ha='center', va='bottom')

    if show:
        pl.show()
        pl.close()
    else:
        if plot_raw:
            return fig, axes
        else:
            return fig, axes[0]
def dvs(self):
    '''
    Shows the data validation summary (DVS) for the target.

    Thin wrapper around the module-level :py:func:`DVS`, forwarding
    this instance's ID, season, mission, model name and clobber flag.
    '''
    DVS(self.ID, season=self.season, mission=self.mission,
        model=self.model_name, clobber=self.clobber)
def plot_pipeline(self, pipeline, *args, **kwargs):
    '''
    Plots the light curve for the target de-trended with a given
    pipeline.

    :param str pipeline: The name of the pipeline (lowercase). Options \
           are 'everest2', 'everest1', and other mission-specific \
           pipelines. For `K2`, the available pipelines are 'k2sff' \
           and 'k2sc'.

    Additional :py:obj:`args` and :py:obj:`kwargs` are passed directly
    to the :py:func:`pipelines.plot` function of the mission.
    '''
    # Non-everest2 pipelines are handled by the mission module.
    if pipeline != 'everest2':
        return getattr(missions, self.mission).pipelines.plot(
            self.ID, pipeline, *args, **kwargs)
    else:
        # We're going to plot the everest 2 light curve like we plot
        # the other pipelines for easy comparison
        plot_raw = kwargs.get('plot_raw', False)
        plot_cbv = kwargs.get('plot_cbv', True)
        show = kwargs.get('show', True)
        if plot_raw:
            y = self.fraw
            ylabel = 'Raw Flux'
        elif plot_cbv:
            y = self.fcor
            ylabel = "EVEREST2 Flux"
        else:
            y = self.flux
            ylabel = "EVEREST2 Flux"

        # Remove nans
        bnmask = np.concatenate([self.nanmask, self.badmask])
        time = np.delete(self.time, bnmask)
        flux = np.delete(y, bnmask)

        # Plot it
        fig, ax = pl.subplots(1, figsize=(10, 4))
        fig.subplots_adjust(bottom=0.15)
        ax.plot(time, flux, "k.", markersize=3, alpha=0.5)

        # Axis limits bounding 99% of the flux
        N = int(0.995 * len(flux))
        hi, lo = flux[np.argsort(flux)][[N, -N]]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(ylim)

        # Plot bad data points
        ax.plot(self.time[self.badmask], y[self.badmask],
                "r.", markersize=3, alpha=0.2)

        # Show the CDPP
        ax.annotate('%.2f ppm' % self._mission.CDPP(flux),
                    xy=(0.98, 0.975), xycoords='axes fraction',
                    ha='right', va='top', fontsize=12,
                    color='r', zorder=99)

        # Appearance
        ax.margins(0, None)
        ax.set_xlabel("Time (%s)" % self._mission.TIMEUNITS, fontsize=16)
        ax.set_ylabel(ylabel, fontsize=16)
        fig.canvas.set_window_title("EVEREST2: EPIC %d" % (self.ID))

        if show:
            pl.show()
            pl.close()
        else:
            return fig, ax
def get_pipeline(self, *args, **kwargs):
    '''
    Returns the `time` and `flux` arrays for the target obtained by a
    given pipeline.

    Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to
    the :py:func:`pipelines.get` function of the mission.
    '''
    mission_module = getattr(missions, self.mission)
    return mission_module.pipelines.get(self.ID, *args, **kwargs)
def mask_planet(self, t0, period, dur=0.2):
    '''
    Mask all of the transits/eclipses of a given planet/EB. After calling
    this method, you must re-compute the model by calling
    :py:meth:`compute` in order for the mask to take effect.

    :param float t0: The time of first transit (same units as light curve)
    :param float period: The period of the planet in days
    :param float dur: The transit duration in days. Default 0.2
    '''
    # Advance t0 to the first transit at/after the start of the light curve
    t0 += np.ceil((self.time[0] - dur - t0) / period) * period
    # Collect the cadence indices within half a duration of each center
    indices = []
    for tc in np.arange(t0, self.time[-1] + dur, period):
        indices.extend(np.where(np.abs(self.time - tc) < dur / 2.)[0])
    # Merge with the existing transit mask, dropping duplicates
    self.transitmask = np.array(
        list(set(np.concatenate([self.transitmask, indices]))))
def _plot_weights(self, show=True):
    '''
    Plots the raw and scaled PLD weights as images in the aperture frame.

    .. warning:: Untested!
    '''
    # Set up the axes: one row pair (raw / scaled) per chunk, one column
    # per weight group, plus a colorbar column on the right
    fig = pl.figure(figsize=(12, 12))
    fig.subplots_adjust(top=0.95, bottom=0.025, left=0.1, right=0.92)
    fig.canvas.set_window_title(
        '%s %d' % (self._mission.IDSTRING, self.ID))
    ax = [pl.subplot2grid((80, 130), (20 * j, 25 * i),
                          colspan=23, rowspan=18)
          for j in range(len(self.breakpoints) * 2)
          for i in range(1 + 2 * (self.pld_order - 1))]
    cax = [pl.subplot2grid((80, 130), (20 * j,
                                       25 * (1 + 2 * (self.pld_order - 1))),
                           colspan=4, rowspan=18)
           for j in range(len(self.breakpoints) * 2)]
    ax = np.array(ax).reshape(2 * len(self.breakpoints), -1)
    cax = np.array(cax)
    # Check number of segments (grid layout only supports up to 3)
    if len(self.breakpoints) > 3:
        log.error('Cannot currently plot weights for light ' +
                  'curves with more than 3 segments.')
        return
    # Loop over all PLD orders and over all chunks
    npix = len(self.fpix[1])
    ap = self.aperture.flatten()
    # Columns: 1 for first order, then (i==j) and (i!=j) per higher order
    ncol = 1 + 2 * (len(self.weights[0]) - 1)
    raw_weights = np.zeros(
        (len(self.breakpoints), ncol,
         self.aperture.shape[0], self.aperture.shape[1]), dtype=float)
    scaled_weights = np.zeros(
        (len(self.breakpoints), ncol,
         self.aperture.shape[0], self.aperture.shape[1]), dtype=float)
    # Loop over orders
    for o in range(len(self.weights[0])):
        if o == 0:
            oi = 0
        else:
            oi = 1 + 2 * (o - 1)
        # Loop over chunks
        for b in range(len(self.weights)):
            c = self.get_chunk(b)
            # Accumulators: diagonal (i==j) and cross (i!=j) terms,
            # raw weights (rw) and weights scaled by the median regressor (sw)
            rw_ii = np.zeros(npix)
            rw_ij = np.zeros(npix)
            sw_ii = np.zeros(npix)
            sw_ij = np.zeros(npix)
            X = np.nanmedian(self.X(o, c), axis=0)
            # Compute all sets of pixels at this PLD order, then
            # loop over them and assign the weights to the correct pixels
            sets = np.array(list(multichoose(np.arange(npix).T, o + 1)))
            for i, s in enumerate(sets):
                if (o == 0) or (s[0] == s[1]):
                    # Not the cross-terms
                    j = s[0]
                    rw_ii[j] += self.weights[b][o][i]
                    sw_ii[j] += X[i] * self.weights[b][o][i]
                else:
                    # Cross-terms: split the weight across both pixels
                    for j in s:
                        rw_ij[j] += self.weights[b][o][i]
                        sw_ij[j] += X[i] * self.weights[b][o][i]
            # Make the array 2D and plot it: scatter the per-pixel
            # sums back into aperture positions (bit 0 marks in-aperture)
            rw = np.zeros_like(ap, dtype=float)
            sw = np.zeros_like(ap, dtype=float)
            n = 0
            for i, a in enumerate(ap):
                if (a & 1):
                    rw[i] = rw_ii[n]
                    sw[i] = sw_ii[n]
                    n += 1
            raw_weights[b][oi] = rw.reshape(*self.aperture.shape)
            scaled_weights[b][oi] = sw.reshape(*self.aperture.shape)
            if o > 0:
                # Make the array 2D and plot it (cross-term column)
                rw = np.zeros_like(ap, dtype=float)
                sw = np.zeros_like(ap, dtype=float)
                n = 0
                for i, a in enumerate(ap):
                    if (a & 1):
                        rw[i] = rw_ij[n]
                        sw[i] = sw_ij[n]
                        n += 1
                raw_weights[b][oi + 1] = rw.reshape(*self.aperture.shape)
                scaled_weights[b][oi + 1] = sw.reshape(*self.aperture.shape)
    # Plot the images
    log.info('Plotting the PLD weights...')
    rdbu = pl.get_cmap('RdBu_r')
    rdbu.set_bad('k')
    for b in range(len(self.weights)):
        # Symmetric color limits per chunk so zero is always mid-scale
        rmax = max([-raw_weights[b][o].min() for o in range(ncol)] +
                   [raw_weights[b][o].max() for o in range(ncol)])
        smax = max([-scaled_weights[b][o].min() for o in range(ncol)] +
                   [scaled_weights[b][o].max() for o in range(ncol)])
        for o in range(ncol):
            imr = ax[2 * b, o].imshow(raw_weights[b][o], aspect='auto',
                                      interpolation='nearest', cmap=rdbu,
                                      origin='lower',
                                      vmin=-rmax, vmax=rmax)
            ims = ax[2 * b + 1, o].imshow(scaled_weights[b][o],
                                          aspect='auto',
                                          interpolation='nearest',
                                          cmap=rdbu, origin='lower',
                                          vmin=-smax, vmax=smax)

        # Colorbars: format ticks as signed powers of ten
        def fmt(x, pos):
            a, b = '{:.0e}'.format(x).split('e')
            b = int(b)
            if float(a) > 0:
                a = r'+' + a
            elif float(a) == 0:
                return ''
            return r'${} \times 10^{{{}}}$'.format(a, b)
        cbr = pl.colorbar(imr, cax=cax[2 * b], format=FuncFormatter(fmt))
        cbr.ax.tick_params(labelsize=8)
        cbs = pl.colorbar(
            ims, cax=cax[2 * b + 1], format=FuncFormatter(fmt))
        cbs.ax.tick_params(labelsize=8)

    # Plot aperture contours: pad with zeros so the contour closes
    # at the image edges
    def PadWithZeros(vector, pad_width, iaxis, kwargs):
        vector[:pad_width[0]] = 0
        vector[-pad_width[1]:] = 0
        return vector
    ny, nx = self.aperture.shape
    contour = np.zeros((ny, nx))
    contour[np.where(self.aperture)] = 1
    contour = np.lib.pad(contour, 1, PadWithZeros)
    # Upsample so the contour hugs pixel boundaries
    highres = zoom(contour, 100, order=0, mode='nearest')
    extent = np.array([-1, nx, -1, ny])
    for axis in ax.flatten():
        axis.contour(highres, levels=[0.5], extent=extent,
                     origin='lower', colors='r', linewidths=1)
        # Check for saturated columns and hatch them
        for x in range(self.aperture.shape[0]):
            for y in range(self.aperture.shape[1]):
                if self.aperture[x][y] == AP_SATURATED_PIXEL:
                    axis.fill([y - 0.5, y + 0.5, y + 0.5, y - 0.5],
                              [x - 0.5, x - 0.5, x + 0.5, x + 0.5],
                              fill=False, hatch='xxxxx', color='r', lw=0)
        axis.set_xlim(-0.5, nx - 0.5)
        axis.set_ylim(-0.5, ny - 0.5)
        axis.set_xticks([])
        axis.set_yticks([])
    # Labels
    titles = [r'$1^{\mathrm{st}}$',
              r'$2^{\mathrm{nd}}\ (i = j)$',
              r'$2^{\mathrm{nd}}\ (i \neq j)$',
              r'$3^{\mathrm{rd}}\ (i = j)$',
              r'$3^{\mathrm{rd}}\ (i \neq j)$'] + \
             ['' for i in range(10)]
    for i, axis in enumerate(ax[0]):
        axis.set_title(titles[i], fontsize=12)
    for j in range(len(self.weights)):
        ax[2 * j, 0].text(-0.55, -0.15, r'$%d$' % (j + 1), fontsize=16,
                          transform=ax[2 * j, 0].transAxes)
        ax[2 * j, 0].set_ylabel(r'$w_{ij}$', fontsize=18)
        ax[2 * j + 1, 0].set_ylabel(r'$\bar{X}_{ij} \cdot w_{ij}$',
                                    fontsize=18)
    if show:
        pl.show()
        pl.close()
    else:
        return fig, ax, cax
def _save_npz(self):
    '''
    Saves all of the de-trending information to disk in an `npz` file.
    '''
    # Work on a copy of the instance state; drop attributes that are
    # large, derived, or not meant to be persisted
    state = dict(self.__dict__)
    for key in ('_weights', '_A', '_B', '_f', '_mK', 'K', 'dvs',
                'clobber', 'clobber_tpf', '_mission', 'debug'):
        state.pop(key, None)
    np.savez(os.path.join(self.dir, self.name + '.npz'), **state)
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
    '''
    Runs :py:obj:`pPLD` on the target in an attempt to further optimize
    the values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
    '''
    # Persist current state, run the optimizer, then reload from disk
    self._save_npz()
    opts = dict(piter=piter, pmaxf=pmaxf, ppert=ppert,
                debug=True, clobber=True)
    model = pPLD(self.ID, **opts)
    model.publish()
    self.reset()
def plot_folded(self, t0, period, dur=0.2):
    '''
    Plot the light curve folded on a given `period` and centered at
    `t0`. When plotting folded transits, please mask them using
    :py:meth:`mask_planet` and re-compute the model using
    :py:meth:`compute`.

    :param float t0: The time at which to center the plot \
           (same units as light curve)
    :param float period: The period of the folding operation
    :param float dur: The transit duration in days. Default 0.2
    '''
    # Mask the planet so the GP is not fit to the transits
    self.mask_planet(t0, period, dur)
    # Whiten: subtract the GP prediction of the stellar variability
    gp = GP(self.kernel, self.kernel_params, white=False)
    gp.compute(self.apply_mask(self.time),
               self.apply_mask(self.fraw_err))
    med = np.nanmedian(self.apply_mask(self.flux))
    y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
    fwhite = (self.flux - y)
    fwhite /= np.nanmedian(fwhite)
    # Fold on the given period, centered at t0
    tfold = (self.time - t0 - period / 2.) % period - period / 2.
    # Crop to within two durations of the transit center
    inds = np.where(np.abs(tfold) < 2 * dur)[0]
    x = tfold[inds]
    y = fwhite[inds]
    # Plot
    fig, ax = pl.subplots(1, figsize=(9, 5))
    fig.subplots_adjust(bottom=0.125)
    ax.plot(x, y, 'k.', alpha=0.5)
    # Get ylims from the 4th-smallest/largest finite values, padded 10%
    yfin = np.delete(y, np.where(np.isnan(y)))
    lo, hi = yfin[np.argsort(yfin)][[3, -3]]
    pad = (hi - lo) * 0.1
    ylim = (lo - pad, hi + pad)
    ax.set_ylim(*ylim)
    # Appearance
    ax.set_xlabel(r'Time (days)', fontsize=18)
    ax.set_ylabel(r'Normalized Flux', fontsize=18)
    fig.canvas.set_window_title(
        '%s %d' % (self._mission.IDSTRING, self.ID))
    pl.show()
def plot_transit_model(self, show=True, fold=None, ax=None):
    '''
    Plot the light curve de-trended with a joint instrumental + transit
    model with the best fit transit model overlaid. The transit model
    should be specified using the :py:obj:`transit_model` attribute
    and should be an instance or list of instances of
    :py:class:`everest.transit.TransitModel`.

    :param bool show: Show the plot, or return the `fig, ax` instances? \
           Default `True`
    :param str fold: The name of the planet/transit model on which to \
           fold. If only one model is present, can be set to \
           :py:obj:`True`. Default :py:obj:`False` \
           (does not fold the data).
    :param ax: A `matplotlib` axis instance to use for plotting. \
           Default :py:obj:`None`
    '''
    if self.transit_model is None:
        raise ValueError("No transit model provided!")
    if self.transit_depth is None:
        self.compute()

    if fold is not None:
        if (fold is True and len(self.transit_model) > 1) or \
                (type(fold) is not str):
            raise Exception(
                "Kwarg `fold` should be the name of the transit " +
                "model on which to fold the data.")
        if fold is True:
            # We are folding on the first index of `self.transit_model`
            fold = 0
        elif type(fold) is str:
            # Figure out the index of the transit model on which to fold
            fold = np.argmax(
                [fold == tm.name for tm in self.transit_model])
        log.info('Plotting the transit model folded ' +
                 'on transit model index %d...' % fold)
    else:
        log.info('Plotting the transit model...')

    # Set up axes
    if ax is None:
        if fold is not None:
            fig, ax = pl.subplots(1, figsize=(8, 5))
        else:
            fig, ax = pl.subplots(1, figsize=(13, 6))
        fig.canvas.set_window_title('EVEREST Light curve')
    else:
        fig = pl.gcf()

    # Marker size: smaller for short cadence (denser data)
    if self.cadence == 'sc':
        ms = 2
    else:
        ms = 4

    # Fold?
    if fold is not None:
        times = self.transit_model[fold].params.get('times', None)
        if times is not None:
            # Explicit per-transit times given: fold on the nearest one
            time = self.time - \
                [times[np.argmin(np.abs(ti - times))] for ti in self.time]
            t0 = times[0]
        else:
            t0 = self.transit_model[fold].params.get('t0', 0.)
            period = self.transit_model[fold].params.get('per', 10.)
            time = (self.time - t0 - period / 2.) % period - period / 2.
        # Estimate the duration as the fraction of a 1-day window where
        # the model dips below zero (100 samples => 0.01 day resolution)
        dur = 0.01 * \
            len(np.where(self.transit_model[fold](
                np.linspace(t0 - 0.5, t0 + 0.5, 100)) < 0)[0])
    else:
        time = self.time
        ax.plot(self.apply_mask(time), self.apply_mask(self.flux),
                ls='none', marker='.', color='k', markersize=ms,
                alpha=0.5)
        ax.plot(time[self.outmask], self.flux[self.outmask],
                ls='none', marker='.', color='k', markersize=ms,
                alpha=0.5)
        ax.plot(time[self.transitmask], self.flux[self.transitmask],
                ls='none', marker='.', color='k', markersize=ms,
                alpha=0.5)

    # Plot the transit + GP model
    med = np.nanmedian(self.apply_mask(self.flux))
    transit_model = \
        med * np.sum([depth * tm(self.time)
                      for tm, depth in zip(self.transit_model,
                                           self.transit_depth)], axis=0)
    gp = GP(self.kernel, self.kernel_params, white=False)
    gp.compute(self.apply_mask(self.time),
               self.apply_mask(self.fraw_err))
    y, _ = gp.predict(self.apply_mask(
        self.flux - transit_model) - med, self.time)
    if fold is not None:
        # Whitened, normalized flux for the folded view
        flux = (self.flux - y) / med
        ax.plot(self.apply_mask(time), self.apply_mask(flux), ls='none',
                marker='.', color='k', markersize=ms, alpha=0.5)
        ax.plot(time[self.outmask], flux[self.outmask], ls='none',
                marker='.', color='k', markersize=ms, alpha=0.5)
        ax.plot(time[self.transitmask], flux[self.transitmask],
                ls='none', marker='.', color='k', markersize=ms,
                alpha=0.5)
        # High-resolution transit model for the overlay
        hires_time = np.linspace(-5 * dur, 5 * dur, 1000)
        hires_transit_model = 1 + \
            self.transit_depth[fold] * \
            self.transit_model[fold](hires_time + t0)
        ax.plot(hires_time, hires_transit_model, 'r-', lw=1, alpha=1)
    else:
        flux = self.flux
        y += med
        y += transit_model
        ax.plot(time, y, 'r-', lw=1, alpha=1)

    # Plot the bad data points
    bnmask = np.array(
        list(set(np.concatenate([self.badmask, self.nanmask]))),
        dtype=int)
    bmask = [i for i in self.badmask if i not in self.nanmask]
    ax.plot(time[bmask], flux[bmask], 'r.', markersize=ms, alpha=0.25)

    # Appearance
    ax.set_ylabel('EVEREST Flux', fontsize=18)
    ax.margins(0.01, 0.1)
    if fold is not None:
        ax.set_xlabel('Time From Transit Center (days)', fontsize=18)
        ax.set_xlim(-3 * dur, 3 * dur)
    else:
        ax.set_xlabel('Time (%s)' % self._mission.TIMEUNITS, fontsize=18)
        for brkpt in self.breakpoints[:-1]:
            ax.axvline(time[brkpt], color='r', ls='--', alpha=0.25)
    ax.get_yaxis().set_major_formatter(Formatter.Flux)

    # Get y lims that bound most of the flux
    if fold is not None:
        lo = np.min(hires_transit_model)
        pad = 1.5 * (1 - lo)
        ylim = (lo - pad, 1 + pad)
    else:
        f = np.delete(flux, bnmask)
        N = int(0.995 * len(f))
        hi, lo = f[np.argsort(f)][[N, -N]]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
    ax.set_ylim(ylim)

    # Indicate off-axis outliers with arrows (red-ish = bad cadence)
    for i in np.where(flux < ylim[0])[0]:
        if i in bmask:
            color = "#ffcccc"
        else:
            color = "#ccccff"
        ax.annotate('', xy=(time[i], ylim[0]), xycoords='data',
                    xytext=(0, 15), textcoords='offset points',
                    arrowprops=dict(arrowstyle="-|>", color=color,
                                    alpha=0.5))
    for i in np.where(flux > ylim[1])[0]:
        if i in bmask:
            color = "#ffcccc"
        else:
            color = "#ccccff"
        ax.annotate('', xy=(time[i], ylim[1]), xycoords='data',
                    xytext=(0, -15), textcoords='offset points',
                    arrowprops=dict(arrowstyle="-|>", color=color,
                                    alpha=0.5))

    if show:
        pl.show()
        pl.close()
    else:
        return fig, ax
def Interpolate(time, mask, y):
    '''
    Masks certain elements in the array `y` and linearly interpolates
    over them, returning an array `y'` of the same length.

    :param array_like time: The time array
    :param array_like mask: The indices to be interpolated over
    :param array_like y: The dependent array
    '''
    # Copy so the caller's `y` is never modified in place
    result = np.array(y)
    good_t = np.delete(time, mask)
    good_y = np.delete(y, mask, axis=0)
    if result.ndim == 1:
        result[mask] = np.interp(time[mask], good_t, good_y)
    elif result.ndim == 2:
        # Interpolate each column independently
        for col in range(result.shape[1]):
            result[mask, col] = np.interp(time[mask], good_t,
                                          good_y[:, col])
    else:
        raise Exception("Array ``y`` must be either 1- or 2-d.")
    return result
Returns a generator of consecutive `n`-sized chunks of list `l`. If `all` is `True`, returns **all** `n`-sized chunks in `l` by iterating over the starting point. def Chunks(l, n, all=False): ''' Returns a generator of consecutive `n`-sized chunks of list `l`. If `all` is `True`, returns **all** `n`-sized chunks in `l` by iterating over the starting point. ''' if all: jarr = range(0, n - 1) else: jarr = [0] for j in jarr: for i in range(j, len(l), n): if i + 2 * n <= len(l): yield l[i:i + n] else: if not all: yield l[i:] break
def Smooth(x, window_len=100, window='hanning'):
    '''
    Smooth data by convolving on a given timescale.

    :param ndarray x: The data array
    :param int window_len: The size of the smoothing window. Default `100`
    :param str window: The window type (a numpy window function name, \
        e.g. `hanning`, `hamming`, `blackman`, or `flat`). \
        Default `hanning`
    '''
    if window_len == 0:
        return np.zeros_like(x)
    # Reflect the signal about its endpoints to mitigate edge effects
    s = np.r_[2 * x[0] - x[window_len - 1::-1], x,
              2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':
        w = np.ones(window_len, 'd')
    else:
        # Look up the numpy window function by name. This replaces the
        # previous `eval('np.' + window + '(window_len)')`, which would
        # execute arbitrary code if `window` came from untrusted input.
        w = getattr(np, window)(window_len)
    y = np.convolve(w / w.sum(), s, mode='same')
    # Trim the reflected padding
    return y[window_len:-window_len + 1]
def Scatter(y, win=13, remove_outliers=False):
    '''
    Return the scatter in ppm based on the median running standard
    deviation for a window size of :py:obj:`win` = 13 cadences (for K2,
    this is ~6.5 hours, as in VJ14).

    :param ndarray y: The array whose CDPP is to be computed
    :param int win: The window size in cadences. Default `13`
    :param bool remove_outliers: Clip outliers at 5 sigma before \
        computing the CDPP? Default `False`
    '''
    if remove_outliers:
        # Remove 5-sigma outliers from data smoothed on a 1 day timescale
        if len(y) >= 50:
            resid = y - Smooth(y, 50)
        else:
            resid = y
        med = np.nanmedian(resid)
        # 1.4826 converts MAD to an equivalent Gaussian sigma
        mad = 1.4826 * np.nanmedian(np.abs(resid - med))
        bad = [i for i, _ in enumerate(y)
               if (resid[i] > med + 5 * mad) or (resid[i] < med - 5 * mad)]
        y = np.delete(y, np.array(bad, dtype=int))
    if len(y):
        return 1.e6 * np.nanmedian([np.std(chunk) / np.sqrt(win)
                                    for chunk in Chunks(y, win, all=True)])
    return np.nan
def SavGol(y, win=49):
    '''
    Subtracts a second order Savitsky-Golay filter with window size
    `win` and returns the result. This acts as a high pass filter.
    Arrays shorter than `win` are returned unchanged.
    '''
    if len(y) < win:
        return y
    trend = savgol_filter(y, win, 2)
    # Remove the trend but restore the median flux level
    return y - trend + np.nanmedian(y)
def NumRegressors(npix, pld_order, cross_terms=True):
    '''
    Return the number of regressors for `npix` pixels
    and PLD order `pld_order`.

    :param bool cross_terms: Include pixel cross-terms? \
        Default :py:obj:`True`
    '''
    total = 0
    for order in range(1, pld_order + 1):
        # With cross-terms, order k contributes C(npix + k - 1, k)
        # multisets of pixels; without, just the npix diagonal terms
        total += comb(npix + order - 1, order) if cross_terms else npix
    return int(total)
def Downbin(x, newsize, axis=0, operation='mean'):
    '''
    Downbins an array to a smaller size.

    :param array_like x: The array to down-bin
    :param int newsize: The new size of the axis along which to down-bin
    :param int axis: The axis to operate on. Default 0
    :param str operation: The operation to perform when down-binning. \
        Default `mean`
    '''
    assert newsize < x.shape[axis], \
        "The new size of the array must be smaller than the current size."

    oldsize = x.shape[axis]
    newshape = list(x.shape)
    newshape[axis] = newsize
    newshape.insert(axis + 1, oldsize // newsize)

    # Drop the trailing remainder along `axis` so the reshape is exact.
    # BUGFIX: the trim was previously applied unconditionally along
    # axis 0 (`x[:-trim]`), which broke down-binning along any other axis.
    trim = oldsize % newsize
    if trim:
        slicer = [slice(None)] * x.ndim
        slicer[axis] = slice(None, -trim)
        xtrim = x[tuple(slicer)]
    else:
        xtrim = x

    reshaped = xtrim.reshape(newshape)
    if operation == 'mean':
        xbin = np.nanmean(reshaped, axis=axis + 1)
    elif operation == 'sum':
        xbin = np.nansum(reshaped, axis=axis + 1)
    elif operation == 'quadsum':
        xbin = np.sqrt(np.nansum(reshaped ** 2, axis=axis + 1))
    elif operation == 'median':
        xbin = np.nanmedian(reshaped, axis=axis + 1)
    else:
        raise ValueError("`operation` must be either `mean`, " +
                         "`sum`, `quadsum`, or `median`.")
    return xbin
def register_with_model(self, name, model):
    '''Called during the creation of a the :class:`StdModel`
    class when :class:`Metaclass` is initialised. It fills
    :attr:`Field.name` and :attr:`Field.model`. This is an internal
    function users should never call.'''
    # A field instance can only ever belong to one model
    if self.name:
        raise FieldError('Field %s is already registered\
 with a model' % self)
    self.name = name
    self.attname = self.get_attname()
    self.model = model
    meta = model._meta
    self.meta = meta
    # Register in the model metadata: by name and in declaration order
    meta.dfields[name] = self
    meta.fields.append(self)
    if not self.primary_key:
        self.add_to_fields()
    else:
        # The primary key is tracked separately on the metadata
        model._meta.pk = self
def add_to_fields(self):
    '''Add this :class:`Field` to the fields of :attr:`model`.'''
    model_meta = self.model._meta
    model_meta.scalarfields.append(self)
    # Indexed fields are additionally tracked for index creation
    if self.index:
        model_meta.indices.append(self)
def get_lookup(self, remaining, errorClass=ValueError):
    '''called by the :class:`Query` method when it needs to build lookup
    on fields with additional nested fields. This is the case of
    :class:`ForeignKey` and :class:`JSONField`.

    :param remaining: the :ref:`double underscored` fields if this
        :class:`Field`
    :param errorClass: Optional exception class to use if the
        *remaining* field is not valid.'''
    if not remaining:
        return (self.attname, None)
    # Plain fields have no nested attributes to descend into
    raise errorClass('Cannot use nested lookup on field %s' % self)
def get_value(self, instance, *bits):
    '''Retrieve the value :class:`Field` from a :class:`StdModel`
    ``instance``.

    :param instance: The :class:`StdModel` ``instance`` invoking this
        function.
    :param bits: Additional information for nested fields which derives
        from the :ref:`double underscore <tutorial-underscore>` notation.
    :return: the value of this :class:`Field` in the ``instance``.

    Can raise :class:`AttributeError`. Used by
    :meth:`StdModel.get_attr_value` when retrieving values from a
    :class:`StdModel` instance.
    '''
    # Plain fields do not support nested attribute access
    if bits:
        raise AttributeError
    return getattr(instance, self.attname)
def set_value(self, instance, value):
    '''Set the ``value`` for this :class:`Field` in a ``instance``
    of a :class:`StdModel`.'''
    # Convert through the field's to_python hook before assignment
    converted = self.to_python(value)
    setattr(instance, self.attname, converted)
def lookup(var_name, contexts=(), start=0):
    """Look up the value of ``var_name`` on the stack of contexts.

    :param var_name: the key to search for
    :param contexts: sequence of context objects, innermost last
    :param start: a negative value truncates the stack from the top
        before searching; any non-negative value (the default) searches
        the whole stack
    :returns: the first value found searching from the innermost
        context outwards, or None if not found
    """
    start = len(contexts) if start >= 0 else start
    for context in reversed(contexts[:start]):
        try:
            if var_name in context:
                return context[var_name]
        except TypeError:
            # A non-mapping value (e.g. a scalar pushed by a section)
            # may sit on the stack; skip it.
            continue
    return None
def delimiters_to_re(delimiters):
    """convert delimiters to corresponding regular expressions"""
    # Compiled patterns are cached per delimiter pair
    key = tuple(delimiters)
    if key in re_delimiters:
        return re_delimiters[key]
    open_tag, close_tag = key
    # Backslash-escape every non-alphanumeric character in the delimiters
    open_tag = ''.join(c if c.isalnum() else '\\' + c for c in open_tag)
    close_tag = ''.join(c if c.isalnum() else '\\' + c for c in close_tag)
    # Groups: (1) tag-type prefix, (2) tag name, (3) optional suffix
    re_tag = re.compile(
        open_tag + r'([#^>&{/!=]?)\s*(.*?)\s*([}=]?)' + close_tag,
        re.DOTALL)
    re_delimiters[key] = re_tag
    return re_tag
def is_standalone(text, start, end):
    """check if the string text[start:end] is standalone by checking
    forwards and backwards for blankspaces

    :text: TODO
    :(start, end): TODO
    :returns: (line_start, next_index) when standalone, else None
    """
    # Scan left over spaces/tabs; standalone on the left if we reach
    # the start of the text or a newline
    i = start - 1
    while i >= 0 and text[i] in spaces_not_newline:
        i -= 1
    left_ok = i < 0 or text[i] == '\n'
    # On the right, whitespace up to (and including) the newline
    right = re_space.match(text, end)
    if left_ok and right:
        return (i + 1, right.end())
    return None
def compiled(template, delimiters=DEFAULT_DELIMITERS):
    """Compile a template into token tree

    :template: the template string to compile
    :delimiters: the initial (open, close) tag delimiter pair
    :returns: the root token
    """
    re_tag = delimiters_to_re(delimiters)

    # variable to save states
    tokens = []          # tokens collected at the current nesting level
    index = 0            # scan position in `template`
    sections = []        # open sections: (name, prefix, body_start)
    tokens_stack = []    # parent token lists, one per open section

    # root token
    root = Root('root')
    root.filters = copy.copy(filters)

    m = re_tag.search(template, index)

    while m is not None:
        token = None
        last_literal = None
        strip_space = False

        # Literal text between the previous tag and this one
        if m.start() > index:
            last_literal = Literal(
                'str', template[index:m.start()], root=root)
            tokens.append(last_literal)

        # parse token
        prefix, name, suffix = m.groups()

        if prefix == '=' and suffix == '=':
            # {{=| |=}} to change delimiters
            delimiters = re.split(r'\s+', name)
            if len(delimiters) != 2:
                raise SyntaxError(
                    'Invalid new delimiter definition: ' + m.group())
            re_tag = delimiters_to_re(delimiters)
            strip_space = True

        elif prefix == '{' and suffix == '}':
            # {{{ variable }}} — unescaped variable
            token = Variable(name, name, root=root)

        elif prefix == '' and suffix == '':
            # {{ name }} — HTML-escaped variable
            token = Variable(name, name, root=root)
            token.escape = True

        elif suffix != '' and suffix != None:
            raise SyntaxError('Invalid token: ' + m.group())

        elif prefix == '&':
            # {{& escaped variable }}
            token = Variable(name, name, root=root)

        elif prefix == '!':
            # {{! comment }}
            token = Comment(name, root=root)
            if len(sections) <= 0:
                # considered as standalone only outside sections
                strip_space = True

        elif prefix == '>':
            # {{> partial}}
            token = Partial(name, name, root=root)
            strip_space = True

            # Record the tag's indentation so the partial can be
            # re-indented when rendered
            pos = is_standalone(template, m.start(), m.end())
            if pos:
                token.indent = len(template[pos[0]:m.start()])

        elif prefix == '#' or prefix == '^':
            # {{# section }} or # {{^ inverted }}
            # strip filter
            sec_name = name.split('|')[0].strip()
            token = Section(sec_name, name, root=root) \
                if prefix == '#' else Inverted(name, name, root=root)
            token.delimiter = delimiters
            tokens.append(token)

            # save the tokens onto stack; children collect into a
            # fresh list until the matching {{/...}} is found
            token = None
            tokens_stack.append(tokens)
            tokens = []

            sections.append((sec_name, prefix, m.end()))
            strip_space = True

        elif prefix == '/':
            # Close the innermost open section
            tag_name, sec_type, text_end = sections.pop()
            if tag_name != name:
                raise SyntaxError(
                    "unclosed tag: '" + tag_name + "' Got:" + m.group())
            children = tokens
            tokens = tokens_stack.pop()
            # The section token is the last one at the parent level:
            # attach its raw body text (for lambdas) and its children
            tokens[-1].text = template[text_end:m.start()]
            tokens[-1].children = children
            strip_space = True

        else:
            raise SyntaxError('Unknown tag: ' + m.group())

        if token is not None:
            tokens.append(token)

        index = m.end()
        if strip_space:
            # Standalone tags swallow their surrounding whitespace line
            pos = is_standalone(template, m.start(), m.end())
            if pos:
                index = pos[1]
                if last_literal:
                    last_literal.value = last_literal.value.rstrip(
                        spaces_not_newline)

        m = re_tag.search(template, index)

    # Trailing literal after the last tag
    tokens.append(Literal('str', template[index:]))
    root.children = tokens
    return root
def _escape(self, text):
    """Escape text according to self.escape"""
    # None renders as the empty string; everything else is stringified
    rendered = EMPTYSTRING if text is None else str(text)
    return html_escape(rendered) if self.escape else rendered
def _lookup(self, dot_name, contexts):
    """lookup value for names like 'a.b.c' and handle filters as well

    Supports relative paths ('../a.b.c', './x'), dotted attribute
    access, numeric list indexing ('a.0'), and '|'-separated filters.
    """
    # process filters: 'name|f1|f2' -> name plus a list of filter names
    filters = [x for x in map(lambda x: x.strip(), dot_name.split('|'))]
    dot_name = filters[0]
    filters = filters[1:]

    # should support paths like '../../a.b.c/../d', etc.
    if not dot_name.startswith('.'):
        dot_name = './' + dot_name

    paths = dot_name.split('/')
    last_path = paths[-1]

    # path like '../..' or ./../. etc. refers to a context itself
    refer_context = last_path == '' \
        or last_path == '.' or last_path == '..'
    paths = paths if refer_context else paths[:-1]

    # count path level: '..' walks up, dotted names walk down
    level = 0
    for path in paths:
        if path == '..':
            level -= 1
        elif path != '.':
            # ../a.b.c/.. in the middle
            level += len(path.strip('.').split('.'))

    names = last_path.split('.')

    # fetch the correct context
    if refer_context or names[0] == '':
        try:
            value = contexts[level-1]
        except:
            # NOTE(review): bare except silently maps any error to None
            value = None
    else:
        # support {{a.b.c.d.e}} like lookup
        value = lookup(names[0], contexts, level)

    # lookup for variables: walk the remaining dotted names
    if not refer_context:
        for name in names[1:]:
            try:
                # a.num (a.1, a.2) to access list
                index = parse_int(name)
                name = parse_int(name) \
                    if isinstance(value, (list, tuple)) else name
                value = value[name]
            except:
                # not found
                value = None
                break;

    # apply filters; unknown or failing filters are skipped
    for f in filters:
        try:
            func = self.root.filters[f]
            value = func(value)
        except:
            continue

    return value
def _render_children(self, contexts, partials):
    """Render the children tokens"""
    # Render each child and concatenate the results
    pieces = [child._render(contexts, partials)
              for child in self.children]
    return EMPTYSTRING.join(pieces)
def _render(self, contexts, partials):
    """render variable"""
    value = self._lookup(self.value, contexts)
    if callable(value):
        # lambda: call it and render its string result as a template
        value = inner_render(str(value()), contexts, partials)
    return self._escape(value)
def _render(self, contexts, partials):
    """render section"""
    val = self._lookup(self.value, contexts)
    if not val:
        # false value: section produces no output
        return EMPTYSTRING

    # normally json has types: number/string/list/map
    # but python has more, so we decide that map and string should not
    # iterate by default, other do.
    if hasattr(val, "__iter__") and not isinstance(val, (str, dict)):
        # non-empty lists: render children once per item, with the item
        # pushed onto the context stack
        ret = []
        for item in val:
            contexts.append(item)
            ret.append(self._render_children(contexts, partials))
            contexts.pop()

        if len(ret) <= 0:
            # empty lists
            return EMPTYSTRING

        return self._escape(''.join(ret))
    elif callable(val):
        # lambdas: pass the raw section body and render the result
        # with this section's delimiters
        new_template = val(self.text)
        value = inner_render(new_template, contexts, partials,
                             self.delimiter)
    else:
        # context: push the value and render children against it
        contexts.append(val)
        value = self._render_children(contexts, partials)
        contexts.pop()

    return self._escape(value)
render inverted section def _render(self, contexts, partials): """render inverted section""" val = self._lookup(self.value, contexts) if val: return EMPTYSTRING return self._render_children(contexts, partials)
render partials def _render(self, contexts, partials): """render partials""" try: partial = partials[self.value] except KeyError as e: return self._escape(EMPTYSTRING) partial = re_insert_indent.sub(r'\1' + ' '*self.indent, partial) return inner_render(partial, contexts, partials, self.delimiter)
def Setup():
    '''
    Called when the code is installed. Sets up directories and downloads
    the K2 catalog.

    '''
    # `exist_ok=True` avoids the check-then-create race of the old
    # os.path.exists() + os.makedirs() pattern
    os.makedirs(os.path.join(EVEREST_DAT, 'k2', 'cbv'), exist_ok=True)
    GetK2Stars(clobber=False)
def CDPP(flux, mask=None, cadence='lc'):
    '''
    Compute the proxy 6-hr CDPP metric.

    :param array_like flux: The flux array to compute the CDPP for
    :param array_like mask: The indices to be masked. Default :py:obj:`None` \
           (no masking)
    :param str cadence: The light curve cadence. Default `lc`

    '''
    # Use a None sentinel instead of a mutable default argument
    if mask is None:
        mask = []

    # 13 cadences is 6.5 hours
    rmswin = 13
    # Smooth the data on a 2 day timescale
    svgwin = 49

    # If short cadence, need to downbin to the long-cadence rate (30:1)
    if cadence == 'sc':
        newsize = len(flux) // 30
        flux = Downbin(flux, newsize, operation='mean')

    # Remove masked cadences, then detrend with a Savitzky-Golay filter
    flux_savgol = SavGol(np.delete(flux, mask), win=svgwin)
    if len(flux_savgol):
        # Normalized rolling scatter, with outlier rejection
        return Scatter(flux_savgol / np.nanmedian(flux_savgol),
                       remove_outliers=True, win=rmswin)
    else:
        # Nothing left after masking
        return np.nan
def GetData(EPIC, season=None, cadence='lc', clobber=False, delete_raw=False,
            aperture_name='k2sff_15', saturated_aperture_name='k2sff_19',
            max_pixels=75, download_only=False, saturation_tolerance=-0.1,
            bad_bits=None, get_hires=True, get_nearby=True, **kwargs):
    '''
    Returns a :py:obj:`DataContainer` instance with the
    raw data for the target.

    :param int EPIC: The EPIC ID number
    :param int season: The observing season (campaign). Default :py:obj:`None`
    :param str cadence: The light curve cadence. Default `lc`
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param bool delete_raw: Delete the FITS TPF after processing it? \
           Default :py:obj:`False`
    :param str aperture_name: The name of the aperture to use. Select \
           `custom` to call :py:func:`GetCustomAperture`. Default `k2sff_15`
    :param str saturated_aperture_name: The name of the aperture to use if \
           the target is saturated. Default `k2sff_19`
    :param int max_pixels: Maximum number of pixels in the TPF. Default 75
    :param bool download_only: Download raw TPF and return? Default \
           :py:obj:`False`
    :param float saturation_tolerance: Target is considered saturated \
           if flux is within this fraction of the pixel well depth. \
           Default -0.1
    :param array_like bad_bits: Flagged :py:obj`QUALITY` bits to consider \
           outliers when computing the model. \
           Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
    :param bool get_hires: Download a high resolution image of the target? \
           Default :py:obj:`True`
    :param bool get_nearby: Retrieve location of nearby sources? \
           Default :py:obj:`True`

    '''

    # Use a None sentinel instead of a mutable default argument
    if bad_bits is None:
        bad_bits = [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17]

    # Campaign no.
    if season is None:
        campaign = Season(EPIC)
        if hasattr(campaign, '__len__'):
            raise AttributeError(
                "Please choose a campaign/season for this target: %s." %
                campaign)
    else:
        campaign = season

    # Is there short cadence data available for this target?
    short_cadence = HasShortCadence(EPIC, season=campaign)
    if cadence == 'sc' and not short_cadence:
        raise ValueError("Short cadence data not available for this target.")

    # Local file name
    filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
                            ('%09d' % EPIC)[:4] + '00000',
                            ('%09d' % EPIC)[4:], 'data.npz')

    # Download?
    if clobber or not os.path.exists(filename):

        # Get the TPF
        tpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files',
                           str(EPIC), 'ktwo%09d-c%02d_lpd-targ.fits.gz'
                           % (EPIC, campaign))
        sc_tpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files',
                              str(EPIC), 'ktwo%09d-c%02d_spd-targ.fits.gz'
                              % (EPIC, campaign))
        if clobber or not os.path.exists(tpf):
            kplr_client.k2_star(EPIC).get_target_pixel_files(fetch=True)

        with pyfits.open(tpf) as f:
            qdata = f[1].data

            # Get the TPF aperture (bit 2 of the aperture mask extension)
            tpf_aperture = (f[2].data & 2) // 2

            # Get the enlarged TPF aperture: grow the mask by one pixel
            # in each of the four cardinal directions
            tpf_big_aperture = np.array(tpf_aperture)
            for i in range(tpf_big_aperture.shape[0]):
                for j in range(tpf_big_aperture.shape[1]):
                    if f[2].data[i][j] == 1:
                        for n in [(i - 1, j), (i + 1, j),
                                  (i, j - 1), (i, j + 1)]:
                            if n[0] >= 0 and n[0] < tpf_big_aperture.shape[0]:
                                if n[1] >= 0 and n[1] < \
                                        tpf_big_aperture.shape[1]:
                                    if tpf_aperture[n[0]][n[1]] == 1:
                                        tpf_big_aperture[i][j] = 1

        # Is there short cadence data?
        if short_cadence:
            with pyfits.open(sc_tpf) as f:
                sc_qdata = f[1].data

        # Get K2SFF apertures
        try:
            k2sff = kplr.K2SFF(EPIC, sci_campaign=campaign)
            k2sff_apertures = k2sff.apertures
            if delete_raw:
                os.remove(k2sff._file)
        except:
            k2sff_apertures = [None for i in range(20)]

        # Make a dict of all our apertures
        # We're not getting K2SFF apertures 0-9 any more
        apertures = {'tpf': tpf_aperture, 'tpf_big': tpf_big_aperture}
        for i in range(10, 20):
            apertures.update({'k2sff_%02d' % i: k2sff_apertures[i]})

        # Get the header info
        fitsheader = [pyfits.getheader(tpf, 0).cards,
                      pyfits.getheader(tpf, 1).cards,
                      pyfits.getheader(tpf, 2).cards]
        if short_cadence:
            sc_fitsheader = [pyfits.getheader(sc_tpf, 0).cards,
                             pyfits.getheader(sc_tpf, 1).cards,
                             pyfits.getheader(sc_tpf, 2).cards]
        else:
            sc_fitsheader = None

        # Get a hi res image of the target
        if get_hires:
            hires = GetHiResImage(EPIC)
        else:
            hires = None

        # Get nearby sources
        if get_nearby:
            nearby = GetSources(EPIC)
        else:
            nearby = []

        # Delete the raw TPFs?
        if delete_raw:
            os.remove(tpf)
            if short_cadence:
                os.remove(sc_tpf)

        # Get the arrays
        cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
        time = np.array(qdata.field('TIME'), dtype='float64')
        fpix = np.array(qdata.field('FLUX'), dtype='float64')
        fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
        qual = np.array(qdata.field('QUALITY'), dtype=int)

        # Get rid of NaNs in the time array by interpolating
        naninds = np.where(np.isnan(time))
        time = Interpolate(np.arange(0, len(time)), naninds, time)

        # Get the motion vectors (if available!)
        pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
        pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
        if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
            pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
            pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
        else:
            pc1 = None
            pc2 = None

        # Do the same for short cadence
        if short_cadence:
            sc_cadn = np.array(sc_qdata.field('CADENCENO'), dtype='int32')
            sc_time = np.array(sc_qdata.field('TIME'), dtype='float64')
            sc_fpix = np.array(sc_qdata.field('FLUX'), dtype='float64')
            sc_fpix_err = np.array(sc_qdata.field('FLUX_ERR'),
                                   dtype='float64')
            sc_qual = np.array(sc_qdata.field('QUALITY'), dtype=int)
            sc_naninds = np.where(np.isnan(sc_time))
            sc_time = Interpolate(
                np.arange(0, len(sc_time)), sc_naninds, sc_time)
            sc_pc1 = np.array(sc_qdata.field('POS_CORR1'), dtype='float64')
            sc_pc2 = np.array(sc_qdata.field('POS_CORR2'), dtype='float64')
            if not np.all(np.isnan(sc_pc1)) and not np.all(np.isnan(sc_pc2)):
                sc_pc1 = Interpolate(
                    sc_time, np.where(np.isnan(sc_pc1)), sc_pc1)
                sc_pc2 = Interpolate(
                    sc_time, np.where(np.isnan(sc_pc2)), sc_pc2)
            else:
                sc_pc1 = None
                sc_pc2 = None
        else:
            sc_cadn = None
            sc_time = None
            sc_fpix = None
            sc_fpix_err = None
            sc_qual = None
            sc_pc1 = None
            sc_pc2 = None

        # Static pixel images for plotting
        pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]

        # Atomically write to disk.
        # http://stackoverflow.com/questions/2333872/
        # atomic-writing-to-file-with-python
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        f = NamedTemporaryFile("wb", delete=False)
        np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
                            fpix_err=fpix_err,
                            qual=qual, apertures=apertures,
                            pc1=pc1, pc2=pc2, fitsheader=fitsheader,
                            pixel_images=pixel_images, nearby=nearby,
                            hires=hires,
                            sc_cadn=sc_cadn, sc_time=sc_time, sc_fpix=sc_fpix,
                            sc_fpix_err=sc_fpix_err, sc_qual=sc_qual,
                            sc_pc1=sc_pc1, sc_pc2=sc_pc2,
                            sc_fitsheader=sc_fitsheader)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, filename)

        if download_only:
            return

    # Load
    data = np.load(filename)
    apertures = data['apertures'][()]
    pixel_images = data['pixel_images']
    nearby = data['nearby']
    hires = data['hires'][()]

    if cadence == 'lc':
        fitsheader = data['fitsheader']
        cadn = data['cadn']
        time = data['time']
        fpix = data['fpix']
        fpix_err = data['fpix_err']
        qual = data['qual']
        pc1 = data['pc1']
        pc2 = data['pc2']
    elif cadence == 'sc':
        fitsheader = data['sc_fitsheader']
        cadn = data['sc_cadn']
        time = data['sc_time']
        fpix = data['sc_fpix']
        fpix_err = data['sc_fpix_err']
        qual = data['sc_qual']
        pc1 = data['sc_pc1']
        pc2 = data['sc_pc2']
    else:
        raise ValueError("Invalid value for the cadence.")

    # Select the "saturated aperture" to check if the star is saturated
    # If it is, we will use this aperture instead
    if saturated_aperture_name == 'custom':
        saturated_aperture = GetCustomAperture(data)
    else:
        if saturated_aperture_name is None:
            saturated_aperture_name = 'k2sff_19'
        saturated_aperture = apertures[saturated_aperture_name]
        if saturated_aperture is None:
            log.error("Invalid aperture selected. Defaulting to `tpf_big`.")
            saturated_aperture_name = 'tpf_big'
            saturated_aperture = apertures[saturated_aperture_name]

    # HACK: Some C05 K2SFF apertures don't match the target pixel file
    # pixel grid size. This is likely because they're defined on the M67
    # superstamp. For now, let's ignore these stars.
    if saturated_aperture.shape != fpix.shape[1:]:
        log.error("Aperture size mismatch!")
        return None

    # Compute the saturation flux and the 97.5th percentile
    # flux in each pixel of the saturated aperture. We're going
    # to compare these to decide if the star is saturated.
    satflx = SaturationFlux(EPIC, campaign=campaign) * \
        (1. + saturation_tolerance)
    f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
    for i in range(fpix.shape[1]):
        for j in range(fpix.shape[2]):
            if saturated_aperture[i, j]:
                # Let's remove NaNs...
                tmp = np.delete(fpix[:, i, j], np.where(
                    np.isnan(fpix[:, i, j])))
                # ... and really bad outliers...
                if len(tmp):
                    f = SavGol(tmp)
                    med = np.nanmedian(f)
                    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
                    bad = np.where((f > med + 10. * MAD) |
                                   (f < med - 10. * MAD))[0]
                    # BUGFIX: np.delete returns a new array; the result was
                    # previously discarded, so outliers were never removed
                    tmp = np.delete(tmp, bad)
                    # ... so we can compute the 97.5th percentile flux
                    i97 = int(0.975 * len(tmp))
                    tmp = tmp[np.argsort(tmp)[i97]]
                    f97[i, j] = tmp

    # Check if any of the pixels are actually saturated
    if np.nanmax(f97) <= satflx:
        log.info("No saturated columns detected.")
        saturated = False
    else:
        log.info("Saturated pixel(s) found. Switching to aperture `%s`."
                 % saturated_aperture_name)
        aperture_name = saturated_aperture_name
        saturated = True

    # Now grab the aperture we'll actually use
    if aperture_name == 'custom':
        aperture = GetCustomAperture(data)
    else:
        if aperture_name is None:
            aperture_name = 'k2sff_15'
        aperture = apertures[aperture_name]
        if aperture is None:
            log.error("Invalid aperture selected. Defaulting to `tpf_big`.")
            aperture_name = 'tpf_big'
            aperture = apertures[aperture_name]

    # HACK: Some C05 K2SFF apertures don't match the target pixel file
    # pixel grid size. This is likely because they're defined on the M67
    # superstamp. For now, let's ignore these stars.
    if aperture.shape != fpix.shape[1:]:
        log.error("Aperture size mismatch!")
        return None

    # Now we check if the aperture is too big. Can lead to memory errors...
    # Treat saturated and unsaturated stars differently.
    if saturated:

        # Need to check if we have too many pixels *after* collapsing columns.
        # Sort the apertures in decreasing order of pixels, but keep the apert.
        # chosen by the user first.
        aperture_names = np.array(list(apertures.keys()))
        npix_per_aperture = np.array(
            [np.sum(apertures[k]) for k in aperture_names])
        aperture_names = aperture_names[np.argsort(npix_per_aperture)[::-1]]
        aperture_names = np.append([aperture_name], np.delete(
            aperture_names, np.argmax(aperture_names == aperture_name)))

        # Loop through them. Pick the first one that satisfies
        # the `max_pixels` constraint
        for aperture_name in aperture_names:
            aperture = apertures[aperture_name]
            aperture[np.isnan(fpix[0])] = 0
            ncol = 0
            apcopy = np.array(aperture)
            for j in range(apcopy.shape[1]):
                if np.any(f97[:, j] > satflx):
                    apcopy[:, j] = 0
                    ncol += 1
            if np.sum(apcopy) + ncol <= max_pixels:
                break
        if np.sum(apcopy) + ncol > max_pixels:
            log.error(
                "No apertures available with fewer than %d pixels. Aborting."
                % max_pixels)
            return None

        # Now, finally, we collapse the saturated columns into single pixels
        # and make the pixel array 2D
        ncol = 0
        fpixnew = []
        ferrnew = []

        # HACK: K2SFF sometimes clips the heads/tails of saturated columns
        # That's really bad, since that's where all the information is. Let's
        # artificially extend the aperture by two pixels at the top and bottom
        # of each saturated column. This *could* increase contamination, but
        # it's unlikely since the saturated target is by definition really
        # bright
        ext = 0
        for j in range(aperture.shape[1]):
            if np.any(f97[:, j] > satflx):
                for i in range(aperture.shape[0]):
                    if (aperture[i, j] == 0) and \
                            (np.nanmedian(fpix[:, i, j]) > 0):
                        if (i + 2 < aperture.shape[0]) and \
                                aperture[i + 2, j] == 1:
                            aperture[i, j] = 2
                            ext += 1
                        elif (i + 1 < aperture.shape[0]) and \
                                aperture[i + 1, j] == 1:
                            aperture[i, j] = 2
                            ext += 1
                        elif (i - 1 >= 0) and aperture[i - 1, j] == 1:
                            aperture[i, j] = 2
                            ext += 1
                        elif (i - 2 >= 0) and aperture[i - 2, j] == 1:
                            aperture[i, j] = 2
                            ext += 1
        if ext:
            log.info("Extended saturated columns by %d pixel(s)." % ext)

        for j in range(aperture.shape[1]):
            if np.any(f97[:, j] > satflx):
                marked = False
                collapsed = np.zeros(len(fpix[:, 0, 0]))
                collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        if not marked:
                            aperture[i, j] = AP_COLLAPSED_PIXEL
                            marked = True
                        else:
                            aperture[i, j] = AP_SATURATED_PIXEL
                        collapsed += fpix[:, i, j]
                        collapsed_err2 += fpix_err[:, i, j] ** 2
                if np.any(collapsed):
                    fpixnew.append(collapsed)
                    ferrnew.append(np.sqrt(collapsed_err2))
                    ncol += 1
            else:
                for i in range(aperture.shape[0]):
                    if aperture[i, j]:
                        fpixnew.append(fpix[:, i, j])
                        ferrnew.append(fpix_err[:, i, j])
        fpix2D = np.array(fpixnew).T
        fpix_err2D = np.array(ferrnew).T
        log.info("Collapsed %d saturated column(s)." % ncol)

    else:

        # Check if there are too many pixels
        if np.sum(aperture) > max_pixels:

            # This case is simpler: we just pick the largest aperture
            # that's less than or equal to `max_pixels`
            keys = list(apertures.keys())
            npix = np.array([np.sum(apertures[k]) for k in keys])
            aperture_name = keys[np.argmax(npix * (npix <= max_pixels))]
            aperture = apertures[aperture_name]
            aperture[np.isnan(fpix[0])] = 0
            if np.sum(aperture) > max_pixels:
                log.error("No apertures available with fewer than " +
                          "%d pixels. Aborting." % max_pixels)
                return None
            log.warn(
                "Selected aperture is too big. Proceeding with aperture " +
                "`%s` instead." % aperture_name)

        # Make the pixel flux array 2D
        aperture[np.isnan(fpix[0])] = 0
        ap = np.where(aperture & 1)
        fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
        fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')

    # Compute the background
    binds = np.where(aperture ^ 1)
    if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
        bkg = np.nanmedian(np.array([f[binds] for f in fpix],
                                    dtype='float64'), axis=1)
        # Uncertainty of the median:
        # http://davidmlane.com/hyperstat/A106993.html
        bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
                                                dtype='float64'), axis=1) \
            / np.sqrt(len(binds[0]))
        bkg = bkg.reshape(-1, 1)
        bkg_err = bkg_err.reshape(-1, 1)
    else:
        bkg = 0.
        bkg_err = 0.

    # Make everything 2D and remove the background
    fpix = fpix2D - bkg
    fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
    flux = np.sum(fpix, axis=1)
    ferr = np.sqrt(np.sum(fpix_err ** 2, axis=1))

    # Get NaN data points
    nanmask = np.where(np.isnan(flux) | (flux == 0))[0]

    # Get flagged data points -- we won't train our model on them
    badmask = []
    for b in bad_bits:
        badmask += list(np.where(qual & 2 ** (b - 1))[0])

    # Flag >10 sigma outliers -- same thing.
    tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
    t = np.delete(time, tmpmask)
    f = np.delete(flux, tmpmask)
    f = SavGol(f)
    med = np.nanmedian(f)
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
    badmask.extend([np.argmax(time == t[i]) for i in bad])

    # Campaign 2 hack: the first day or two are screwed up
    if campaign == 2:
        badmask.extend(np.where(time < 2061.5)[0])

    # TODO: Fix time offsets in first half of
    # Campaign 0. See note in everest 1.0 code

    # Finalize the mask
    badmask = np.array(sorted(list(set(badmask))))

    # Interpolate the nans
    fpix = Interpolate(time, nanmask, fpix)
    fpix_err = Interpolate(time, nanmask, fpix_err)

    # Return
    data = DataContainer()
    data.ID = EPIC
    data.campaign = campaign
    data.cadn = cadn
    data.time = time
    data.fpix = fpix
    data.fpix_err = fpix_err
    data.nanmask = nanmask
    data.badmask = badmask
    data.aperture = aperture
    data.aperture_name = aperture_name
    data.apertures = apertures
    data.quality = qual
    data.Xpos = pc1
    data.Ypos = pc2
    data.meta = fitsheader
    data.mag = fitsheader[0]['KEPMAG'][1]
    data.pixel_images = pixel_images
    data.nearby = nearby
    data.hires = hires
    data.saturated = saturated
    data.bkg = bkg

    return data
def GetNeighbors(EPIC, season=None, model=None, neighbors=10,
                 mag_range=(11., 13.), cdpp_range=None,
                 aperture_name='k2sff_15', cadence='lc', **kwargs):
    '''
    Return `neighbors` random bright stars on the same module as `EPIC`.

    :param int EPIC: The EPIC ID number
    :param str model: The :py:obj:`everest` model name. Only used when \
           imposing CDPP bounds. Default :py:obj:`None`
    :param int neighbors: Number of neighbors to return. Default 10
    :param str aperture_name: The name of the aperture to use. Select \
           `custom` to call \
           :py:func:`GetCustomAperture`. Default `k2sff_15`
    :param str cadence: The light curve cadence. Default `lc`
    :param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. \
           Default (11, 13)
    :param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. \
           Default :py:obj:`None`

    '''

    # Zero neighbors?
    if neighbors == 0:
        return []

    # Get the IDs
    # Campaign no.
    if season is None:
        campaign = Season(EPIC)
        if hasattr(campaign, '__len__'):
            raise AttributeError(
                "Please choose a campaign/season for this target: %s."
                % campaign)
    else:
        campaign = season
    # Full catalog of stars in this campaign
    epics, kepmags, channels, short_cadence = np.array(GetK2Stars()[
        campaign]).T
    short_cadence = np.array(short_cadence, dtype=bool)
    epics = np.array(epics, dtype=int)
    # Channels adjacent to the target's channel (for the "nearby" pass)
    c = GetNeighboringChannels(Channel(EPIC, campaign=season))

    # Manage kwargs
    if aperture_name is None:
        aperture_name = 'k2sff_15'
    if mag_range is None:
        mag_lo = -np.inf
        mag_hi = np.inf
    else:
        mag_lo = mag_range[0]
        mag_hi = mag_range[1]
        # K2-specific tweak. The short cadence stars are preferentially
        # really bright ones, so we won't get many neighbors if we
        # stick to the default magnitude range! I'm
        # therefore enforcing a lower magnitude cut-off of 8.
        if cadence == 'sc':
            mag_lo = 8.
    if cdpp_range is None:
        cdpp_lo = -np.inf
        cdpp_hi = np.inf
    else:
        cdpp_lo = cdpp_range[0]
        cdpp_hi = cdpp_range[1]
    targets = []

    # First look for nearby targets, then relax the constraint
    # If still no targets, widen magnitude range
    for n in range(3):
        if n == 0:
            nearby = True
        elif n == 1:
            nearby = False
        elif n == 2:
            mag_lo -= 1
            mag_hi += 1

        # Loop over all stars
        for star, kp, channel, sc in zip(epics, kepmags, channels,
                                         short_cadence):

            # Preliminary vetting: channel (on the first pass), magnitude,
            # and short-cadence availability when requested
            if not (((channel in c) if nearby else True) and (kp < mag_hi)
                    and (kp > mag_lo) and (sc if cadence == 'sc' else True)):
                continue

            # Reject if self or if already in list
            if (star == EPIC) or (star in targets):
                continue

            # Ensure raw light curve file exists
            if not os.path.exists(
                    os.path.join(TargetDirectory(star, campaign),
                                 'data.npz')):
                continue

            # Ensure crowding is OK. This is quite conservative, as we
            # need to prevent potential astrophysical false positive
            # contamination from crowded planet-hosting neighbors when
            # doing neighboring PLD.
            contam = False
            data = np.load(os.path.join(
                TargetDirectory(star, campaign), 'data.npz'))
            aperture = data['apertures'][()][aperture_name]
            # Check that the aperture exists!
            if aperture is None:
                continue
            fpix = data['fpix']
            for source in data['nearby'][()]:
                # Ignore self
                if source['ID'] == star:
                    continue
                # Ignore really dim stars
                # NOTE(review): this condition skips sources *brighter* than
                # kp - 5 (smaller mag = brighter); the comment says "dim" --
                # confirm intended sign against the everest paper/repo.
                if source['mag'] < kp - 5:
                    continue
                # Compute source position
                x = int(np.round(source['x'] - source['x0']))
                y = int(np.round(source['y'] - source['y0']))
                # If the source is within two pixels of the edge
                # of the target aperture, reject the target
                for j in [x - 2, x - 1, x, x + 1, x + 2]:
                    if j < 0:
                        # Outside the postage stamp
                        continue
                    for i in [y - 2, y - 1, y, y + 1, y + 2]:
                        if i < 0:
                            # Outside the postage stamp
                            continue
                        try:
                            if aperture[i][j]:
                                # Oh-oh!
                                contam = True
                        except IndexError:
                            # Out of bounds... carry on!
                            pass
            if contam:
                continue

            # HACK: This happens for K2SFF M67 targets in C05.
            # Let's skip them
            if aperture.shape != fpix.shape[1:]:
                continue

            # Reject if the model is not present
            if model is not None:
                if not os.path.exists(os.path.join(
                        TargetDirectory(star, campaign), model + '.npz')):
                    continue

                # Reject if CDPP out of range
                if cdpp_range is not None:
                    cdpp = np.load(os.path.join(TargetDirectory(
                        star, campaign), model + '.npz'))['cdpp']
                    if (cdpp > cdpp_hi) or (cdpp < cdpp_lo):
                        continue

            # Passed all the tests!
            targets.append(star)

            # Do we have enough? If so, return
            if len(targets) == neighbors:
                random.shuffle(targets)
                return targets

    # If we get to this point, we didn't find enough neighbors...
    # Return what we have anyway.
    return targets
def PlanetStatistics(model='nPLD', compare_to='k2sff', **kwargs):
    '''
    Computes and plots the CDPP statistics comparison between `model`
    and `compare_to` for all known K2 planets.

    :param str model: The :py:obj:`everest` model name
    :param str compare_to: The :py:obj:`everest` model name or \
           other K2 pipeline name

    '''

    # Load all planet hosts from the bundled table
    f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'planets.tsv')
    epic, campaign, kp, _, _, _, _, _, _ = np.loadtxt(
        f, unpack=True, skiprows=2)
    epic = np.array(epic, dtype=int)
    campaign = np.array(campaign, dtype=int)
    cdpp = np.zeros(len(epic))
    saturated = np.zeros(len(epic), dtype=int)
    cdpp_1 = np.zeros(len(epic))

    # Get the stats, one campaign table at a time
    for c in set(campaign):

        # Everest model
        f = os.path.join(EVEREST_SRC, 'missions', 'k2',
                         'tables', 'c%02d_%s.cdpp' % (int(c), model))
        e0, _, _, c0, _, _, _, _, s0 = np.loadtxt(f, unpack=True, skiprows=2)
        for i, e in enumerate(epic):
            if e in e0:
                j = np.argmax(e0 == e)
                cdpp[i] = c0[j]
                saturated[i] = s0[j]

        # Comparison model (skip campaigns with no comparison table)
        f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                         'c%02d_%s.cdpp' % (int(c), compare_to.lower()))
        if not os.path.exists(f):
            continue
        # Non-everest tables only have two columns (EPIC, CDPP)
        if compare_to.lower() in ['everest1', 'k2sff', 'k2sc']:
            e1, c1 = np.loadtxt(f, unpack=True, skiprows=2)
        else:
            e1, _, _, c1, _, _, _, _, _ = np.loadtxt(
                f, unpack=True, skiprows=2)
        for i, e in enumerate(epic):
            if e in e1:
                j = np.argmax(e1 == e)
                cdpp_1[i] = c1[j]

    sat = np.where(saturated == 1)
    unsat = np.where(saturated == 0)

    # Plot the equivalent of the Aigrain+16 figure:
    # relative CDPP versus Kepler magnitude
    fig, ax = pl.subplots(1)
    fig.canvas.set_window_title(
        'K2 Planet Hosts: %s versus %s' % (model, compare_to))
    x = kp
    y = (cdpp - cdpp_1) / cdpp_1
    ax.scatter(x[unsat], y[unsat], color='b', marker='.',
               alpha=0.5, zorder=-1, picker=True)
    ax.scatter(x[sat], y[sat], color='r', marker='.',
               alpha=0.5, zorder=-1, picker=True)
    ax.set_ylim(-1, 1)
    ax.set_xlim(8, 18)
    ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
    ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
    ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
    ax.set_title(r'K2 Planet Hosts', fontsize=18)
    ax.set_ylabel(r'Relative CDPP', fontsize=18)
    ax.set_xlabel('Kepler Magnitude', fontsize=18)

    # Pickable points: clicking a point opens the corresponding target
    Picker = StatsPicker([ax], [kp], [y], epic, model=model,
                         compare_to=compare_to)
    fig.canvas.mpl_connect('pick_event', Picker)

    # Show
    pl.show()
def ShortCadenceStatistics(campaign=None, clobber=False, model='nPLD',
                           plot=True, **kwargs):
    '''
    Computes and plots the CDPP statistics comparison between short cadence
    and long cadence de-trended light curves

    :param campaign: The campaign number or list of campaign numbers. \
           Default is to plot all campaigns
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param str model: The :py:obj:`everest` model name
    :param bool plot: Default :py:obj:`True`

    '''

    # Check campaign
    if campaign is None:
        campaign = np.arange(9)
    else:
        campaign = np.atleast_1d(campaign)

    # Update model name with the short-cadence suffix
    model = '%s.sc' % model

    # Compute the statistics: one `.cdpp` table per campaign
    for camp in campaign:
        sub = np.array(GetK2Campaign(
            camp, cadence='sc', epics_only=True), dtype=int)
        outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                               'c%02d_%s.cdpp' % (int(camp), model))
        if clobber or not os.path.exists(outfile):
            with open(outfile, 'w') as f:
                print("EPIC Kp Raw CDPP " +
                      "Everest CDPP Saturated", file=f)
                print("--------- ------ --------- " +
                      "------------ ---------", file=f)
                all = GetK2Campaign(int(camp), cadence='sc')
                stars = np.array([s[0] for s in all], dtype=int)
                kpmgs = np.array([s[1] for s in all], dtype=float)
                for i, _ in enumerate(stars):
                    sys.stdout.write(
                        '\rProcessing target %d/%d...'
                        % (i + 1, len(stars)))
                    sys.stdout.flush()
                    nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % camp,
                                      ('%09d' % stars[i])[:4] + '00000',
                                      ('%09d' % stars[i])[4:],
                                      model + '.npz')
                    try:
                        data = np.load(nf)
                        print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d}".format(
                            stars[i], kpmgs[i], data['cdppr'][()],
                            data['cdpp'][()],
                            int(data['saturated'])), file=f)
                    except:
                        # Missing/corrupt model file: record NaNs for this star
                        print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d}".format(
                            stars[i], kpmgs[i], np.nan, np.nan, 0), file=f)
                print("")

    if not plot:
        return

    # Running lists accumulated over all campaigns
    xsat = []
    ysat = []
    xunsat = []
    yunsat = []
    xall = []
    yall = []
    epics = []

    # Plot
    for camp in campaign:

        # Load all stars
        sub = np.array(GetK2Campaign(
            camp, cadence='sc', epics_only=True), dtype=int)
        outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                               'c%02d_%s.cdpp' % (int(camp), model))
        epic, kp, cdpp6r, cdpp6, saturated = np.loadtxt(
            outfile, unpack=True, skiprows=2)
        epic = np.array(epic, dtype=int)
        saturated = np.array(saturated, dtype=int)

        # Get only stars in this subcamp
        inds = np.array([e in sub for e in epic])
        epic = epic[inds]
        kp = kp[inds]
        # HACK: camp 0 magnitudes are reported only to the nearest tenth,
        # so let's add a little noise to spread them out for nicer plotting
        kp = kp + 0.1 * (0.5 - np.random.random(len(kp)))
        cdpp6r = cdpp6r[inds]
        cdpp6 = cdpp6[inds]
        saturated = saturated[inds]
        sat = np.where(saturated == 1)
        unsat = np.where(saturated == 0)
        # Skip campaigns whose CDPPs are all NaN
        if not np.any([not np.isnan(x) for x in cdpp6]):
            continue

        # Get the long cadence stats (model name without the '.sc' suffix)
        compfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                                'c%02d_%s.cdpp' % (int(camp), model[:-3]))
        epic_1, _, _, cdpp6_1, _, _, _, _, saturated = np.loadtxt(
            compfile, unpack=True, skiprows=2)
        epic_1 = np.array(epic_1, dtype=int)
        inds = np.array([e in sub for e in epic_1])
        epic_1 = epic_1[inds]
        cdpp6_1 = cdpp6_1[inds]
        # Align the long-cadence CDPPs with the short-cadence EPIC order
        cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)
        x = kp
        y = (cdpp6 - cdpp6_1) / cdpp6_1

        # Append to running lists
        xsat.extend(x[sat])
        ysat.extend(y[sat])
        xunsat.extend(x[unsat])
        yunsat.extend(y[unsat])
        xall.extend(x)
        yall.extend(y)
        epics.extend(epic)

    # Plot the equivalent of the Aigrain+16 figure
    fig, ax = pl.subplots(1)
    fig.canvas.set_window_title('K2 Short Cadence')
    ax.scatter(xunsat, yunsat, color='b', marker='.',
               alpha=0.35, zorder=-1, picker=True)
    ax.scatter(xsat, ysat, color='r', marker='.',
               alpha=0.35, zorder=-1, picker=True)
    ax.set_ylim(-1, 1)
    ax.set_xlim(8, 18)
    ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
    ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
    ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
    ax.set_title(r'Short Versus Long Cadence', fontsize=18)
    ax.set_ylabel(r'Relative CDPP', fontsize=18)
    ax.set_xlabel('Kepler Magnitude', fontsize=18)

    # Bin the CDPP in half-magnitude bins (median per bin, >10 stars only)
    yall = np.array(yall)
    xall = np.array(xall)
    bins = np.arange(7.5, 18.5, 0.5)
    by = np.zeros_like(bins) * np.nan
    for b, bin in enumerate(bins):
        i = np.where((yall > -np.inf) & (yall < np.inf)
                     & (xall >= bin - 0.5) & (xall < bin + 0.5))[0]
        if len(i) > 10:
            by[b] = np.median(yall[i])
    ax.plot(bins[:9], by[:9], 'r--', lw=2)
    ax.plot(bins[8:], by[8:], 'k-', lw=2)

    # Pickable points: clicking a point opens the corresponding target
    Picker = StatsPicker([ax], [xall], [yall], epics, model=model[:-3],
                         compare_to=model[:-3], cadence='sc',
                         campaign=campaign)
    fig.canvas.mpl_connect('pick_event', Picker)

    # Show
    pl.show()
def Statistics(season=None, clobber=False, model='nPLD', injection=False,
               compare_to='kepler', plot=True, cadence='lc', planets=False,
               **kwargs):
    '''
    Computes and plots the CDPP statistics comparison between `model`
    and `compare_to` for all long cadence light curves in a given campaign

    :param season: The campaign number or list of campaign numbers. \
           Default is to plot all campaigns
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`
    :param str model: The :py:obj:`everest` model name
    :param str compare_to: The :py:obj:`everest` model name or other \
           K2 pipeline name
    :param bool plot: Default :py:obj:`True`
    :param bool injection: Statistics for injection tests? Default \
           :py:obj:`False`
    :param bool planets: Statistics for known K2 planets? \
           Default :py:obj:`False`

    '''

    # Multi-mission compatibility
    campaign = season

    # Is this short cadence? Delegate to the short-cadence routine
    if cadence == 'sc':
        return ShortCadenceStatistics(campaign=campaign, clobber=clobber,
                                      model=model, plot=plot, **kwargs)

    # Check the campaign
    if campaign is None:
        campaign = 0

    # Planet hosts only?
    if planets:
        return PlanetStatistics(model=model, compare_to=compare_to, **kwargs)

    # Is this an injection run?
    if injection:
        return InjectionStatistics(campaign=campaign, clobber=clobber,
                                   model=model, plot=plot, **kwargs)

    # Compute the statistics table if it doesn't exist (or if clobbering)
    sub = np.array([s[0] for s in GetK2Campaign(campaign)], dtype=int)
    outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                           'c%02d_%s.cdpp' % (int(campaign), model))
    if clobber or not os.path.exists(outfile):
        with open(outfile, 'w') as f:
            print("EPIC Kp Raw CDPP Everest CDPP" +
                  " Validation Outliers[1] Outliers[2] " +
                  "Datapoints Saturated", file=f)
            print("--------- ------ --------- ------------" +
                  " ---------- ----------- ----------- " +
                  "---------- ---------", file=f)
            # NOTE: renamed from `all`, which shadowed the builtin
            targets = GetK2Campaign(int(campaign))
            stars = np.array([s[0] for s in targets], dtype=int)
            kpmgs = np.array([s[1] for s in targets], dtype=float)
            for i, _ in enumerate(stars):
                sys.stdout.write('\rProcessing target %d/%d...' %
                                 (i + 1, len(stars)))
                sys.stdout.flush()
                nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
                                  ('%09d' % stars[i])[:4] + '00000',
                                  ('%09d' % stars[i])[4:], model + '.npz')
                try:
                    data = np.load(nf)

                    # Remove NaNs and flagged cadences
                    flux = np.delete(data['fraw'] - data['model'], np.array(
                        list(set(np.concatenate([data['nanmask'],
                                                 data['badmask']])))))

                    # Sigma clipping to get 5 sigma outliers.
                    # NOTE(review): the `m = 1` sentinel forces entry but the
                    # `<` comparison means only one clipping pass ever runs;
                    # preserved as-is since changing it alters the statistics.
                    inds = np.array([], dtype=int)
                    m = 1
                    while len(inds) < m:
                        m = len(inds)
                        ff = SavGol(np.delete(flux, inds))
                        med = np.nanmedian(ff)
                        MAD = 1.4826 * np.nanmedian(np.abs(ff - med))
                        inds = np.append(inds, np.where(
                            (ff > med + 5. * MAD) |
                            (ff < med - 5. * MAD))[0])
                    nout = len(inds)
                    ntot = len(flux)

                    # HACK: Backwards compatibility fix; older save files
                    # stored the CDPP under the key 'cdpp6'
                    try:
                        cdpp = data['cdpp'][()]
                    except KeyError:
                        cdpp = data['cdpp6'][()]
                    print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d} {:>15d} {:>15d} {:>15d}".format(
                        stars[i], kpmgs[i], data['cdppr'][()], cdpp,
                        data['cdppv'][()], len(data['outmask']), nout, ntot,
                        int(data['saturated'])), file=f)
                except Exception:
                    # Best effort: a missing or corrupted save file yields
                    # a NaN row. (Was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.)
                    print("{:>09d} {:>15.3f} {:>15.3f} {:>15.3f} {:>15.3f} {:>15d} {:>15d} {:>15d} {:>15d}".format(
                        stars[i], kpmgs[i], np.nan, np.nan, np.nan,
                        0, 0, 0, 0), file=f)
            print("")

    if plot:

        # Load all stars from the statistics table
        epic, kp, cdpp6r, cdpp6, cdpp6v, _, out, tot, saturated = np.loadtxt(
            outfile, unpack=True, skiprows=2)
        epic = np.array(epic, dtype=int)
        out = np.array(out, dtype=int)
        tot = np.array(tot, dtype=int)
        saturated = np.array(saturated, dtype=int)

        # Get only stars in this subcampaign
        inds = np.array([e in sub for e in epic])
        epic = epic[inds]
        kp = kp[inds]
        # HACK: Campaign 0 magnitudes are reported only to the nearest tenth,
        # so let's add a little noise to spread them out for nicer plotting
        kp = kp + 0.1 * (0.5 - np.random.random(len(kp)))
        cdpp6r = cdpp6r[inds]
        cdpp6 = cdpp6[inds]
        cdpp6v = cdpp6v[inds]
        out = out[inds]
        tot = tot[inds]
        saturated = saturated[inds]
        sat = np.where(saturated == 1)
        unsat = np.where(saturated == 0)
        if not np.any([not np.isnan(x) for x in cdpp6]):
            raise Exception("No targets to plot.")

        # Control transparency: fade points when there are many of them
        alpha_kepler = 0.03
        alpha_unsat = min(0.1, 2000. / (1 + len(unsat[0])))
        alpha_sat = min(1., 180. / (1 + len(sat[0])))

        # Get the comparison model stats.
        # The everest1/k2sc/k2sff branches were identical up to the table
        # name, so they are consolidated here.
        if compare_to.lower() in ('everest1', 'k2sc', 'k2sff'):
            name = compare_to.lower()
            epic_1, cdpp6_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_%s.cdpp' % (int(campaign), name)),
                unpack=True)
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)
            # Outliers
            epic_1, out_1, tot_1 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'c%02d_%s.out' % (int(campaign), name)),
                unpack=True)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)
        elif compare_to.lower() == 'kepler':
            kic, kepler_kp, kepler_cdpp6 = np.loadtxt(
                os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                             'kepler.cdpp'), unpack=True)
        else:
            # Another everest model: read its .cdpp table directly
            compfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                                    'c%02d_%s.cdpp' % (int(campaign),
                                                       compare_to))
            epic_1, _, _, cdpp6_1, _, _, out_1, tot_1, saturated = np.loadtxt(
                compfile, unpack=True, skiprows=2)
            epic_1 = np.array(epic_1, dtype=int)
            inds = np.array([e in sub for e in epic_1])
            epic_1 = epic_1[inds]
            cdpp6_1 = cdpp6_1[inds]
            out_1 = out_1[inds]
            tot_1 = tot_1[inds]
            cdpp6_1 = sort_like(cdpp6_1, epic, epic_1)
            out_1 = sort_like(out_1, epic, epic_1)
            tot_1 = sort_like(tot_1, epic, epic_1)

        # ------ 1. Plot cdpp vs. mag
        if compare_to.lower() != 'kepler':
            fig = pl.figure(figsize=(16, 5))
            ax = [pl.subplot2grid((120, 120), (0, 0),
                                  colspan=35, rowspan=120),
                  pl.subplot2grid((120, 120), (0, 40),
                                  colspan=35, rowspan=120),
                  pl.subplot2grid((120, 120), (0, 80),
                                  colspan=35, rowspan=55),
                  pl.subplot2grid((120, 120), (65, 80),
                                  colspan=35, rowspan=55)]
        else:
            # No relative-CDPP panel when comparing to Kepler itself
            fig = pl.figure(figsize=(12, 5))
            ax = [pl.subplot2grid((120, 75), (0, 0),
                                  colspan=35, rowspan=120),
                  None,
                  pl.subplot2grid((120, 75), (0, 40),
                                  colspan=35, rowspan=55),
                  pl.subplot2grid((120, 75), (65, 40),
                                  colspan=35, rowspan=55)]
        fig.canvas.set_window_title(
            'K2 Campaign %s: %s versus %s' % (campaign, model, compare_to))
        fig.subplots_adjust(left=0.05, right=0.95, bottom=0.125, top=0.9)
        bins = np.arange(7.5, 18.5, 0.5)
        if compare_to.lower() != 'kepler':
            ax[0].scatter(kp[unsat], cdpp6_1[unsat], color='y',
                          marker='.', alpha=alpha_unsat)
            ax[0].scatter(kp[sat], cdpp6_1[sat], color='y',
                          marker='s', alpha=alpha_sat, s=5)
            ax[0].scatter(kp[unsat], cdpp6[unsat], color='b',
                          marker='.', alpha=alpha_unsat, picker=True)
            ax[0].scatter(kp[sat], cdpp6[sat], color='b',
                          marker='s', alpha=alpha_sat, s=5, picker=True)
            # Overplot the median CDPP in half-magnitude bins
            for y, style in zip([cdpp6_1, cdpp6], ['yo', 'bo']):
                by = np.zeros_like(bins) * np.nan
                for b, bin in enumerate(bins):
                    i = np.where((y > -np.inf) & (y < np.inf)
                                 & (kp >= bin - 0.5) & (kp < bin + 0.5))[0]
                    if len(i) > 10:
                        by[b] = np.median(y[i])
                ax[0].plot(bins, by, style, markeredgecolor='w')
        else:
            ax[0].scatter(kepler_kp, kepler_cdpp6, color='y',
                          marker='.', alpha=alpha_kepler)
            ax[0].scatter(kp, cdpp6, color='b', marker='.',
                          alpha=alpha_unsat, picker=True)
            for x, y, style in zip([kepler_kp, kp], [kepler_cdpp6, cdpp6],
                                   ['yo', 'bo']):
                by = np.zeros_like(bins) * np.nan
                for b, bin in enumerate(bins):
                    i = np.where((y > -np.inf) & (y < np.inf)
                                 & (x >= bin - 0.5) & (x < bin + 0.5))[0]
                    if len(i) > 10:
                        by[b] = np.median(y[i])
                ax[0].plot(bins, by, style, markeredgecolor='w')
        ax[0].set_ylim(-10, 500)
        ax[0].set_xlim(8, 18)
        ax[0].set_xlabel('Kepler Magnitude', fontsize=18)
        ax[0].set_title('CDPP6 (ppm)', fontsize=18)

        # ------ 2. Plot the equivalent of the Aigrain+16 figure
        if compare_to.lower() != 'kepler':
            x = kp
            y = (cdpp6 - cdpp6_1) / cdpp6_1
            ax[1].scatter(x[unsat], y[unsat], color='b', marker='.',
                          alpha=alpha_unsat, zorder=-1, picker=True)
            ax[1].scatter(x[sat], y[sat], color='r', marker='.',
                          alpha=alpha_sat, zorder=-1, picker=True)
            ax[1].set_ylim(-1, 1)
            ax[1].set_xlim(8, 18)
            ax[1].axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
            ax[1].axhline(0.5, color='gray', ls='--',
                          lw=2, zorder=-99, alpha=0.5)
            ax[1].axhline(-0.5, color='gray', ls='--',
                          lw=2, zorder=-99, alpha=0.5)
            bins = np.arange(7.5, 18.5, 0.5)
            # Bin the CDPP
            by = np.zeros_like(bins) * np.nan
            for b, bin in enumerate(bins):
                i = np.where((y > -np.inf) & (y < np.inf)
                             & (x >= bin - 0.5) & (x < bin + 0.5))[0]
                if len(i) > 10:
                    by[b] = np.median(y[i])
            ax[1].plot(bins[:9], by[:9], 'k--', lw=2)
            ax[1].plot(bins[8:], by[8:], 'k-', lw=2)
            ax[1].set_title(r'Relative CDPP', fontsize=18)
            ax[1].set_xlabel('Kepler Magnitude', fontsize=18)

        # ------ 3. Plot the outliers (cap the range at the 95th percentile)
        i = np.argsort(out)
        a = int(0.95 * len(out))
        omax = out[i][a]
        if compare_to.lower() != 'kepler':
            j = np.argsort(out_1)
            b = int(0.95 * len(out_1))
            omax = max(omax, out_1[j][b])
        ax[2].hist(out, 25, range=(0, omax), histtype='step', color='b')
        if compare_to.lower() != 'kepler':
            ax[2].hist(out_1, 25, range=(0, omax), histtype='step', color='y')
        ax[2].margins(0, None)
        ax[2].set_title('Number of Outliers', fontsize=18)

        # Plot the total number of data points (5th-95th percentile range)
        i = np.argsort(tot)
        a = int(0.05 * len(tot))
        b = int(0.95 * len(tot))
        tmin = tot[i][a]
        tmax = tot[i][b]
        if compare_to.lower() != 'kepler':
            j = np.argsort(tot_1)
            c = int(0.05 * len(tot_1))
            d = int(0.95 * len(tot_1))
            tmin = min(tmin, tot_1[j][c])
            tmax = max(tmax, tot_1[j][d])
        ax[3].hist(tot, 25, range=(tmin, tmax), histtype='step', color='b')
        if compare_to.lower() != 'kepler':
            ax[3].hist(tot_1, 25, range=(tmin, tmax),
                       histtype='step', color='y')
        ax[3].margins(0, None)
        ax[3].set_xlabel('Number of Data Points', fontsize=18)

        # Pickable points: clicking a point opens the target's light curve.
        # NOTE(review): in the 'kepler' branch `y` is the leftover loop
        # variable from the binning loop above (== cdpp6) — confirm intended.
        Picker = StatsPicker([ax[0], ax[1]], [kp, kp], [cdpp6, y], epic,
                             model=model, compare_to=compare_to,
                             campaign=campaign)
        fig.canvas.mpl_connect('pick_event', Picker)

        # Show
        pl.show()
def HasShortCadence(EPIC, season=None):
    '''
    Returns `True` if short cadence data is available for this target.

    :param int EPIC: The EPIC ID number
    :param int season: The campaign number. Default :py:obj:`None`

    '''
    # Resolve the campaign if the caller didn't supply one
    if season is None:
        season = Campaign(EPIC)
    if season is None:
        # Target isn't in any known campaign
        return None
    # Scan the campaign catalog for this EPIC; the fourth column of each
    # entry is the short-cadence flag
    for entry in GetK2Campaign(season):
        if entry[0] == EPIC:
            return entry[3]
    return None
def InjectionStatistics(campaign=0, clobber=False, model='nPLD', plot=True,
                        show=True, **kwargs):
    '''
    Computes and plots the statistics for injection/recovery tests.

    :param int campaign: The campaign number. Default 0
    :param str model: The :py:obj:`everest` model name
    :param bool plot: Default :py:obj:`True`
    :param bool show: Show the plot? Default :py:obj:`True`. \
           If :py:obj:`False`, returns the `fig, ax` instances.
    :param bool clobber: Overwrite existing files? Default :py:obj:`False`

    '''

    # Compute the statistics table if it doesn't exist (or if clobbering)
    stars = GetK2Campaign(campaign, epics_only=True)
    if type(campaign) is int:
        outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                               'c%02d_%s.inj' % (campaign, model))
    else:
        # Sub-campaigns (e.g. 9.1) get a fractional file name
        outfile = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
                               'c%04.1f_%s.inj' % (campaign, model))
    if clobber or not os.path.exists(outfile):
        with open(outfile, 'w') as f:
            print("EPIC Depth UControl URecovered" +
                  " MControl MRecovered", file=f)
            print("--------- ---------- ---------- ----------" +
                  " ---------- ----------", file=f)
            for i, _ in enumerate(stars):
                sys.stdout.write('\rProcessing target %d/%d...' %
                                 (i + 1, len(stars)))
                sys.stdout.flush()
                path = os.path.join(EVEREST_DAT, 'k2',
                                    'c%02d' % int(campaign),
                                    ('%09d' % stars[i])[:4] + '00000',
                                    ('%09d' % stars[i])[4:])
                # Loop over all injected depths
                for depth in [0.01, 0.001, 0.0001]:
                    try:
                        # Unmasked run
                        data = np.load(os.path.join(
                            path, '%s_Inject_U%g.npz' % (model, depth)))
                        assert depth == data['inject'][()]['depth'], ""
                        ucontrol = data['inject'][()]['rec_depth_control']
                        urecovered = data['inject'][()]['rec_depth']
                        # Masked run
                        data = np.load(os.path.join(
                            path, '%s_Inject_M%g.npz' % (model, depth)))
                        assert depth == data['inject'][()]['depth'], ""
                        mcontrol = data['inject'][()]['rec_depth_control']
                        mrecovered = data['inject'][()]['rec_depth']
                        # Log it
                        print("{:>09d} {:>13.8f} {:>13.8f} {:>13.8f} {:>13.8f} {:>13.8f}".format(
                            stars[i], depth, ucontrol, urecovered,
                            mcontrol, mrecovered), file=f)
                    except Exception:
                        # Best effort: skip targets whose injection files are
                        # missing or inconsistent. (Was a bare `except:`,
                        # which also swallowed KeyboardInterrupt/SystemExit.)
                        pass
            print("")

    if plot:

        # Load the statistics
        try:
            epic, depth, ucontrol, urecovered, mcontrol, mrecovered = \
                np.loadtxt(outfile, unpack=True, skiprows=2)
        except ValueError:
            # Empty table → nothing to plot
            raise Exception("No targets to plot.")

        # Normalize to the injected depth
        ucontrol /= depth
        urecovered /= depth
        mcontrol /= depth
        mrecovered /= depth

        # Set up a 3x2 grid: rows are injected depths, columns are
        # unmasked/masked recovery
        fig, ax = pl.subplots(3, 2, figsize=(9, 12))
        fig.subplots_adjust(hspace=0.29)
        ax[0, 0].set_title(r'Unmasked', fontsize=18)
        ax[0, 1].set_title(r'Masked', fontsize=18)
        ax[0, 0].set_ylabel(r'$D_0 = 10^{-2}$', rotation=90,
                            fontsize=18, labelpad=10)
        ax[1, 0].set_ylabel(r'$D_0 = 10^{-3}$', rotation=90,
                            fontsize=18, labelpad=10)
        ax[2, 0].set_ylabel(r'$D_0 = 10^{-4}$', rotation=90,
                            fontsize=18, labelpad=10)

        # Per-depth histogram ranges, bin counts, y limits, and tick marks
        depths = [1e-2, 1e-3, 1e-4]
        ranges = [(0.75, 1.25), (0.5, 1.5), (0., 2.)]
        nbins = [30, 30, 20]
        ymax = [0.4, 0.25, 0.16]
        xticks = [[0.75, 0.875, 1., 1.125, 1.25],
                  [0.5, 0.75, 1., 1.25, 1.5],
                  [0., 0.5, 1., 1.5, 2.0]]

        # Plot
        for i in range(3):

            # Indices for this injected depth
            idx = np.where(depth == depths[i])

            for j, control, recovered in zip(
                    [0, 1],
                    [ucontrol[idx], mcontrol[idx]],
                    [urecovered[idx], mrecovered[idx]]):

                # Control (red) histogram, normalized to unit total weight
                ax[i, j].hist(control, bins=nbins[i], range=ranges[i],
                              color='r', histtype='step',
                              weights=np.ones_like(control) / len(control))

                # Recovered (blue) histogram
                ax[i, j].hist(recovered, bins=nbins[i], range=ranges[i],
                              color='b', histtype='step',
                              weights=np.ones_like(recovered) /
                              len(recovered))

                # Indicate perfect recovery (D/D0 = 1)
                ax[i, j].axvline(1., color='k', ls='--')

                # Annotate the fraction of points falling off-range
                if len(recovered):
                    au = len(np.where(recovered > ranges[i][1])[0]) \
                        / len(recovered)
                    al = len(np.where(recovered < ranges[i][0])[0]) \
                        / len(recovered)
                    ax[i, j].annotate('%.2f' % al, xy=(0.01, 0.93),
                                      xycoords='axes fraction',
                                      xytext=(0.1, 0.93), ha='left',
                                      va='center', color='b',
                                      arrowprops=dict(arrowstyle="->",
                                                      color='b'))
                    ax[i, j].annotate('%.2f' % au, xy=(0.99, 0.93),
                                      xycoords='axes fraction',
                                      xytext=(0.9, 0.93), ha='right',
                                      va='center', color='b',
                                      arrowprops=dict(arrowstyle="->",
                                                      color='b'))
                if len(control):
                    cu = len(np.where(control > ranges[i][1])[0]) \
                        / len(control)
                    cl = len(np.where(control < ranges[i][0])[0]) \
                        / len(control)
                    ax[i, j].annotate('%.2f' % cl, xy=(0.01, 0.86),
                                      xycoords='axes fraction',
                                      xytext=(0.1, 0.86), ha='left',
                                      va='center', color='r',
                                      arrowprops=dict(arrowstyle="->",
                                                      color='r'))
                    ax[i, j].annotate('%.2f' % cu, xy=(0.99, 0.86),
                                      xycoords='axes fraction',
                                      xytext=(0.9, 0.86), ha='right',
                                      va='center', color='r',
                                      arrowprops=dict(arrowstyle="->",
                                                      color='r'))

                # Annotate the median recovered depth
                if len(recovered):
                    ax[i, j].annotate('M = %.2f' % np.median(recovered),
                                      xy=(0.35, 0.5), ha='right',
                                      xycoords='axes fraction', color='b',
                                      fontsize=16)
                if len(control):
                    ax[i, j].annotate('M = %.2f' % np.median(control),
                                      xy=(0.65, 0.5), ha='left',
                                      xycoords='axes fraction', color='r',
                                      fontsize=16)

                # Tweaks
                ax[i, j].set_xticks(xticks[i])
                ax[i, j].set_xlim(xticks[i][0], xticks[i][-1])
                ax[i, j].set_ylim(-0.005, ymax[i])
                ax[i, j].set_xlabel(r'$D/D_0$', fontsize=16)
                ax[i, j].get_yaxis().set_major_locator(MaxNLocator(5))
                for tick in ax[i, j].get_xticklabels() + \
                        ax[i, j].get_yticklabels():
                    tick.set_fontsize(14)

        if show:
            pl.show()
        else:
            return fig, ax
def HDUCards(headers, hdu=0):
    '''
    Generates HDU cards for inclusion in the de-trended light curve
    FITS file. Used internally.

    '''
    if headers is None:
        return []

    if hdu == 0:
        # Primary HDU: copy the basic mission/target keywords from the
        # TPF Primary HDU header
        tpf_header = headers[0]
        entries = ['TELESCOP', 'INSTRUME', 'OBJECT', 'KEPLERID', 'CHANNEL',
                   'MODULE', 'OUTPUT', 'CAMPAIGN', 'DATA_REL', 'OBSMODE',
                   'TTABLEID', 'RADESYS', 'RA_OBJ', 'DEC_OBJ', 'EQUINOX',
                   'KEPMAG']
    elif hdu in (1, 6):
        # BinTable HDU: the per-column WCS keywords repeat verbatim for
        # table columns 4 through 9, so generate them instead of listing
        # all 162 by hand (same names, same order)
        tpf_header = headers[1]
        entries = []
        for n in range(4, 10):
            entries += ['WCSN%dP' % n, 'WCAX%dP' % n, '1CTY%dP' % n,
                        '2CTY%dP' % n, '1CUN%dP' % n, '2CUN%dP' % n,
                        '1CRV%dP' % n, '2CRV%dP' % n, '1CDL%dP' % n,
                        '2CDL%dP' % n, '1CRP%dP' % n, '2CRP%dP' % n,
                        'WCAX%d' % n, '1CTYP%d' % n, '2CTYP%d' % n,
                        '1CRPX%d' % n, '2CRPX%d' % n, '1CRVL%d' % n,
                        '2CRVL%d' % n, '1CUNI%d' % n, '2CUNI%d' % n,
                        '1CDLT%d' % n, '2CDLT%d' % n, '11PC%d' % n,
                        '12PC%d' % n, '21PC%d' % n, '22PC%d' % n]
        entries += ['INHERIT', 'EXTNAME', 'EXTVER', 'TELESCOP', 'INSTRUME',
                    'OBJECT', 'KEPLERID', 'RADESYS', 'RA_OBJ', 'DEC_OBJ',
                    'EQUINOX', 'EXPOSURE', 'TIMEREF', 'TASSIGN', 'TIMESYS',
                    'BJDREFI', 'BJDREFF', 'TIMEUNIT', 'TELAPSE', 'LIVETIME',
                    'TSTART', 'TSTOP', 'LC_START', 'LC_END', 'DEADC',
                    'TIMEPIXR', 'TIERRELA', 'INT_TIME', 'READTIME',
                    'FRAMETIM', 'NUM_FRM', 'TIMEDEL', 'DATE-OBS', 'DATE-END',
                    'BACKAPP', 'DEADAPP', 'VIGNAPP', 'GAIN', 'READNOIS',
                    'NREADOUT', 'TIMSLICE', 'MEANBLCK', 'LCFXDOFF',
                    'SCFXDOFF']
    elif hdu in (3, 4, 5):
        # Image HDUs: copy the celestial WCS keywords from the TPF
        # BinTable HDU header
        tpf_header = headers[2]
        entries = ['TELESCOP', 'INSTRUME', 'OBJECT', 'KEPLERID', 'RADESYS',
                   'RA_OBJ', 'DEC_OBJ', 'EQUINOX', 'WCSAXES', 'CTYPE1',
                   'CTYPE2', 'CRPIX1', 'CRPIX2', 'CRVAL1', 'CRVAL2',
                   'CUNIT1', 'CUNIT2', 'CDELT1', 'CDELT2', 'PC1_1', 'PC1_2',
                   'PC2_1', 'PC2_2', 'WCSNAMEP', 'WCSAXESP', 'CTYPE1P',
                   'CUNIT1P', 'CRPIX1P', 'CRVAL1P', 'CDELT1P', 'CTYPE2P',
                   'CUNIT2P', 'CRPIX2P', 'CRVAL2P', 'CDELT2P', 'NPIXSAP',
                   'NPIXMISS']
    else:
        # No mission cards for any other HDU
        return []

    # Banner, then whichever of the requested keywords exist in the header
    cards = [('COMMENT', '************************'),
             ('COMMENT', '* MISSION INFO *'),
             ('COMMENT', '************************')]
    for entry in entries:
        try:
            cards.append(tuple(tpf_header[entry]))
        except KeyError:
            # Keyword absent from this TPF header; skip it
            pass
    return cards