def get_attr_value(self, name):
'''Retrieve the ``value`` for the attribute ``name``. The ``name``
can be nested following the :ref:`double underscore <tutorial-underscore>`
notation, for example ``group__name``. If the attribute is not available it
raises :class:`AttributeError`.'''
if name in self._meta.dfields:
return self._meta.dfields[name].get_value(self)
elif not name.startswith('__') and JSPLITTER in name:
bits = name.split(JSPLITTER)
fname = bits[0]
if fname in self._meta.dfields:
return self._meta.dfields[fname].get_value(self, *bits[1:])
else:
return getattr(self, name)
else:
        return getattr(self, name)
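For illustration, a minimal usage sketch of the double underscore traversal; the ``Fund``/``Group`` models here are hypothetical:

# Hypothetical models: Group has a 'name' field and Fund has a
# ForeignKey field 'group' pointing at Group.
# fund = Fund(name='Pluto', group=Group(name='fixed income'))
# fund.get_attr_value('name')         # 'Pluto' (a plain field)
# fund.get_attr_value('group__name')  # 'fixed income' (nested via '__')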
def clone(self, **data):
'''Utility method for cloning the instance as a new object.
    :parameter data: additional data which overrides the instance field data.
:rtype: a new instance of this class.
'''
meta = self._meta
session = self.session
pkname = meta.pkname()
pkvalue = data.pop(pkname, None)
fields = self.todict(exclude_cache=True)
fields.update(data)
fields.pop('__dbdata__', None)
obj = self._meta.make_object((pkvalue, None, fields))
obj.session = session
    return obj
def todict(self, exclude_cache=False):
    '''Return a dictionary of serialised scalar fields for pickling.
If the *exclude_cache* flag is ``True``, fields with :attr:`Field.as_cache`
attribute set to ``True`` will be excluded.'''
odict = {}
for field, value in self.fieldvalue_pairs(exclude_cache=exclude_cache):
value = field.serialise(value)
if value:
odict[field.name] = value
if self._dbdata and 'id' in self._dbdata:
odict['__dbdata__'] = {'id': self._dbdata['id']}
    return odict
def load_fields(self, *fields):
'''Load extra fields to this :class:`StdModel`.'''
if self._loadedfields is not None:
if self.session is None:
raise SessionNotAvailable('No session available')
meta = self._meta
kwargs = {meta.pkname(): self.pkvalue()}
        obj = self.session.query(self.__class__).load_only(*fields).get(**kwargs)
for name in fields:
field = meta.dfields.get(name)
if field is not None:
setattr(self, field.attname,
                        getattr(obj, field.attname, None))
def load_related_model(self, name, load_only=None, dont_load=None):
    '''Load the :class:`ForeignKey` field ``name`` if it is part of the
fields of this model and if the related object is not already loaded.
It is used by the lazy loading mechanism of :ref:`one-to-many <one-to-many>`
relationships.
:parameter name: the :attr:`Field.name` of the :class:`ForeignKey` to load.
:parameter load_only: Optional parameters which specify the fields to load.
:parameter dont_load: Optional parameters which specify the fields not to load.
:return: the related :class:`StdModel` instance.
'''
field = self._meta.dfields.get(name)
if not field:
raise ValueError('Field "%s" not available' % name)
elif not field.type == 'related object':
raise ValueError('Field "%s" not a foreign key' % name)
    return self._load_related_model(field, load_only, dont_load)
def from_base64_data(cls, **kwargs):
    '''Load a :class:`StdModel` from possibly base64-encoded data.
This method is used to load models from data obtained from the :meth:`tojson`
method.'''
o = cls()
meta = cls._meta
pkname = meta.pkname()
for name, value in iteritems(kwargs):
if name == pkname:
field = meta.pk
elif name in meta.dfields:
field = meta.dfields[name]
else:
continue
value = field.to_python(value)
setattr(o, field.attname, value)
    return o
def DetrendFITS(fitsfile, raw=False, season=None, clobber=False, **kwargs):
"""
De-trend a K2 FITS file using :py:class:`everest.detrender.rPLD`.
:param str fitsfile: The full path to the FITS file
:param ndarray aperture: A 2D integer array corresponding to the \
desired photometric aperture (1 = in aperture, 0 = outside \
aperture). Default is to interactively select an aperture.
:param kwargs: Any kwargs accepted by :py:class:`everest.detrender.rPLD`.
:returns: An :py:class:`everest.Everest` instance.
"""
# Get info
EPIC = pyfits.getheader(fitsfile, 0)['KEPLERID']
if season is None:
season = pyfits.getheader(fitsfile, 0)['CAMPAIGN']
if season is None or season == "":
season = 0
everestfile = os.path.join(
everest.missions.k2.TargetDirectory(EPIC, season),
everest.missions.k2.FITSFile(EPIC, season))
# De-trend?
if clobber or not os.path.exists(everestfile):
# Get raw data
data = GetData(fitsfile, EPIC, season, clobber=clobber, **kwargs)
# De-trend
model = everest.rPLD(EPIC,
data=data,
season=season, debug=True,
clobber=clobber, **kwargs)
# Publish it
everest.fits.MakeFITS(model)
shutil.copyfile(os.path.join(model.dir, model.name + '.pdf'),
os.path.join(model.dir,
model._mission.DVSFile(model.ID,
model.season,
model.cadence)))
# Return an Everest instance
    return everest.Everest(EPIC, season=season)
def GetData(fitsfile, EPIC, campaign, clobber=False,
saturation_tolerance=-0.1,
bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17],
get_hires=False, get_nearby=False,
aperture=None, **kwargs):
'''
Returns a :py:obj:`DataContainer` instance with the
raw data for the target.
:param str fitsfile: The full raw target pixel file path
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param float saturation_tolerance: Target is considered saturated \
if flux is within this fraction of the pixel well depth. \
Default -0.1
    :param array_like bad_bits: Flagged :py:obj:`QUALITY` bits to consider \
outliers when computing the model. \
Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
    :param bool get_hires: Download a high resolution image of the target? \
        Default :py:obj:`False`
    :param bool get_nearby: Retrieve location of nearby sources? \
        Default :py:obj:`False`
'''
# Get the npz file name
filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
('%09d' % EPIC)[:4] +
'00000', ('%09d' % EPIC)[4:],
'data.npz')
# Create the dir
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
# Check for saved data
if not os.path.exists(filename) or clobber:
log.info("Fetching data for target...")
# Load the tpf
with pyfits.open(fitsfile) as f:
qdata = f[1].data
# Get the header info
fitsheader = [pyfits.getheader(fitsfile, 0).cards,
pyfits.getheader(fitsfile, 1).cards,
pyfits.getheader(fitsfile, 2).cards]
# Get a hi res image of the target
if get_hires:
try:
hires = GetHiResImage(EPIC)
except ValueError:
hires = None
else:
hires = None
# Get nearby sources
if get_nearby:
try:
nearby = GetSources(EPIC)
except ValueError:
nearby = []
else:
nearby = []
# Get the arrays
cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
time = np.array(qdata.field('TIME'), dtype='float64')
fpix = np.array(qdata.field('FLUX'), dtype='float64')
fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
qual = np.array(qdata.field('QUALITY'), dtype=int)
# Get rid of NaNs in the time array by interpolating
naninds = np.where(np.isnan(time))
time = Interpolate(np.arange(0, len(time)), naninds, time)
# Get the motion vectors (if available!)
pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
else:
pc1 = None
pc2 = None
# Get the static pixel images for plotting
pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]
# Get the aperture interactively
if aperture is None:
aperture = ApertureSelector(time[::10], fpix[::10],
title='EPIC %d' % EPIC).aperture
if np.sum(aperture) == 0:
raise ValueError("Empty aperture!")
# Atomically write to disk.
# http://stackoverflow.com/questions/2333872/
# atomic-writing-to-file-with-python
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
f = NamedTemporaryFile("wb", delete=False)
np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
fpix_err=fpix_err,
qual=qual, aperture=aperture,
pc1=pc1, pc2=pc2, fitsheader=fitsheader,
pixel_images=pixel_images, nearby=nearby,
hires=hires)
f.flush()
os.fsync(f.fileno())
f.close()
shutil.move(f.name, filename)
# Load
data = np.load(filename)
aperture = data['aperture'][()]
pixel_images = data['pixel_images']
nearby = data['nearby'][()]
hires = data['hires'][()]
fitsheader = data['fitsheader']
cadn = data['cadn']
time = data['time']
fpix = data['fpix']
fpix_err = data['fpix_err']
qual = data['qual']
pc1 = data['pc1']
pc2 = data['pc2']
# Compute the saturation flux and the 97.5th percentile
# flux in each pixel of the aperture. We're going
# to compare these to decide if the star is saturated.
satflx = SaturationFlux(EPIC, campaign=campaign) * \
(1. + saturation_tolerance)
f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
for i in range(fpix.shape[1]):
for j in range(fpix.shape[2]):
if aperture[i, j]:
# Let's remove NaNs...
tmp = np.delete(fpix[:, i, j], np.where(
np.isnan(fpix[:, i, j])))
# ... and really bad outliers...
if len(tmp):
f = SavGol(tmp)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
bad = np.where((f > med + 10. * MAD) |
(f < med - 10. * MAD))[0]
                    tmp = np.delete(tmp, bad)
# ... so we can compute the 97.5th percentile flux
i97 = int(0.975 * len(tmp))
tmp = tmp[np.argsort(tmp)[i97]]
f97[i, j] = tmp
# Check if any of the pixels are actually saturated
if np.nanmax(f97) <= satflx:
log.info("No saturated columns detected.")
saturated = False
aperture[np.isnan(fpix[0])] = 0
ap = np.where(aperture & 1)
fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')
else:
# We need to collapse the saturated columns
saturated = True
ncol = 0
fpixnew = []
ferrnew = []
for j in range(aperture.shape[1]):
if np.any(f97[:, j] > satflx):
marked = False
collapsed = np.zeros(len(fpix[:, 0, 0]))
collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
for i in range(aperture.shape[0]):
if aperture[i, j]:
if not marked:
aperture[i, j] = AP_COLLAPSED_PIXEL
marked = True
else:
aperture[i, j] = AP_SATURATED_PIXEL
collapsed += fpix[:, i, j]
collapsed_err2 += fpix_err[:, i, j] ** 2
if np.any(collapsed):
fpixnew.append(collapsed)
ferrnew.append(np.sqrt(collapsed_err2))
ncol += 1
else:
for i in range(aperture.shape[0]):
if aperture[i, j]:
fpixnew.append(fpix[:, i, j])
ferrnew.append(fpix_err[:, i, j])
fpix2D = np.array(fpixnew).T
fpix_err2D = np.array(ferrnew).T
log.info("Collapsed %d saturated column(s)." % ncol)
# Compute the background
binds = np.where(aperture ^ 1)
if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
bkg = np.nanmedian(np.array([f[binds]
for f in fpix], dtype='float64'), axis=1)
# Uncertainty of the median:
# http://davidmlane.com/hyperstat/A106993.html
bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
dtype='float64'), axis=1) \
/ np.sqrt(len(binds[0]))
bkg = bkg.reshape(-1, 1)
bkg_err = bkg_err.reshape(-1, 1)
else:
bkg = 0.
bkg_err = 0.
# Make everything 2D and remove the background
fpix = fpix2D - bkg
fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
flux = np.sum(fpix, axis=1)
# Get NaN data points
nanmask = np.where(np.isnan(flux) | (flux == 0))[0]
# Get flagged data points -- we won't train our model on them
badmask = []
for b in bad_bits:
badmask += list(np.where(qual & 2 ** (b - 1))[0])
# Flag >10 sigma outliers -- same thing.
tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
t = np.delete(time, tmpmask)
f = np.delete(flux, tmpmask)
f = SavGol(f)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
badmask.extend([np.argmax(time == t[i]) for i in bad])
# Campaign 2 hack: the first day or two are screwed up
if campaign == 2:
badmask.extend(np.where(time < 2061.5)[0])
# Finalize the mask
badmask = np.array(sorted(list(set(badmask))))
# Interpolate the nans
fpix = Interpolate(time, nanmask, fpix)
fpix_err = Interpolate(time, nanmask, fpix_err)
# Return
data = DataContainer()
data.ID = EPIC
data.campaign = campaign
data.cadn = cadn
data.time = time
data.fpix = fpix
data.fpix_err = fpix_err
data.nanmask = nanmask
data.badmask = badmask
data.aperture = aperture
data.aperture_name = 'custom'
data.apertures = dict(custom=aperture)
data.quality = qual
data.Xpos = pc1
data.Ypos = pc2
data.meta = fitsheader
data.mag = fitsheader[0]['KEPMAG'][1]
if type(data.mag) is pyfits.card.Undefined:
data.mag = np.nan
data.pixel_images = pixel_images
data.nearby = nearby
data.hires = hires
data.saturated = saturated
data.bkg = bkg
    return data
def title(self):
'''
    Returns the axis instances where the title will be printed
'''
return self.title_left(on=False), self.title_center(on=False), \
        self.title_right(on=False)
def footer(self):
'''
    Returns the axis instances where the footer will be printed
'''
return self.footer_left(on=False), self.footer_center(on=False), \
        self.footer_right(on=False)
def top_right(self):
'''
Returns the axis instance at the top right of the page,
where the postage stamp and aperture is displayed
'''
res = self.body_top_right[self.tcount]()
self.tcount += 1
    return res
def left(self):
'''
Returns the current axis instance on the left side of
the page where each successive light curve is displayed
'''
res = self.body_left[self.lcount]()
self.lcount += 1
    return res
def right(self):
'''
Returns the current axis instance on the right side of the
page, where cross-validation information is displayed
'''
res = self.body_right[self.rcount]()
self.rcount += 1
    return res
def body(self):
'''
Returns the axis instance where the light curves will be shown
'''
res = self._body[self.bcount]()
self.bcount += 1
    return res
def hashmodel(model, library=None):
    '''Calculate the hash id of the metaclass of ``model``.'''
library = library or 'python-stdnet'
meta = model._meta
sha = hashlib.sha1(to_bytes('{0}({1})'.format(library, meta)))
hash = sha.hexdigest()[:8]
meta.hash = hash
if hash in _model_dict:
raise KeyError('Model "{0}" already in hash table.\
Rename your model or the module containing the model.'.format(meta))
    _model_dict[hash] = model
def bind(self, callback, sender=None):
'''Bind a ``callback`` for a given ``sender``.'''
key = (_make_id(callback), _make_id(sender))
    self.callbacks.append((key, callback))
def fire(self, sender=None, **params):
'''Fire callbacks from a ``sender``.'''
keys = (_make_id(None), _make_id(sender))
results = []
for (_, key), callback in self.callbacks:
if key in keys:
results.append(callback(self, sender, **params))
    return results
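A minimal sketch of how ``bind`` and ``fire`` cooperate, assuming a ``Signal`` class that exposes the two methods above and invokes callbacks as ``callback(signal, sender, **params)``:

# Hypothetical usage, assuming a Signal class built from the methods above.
# signal = Signal()
# def on_save(signal, sender, **params):
#     return 'saved %s' % sender
# signal.bind(on_save)           # sender=None matches any sender
# signal.fire(sender='mymodel')  # -> ['saved mymodel']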
def execute_command(self, cmnd, *args, **options):
"Execute a command and return a parsed response"
args, options = self.preprocess_command(cmnd, *args, **options)
    return self.client.execute_command(cmnd, *args, **options)
def _range10_90(x):
'''
Returns the 10th-90th percentile range of array :py:obj:`x`.
'''
x = np.delete(x, np.where(np.isnan(x)))
i = np.argsort(x)
a = int(0.1 * len(x))
b = int(0.9 * len(x))
    return x[i][b] - x[i][a]
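A quick self-contained check of the helper (NumPy only); NaNs are dropped before the sorted indices are taken:

import numpy as np

x = np.concatenate([np.linspace(0., 1., 100), [np.nan]])
print(_range10_90(x))  # ~0.81 for a uniform 0..1 grid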
def Campaign(EPIC, **kwargs):
'''
Returns the campaign number(s) for a given EPIC target. If target
is not found, returns :py:obj:`None`.
:param int EPIC: The EPIC number of the target.
'''
campaigns = []
for campaign, stars in GetK2Stars().items():
if EPIC in [s[0] for s in stars]:
campaigns.append(campaign)
if len(campaigns) == 0:
return None
elif len(campaigns) == 1:
return campaigns[0]
else:
        return campaigns
def GetK2Stars(clobber=False):
'''
Download and return a :py:obj:`dict` of all *K2* stars organized by
campaign. Saves each campaign to a `.stars` file in the
`everest/missions/k2/tables` directory.
:param bool clobber: If :py:obj:`True`, download and overwrite \
existing files. Default :py:obj:`False`
.. note:: The keys of the dictionary returned by this function are the \
(integer) numbers of each campaign. Each item in the \
:py:obj:`dict` is a list of the targets in the corresponding \
campaign, and each item in that list is in turn a list of the \
following: **EPIC number** (:py:class:`int`), \
**Kp magnitude** (:py:class:`float`), **CCD channel number** \
(:py:class:`int`), and **short cadence available** \
(:py:class:`bool`).
'''
# Download
if clobber:
print("Downloading K2 star list...")
stars = kplr_client.k2_star_info()
print("Writing star list to disk...")
for campaign in stars.keys():
if not os.path.exists(os.path.join(EVEREST_SRC, 'missions',
'k2', 'tables')):
os.makedirs(os.path.join(
EVEREST_SRC, 'missions', 'k2', 'tables'))
with open(os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
'c%02d.stars' % campaign), 'w') as f:
for star in stars[campaign]:
print(",".join([str(s) for s in star]), file=f)
# Return
res = {}
for campaign in K2_CAMPAIGNS:
f = os.path.join(EVEREST_SRC, 'missions', 'k2',
'tables', 'c%02d.stars' % campaign)
if os.path.exists(f):
with open(f, 'r') as file:
lines = file.readlines()
if len(lines[0].split(',')) == 4:
# EPIC number, Kp magnitude, channel number,
# short cadence available?
stars = [[int(l.split(',')[0]),
_float(l.split(',')[1]),
int(l.split(',')[2]),
eval(l.split(',')[3])] for l in lines]
else:
stars = [[int(l), np.nan, -1, None] for l in lines]
res.update({campaign: stars})
    return res
def GetK2Campaign(campaign, clobber=False, split=False,
epics_only=False, cadence='lc'):
'''
Return all stars in a given *K2* campaign.
:param campaign: The *K2* campaign number. If this is an :py:class:`int`, \
returns all targets in that campaign. If a :py:class:`float` in \
the form :py:obj:`X.Y`, runs the :py:obj:`Y^th` decile of campaign \
:py:obj:`X`.
:param bool clobber: If :py:obj:`True`, download and overwrite existing \
files. Default :py:obj:`False`
:param bool split: If :py:obj:`True` and :py:obj:`campaign` is an \
:py:class:`int`, returns each of the subcampaigns as a separate \
list. Default :py:obj:`False`
:param bool epics_only: If :py:obj:`True`, returns only the EPIC numbers. \
If :py:obj:`False`, returns metadata associated with each target. \
Default :py:obj:`False`
:param str cadence: Long (:py:obj:`lc`) or short (:py:obj:`sc`) cadence? \
Default :py:obj:`lc`.
'''
all = GetK2Stars(clobber=clobber)
if int(campaign) in all.keys():
all = all[int(campaign)]
else:
return []
if cadence == 'sc':
all = [a for a in all if a[3]]
if epics_only:
all = [a[0] for a in all]
if type(campaign) is int or type(campaign) is np.int64:
if not split:
return all
else:
all_split = list(Chunks(all, len(all) // 10))
# HACK: Sometimes we're left with a few targets
# dangling at the end. Insert them back evenly
# into the first few subcampaigns.
if len(all_split) > 10:
tmp1 = all_split[:10]
tmp2 = all_split[10:]
for n in range(len(tmp2)):
tmp1[n] = np.append(tmp1[n], tmp2[n])
all_split = tmp1
res = []
for subcampaign in range(10):
res.append(all_split[subcampaign])
return res
elif type(campaign) is float:
x, y = divmod(campaign, 1)
campaign = int(x)
subcampaign = round(y * 10)
return list(Chunks(all, len(all) // 10))[subcampaign]
else:
        raise Exception('Argument `campaign` must be an `int` ' +
                        'or a `float` in the form `X.Y`')
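For example, the decile selection with a float argument works as sketched below (building the star list needs the cached `.stars` tables or a network call, so this is illustrative only):

# Hypothetical sketch for campaign 6:
# GetK2Campaign(6)              # every target in campaign 6
# GetK2Campaign(6, split=True)  # ten sublists, one per decile
# GetK2Campaign(6.3)            # only the decile with index round(0.3 * 10) = 3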
def Channel(EPIC, campaign=None):
'''
Returns the channel number for a given EPIC target.
'''
if campaign is None:
campaign = Campaign(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s." % campaign)
try:
stars = GetK2Stars()[campaign]
except KeyError:
# Not sure what else to do here!
log.warn("Unknown channel for target. Defaulting to channel 2.")
return 2
i = np.argmax([s[0] == EPIC for s in stars])
    return stars[i][2]
def Module(EPIC, campaign=None):
'''
Returns the module number for a given EPIC target.
'''
channel = Channel(EPIC, campaign=campaign)
nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25,
10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49,
16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73,
23: 77, 24: 81}
for c in [channel, channel - 1, channel - 2, channel - 3]:
if c in nums.values():
for mod, chan in nums.items():
if chan == c:
return mod
    return None
def Channels(module):
'''
Returns the channels contained in the given K2 module.
'''
nums = {2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25,
10: 29, 11: 33, 12: 37, 13: 41, 14: 45, 15: 49,
16: 53, 17: 57, 18: 61, 19: 65, 20: 69, 22: 73,
23: 77, 24: 81}
if module in nums:
return [nums[module], nums[module] + 1,
nums[module] + 2, nums[module] + 3]
else:
        return None
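Since each module owns the four consecutive channels starting at the value in the lookup table, a quick check:

print(Channels(4))  # [9, 10, 11, 12]
print(Channels(5))  # None (module 5 is not in the table)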
def KepMag(EPIC, campaign=None):
'''
Returns the *Kepler* magnitude for a given EPIC target.
'''
if campaign is None:
campaign = Campaign(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s." % campaign)
stars = GetK2Stars()[campaign]
i = np.argmax([s[0] == EPIC for s in stars])
    return stars[i][1]
def RemoveBackground(EPIC, campaign=None):
'''
Returns :py:obj:`True` or :py:obj:`False`, indicating whether or not
to remove the background flux for the target. If ``campaign < 3``,
returns :py:obj:`True`, otherwise returns :py:obj:`False`.
'''
if campaign is None:
campaign = Campaign(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s." % campaign)
if campaign < 3:
return True
else:
        return False
def GetNeighboringChannels(channel):
'''
Returns all channels on the same module as :py:obj:`channel`.
'''
x = divmod(channel - 1, 4)[1]
    return channel + np.array(range(-x, -x + 4), dtype=int)
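Channels are grouped in blocks of four per module, so the helper simply snaps to the containing block; for example (NumPy required):

import numpy as np

print(GetNeighboringChannels(7))  # [5 6 7 8]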
def MASTRADec(ra, dec, darcsec, stars_only=False):
'''
Detector location retrieval based upon RA and Dec.
Adapted from `PyKE <http://keplergo.arc.nasa.gov/PyKE.shtml>`_.
'''
# coordinate limits
darcsec /= 3600.0
ra1 = ra - darcsec / np.cos(dec * np.pi / 180)
ra2 = ra + darcsec / np.cos(dec * np.pi / 180)
dec1 = dec - darcsec
dec2 = dec + darcsec
# build mast query
url = 'http://archive.stsci.edu/k2/epic/search.php?'
url += 'action=Search'
url += '&k2_ra=' + str(ra1) + '..' + str(ra2)
url += '&k2_dec=' + str(dec1) + '..' + str(dec2)
url += '&max_records=10000'
url += '&selectedColumnsCsv=id,k2_ra,k2_dec,kp'
url += '&outputformat=CSV'
if stars_only:
url += '&ktc_target_type=LC'
url += '&objtype=star'
# retrieve results from MAST
try:
lines = urllib.request.urlopen(url)
    except Exception:
log.warn('Unable to retrieve source data from MAST.')
lines = ''
# collate nearby sources
epicid = []
kepmag = []
ra = []
dec = []
for line in lines:
line = line.strip().decode('ascii')
if (len(line) > 0 and 'EPIC' not in line and 'integer' not in line and
'no rows found' not in line):
out = line.split(',')
r, d = sex2dec(out[1], out[2])
epicid.append(int(out[0]))
kepmag.append(float(out[3]))
ra.append(r)
dec.append(d)
epicid = np.array(epicid)
kepmag = np.array(kepmag)
ra = np.array(ra)
dec = np.array(dec)
    return epicid, ra, dec, kepmag
def sex2dec(ra, dec):
'''
    Convert sexagesimal hours to decimal degrees. Adapted from
`PyKE <http://keplergo.arc.nasa.gov/PyKE.shtml>`_.
:param float ra: The right ascension
:param float dec: The declination
:returns: The same values, but in decimal degrees
'''
    ra = re.sub(r'\s+', '|', ra.strip())
ra = re.sub(':', '|', ra.strip())
ra = re.sub(';', '|', ra.strip())
ra = re.sub(',', '|', ra.strip())
ra = re.sub('-', '|', ra.strip())
ra = ra.split('|')
outra = (float(ra[0]) + float(ra[1]) / 60. + float(ra[2]) / 3600.) * 15.0
    dec = re.sub(r'\s+', '|', dec.strip())
dec = re.sub(':', '|', dec.strip())
dec = re.sub(';', '|', dec.strip())
dec = re.sub(',', '|', dec.strip())
dec = dec.split('|')
if float(dec[0]) > 0.0:
outdec = float(dec[0]) + float(dec[1]) / 60. + float(dec[2]) / 3600.
else:
outdec = float(dec[0]) - float(dec[1]) / 60. - float(dec[2]) / 3600.
    return outra, outdec
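A worked example of the conversion (values rounded in the comments):

# 19h 02m 43.1s -> (19 + 2/60 + 43.1/3600) * 15 ~ 285.68 degrees
# +50d 08m 06.7s -> 50 + 8/60 + 6.7/3600 ~ 50.14 degrees
ra, dec = sex2dec('19 02 43.1', '+50 08 06.7')
print(ra, dec)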
def GetSources(ID, darcsec=None, stars_only=False):
'''
Grabs the EPIC coordinates from the TPF and searches MAST
for other EPIC targets within the same aperture.
:param int ID: The 9-digit :py:obj:`EPIC` number of the target
:param float darcsec: The search radius in arcseconds. \
Default is four times the largest dimension of the aperture.
:param bool stars_only: If :py:obj:`True`, only returns objects \
explicitly designated as `"stars"` in MAST. Default :py:obj:`False`
    :returns: A list of source :py:obj:`dict` objects describing \
        other :py:obj:`EPIC` targets within or close to this \
        target's aperture
'''
client = kplr.API()
star = client.k2_star(ID)
tpf = star.get_target_pixel_files()[0]
with tpf.open() as f:
crpix1 = f[2].header['CRPIX1']
crpix2 = f[2].header['CRPIX2']
crval1 = f[2].header['CRVAL1']
crval2 = f[2].header['CRVAL2']
cdelt1 = f[2].header['CDELT1']
cdelt2 = f[2].header['CDELT2']
pc1_1 = f[2].header['PC1_1']
pc1_2 = f[2].header['PC1_2']
pc2_1 = f[2].header['PC2_1']
pc2_2 = f[2].header['PC2_2']
pc = np.array([[pc1_1, pc1_2], [pc2_1, pc2_2]])
pc = np.linalg.inv(pc)
crpix1p = f[2].header['CRPIX1P']
crpix2p = f[2].header['CRPIX2P']
crval1p = f[2].header['CRVAL1P']
crval2p = f[2].header['CRVAL2P']
cdelt1p = f[2].header['CDELT1P']
cdelt2p = f[2].header['CDELT2P']
if darcsec is None:
darcsec = 4 * max(f[2].data.shape)
epicid, ra, dec, kepmag = MASTRADec(
star.k2_ra, star.k2_dec, darcsec, stars_only)
sources = []
for i, epic in enumerate(epicid):
dra = (ra[i] - crval1) * np.cos(np.radians(dec[i])) / cdelt1
ddec = (dec[i] - crval2) / cdelt2
sx = pc[0, 0] * dra + pc[0, 1] * ddec + crpix1 + crval1p - 1.0
sy = pc[1, 0] * dra + pc[1, 1] * ddec + crpix2 + crval2p - 1.0
sources.append(dict(ID=epic, x=sx, y=sy, mag=kepmag[i],
x0=crval1p, y0=crval2p))
    return sources
def GetHiResImage(ID):
'''
Queries the Palomar Observatory Sky Survey II catalog to
obtain a higher resolution optical image of the star with EPIC number
:py:obj:`ID`.
'''
# Get the TPF info
client = kplr.API()
star = client.k2_star(ID)
k2ra = star.k2_ra
k2dec = star.k2_dec
tpf = star.get_target_pixel_files()[0]
with tpf.open() as f:
k2wcs = WCS(f[2].header)
shape = np.array(f[1].data.field('FLUX'), dtype='float64')[0].shape
# Get the POSS URL
hou = int(k2ra * 24 / 360.)
min = int(60 * (k2ra * 24 / 360. - hou))
sec = 60 * (60 * (k2ra * 24 / 360. - hou) - min)
ra = '%02d+%02d+%.2f' % (hou, min, sec)
sgn = '' if np.sign(k2dec) >= 0 else '-'
deg = int(np.abs(k2dec))
min = int(60 * (np.abs(k2dec) - deg))
sec = 3600 * (np.abs(k2dec) - deg - min / 60)
dec = '%s%02d+%02d+%.1f' % (sgn, deg, min, sec)
url = 'https://archive.stsci.edu/cgi-bin/dss_search?v=poss2ukstu_red&' + \
'r=%s&d=%s&e=J2000&h=3&w=3&f=fits&c=none&fov=NONE&v3=' % (ra, dec)
# Query the server
r = urllib.request.Request(url)
handler = urllib.request.urlopen(r)
code = handler.getcode()
if int(code) != 200:
# Unavailable
return None
data = handler.read()
# Atomically write to a temp file
f = NamedTemporaryFile("wb", delete=False)
f.write(data)
f.flush()
os.fsync(f.fileno())
f.close()
# Now open the POSS fits file
with pyfits.open(f.name) as ff:
img = ff[0].data
# Map POSS pixels onto K2 pixels
xy = np.empty((img.shape[0] * img.shape[1], 2))
z = np.empty(img.shape[0] * img.shape[1])
pwcs = WCS(f.name)
k = 0
for i in range(img.shape[0]):
for j in range(img.shape[1]):
ra, dec = pwcs.all_pix2world(float(j), float(i), 0)
xy[k] = k2wcs.all_world2pix(ra, dec, 0)
z[k] = img[i, j]
k += 1
# Resample
grid_x, grid_y = np.mgrid[-0.5:shape[1] - 0.5:0.1, -0.5:shape[0] - 0.5:0.1]
resampled = griddata(xy, z, (grid_x, grid_y), method='cubic')
# Rotate to align with K2 image. Not sure why, but it is necessary
resampled = np.rot90(resampled)
    return resampled
def SaturationFlux(EPIC, campaign=None, **kwargs):
'''
Returns the well depth for the target. If any of the target's pixels
have flux larger than this value, they are likely to be saturated and
cause charge bleeding. The well depths were obtained from Table 13
of the Kepler instrument handbook. We assume an exposure time of 6.02s.
'''
channel, well_depth = np.loadtxt(os.path.join(EVEREST_SRC, 'missions',
'k2',
'tables', 'well_depth.tsv'),
unpack=True)
satflx = well_depth[channel == Channel(EPIC, campaign=campaign)][0] / 6.02
    return satflx
def GetChunk(time, breakpoints, b, mask=[]):
'''
Returns the indices corresponding to a given light curve chunk.
:param int b: The index of the chunk to return
'''
M = np.delete(np.arange(len(time)), mask, axis=0)
if b > 0:
res = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])]
else:
res = M[M <= breakpoints[b]]
    return res
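A self-contained example of the chunk indexing; the breakpoints are cadence indices, not times:

import numpy as np

time = np.arange(10)
breakpoints = [4, 9]
print(GetChunk(time, breakpoints, 0))            # [0 1 2 3 4]
print(GetChunk(time, breakpoints, 1))            # [5 6 7 8 9]
print(GetChunk(time, breakpoints, 1, mask=[6]))  # [5 7 8 9]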
def GetStars(campaign, module, model='nPLD', **kwargs):
'''
Returns de-trended light curves for all stars on a given module in
a given campaign.
'''
# Get the channel numbers
channels = Channels(module)
assert channels is not None, "No channels available on this module."
# Get the EPIC numbers
all = GetK2Campaign(campaign)
stars = np.array([s[0] for s in all if s[2] in channels and
os.path.exists(
os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign),
('%09d' % s[0])[:4] + '00000',
('%09d' % s[0])[4:], model + '.npz'))], dtype=int)
N = len(stars)
assert N > 0, "No light curves found for campaign %d, module %d." % (
campaign, module)
# Loop over all stars and store the fluxes in a list
fluxes = []
errors = []
kpars = []
for n in range(N):
# De-trended light curve file name
nf = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign),
('%09d' % stars[n])[:4] + '00000',
('%09d' % stars[n])[4:], model + '.npz')
# Get the data
data = np.load(nf)
t = data['time']
if n == 0:
time = t
breakpoints = data['breakpoints']
# Get de-trended light curve
y = data['fraw'] - data['model']
err = data['fraw_err']
# De-weight outliers and bad timestamps
m = np.array(list(set(np.concatenate([data['outmask'], data['badmask'],
data['nanmask'],
data['transitmask']]))),
dtype=int)
# Interpolate over the outliers
y = np.interp(t, np.delete(t, m), np.delete(y, m))
err = np.interp(t, np.delete(t, m), np.delete(err, m))
# Append to our running lists
fluxes.append(y)
errors.append(err)
kpars.append(data['kernel_params'])
return time, breakpoints, np.array(fluxes), \
        np.array(errors), np.array(kpars)
def SysRem(time, flux, err, ncbv=5, niter=50, sv_win=999,
sv_order=3, **kwargs):
'''
Applies :py:obj:`SysRem` to a given set of light curves.
:param array_like time: The time array for all of the light curves
:param array_like flux: A 2D array of the fluxes for each of the light \
curves, shape `(nfluxes, ntime)`
:param array_like err: A 2D array of the flux errors for each of the \
light curves, shape `(nfluxes, ntime)`
:param int ncbv: The number of signals to recover. Default 5
:param int niter: The number of :py:obj:`SysRem` iterations to perform. \
Default 50
    :param int sv_win: The Savitzky-Golay filter window size. Default 999
    :param int sv_order: The Savitzky-Golay filter order. Default 3
'''
nflx, tlen = flux.shape
# Get normalized fluxes
med = np.nanmedian(flux, axis=1).reshape(-1, 1)
y = flux - med
# Compute the inverse of the variances
invvar = 1. / err ** 2
# The CBVs for this set of fluxes
cbvs = np.zeros((ncbv, tlen))
# Recover `ncbv` components
for n in range(ncbv):
# Initialize the weights and regressors
c = np.zeros(nflx)
a = np.ones(tlen)
f = y * invvar
# Perform `niter` iterations
for i in range(niter):
# Compute the `c` vector (the weights)
c = np.dot(f, a) / np.dot(invvar, a ** 2)
# Compute the `a` vector (the regressors)
a = np.dot(c, f) / np.dot(c ** 2, invvar)
# Remove this component from all light curves
y -= np.outer(c, a)
# Save this regressor after smoothing it a bit
if sv_win >= len(a):
sv_win = len(a) - 1
if sv_win % 2 == 0:
sv_win -= 1
cbvs[n] = savgol_filter(a - np.nanmedian(a), sv_win, sv_order)
    return cbvs
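A tiny synthetic sanity check, assuming NumPy and scipy's savgol_filter are importable as the function above requires: two light curves share one injected systematic, which should come back as the first CBV up to sign and scale.

import numpy as np

t = np.linspace(0., 80., 500)
trend = np.sin(2. * np.pi * t / 40.)
rng = np.random.RandomState(42)
flux = np.vstack([10. + 2. * trend + 0.01 * rng.randn(500),
                  5. - 1. * trend + 0.01 * rng.randn(500)])
err = 0.01 * np.ones_like(flux)
cbvs = SysRem(t, flux, err, ncbv=1, niter=20, sv_win=49)
print(abs(np.corrcoef(cbvs[0], trend)[0, 1]))  # ~1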
def GetCBVs(campaign, model='nPLD', clobber=False, **kwargs):
'''
Computes the CBVs for a given campaign.
:param int campaign: The campaign number
:param str model: The name of the :py:obj:`everest` model. Default `nPLD`
:param bool clobber: Overwrite existing files? Default `False`
'''
# Initialize logging?
if len(logging.getLogger().handlers) == 0:
InitLog(file_name=None, screen_level=logging.DEBUG)
log.info('Computing CBVs for campaign %d...' % (campaign))
# Output path
path = os.path.join(EVEREST_DAT, 'k2', 'cbv', 'c%02d' % campaign)
if not os.path.exists(path):
os.makedirs(path)
# Get the design matrix
xfile = os.path.join(path, 'X.npz')
if clobber or not os.path.exists(xfile):
log.info('Obtaining light curves...')
time = None
for module in range(2, 25):
# Get the light curves
lcfile = os.path.join(path, '%d.npz' % module)
if clobber or not os.path.exists(lcfile):
try:
time, breakpoints, fluxes, errors, kpars = GetStars(
campaign, module, model=model, **kwargs)
except AssertionError:
continue
np.savez(lcfile, time=time, breakpoints=breakpoints,
fluxes=fluxes, errors=errors, kpars=kpars)
# Load the light curves
lcs = np.load(lcfile)
if time is None:
time = lcs['time']
breakpoints = lcs['breakpoints']
fluxes = lcs['fluxes']
errors = lcs['errors']
kpars = lcs['kpars']
else:
fluxes = np.vstack([fluxes, lcs['fluxes']])
errors = np.vstack([errors, lcs['errors']])
kpars = np.vstack([kpars, lcs['kpars']])
# Compute the design matrix
log.info('Running SysRem...')
X = np.ones((len(time), 1 + kwargs.get('ncbv', 5)))
# Loop over the segments
new_fluxes = np.zeros_like(fluxes)
for b in range(len(breakpoints)):
# Get the current segment's indices
inds = GetChunk(time, breakpoints, b)
# Update the error arrays with the white GP component
for j in range(len(errors)):
errors[j] = np.sqrt(errors[j] ** 2 + kpars[j][0] ** 2)
# Get de-trended fluxes
X[inds, 1:] = SysRem(time[inds], fluxes[:, inds],
errors[:, inds], **kwargs).T
# Save
np.savez(xfile, X=X, time=time, breakpoints=breakpoints)
else:
# Load from disk
data = np.load(xfile)
X = data['X'][()]
time = data['time'][()]
breakpoints = data['breakpoints'][()]
# Plot
plotfile = os.path.join(path, 'X.pdf')
if clobber or not os.path.exists(plotfile):
fig, ax = pl.subplots(2, 3, figsize=(12, 8))
fig.subplots_adjust(left=0.05, right=0.95)
ax = ax.flatten()
for axis in ax:
axis.set_xticks([])
axis.set_yticks([])
for b in range(len(breakpoints)):
inds = GetChunk(time, breakpoints, b)
for n in range(min(6, X.shape[1])):
ax[n].plot(time[inds], X[inds, n])
ax[n].set_title(n, fontsize=14)
fig.savefig(plotfile, bbox_inches='tight')
    return X
def read_lua_file(dotted_module, path=None, context=None):
'''Load lua script from the stdnet/lib/lua directory'''
path = path or DEFAULT_LUA_PATH
bits = dotted_module.split('.')
bits[-1] += '.lua'
name = os.path.join(path, *bits)
with open(name) as f:
data = f.read()
if context:
data = data.format(context)
    return data
def parse_info(response):
'''Parse the response of Redis's INFO command into a Python dict.
In doing so, convert byte data into unicode.'''
info = {}
response = response.decode('utf-8')
def get_value(value):
        if ',' not in value or '=' not in value:
return value
sub_dict = {}
for item in value.split(','):
k, v = item.split('=')
try:
sub_dict[k] = int(v)
except ValueError:
sub_dict[k] = v
return sub_dict
data = info
for line in response.splitlines():
keyvalue = line.split(':')
if len(keyvalue) == 2:
key, value = keyvalue
try:
data[key] = int(value)
except ValueError:
data[key] = get_value(value)
else:
data = {}
info[line[2:]] = data
    return info
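A self-contained example with a fabricated slice of an INFO payload:

raw = (b'# Server\r\n'
       b'redis_version:2.8.19\r\n'
       b'uptime_in_seconds:12345\r\n'
       b'# Keyspace\r\n'
       b'db0:keys=10,expires=2\r\n')
info = parse_info(raw)
print(info['Server']['redis_version'])  # '2.8.19'
print(info['Keyspace']['db0']['keys'])  # 10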
def zdiffstore(self, dest, keys, withscores=False):
    '''Compute the difference of the sorted sets specified by ``keys``
    and store the result in a new sorted set at ``dest``.
'''
keys = (dest,) + tuple(keys)
wscores = 'withscores' if withscores else ''
return self.execute_script('zdiffstore', keys, wscores,
                               withscores=withscores)
def zpopbyrank(self, name, start, stop=None, withscores=False, desc=False):
'''Pop a range by rank.
'''
stop = stop if stop is not None else start
return self.execute_script('zpop', (name,), 'rank', start,
stop, int(desc), int(withscores),
                               withscores=withscores)
def delete(self, instance):
'''Delete an instance'''
    if flushdb:
        flushdb(self.client)
    else:
        self.client.flushdb()
def lnprior(x):
"""Return the log prior given parameter vector `x`."""
per, t0, b = x
if b < -1 or b > 1:
return -np.inf
elif per < 7 or per > 10:
return -np.inf
elif t0 < 1978 or t0 > 1979:
return -np.inf
else:
        return 0.
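The prior is a flat box, so any parameter vector outside the bounds gets -inf:

print(lnprior([8.5, 1978.5, 0.0]))  # 0.0  (inside all bounds)
print(lnprior([6.0, 1978.5, 0.0]))  # -inf (period below 7 days)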
def lnlike(x, star):
"""Return the log likelihood given parameter vector `x`."""
ll = lnprior(x)
if np.isinf(ll):
return ll, (np.nan, np.nan)
per, t0, b = x
model = TransitModel('b', per=per, t0=t0, b=b, rhos=10.)(star.time)
like, d, vard = star.lnlike(model, full_output=True)
ll += like
    return ll, (d,)
def check_user(self, username, email):
'''username and email (if provided) must be unique.'''
users = self.router.user
avail = yield users.filter(username=username).count()
if avail:
raise FieldError('Username %s not available' % username)
if email:
avail = yield users.filter(email=email).count()
if avail:
            raise FieldError('Email %s not available' % email)
def permitted_query(self, query, group, operations):
'''Change the ``query`` so that only instances for which
``group`` has roles with permission on ``operations`` are returned.'''
session = query.session
models = session.router
user = group.user
if user.is_superuser: # super-users have all permissions
return query
    roles = group.roles.query()  # query on all roles for group
    # The through model for the Role/Permission relationship
    through_model = models.role.permissions.model
    models[through_model].filter(role=roles,
permission__model_type=query.model,
permission__operations=operations)
# query on all relevant permissions
    permissions = models.permission.filter(model_type=query.model,
level=operations)
owner_query = query.filter(user=user)
# all roles for the query model with appropriate permission level
    roles = models.role.filter(model_type=query.model, level__ge=operations)
# Now we need groups which have these roles
groups = Role.groups.throughquery(
session).filter(role=roles).get_field('group')
# I need to know if user is in any of these groups
if user.groups.filter(id=groups).count():
# it is, lets get the model with permissions less
# or equal permission level
permitted = models.instancerole.filter(
role=roles).get_field('object_id')
        return owner_query.union(models[query.model].filter(id=permitted))
else:
        return owner_query
def create_role(self, name):
'''Create a new :class:`Role` owned by this :class:`Subject`'''
models = self.session.router
    return models.role.new(name=name, owner=self)
def assign(self, role):
'''Assign :class:`Role` ``role`` to this :class:`Subject`. If this
:class:`Subject` is the :attr:`Role.owner`, this method does nothing.'''
if role.owner_id != self.id:
        return self.roles.add(role)
def has_permissions(self, object, group, operations):
'''Check if this :class:`Subject` has permissions for ``operations``
on an ``object``. It returns the number of valid permissions.'''
if self.is_superuser:
return 1
else:
models = self.session.router
# valid permissions
query = models.permission.for_object(object, operation=operations)
objects = models[models.role.permissions.model]
return objects.filter(role=self.role.query(),
                              permission=query).count()
def add_permission(self, resource, operation):
'''Add a new :class:`Permission` for ``resource`` to perform an
``operation``. The resource can be either an object or a model.'''
if isclass(resource):
model_type = resource
pk = ''
else:
model_type = resource.__class__
pk = resource.pkvalue()
p = Permission(model_type=model_type, object_pk=pk,
operation=operation)
session = self.session
if session.transaction:
session.add(p)
self.permissions.add(p)
return p
else:
with session.begin() as t:
t.add(p)
self.permissions.add(p)
            return t.add_callback(lambda r: p)
def init_app(self, app, session=None, parameters=None):
"""Initializes snow extension
Set config default and find out which client type to use
:param app: App passed from constructor or directly to init_app (factory)
:param session: requests-compatible session to pass along to init_app
:param parameters: `ParamsBuilder` object passed to `Client` after instantiation
:raises:
- ConfigError - if unable to determine client type
"""
if parameters is not None and not isinstance(parameters, ParamsBuilder):
raise InvalidUsage("parameters should be a pysnow.ParamsBuilder object, not %r" % type(parameters).__name__)
self._session = session
self._parameters = parameters
app.config.setdefault('SNOW_INSTANCE', None)
app.config.setdefault('SNOW_HOST', None)
app.config.setdefault('SNOW_USER', None)
app.config.setdefault('SNOW_PASSWORD', None)
app.config.setdefault('SNOW_OAUTH_CLIENT_ID', None)
app.config.setdefault('SNOW_OAUTH_CLIENT_SECRET', None)
app.config.setdefault('SNOW_USE_SSL', True)
if app.config['SNOW_OAUTH_CLIENT_ID'] and app.config['SNOW_OAUTH_CLIENT_SECRET']:
self._client_type_oauth = True
elif self._session or (app.config['SNOW_USER'] and app.config['SNOW_PASSWORD']):
self._client_type_basic = True
else:
        raise ConfigError("You must supply user credentials, a session or OAuth credentials to use flask-snow")
def connection(self):
"""Snow connection instance, stores a `pysnow.Client` instance and `pysnow.Resource` instances
Creates a new :class:`pysnow.Client` object if it doesn't exist in the app slice of the context stack
:returns: :class:`pysnow.Client` object
"""
ctx = stack.top.app
if ctx is not None:
if not hasattr(ctx, 'snow'):
if self._client_type_oauth:
if not self._token_updater:
warnings.warn("No token updater has been set. Token refreshes will be ignored.")
client = self._get_oauth_client()
else:
client = self._get_basic_client()
if self._parameters:
# Set parameters passed on app init
client.parameters = self._parameters
ctx.snow = client
        return ctx.snow
def usage():
"""Print out a usage message"""
global options
l = len(options['long'])
    options['shortlist'] = [s for s in options['short'] if s != ':']
print("python -m behave2cucumber [-h] [-d level|--debug=level]")
for i in range(l):
print(" -{0}|--{1:20} {2}".format(options['shortlist'][i], options['long'][i], options['descriptions'][i])) |
Main
def main(argv):
"""Main"""
global options
opts = None
try:
opts, args = getopt.getopt(argv, options['short'], options['long'])
except getopt.GetoptError:
usage()
exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
exit()
elif opt in ("-d", "--debug"):
try:
arg = int(arg)
log.debug("Debug level received: " + str(arg))
except ValueError:
log.warning("Invalid log level: " + arg)
continue
if 0 <= arg <= 5:
log.setLevel(60 - (arg*10))
log.critical("Log level changed to: " + str(logging.getLevelName(60 - (arg*10))))
else:
log.warning("Invalid log level: " + str(arg))
infile = None
outfile = None
remove_background = False
duration_format = False
deduplicate = False
for opt, arg in opts:
if opt in ("-i", "--infile"):
log.info("Input File: " + arg)
infile = arg
if opt in ("-o", "--outfile"):
log.info("Output File: " + arg)
outfile = arg
if opt in ("-r", "--remove-background"):
log.info("Remove Background: Enabled")
remove_background = True
if opt in ("-f", "--format-duration"):
log.info("Format Duration: Enabled")
duration_format = True
if opt in ("-D", "--deduplicate"):
log.info("Deduplicate: Enabled")
deduplicate = True
if infile is None:
log.critical("No input JSON provided.")
usage()
exit(3)
with open(infile) as f:
cucumber_output = convert(json.load(f),
remove_background=remove_background,
duration_format=duration_format,
deduplicate=deduplicate)
if outfile is not None:
with open(outfile, 'w') as f:
json.dump(cucumber_output, f, indent=4, separators=(',', ': '))
else:
        pprint(cucumber_output)
def direction(theta, phi):
'''Return the direction vector of a cylinder defined
by the spherical coordinates theta and phi.
'''
return np.array([np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta),
                     np.cos(theta)])
def projection_matrix(w):
'''Return the projection matrix of a direction w.'''
    return np.identity(3) - np.dot(np.reshape(w, (3, 1)), np.reshape(w, (1, 3)))
def skew_matrix(w):
'''Return the skew matrix of a direction w.'''
return np.array([[0, -w[2], w[1]],
[w[2], 0, -w[0]],
                     [-w[1], w[0], 0]])
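Two identities useful for spot-checking these helpers: the projection matrix annihilates its own (unit) direction, and the skew matrix implements the cross product:

import numpy as np

w = direction(0.3, 1.1)  # a unit vector
v = np.array([1., 2., 3.])
print(np.allclose(np.dot(projection_matrix(w), w), 0.))        # True
print(np.allclose(np.dot(skew_matrix(w), v), np.cross(w, v)))  # True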
def calc_A(Ys):
'''Return the matrix A from a list of Y vectors.'''
return sum(np.dot(np.reshape(Y, (3,1)), np.reshape(Y, (1, 3)))
               for Y in Ys)
def calc_A_hat(A, S):
'''Return the A_hat matrix of A given the skew matrix S'''
    return np.dot(S, np.dot(A, np.transpose(S)))
def preprocess_data(Xs_raw):
    '''Translate the center of mass (COM) of the data to the origin.
    Return the processed data and the shift of the COM.'''
n = len(Xs_raw)
Xs_raw_mean = sum(X for X in Xs_raw) / n
    return [X - Xs_raw_mean for X in Xs_raw], Xs_raw_mean
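A short check that the centering behaves as described:

import numpy as np

Xs, shift = preprocess_data([np.array([1., 0., 0.]), np.array([3., 0., 0.])])
print(shift)    # [2. 0. 0.] -- the original center of mass
print(sum(Xs))  # [0. 0. 0.] -- the COM is now at the origin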
def G(w, Xs):
'''Calculate the G function given a cylinder direction w and a
list of data points Xs to be fitted.'''
n = len(Xs)
P = projection_matrix(w)
Ys = [np.dot(P, X) for X in Xs]
A = calc_A(Ys)
A_hat = calc_A_hat(A, skew_matrix(w))
u = sum(np.dot(Y, Y) for Y in Ys) / n
v = np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A))
    return sum((np.dot(Y, Y) - u - 2 * np.dot(Y, v)) ** 2 for Y in Ys)
def C(w, Xs):
'''Calculate the cylinder center given the cylinder direction and
a list of data points.
'''
n = len(Xs)
P = projection_matrix(w)
Ys = [np.dot(P, X) for X in Xs]
A = calc_A(Ys)
A_hat = calc_A_hat(A, skew_matrix(w))
    return np.dot(A_hat, sum(np.dot(Y, Y) * Y for Y in Ys)) / np.trace(np.dot(A_hat, A))
def r(w, Xs):
'''Calculate the radius given the cylinder direction and a list
of data points.
'''
n = len(Xs)
P = projection_matrix(w)
c = C(w, Xs)
    return np.sqrt(sum(np.dot(c - X, np.dot(P, c - X)) for X in Xs) / n)
def fit(data, guess_angles=None):
'''Fit a list of data points to a cylinder surface. The algorithm implemented
here is from David Eberly's paper "Fitting 3D Data with a Cylinder" from
https://www.geometrictools.com/Documentation/CylinderFitting.pdf
Arguments:
data - A list of 3D data points to be fitted.
guess_angles[0] - Guess of the theta angle of the axis direction
guess_angles[1] - Guess of the phi angle of the axis direction
Return:
Direction of the cylinder axis
A point on the cylinder axis
Radius of the cylinder
Fitting error (G function)
'''
Xs, t = preprocess_data(data)
# Set the start points
start_points = [(0, 0), (np.pi / 2, 0), (np.pi / 2, np.pi / 2)]
if guess_angles:
start_points = guess_angles
# Fit the cylinder from different start points
best_fit = None
best_score = float('inf')
for sp in start_points:
fitted = minimize(lambda x : G(direction(x[0], x[1]), Xs),
sp, method='Powell', tol=1e-6)
if fitted.fun < best_score:
best_score = fitted.fun
best_fit = fitted
w = direction(best_fit.x[0], best_fit.x[1])
    return w, C(w, Xs) + t, r(w, Xs), best_fit.fun
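An end-to-end sketch on synthetic data: points on a radius-2 cylinder whose axis is the z direction, so the fit should recover w ~ (0, 0, +/-1) and r ~ 2 (NumPy and scipy.optimize.minimize must be importable, as the functions above require):

import numpy as np

angles = np.linspace(0., 2. * np.pi, 40, endpoint=False)
heights = np.linspace(-5., 5., 15)
data = [np.array([2. * np.cos(a), 2. * np.sin(a), h])
        for a in angles for h in heights]
w_fit, c_fit, r_fit, err = fit(data)
print(np.round(np.abs(w_fit), 3))  # ~[0. 0. 1.]
print(round(r_fit, 3))             # ~2.0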
A Kafka client that publishes records to the Kafka cluster.
Keyword Arguments:
- ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]'
strings) that the producer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default: `127.0.0.1:9092`.
- ``client_id`` (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client.
Default: `Robot`.
Note:
Configuration parameters are described in more detail at
http://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html
def connect_producer(self, bootstrap_servers='127.0.0.1:9092', client_id='Robot', **kwargs):
"""A Kafka client that publishes records to the Kafka cluster.
Keyword Arguments:
- ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]'
strings) that the producer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default: `127.0.0.1:9092`.
- ``client_id`` (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client.
Default: `Robot`.
Note:
Configuration parameters are described in more detail at
http://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html
"""
self.producer = KafkaProducer(bootstrap_servers=bootstrap_servers, client_id=client_id, **kwargs) |
Publish a message to a topic.
- ``topic`` (str): topic where the message will be published
- ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer.
If value is None, key is required and message acts as a `delete`.
- ``timeout`` (int): seconds to block while waiting for the send to be acknowledged. Default: `60`.
- ``key``: a key to associate with the message. Can be used to determine which partition
to send the message to. If partition is None (and producer's partitioner config is left as default),
then messages with the same key will be delivered to the same partition (but if key is None,
partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer.
- ``partition`` (int): optionally specify a partition.
If not set, the partition will be selected using the configured `partitioner`.
- ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp.
Defaults to current time.
def send(self, topic, value=None, timeout=60, key=None, partition=None, timestamp_ms=None):
"""Publish a message to a topic.
- ``topic`` (str): topic where the message will be published
- ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer.
If value is None, key is required and message acts as a `delete`.
- ``timeout`` (int): seconds to block while waiting for the send to be acknowledged. Default: `60`.
- ``key``: a key to associate with the message. Can be used to determine which partition
to send the message to. If partition is None (and producer's partitioner config is left as default),
then messages with the same key will be delivered to the same partition (but if key is None,
partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer.
- ``partition`` (int): optionally specify a partition.
If not set, the partition will be selected using the configured `partitioner`.
- ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp.
Defaults to current time.
"""
future = self.producer.send(topic, value=value, key=key, partition=partition, timestamp_ms=timestamp_ms)
future.get(timeout=timeout) |
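A hedged usage sketch (``lib`` stands for a hypothetical instance of the class these keywords live on; a broker must be reachable):
lib.connect_producer(bootstrap_servers='127.0.0.1:9092', client_id='Robot')
# Blocks up to 30 s for broker acknowledgement via future.get(timeout=30).
lib.send('test-topic', value=b'hello', key=b'k1', timeout=30)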
Connect a Kafka consumer.
Keyword Arguments:
- ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default: `127.0.0.1:9092`.
- ``client_id`` (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: `Robot`.
- ``group_id`` (str or None): name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. If None, auto-partition assignment (via
group coordinator) and offset commits are disabled.
Default: `None`.
- ``auto_offset_reset`` (str): A policy for resetting offsets on
OffsetOutOfRange errors: `earliest` will move to the oldest
available message, `latest` will move to the most recent. Any
other value will raise an exception. Default: `latest`.
- ``enable_auto_commit`` (bool): If true the consumer's offset will be
periodically committed in the background. Default: `True`.
Note:
Configuration parameters are described in more detail at
http://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
def connect_consumer(
self,
bootstrap_servers='127.0.0.1:9092',
client_id='Robot',
group_id=None,
auto_offset_reset='latest',
enable_auto_commit=True,
**kwargs
):
"""Connect kafka consumer.
Keyword Arguments:
- ``bootstrap_servers``: 'host[:port]' string (or list of 'host[:port]'
strings) that the consumer should contact to bootstrap initial
cluster metadata. This does not have to be the full node list.
It just needs to have at least one broker that will respond to a
Metadata API Request. Default: `127.0.0.1:9092`.
- ``client_id`` (str): a name for this client. This string is passed in
each request to servers and can be used to identify specific
server-side log entries that correspond to this client. Also
submitted to GroupCoordinator for logging with respect to
consumer group administration. Default: `Robot`.
- ``group_id`` (str or None): name of the consumer group to join for dynamic
partition assignment (if enabled), and to use for fetching and
committing offsets. If None, auto-partition assignment (via
group coordinator) and offset commits are disabled.
Default: `None`.
- ``auto_offset_reset`` (str): A policy for resetting offsets on
OffsetOutOfRange errors: `earliest` will move to the oldest
available message, `latest` will move to the most recent. Any
other value will raise an exception. Default: `latest`.
- ``enable_auto_commit`` (bool): If true the consumer's offset will be
periodically committed in the background. Default: `True`.
Note:
Configuration parameters are described in more detail at
http://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
"""
self.consumer = KafkaConsumer(
bootstrap_servers=bootstrap_servers,
auto_offset_reset=auto_offset_reset,
client_id=client_id,
group_id=group_id,
enable_auto_commit=enable_auto_commit,
**kwargs
) |
Assign a list of TopicPartitions to this consumer.
- ``topic_partition`` (`TopicPartition` or list of `TopicPartition`): Assignment for this instance.
def assign_to_topic_partition(self, topic_partition=None):
"""Assign a list of TopicPartitions to this consumer.
- ``topic_partition`` (`TopicPartition` or list of `TopicPartition`): Assignment for this instance.
"""
if isinstance(topic_partition, TopicPartition):
topic_partition = [topic_partition]
if not self._is_assigned(topic_partition):
self.consumer.assign(topic_partition) |
Subscribe to a list of topics, or a topic regex pattern.
- ``topics`` (list): List of topics for subscription.
- ``pattern`` (str): Pattern to match available topics. You must provide either topics or pattern,
but not both.
def subscribe_topic(self, topics=None, pattern=None):
"""Subscribe to a list of topics, or a topic regex pattern.
- ``topics`` (list): List of topics for subscription.
- ``pattern`` (str): Pattern to match available topics. You must provide either topics or pattern,
but not both.
"""
if topics is None:
topics = []
elif not isinstance(topics, list):
topics = [topics]
self.consumer.subscribe(topics, pattern=pattern) |
Return offset of the next record that will be fetched.
- ``topic_partition`` (TopicPartition): Partition to check
def get_position(self, topic_partition=None):
"""Return offset of the next record that will be fetched.
- ``topic_partition`` (TopicPartition): Partition to check
"""
if isinstance(topic_partition, TopicPartition):
return self.consumer.position(topic_partition)
else:
raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.") |
Manually specify the fetch offset for a TopicPartition.
- ``offset``: Message offset in partition
- ``topic_partition`` (`TopicPartition`): Partition for seek operation
def seek(self, offset, topic_partition=None):
"""Manually specify the fetch offset for a TopicPartition.
- ``offset``: Message offset in partition
- ``topic_partition`` (`TopicPartition`): Partition for seek operation
"""
if isinstance(topic_partition, TopicPartition):
self.consumer.seek(topic_partition, offset=offset)
else:
raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.") |
Seek to the oldest available offset for partitions.
- ``topic_partition``: Optionally provide specific TopicPartitions,
otherwise default to all assigned partitions.
def seek_to_beginning(self, topic_partition=None):
"""Seek to the oldest available offset for partitions.
- ``topic_partition``: Optionally provide specific TopicPartitions,
otherwise default to all assigned partitions.
"""
if isinstance(topic_partition, TopicPartition):
self.consumer.seek_to_beginning(topic_partition)
else:
raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.") |
Seek to the most recent available offset for partitions.
- ``topic_partition``: Optionally provide specific `TopicPartitions`,
otherwise default to all assigned partitions.
def seek_to_end(self, topic_partition=None):
"""Seek to the most recent available offset for partitions.
- ``topic_partition``: Optionally provide specific `TopicPartitions`,
otherwise default to all assigned partitions.
"""
if isinstance(topic_partition, TopicPartition):
self.consumer.seek_to_end(topic_partition)
else:
raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.") |
Return the number of messages in topics.
- ``topics`` (list): list of topics.
def get_number_of_messages_in_topics(self, topics):
"""Retrun number of messages in topics.
- ``topics`` (list): list of topics.
"""
if not isinstance(topics, list):
topics = [topics]
number_of_messages = 0
for t in topics:
part = self.get_kafka_partitions_for_topic(topic=t)
partitions = [TopicPartition(topic=t, partition=p) for p in part]
number_of_messages += self.get_number_of_messages_in_topicpartition(partitions)
return number_of_messages |
Return number of messages in TopicPartition.
- ``topic_partition`` (list of TopicPartition)
def get_number_of_messages_in_topicpartition(self, topic_partition=None):
"""Return number of messages in TopicPartition.
- ``topic_partition`` (list of TopicPartition)
"""
if isinstance(topic_partition, TopicPartition):
topic_partition = [topic_partition]
number_of_messages = 0
assignment = self.consumer.assignment()
self.consumer.unsubscribe()
for partition in topic_partition:
if not isinstance(partition, TopicPartition):
raise TypeError("topic_partition must be of type TopicPartition, create it with Create TopicPartition keyword.")
self.assign_to_topic_partition(partition)
self.consumer.seek_to_end(partition)
end = self.consumer.position(partition)
self.consumer.seek_to_beginning(partition)
start = self.consumer.position(partition)
number_of_messages += end - start
self.consumer.unsubscribe()
self.consumer.assign(assignment)
return number_of_messages |
Fetch data from assigned topics / partitions.
- ``max_records`` (int): maximum number of records to poll. Default: Inherit value from max_poll_records.
- ``timeout_ms`` (int): Milliseconds spent waiting in poll if data is not available in the buffer.
If 0, returns immediately with any records that are available currently in the buffer, else returns empty.
Must not be negative. Default: `0`
def poll(self, timeout_ms=0, max_records=None):
"""Fetch data from assigned topics / partitions.
- ``max_records`` (int): maximum number of records to poll. Default: Inherit value from max_poll_records.
- ``timeout_ms`` (int): Milliseconds spent waiting in poll if data is not available in the buffer.
If 0, returns immediately with any records that are available currently in the buffer, else returns empty.
Must not be negative. Default: `0`
"""
messages = self.consumer.poll(timeout_ms=timeout_ms, max_records=max_records)
result = []
for _, msg in messages.items():
for item in msg:
result.append(item)
return result |
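A hedged consumer sketch to match (``lib`` is the same hypothetical instance; ``test-topic`` is illustrative):
lib.connect_consumer(bootstrap_servers='127.0.0.1:9092',
                     auto_offset_reset='earliest', group_id=None)
lib.subscribe_topic(topics=['test-topic'])
# Wait up to 1 s for buffered records; each item is a kafka-python ConsumerRecord.
for record in lib.poll(timeout_ms=1000, max_records=10):
    print(record.topic, record.partition, record.offset, record.value)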
Validate an email with the given key
def get(self, request, key):
"""Validate an email with the given key"""
try:
email_val = EmailAddressValidation.objects.get(validation_key=key)
except EmailAddressValidation.DoesNotExist:
messages.error(request, _('The email address you are trying to '
'verify either has already been verified'
' or does not exist.'))
return redirect('/')
try:
email = EmailAddress.objects.get(address=email_val.address)
except EmailAddress.DoesNotExist:
email = EmailAddress(address=email_val.address)
if email.user and email.user.is_active:
messages.error(request, _('The email address you are trying to '
'verify is already an active email '
'address.'))
email_val.delete()
return redirect('/')
email.user = email_val.user
email.save()
email_val.delete()
user = User.objects.get(username=email.user.username)
user.is_active = True
user.save()
messages.success(request, _('Email address verified!'))
return redirect('user_profile', username=email_val.user.username) |
Remove an email address, validated or not.
def delete(self, request, key):
"""Remove an email address, validated or not."""
request.DELETE = http.QueryDict(request.body)
email_addr = request.DELETE.get('email')
user_id = request.DELETE.get('user')
if not email_addr:
return http.HttpResponseBadRequest()
try:
email = EmailAddressValidation.objects.get(address=email_addr,
user_id=user_id)
except EmailAddressValidation.DoesNotExist:
pass
else:
email.delete()
return http.HttpResponse(status=204)
try:
email = EmailAddress.objects.get(address=email_addr,
user_id=user_id)
except EmailAddress.DoesNotExist:
raise http.Http404
email.user = None
email.save()
return http.HttpResponse(status=204) |
Set an email address as primary address.
def update(self, request, key):
"""Set an email address as primary address."""
request.UPDATE = http.QueryDict(request.body)
email_addr = request.UPDATE.get('email')
user_id = request.UPDATE.get('user')
if not email_addr:
return http.HttpResponseBadRequest()
try:
email = EmailAddress.objects.get(address=email_addr,
user_id=user_id)
except EmailAddress.DoesNotExist:
raise http.Http404
email.user.email = email_addr
email.user.save()
return http.HttpResponse(status=204) |
Check whether a logged-in user is trying to access the register page.
If so, redirect them to their profile.
def is_logged(self, user):
"""Check if a logged user is trying to access the register page.
If so, redirect him/her to his/her profile"""
response = None
if user.is_authenticated():
if not user.needs_update:
response = redirect('user_profile', username=user.username)
return response |
Get the environment setting or raise an exception.
def get_env_setting(setting):
""" Get the environment setting or return exception """
try:
return os.environ[setting]
except KeyError:
error_msg = "Set the %s env variable" % setting
raise ImproperlyConfigured(error_msg) |
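A typical use in a Django settings module (a sketch; the variable name is illustrative):
# Fails fast at import time with ImproperlyConfigured if the variable is unset.
SECRET_KEY = get_env_setting('SECRET_KEY')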
This function uses the custom JsonDecoder and the conventions.mappers to recreate your custom object
from a JSON string. Call this method with the json_string, your complete object_type and your
mappers dict.
The mappers dict must use the object_type (e.g. User) as its key, and the value must be a callable
taking (key, value), where key is the name of the object property being parsed and value holds
the properties of the object.
def parse_json(json_string, object_type, mappers):
"""
This function uses the custom JsonDecoder and the conventions.mappers to recreate your custom object
from a JSON string. Call this method with the json_string, your complete object_type and your
mappers dict.
The mappers dict must use the object_type (e.g. User) as its key, and the value must be a callable
taking (key, value), where key is the name of the object property being parsed and value holds
the properties of the object.
"""
obj = json.loads(json_string, cls=JsonDecoder, object_mapper=mappers.get(object_type, None))
if obj is not None:
try:
obj = object_type(**obj)
except TypeError:
initialize_dict, set_needed = Utils.make_initialize_dict(obj, object_type.__init__)
o = object_type(**initialize_dict)
if set_needed:
for key, value in obj.items():
setattr(o, key, value)
obj = o
return obj |
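A hedged illustration of the contract (``User`` and ``user_mapper`` are hypothetical; the mapper receives each property name and its parsed value, keyed by the object type in the mappers dict):
class User:
    def __init__(self, name, age):
        self.name = name
        self.age = age

def user_mapper(key, value):
    # key: the property name being parsed; value: its parsed content.
    return value

mappers = {User: user_mapper}
user = parse_json('{"name": "Ada", "age": 36}', User, mappers)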
Verifies if a social account is valid.
Examples:
>>> validate_social_account('seocam', 'http://twitter.com')
True
>>> validate_social_account('seocam-fake-should-fail',
...     'http://twitter.com')
False
def validate_social_account(account, url):
"""Verifies if a social account is valid.
Examples:
>>> validate_social_account('seocam', 'http://twitter.com')
True
>>> validate_social_account('seocam-fake-should-fail',
...     'http://twitter.com')
False
"""
request = urllib2.Request(urlparse.urljoin(url, account))
request.get_method = lambda: 'HEAD'
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError:
return False
return response.code == 200 |
Calculate the RMSD of fitting.
def fitting_rmsd(w_fit, C_fit, r_fit, Xs):
'''Calculate the RMSD of fitting.'''
return np.sqrt(sum((geometry.point_line_distance(p, C_fit, w_fit) - r_fit) ** 2
for p in Xs) / len(Xs)) |
Iterator yielding unprefixed events.
Parameters:
- response: a stream response from requests
def basic_parse(response, buf_size=ijson.backend.BUFSIZE):
"""
Iterator yielding unprefixed events.
Parameters:
- response: a stream response from requests
"""
lexer = iter(IncrementalJsonParser.lexer(response, buf_size))
for value in ijson.backend.parse_value(lexer):
yield value
try:
next(lexer)
except StopIteration:
pass
else:
raise ijson.common.JSONError('Additional data') |
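A hedged usage sketch (it assumes the response object is the streaming ``requests`` response the docstring describes, and that events follow ijson's ``(event, value)`` convention):
import requests

with requests.get('https://example.com/big.json', stream=True) as response:
    for event, value in basic_parse(response):
        if event == 'map_key':  # print every object key as it streams in
            print(value)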
Connect to Kafka, creating both a consumer and a producer.
- ``bootstrap_servers``: Default: `127.0.0.1:9092`.
- ``auto_offset_reset``: Default: `latest`.
- ``client_id``: Default: `Robot`.
def connect_to_kafka(self, bootstrap_servers='127.0.0.1:9092',
auto_offset_reset='latest',
client_id='Robot',
**kwargs
):
"""Connect to kafka
- ``bootstrap_servers``: default 127.0.0.1:9092
- ``client_id``: default: Robot
"""
self.connect_consumer(
bootstrap_servers=bootstrap_servers,
auto_offset_reset=auto_offset_reset,
client_id=client_id,
**kwargs
)
self.connect_producer(bootstrap_servers=bootstrap_servers, client_id=client_id) |
Force the server to close the current client subscription connection.
@param str name: The name of the subscription
@param str database: The name of the database
def drop_connection(self, name, database=None):
"""
Force the server to close the current client subscription connection.
@param str name: The name of the subscription
@param str database: The name of the database
"""
request_executor = self._store.get_request_executor(database)
command = DropSubscriptionConnectionCommand(name)
request_executor.execute(command) |
A simple method that runs a ManagementUtility.
def execute_from_command_line(argv=None):
"""
A simple method that runs a ManagementUtility.
"""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "colab.settings")
from django.conf import settings
if not hasattr(settings, 'SECRET_KEY') and 'initconfig' in sys.argv:
command = initconfig.Command()
command.handle()
else:
utility = ManagementUtility(argv)
utility.execute() |
Dashboard page
def dashboard(request):
"""Dashboard page"""
user = None
if request.user.is_authenticated():
user = User.objects.get(username=request.user)
latest_results, count_types = get_collaboration_data(user)
latest_results.sort(key=lambda elem: elem.modified, reverse=True)
context = {
'type_count': count_types,
'latest_results': latest_results[:6],
}
return render(request, 'home.html', context) |
Normalize a vector based on its 2-norm.
def normalize(v):
'''Normalize a vector based on its 2-norm.'''
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
Calculate a rotation matrix from an axis and an angle.
def rotation_matrix_from_axis_and_angle(u, theta):
'''Calculate a rotation matrix from an axis and an angle.'''
x = u[0]
y = u[1]
z = u[2]
s = np.sin(theta)
c = np.cos(theta)
return np.array([[c + x**2 * (1 - c), x * y * (1 - c) - z * s, x * z * (1 - c) + y * s],
[y * x * (1 - c) + z * s, c + y**2 * (1 - c), y * z * (1 - c) - x * s ],
[z * x * (1 - c) - y * s, z * y * (1 - c) + x * s, c + z**2 * (1 - c) ]]) |
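A quick check of the Rodrigues formula above: rotating the x-axis by 90 degrees about z yields the y-axis.
import numpy as np

R = rotation_matrix_from_axis_and_angle(np.array([0., 0., 1.]), np.pi / 2)
assert np.allclose(np.dot(R, np.array([1., 0., 0.])), [0., 1., 0.])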
Calculate the distance between a point and a line defined
by a point and a direction vector.
def point_line_distance(p, l_p, l_v):
'''Calculate the distance between a point and a line defined
by a point and a direction vector.
'''
l_v = normalize(l_v)
u = p - l_p
return np.linalg.norm(u - np.dot(u, l_v) * l_v) |
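For instance, the distance from (1, 1, 0) to the z-axis through the origin is sqrt(2):
import numpy as np

d = point_line_distance(np.array([1., 1., 0.]), np.zeros(3), np.array([0., 0., 1.]))
assert np.isclose(d, np.sqrt(2))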
Get all the documents that match the query.
@param str query: The RQL query
@param dict query_parameters: Query parameters to add to the query, as {key: value}
def raw_query(self, query, query_parameters=None):
"""
Get all the documents that match the query.
@param str query: The RQL query
@param dict query_parameters: Query parameters to add to the query, as {key: value}
"""
self.assert_no_raw_query()
if len(self._where_tokens) != 0 or len(self._select_tokens) != 0 or len(
self._order_by_tokens) != 0 or len(self._group_by_tokens) != 0:
raise InvalidOperationException(
"You can only use raw_query on a new query, without applying any operations "
"(such as where, select, order_by, group_by, etc)")
if query_parameters:
self.query_parameters = query_parameters
self._query = query
return self |
Get all the documents whose given field_name equals the value.
@param str field_name: The field name in the index you want to query.
@param value: The field value to query for
@param bool exact: If True, require an exact (e.g. case-sensitive) match
def where_equals(self, field_name, value, exact=False):
"""
Get all the documents whose given field_name equals the value.
@param str field_name: The field name in the index you want to query.
@param value: The field value to query for
@param bool exact: If True, require an exact (e.g. case-sensitive) match
"""
if field_name is None:
raise ValueError("None field_name is invalid")
field_name = Query.escape_if_needed(field_name)
self._add_operator_if_needed()
token = "equals"
if self.negate:
self.negate = False
token = "not_equals"
self.last_equality = {field_name: value}
token = _Token(field_name=field_name, value=self.add_query_parameter(value), token=token, exact=exact)
token.write = self.rql_where_write(token)
self._where_tokens.append(token)
return self |
Get all the documents whose fields equal the values given in kwargs.
@param bool exact: If True, require an exact match
@param kwargs: the keys are the field names in the index you want to query,
and the values are the field values you want to query for
(if kwargs[field_name] is a list, this behaves as the where_in method)
def where(self, exact=False, **kwargs):
"""
Get all the documents whose fields equal the values given in kwargs.
@param bool exact: If True, require an exact match
@param kwargs: the keys are the field names in the index you want to query,
and the values are the field values you want to query for
(if kwargs[field_name] is a list, this behaves as the where_in method)
"""
for field_name in kwargs:
if isinstance(kwargs[field_name], list):
self.where_in(field_name, kwargs[field_name], exact)
else:
self.where_equals(field_name, kwargs[field_name], exact)
return self |
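A hedged usage sketch (``session`` and ``User`` are hypothetical; each where-method returns ``self``, so calls chain):
q = session.query(object_type=User)
# equals on a scalar kwarg, IN semantics on a list kwarg:
q.where(name='Ada', tags=['python', 'ravendb'])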
For more complex full-text searching.
@param str field_name: The field name in the index you want to query.
@param str search_terms: The terms you want to query for
@param QueryOperator operator: OR or AND
def search(self, field_name, search_terms, operator=QueryOperator.OR):
"""
For more complex full-text searching.
@param str field_name: The field name in the index you want to query.
@param str search_terms: The terms you want to query for
@param QueryOperator operator: OR or AND
"""
if field_name is None:
raise ValueError("None field_name is invalid")
field_name = Query.escape_if_needed(field_name)
self._add_operator_if_needed()
self.negate_if_needed(field_name)
self.last_equality = {field_name: "(" + search_terms + ")" if ' ' in search_terms else search_terms}
token = _Token(field_name=field_name, token="search", value=self.add_query_parameter(search_terms),
search_operator=operator)
token.write = self.rql_where_write(token)
self._where_tokens.append(token)
return self |
Get all the documents whose given field_name ends with the value.
@param str field_name: The field name in the index you want to query.
@param str value: The field value to query for
def where_ends_with(self, field_name, value):
"""
Get all the documents whose given field_name ends with the value.
@param str field_name: The field name in the index you want to query.
@param str value: The field value to query for
"""
if field_name is None:
raise ValueError("None field_name is invalid")
field_name = Query.escape_if_needed(field_name)
self._add_operator_if_needed()
self.negate_if_needed(field_name)
self.last_equality = {field_name: value}
token = _Token(field_name=field_name, token="endsWith", value=self.add_query_parameter(value))
token.write = self.rql_where_write(token)
self._where_tokens.append(token)
return self |
Check that the field has one of the specified values.
@param str field_name: Name of the field
@param values: An iterable of the values we wish to query for
@param bool exact: If True, require an exact (e.g. case-sensitive) match
def where_in(self, field_name, values, exact=False):
"""
Check that the field has one of the specified values.
@param str field_name: Name of the field
@param values: An iterable of the values we wish to query for
@param bool exact: If True, require an exact (e.g. case-sensitive) match
"""
field_name = Query.escape_if_needed(field_name)
self._add_operator_if_needed()
self.negate_if_needed(field_name)
token = _Token(field_name=field_name, value=self.add_query_parameter(list(Utils.unpack_iterable(values))),
token="in", exact=exact)
token.write = self.rql_where_write(token)
self._where_tokens.append(token)
return self |
Query the facet results for this query using the specified list of facets, with the given start and page size.
@param List[Facet] facets: List of facets
@param int start: Start index for paging
@param int page_size: Paging page size. If set, overrides Facet.max_result
def to_facets(self, facets, start=0, page_size=None):
"""
Query the facet results for this query using the specified list of facets, with the given start and page size.
@param List[Facet] facets: List of facets
@param int start: Start index for paging
@param int page_size: Paging page size. If set, overrides Facet.max_result
"""
if len(facets) == 0:
raise ValueError("Facets must contain at least one entry", "facets")
str_query = self.__str__()
facet_query = FacetQuery(str_query, None, facets, start, page_size, query_parameters=self.query_parameters,
wait_for_non_stale_results=self.wait_for_non_stale_results,
wait_for_non_stale_results_timeout=self.timeout, cutoff_etag=self.cutoff_etag)
command = GetFacetsCommand(query=facet_query)
return self.session.requests_executor.execute(command) |
Show the distribution of the G function.
def show_G_distribution(data):
'''Show the distribution of the G function.'''
Xs, t = fitting.preprocess_data(data)
Theta, Phi = np.meshgrid(np.linspace(0, np.pi, 50), np.linspace(0, 2 * np.pi, 50))
G = []
for i in range(len(Theta)):
G.append([])
for j in range(len(Theta[i])):
w = fitting.direction(Theta[i][j], Phi[i][j])
G[-1].append(fitting.G(w, Xs))
plt.imshow(G, extent=[0, np.pi, 0, 2 * np.pi], origin='lower')
plt.show() |