text stringlengths 81 112k |
|---|
Returns the location of the :py:mod:`everest` data on disk
for a given target.
:param ID: The target ID
:param int season: The target season number
:param bool relative: Relative path? Default :py:obj:`False`
def TargetDirectory(ID, season, relative=False, **kwargs):
    '''
    Returns the location of the :py:mod:`everest` data on disk
    for a given target.

    :param ID: The target ID
    :param int season: The target season number
    :param bool relative: Relative path? Default :py:obj:`False`
    '''
    if season is None:
        return None
    # Zero-padded 9-digit ID, split into a 4-digit prefix directory
    # (padded back out with zeros) and a 5-digit suffix directory.
    padded = '%09d' % ID
    root = '' if relative else EVEREST_DAT
    return os.path.join(root, 'k2', 'c%02d' % season,
                        padded[:4] + '00000', padded[4:])
Returns the name of the DVS PDF for a given target.
:param ID: The target ID
:param int season: The target season number
:param str cadence: The cadence type. Default `lc`
def DVSFile(ID, season, cadence='lc'):
    '''
    Returns the name of the DVS PDF for a given target.

    :param ID: The target ID
    :param int season: The target season number
    :param str cadence: The cadence type. Default `lc`
    '''
    # Short-cadence targets get an extra '_sc' suffix before the extension.
    suffix = '_sc' if cadence == 'sc' else ''
    return 'hlsp_everest_k2_llc_%d-c%02d_kepler_v%s_dvs%s.pdf' \
        % (ID, season, EVEREST_MAJOR_MINOR, suffix)
Returns the design matrix of CBVs for the given target.
:param model: An instance of the :py:obj:`everest` model for the target
def GetTargetCBVs(model):
    '''
    Computes the design matrix of CBVs for the given target and stores
    it on ``model.XCBV``.

    :param model: An instance of the :py:obj:`everest` model for the target
    '''
    # We use the LC light curves as CBVs; there aren't
    # enough SC light curves to get a good set
    name = model.name[:-3] if model.name.endswith('.sc') else model.name
    model.XCBV = sysrem.GetCBVs(model.season, model=name,
                                niter=model.cbv_niter,
                                sv_win=model.cbv_win,
                                sv_order=model.cbv_order)
Fits the CBV design matrix to the de-trended flux of a given target. This
is called internally whenever the user accesses the :py:attr:`fcor`
attribute.
:param model: An instance of the :py:obj:`everest` model for the target
def _align_segment(m, b):
    """Vertically align segment ``b`` of the piecewise CBV model ``m`` in place.

    The first segment is simply median-subtracted; each later segment is
    shifted so that its first finite point matches the last finite point of
    the previous segment across the breakpoint.
    """
    if b == 0:
        m[b] -= np.nanmedian(m[b])
    else:
        # Match the first finite model point on either side of the break.
        # We could consider something more elaborate in the future.
        i0 = -1 - np.argmax([np.isfinite(m[b - 1][-i])
                             for i in range(1, len(m[b - 1]) - 1)])
        i1 = np.argmax([np.isfinite(m[b][i])
                        for i in range(len(m[b]))])
        m[b] += (m[b - 1][i0] - m[b][i1])


def FitCBVs(model):
    '''
    Fits the CBV design matrix to the de-trended flux of a given target. This
    is called internally whenever the user accesses the :py:attr:`fcor`
    attribute.

    :param model: An instance of the :py:obj:`everest` model for the target
    :returns: The CBV model evaluated at every cadence of ``model.time``
    '''
    # Compute and cache the CBVs on the model if not done yet
    if model.XCBV is None:
        GetTargetCBVs(model)
    # The number of CBVs to use
    ncbv = model.cbv_num
    # Need to treat short and long cadences differently
    if model.cadence == 'lc':
        # Loop over all the light curve segments
        nseg = len(model.breakpoints)
        m = [None for b in range(nseg)]
        weights = [None for b in range(nseg)]
        for b in range(nseg):
            # Get the indices for this light curve segment
            inds = model.get_chunk(b, pad=False)
            masked_inds = model.get_masked_chunk(b, pad=False)
            # Linear least-squares fit of the CBVs to the flux, using only
            # the unmasked (good) cadences
            mX = model.XCBV[masked_inds, :ncbv + 1]
            A = np.dot(mX.T, mX)
            B = np.dot(mX.T, model.flux[masked_inds])
            try:
                # NOTE: was `np.linalg.linalg.LinAlgError` -- that private
                # alias was removed in NumPy 2.0; the public path is the
                # same class.
                weights[b] = np.linalg.solve(A, B)
            except np.linalg.LinAlgError:
                # Singular matrix: fall back to a flat (zero) model
                log.warning('Singular matrix!')
                weights[b] = np.zeros(mX.shape[1])
            m[b] = np.dot(model.XCBV[inds, :ncbv + 1], weights[b])
            # Vertical alignment across the breakpoint
            _align_segment(m, b)
        # Join model and normalize
        m = np.concatenate(m)
        m -= np.nanmedian(m)
    else:
        # Interpolate over outliers so we don't have to worry
        # about masking the arrays below
        flux = Interpolate(model.time, model.mask, model.flux)
        # Down-bin the short cadence light curve by 30x (one LC cadence)
        newsize = len(model.time) // 30
        time = Downbin(model.time, newsize, operation='mean')
        flux = Downbin(flux, newsize, operation='mean')
        # Get LC breakpoints
        breakpoints = list(Breakpoints(
            model.ID, season=model.season, cadence='lc'))
        breakpoints += [len(time) - 1]
        # Loop over all the light curve segments
        nseg = len(breakpoints)
        m = [None for b in range(nseg)]
        weights = [None for b in range(nseg)]
        for b in range(nseg):
            # Get the indices for this light curve segment
            M = np.arange(len(time))
            if b > 0:
                inds = M[(M > breakpoints[b - 1]) & (M <= breakpoints[b])]
            else:
                inds = M[M <= breakpoints[b]]
            # Regress
            A = np.dot(model.XCBV[inds, :ncbv + 1].T,
                       model.XCBV[inds, :ncbv + 1])
            B = np.dot(model.XCBV[inds, :ncbv + 1].T, flux[inds])
            weights[b] = np.linalg.solve(A, B)
            m[b] = np.dot(model.XCBV[inds, :ncbv + 1], weights[b])
            # Vertical alignment across the breakpoint
            _align_segment(m, b)
        # Join model and normalize
        m = np.concatenate(m)
        m -= np.nanmedian(m)
        # Finally, interpolate back to short cadence
        m = np.interp(model.time, time, m)
    return m
Generate the CSV file used in the search database for the documentation.
def StatsToCSV(campaign, model='nPLD'):
    '''
    Generate the CSV file used in the search database for the documentation.

    :param int campaign: The K2 campaign number
    :param str model: The de-trending model name. Default `nPLD`
    '''
    statsfile = os.path.join(EVEREST_SRC, 'missions', 'k2',
                             'tables', 'c%02d_%s.cdpp' % (campaign, model))
    csvfile = os.path.join(os.path.dirname(EVEREST_SRC), 'docs',
                           'c%02d.csv' % campaign)
    # Columns 5-8 of the stats file are unused here
    epic, kp, cdpp6r, cdpp6, _, _, _, _, saturated = \
        np.loadtxt(statsfile, unpack=True, skiprows=2)
    with open(csvfile, 'w') as f:
        print('c%02d' % campaign, file=f)
        for e, k, c6r, c6, sat in zip(epic, kp, cdpp6r, cdpp6, saturated):
            print('%09d,%.3f,%.3f,%.3f,%d' % (e, k, c6r, c6, int(sat)),
                  file=f)
Remove ``item`` from the :class:`zset` if it exists.
If found, it returns the score of the removed item.
def remove(self, item):
    '''Remove ``item`` from the :class:`zset` if it exists.

    If found, the score of the removed item is returned; otherwise
    :py:obj:`None` is returned.'''
    score = self._dict.pop(item, None)
    if score is None:
        return None
    # Keep the sorted score list in sync with the mapping
    self._sl.remove(score)
    return score
Return a tuple of ordered fields for this :class:`ColumnTS`.
def fields(self):
    '''Return a sorted tuple of field names for this :class:`ColumnTS`.'''
    key = self.id + ':fields'
    # Field names are stored as bytes in a set on the backend
    names = (raw.decode(self.client.encoding)
             for raw in self.client.smembers(key))
    return tuple(sorted(names))
Handle any pending relations to the sending model.
Sent from class_prepared.
def do_pending_lookups(event, sender, **kwargs):
    """Run any pending relation callbacks registered for the sending model.

    Sent from class_prepared."""
    key = (sender._meta.app_label, sender._meta.name)
    # pop() so each callback fires at most once per prepared class
    for callback in pending_lookups.pop(key, []):
        callback(sender)
Create a Many2Many through model with two foreign key fields and a
CompositeFieldId depending on the two foreign keys.
def Many2ManyThroughModel(field):
    '''Create a Many2Many through model with two foreign key fields and a
    CompositeFieldId depending on the two foreign keys.

    :param field: the many-to-many field being set up. The (possibly newly
        created) through model is stored back on ``field.through``.
    '''
    from stdnet.odm import ModelType, StdModel, ForeignKey, CompositeIdField
    name_model = field.model._meta.name
    name_relmodel = field.relmodel._meta.name
    # The two models are the same.
    if name_model == name_relmodel:
        name_relmodel += '2'
    through = field.through
    # Create the through model
    if through is None:
        name = '{0}_{1}'.format(name_model, name_relmodel)

        class Meta:
            app_label = field.model._meta.app_label
        through = ModelType(name, (StdModel,), {'Meta': Meta})
        field.through = through
    # The first field: points back to the owning model; its related manager
    # resolves instances of the related model.
    field1 = ForeignKey(field.model,
                        related_name=field.name,
                        related_manager_class=makeMany2ManyRelatedManager(
                            field.relmodel,
                            name_model,
                            name_relmodel)
                        )
    field1.register_with_model(name_model, through)
    # The second field: the mirror image of field1.
    field2 = ForeignKey(field.relmodel,
                        related_name=field.related_name,
                        related_manager_class=makeMany2ManyRelatedManager(
                            field.model,
                            name_relmodel,
                            name_model)
                        )
    field2.register_with_model(name_relmodel, through)
    # Composite primary key built from the two foreign keys
    pk = CompositeIdField(name_model, name_relmodel)
    pk.register_with_model('id', through)
formodel is the model which the manager queries.
def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel):
    '''Build a :class:`Many2ManyRelatedManager` subclass bound to ``formodel``.

    :param formodel: the model which the manager queries.
    '''
    attrs = {'formodel': formodel,
             'name_relmodel': name_relmodel,
             'name_formodel': name_formodel}
    return type('_Many2ManyRelatedManager',
                (Many2ManyRelatedManager,), attrs)
Override :meth:`Manager.session` so that this
:class:`RelatedManager` can retrieve the session from the
:attr:`related_instance` if available.
def session(self, session=None):
    '''Override :meth:`Manager.session` so that this
    :class:`RelatedManager` can retrieve the session from the
    :attr:`related_instance` if available.
    '''
    # The session of a loaded related instance always takes precedence
    if self.related_instance:
        session = self.related_instance.session
    if session is not None:
        return session
    raise QuerySetError('Related manager can be accessed only from\
 a loaded instance of its related model.')
Add ``value``, an instance of :attr:`formodel` to the
:attr:`through` model. This method can only be accessed by an instance of the
model for which this related manager is an attribute.
def add(self, value, session=None, **kwargs):
    '''Add ``value``, an instance of :attr:`formodel`, to the
    :attr:`through` model. This method can only be accessed by an instance
    of the model for which this related manager is an attribute.'''
    sess, instance = self.session_instance('add', value, session, **kwargs)
    return sess.add(instance)
Remove *value*, an instance of ``self.model`` from the set of
elements contained by the field.
def remove(self, value, session=None):
    '''Remove ``value``, an instance of ``self.model``, from the set of
    elements contained by the field.'''
    sess, instance = self.session_instance('remove', value, session)
    # update state so that the instance does look persistent
    instance.get_state(iid=instance.pkvalue(), action='update')
    return sess.delete(instance)
Double metaphone word processor.
def metaphone_processor(words):
    '''Double metaphone word processor.

    Yields the stripped, non-empty double-metaphone codes of each word.'''
    for word in words:
        for code in double_metaphone(word):
            # Codes may be None or whitespace-only; skip those
            code = code.strip() if code else code
            if code:
                yield code
Double metaphone word processor slightly modified so that when no
words are returned by the algorithm, the original word is returned.
def tolerant_metaphone_processor(words):
    '''Double metaphone word processor slightly modified so that when no
    codes are produced by the algorithm, the original word is yielded.'''
    for word in words:
        yielded = 0
        for code in double_metaphone(word):
            code = code.strip() if code else code
            if code:
                yielded += 1
                yield code
        # Fall back to the raw word when the algorithm produced nothing
        if not yielded:
            yield word
Porter Stemmer word processor
def stemming_processor(words):
    '''Porter Stemmer word processor.'''
    stem = PorterStemmer().stem
    for word in words:
        yield stem(word, 0, len(word) - 1)
Chooses between the different pools.
If ``pool == 'AnyPool'``, chooses based on availability.
def Pool(pool='AnyPool', **kwargs):
    '''
    Chooses between the different pools.
    If ``pool == 'AnyPool'``, chooses based on availability.
    '''
    if pool == 'AnyPool':
        # Pick the most capable pool available on this system
        if MPIPool.enabled():
            return MPIPool(**kwargs)
        if MultiPool.enabled():
            return MultiPool(**kwargs)
        return SerialPool(**kwargs)
    pools = {'MPIPool': MPIPool, 'MultiPool': MultiPool,
             'SerialPool': SerialPool}
    if pool not in pools:
        raise ValueError('Invalid pool ``%s``.' % pool)
    return pools[pool](**kwargs)
If this isn't the master process, wait for instructions.
def wait(self):
    """
    If this isn't the master process, wait for instructions.

    Runs a blocking event loop on messages from the master (rank 0):
    a :class:`_close_pool_message` stops the loop, a
    :class:`_function_wrapper` swaps in a new task function, and any other
    message is treated as input to the current task function, whose result
    is sent back asynchronously with the same tag.
    """
    if self.is_master():
        raise RuntimeError("Master node told to await jobs.")
    status = MPI.Status()
    while True:
        # Event loop.
        # Sit here and await instructions.
        if self.debug:
            print("Worker {0} waiting for task.".format(self.rank))
        # Blocking receive to wait for instructions.
        task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
        if self.debug:
            print("Worker {0} got task {1} with tag {2}."
                  .format(self.rank, type(task), status.tag))
        # Check if message is special sentinel signaling end.
        # If so, stop.
        if isinstance(task, _close_pool_message):
            if self.debug:
                print("Worker {0} told to quit.".format(self.rank))
            break
        # Check if message is special type containing new function
        # to be applied
        if isinstance(task, _function_wrapper):
            self.function = task.function
            if self.debug:
                print("Worker {0} replaced its task function: {1}."
                      .format(self.rank, self.function))
            continue
        # If not a special message, just run the known function on
        # the input and return it asynchronously.
        result = self.function(task)
        if self.debug:
            print("Worker {0} sending answer {1} with tag {2}."
                  .format(self.rank, type(result), status.tag))
        # The tag of the incoming task is reused so the master can match
        # the answer to the request.
        self.comm.isend(result, dest=0, tag=status.tag)
    # Kill the process?
    if self.exit_on_end:
        sys.exit()
Like the built-in :py:func:`map` function, apply a function to all
of the values in a list and return the list of results.
:param function:
The function to apply to the list.
:param tasks:
The list of elements.
def map(self, function, tasks):
    """
    Like the built-in :py:func:`map` function, apply a function to all
    of the values in a list and return the list of results.

    :param function:
        The function to apply to the list.
    :param tasks:
        The list of elements.

    Results are indexed by message tag, so the returned list matches the
    order of ``tasks`` even when load-balancing is enabled.
    """
    ntask = len(tasks)
    # If not the master just wait for instructions.
    if not self.is_master():
        self.wait()
        return
    if function is not self.function:
        if self.debug:
            print("Master replacing pool function with {0}."
                  .format(function))
        self.function = function
        F = _function_wrapper(function)
        # Tell all the workers what function to use.
        requests = []
        for i in range(self.size):
            r = self.comm.isend(F, dest=i + 1)
            requests.append(r)
        # Wait until all of the workers have responded. See:
        # https://gist.github.com/4176241
        MPI.Request.waitall(requests)
    if (not self.loadbalance) or (ntask <= self.size):
        # Do not perform load-balancing - the default load-balancing
        # scheme emcee uses.
        # Send all the tasks off and wait for them to be received.
        # Again, see the bug in the above gist.
        requests = []
        for i, task in enumerate(tasks):
            # Round-robin assignment of tasks to workers (ranks 1..size)
            worker = i % self.size + 1
            if self.debug:
                print("Sent task {0} to worker {1} with tag {2}."
                      .format(type(task), worker, i))
            r = self.comm.isend(task, dest=worker, tag=i)
            requests.append(r)
        MPI.Request.waitall(requests)
        # Now wait for the answers.
        results = []
        for i in range(ntask):
            worker = i % self.size + 1
            if self.debug:
                print("Master waiting for worker {0} with tag {1}"
                      .format(worker, i))
            result = self.comm.recv(source=worker, tag=i)
            results.append(result)
        return results
    else:
        # Perform load-balancing. The order of the results are likely to
        # be different from the previous case.
        for i, task in enumerate(tasks[0:self.size]):
            worker = i + 1
            if self.debug:
                print("Sent task {0} to worker {1} with tag {2}."
                      .format(type(task), worker, i))
            # Send out the tasks asynchronously.
            self.comm.isend(task, dest=worker, tag=i)
        ntasks_dispatched = self.size
        results = [None] * ntask
        for itask in range(ntask):
            status = MPI.Status()
            # Receive input from workers.
            try:
                result = self.comm.recv(source=MPI.ANY_SOURCE,
                                        tag=MPI.ANY_TAG, status=status)
            except Exception as e:
                # Tear the pool down before propagating, so workers don't
                # hang waiting for more tasks.
                self.close()
                raise e
            worker = status.source
            i = status.tag
            # The tag identifies which task this result belongs to
            results[i] = result
            if self.debug:
                print("Master received from worker {0} with tag {1}"
                      .format(worker, i))
            # Now send the next task to this idle worker (if there are any
            # left).
            if ntasks_dispatched < ntask:
                task = tasks[ntasks_dispatched]
                i = ntasks_dispatched
                if self.debug:
                    print("Sent task {0} to worker {1} with tag {2}."
                          .format(type(task), worker, i))
                # Send out the tasks asynchronously.
                self.comm.isend(task, dest=worker, tag=i)
                ntasks_dispatched += 1
        return results
Just send a message off to all the pool members which contains
the special :class:`_close_pool_message` sentinel.
def close(self):
    """
    Just send a message off to all the pool members which contains
    the special :class:`_close_pool_message` sentinel.
    """
    # Only the master may shut the workers down; workers do nothing here.
    if not self.is_master():
        return
    for worker in range(1, self.size + 1):
        self.comm.isend(_close_pool_message(), dest=worker)
Decorator for committing changes when the instance session is
not in a transaction.
def commit_when_no_transaction(f):
    '''Decorator for committing changes when the instance session is
    not in a transaction.'''
    def wrapper(self, *args, **kwargs):
        result = f(self, *args, **kwargs)
        # When a session is attached, register this instance with it;
        # otherwise just hand back the wrapped function's result.
        if self.session is not None:
            return self.session.add(self)
        return result
    wrapper.__name__ = f.__name__
    wrapper.__doc__ = f.__doc__
    return wrapper
Returns the :class:`stdnet.BackendStructure`.
def backend(self):
    '''Returns the :class:`stdnet.BackendStructure`, or :py:obj:`None`
    when no session is available.
    '''
    session = self.session
    if session is None:
        return None
    # Field-backed structures resolve through the owning model
    owner = self._field.model if self._field else self
    return session.model(owner).backend
Returns the :class:`stdnet.BackendStructure`.
def read_backend(self):
    '''Returns the read :class:`stdnet.BackendStructure`, or
    :py:obj:`None` when no session is available.
    '''
    session = self.session
    if session is None:
        return None
    # Field-backed structures resolve through the owning model
    owner = self._field.model if self._field else self
    return session.model(owner).read_backend
Number of elements in the :class:`Structure`.
def size(self):
    '''Number of elements in the :class:`Structure`.'''
    local = self.cache.cache
    # Fall back to the backend when nothing is cached locally
    if local is None:
        return self.read_backend_structure().size()
    return len(local)
Load ``data`` from the :class:`stdnet.BackendDataServer`.
def load_data(self, data, callback=None):
    '''Load ``data`` from the :class:`stdnet.BackendDataServer`.'''
    iterable = self.value_pickler.load_iterable(data, self.session)
    return self.backend.execute(iterable, callback)
Iterator over values of :class:`PairMixin`.
def values(self):
    '''Iterator over values of :class:`PairMixin`.'''
    # Serve from the local cache when it has been populated
    if self.cache.cache is not None:
        return self.cache.cache.values()
    backend = self.read_backend
    return backend.execute(backend.structure(self).values(),
                           self.load_values)
Add a *pair* to the structure.
def pair(self, pair):
    '''Add a *pair* to the structure.

    A 2-tuple is returned unchanged; a 1-tuple is expanded via the
    value's ``score()``; anything else raises :class:`TypeError`.'''
    n = len(pair)
    if n == 2:
        return pair
    if n == 1:
        # if only one value is passed, the value must implement a
        # score function which retrieves the first value of the pair
        # (score in zset, timevalue in timeseries, field value in
        # hashtable)
        return (pair[0].score(), pair[0])
    raise TypeError('add expected 2 arguments, got {0}'.format(n))
Remove *keys* from the key-value container.
def remove(self, *keys):
    '''Remove *keys* from the key-value container.'''
    serialized = [self.pickler.dumps(k) for k in keys]
    self.cache.remove(serialized)
Count the number of elements between *start* and *stop*.
def count(self, start, stop):
    '''Count the number of elements between *start* and *stop*.'''
    lo = self.pickler.dumps(start)
    hi = self.pickler.dumps(stop)
    return self.backend_structure().count(lo, hi)
Return the range by rank between start and end.
def irange(self, start=0, end=-1, callback=None, withscores=True,
           **options):
    '''Return the range by rank between *start* and *end*.'''
    backend = self.read_backend
    result = backend.structure(self).irange(start, end,
                                            withscores=withscores,
                                            **options)
    if not callback:
        # Scores present -> load (score, value) pairs, else values only
        callback = self.load_data if withscores else self.load_values
    return backend.execute(result, callback)
pop a range by score from the :class:`OrderedMixin`
def pop_range(self, start, stop, callback=None, withscores=True):
    '''Pop a range by score from the :class:`OrderedMixin`.'''
    lo = self.pickler.dumps(start)
    hi = self.pickler.dumps(stop)
    backend = self.backend
    result = backend.structure(self).pop_range(lo, hi,
                                               withscores=withscores)
    if not callback:
        callback = self.load_data if withscores else self.load_values
    return backend.execute(result, callback)
pop a range from the :class:`OrderedMixin`
def ipop_range(self, start=0, stop=-1, callback=None, withscores=True):
    '''Pop a range by rank from the :class:`OrderedMixin`.'''
    backend = self.backend
    result = backend.structure(self).ipop_range(start, stop,
                                                withscores=withscores)
    if not callback:
        callback = self.load_data if withscores else self.load_values
    return backend.execute(result, callback)
Appends a copy of *value* at the end of the :class:`Sequence`.
def push_back(self, value):
    '''Appends a copy of *value* at the end of the :class:`Sequence`.'''
    serialized = self.value_pickler.dumps(value)
    self.cache.push_back(serialized)
    return self
Remove the last element from the :class:`Sequence`.
def pop_back(self):
    '''Remove the last element from the :class:`Sequence`.'''
    backend = self.backend
    raw = backend.structure(self).pop_back()
    return backend.execute(raw, self.value_pickler.loads)
Add *value* to the set
def add(self, value):
    '''Add *value* to the set.'''
    dumped = self.value_pickler.dumps(value)
    return self.cache.update((dumped,))
Add iterable *values* to the set
def update(self, values):
    '''Add iterable *values* to the set.'''
    dumps = self.value_pickler.dumps
    return self.cache.update(tuple(dumps(v) for v in values))
Remove an element *value* from a set if it is a member.
def discard(self, value):
    '''Remove an element *value* from a set if it is a member.'''
    dumped = self.value_pickler.dumps(value)
    return self.cache.remove((dumped,))
Remove an iterable of *values* from the set.
def difference_update(self, values):
    '''Remove an iterable of *values* from the set.'''
    dumps = self.value_pickler.dumps
    return self.cache.remove(tuple(dumps(v) for v in values))
Remove the first element from of the list.
def pop_front(self):
    '''Remove the first element from the list.'''
    backend = self.backend
    raw = backend.structure(self).pop_front()
    return backend.execute(raw, self.value_pickler.loads)
Remove the last element from of the list. If no elements are
available, blocks for at least ``timeout`` seconds.
def block_pop_back(self, timeout=10):
    '''Remove the last element from the list. If no elements are
    available, blocks for at least ``timeout`` seconds.'''
    # NOTE(review): generator-based coroutine (inlineCallbacks style) --
    # the backend result is delivered back via send(); presumably an
    # async driver consumes this generator. TODO confirm.
    value = yield self.backend_structure().block_pop_back(timeout)
    if value is not None:
        yield self.value_pickler.loads(value)
Remove the first element from of the list. If no elements are
available, blocks for at least ``timeout`` seconds.
def block_pop_front(self, timeout=10):
    '''Remove the first element from the list. If no elements are
    available, blocks for at least ``timeout`` seconds.'''
    # NOTE(review): generator-based coroutine (inlineCallbacks style) --
    # the backend result is delivered back via send(); presumably an
    # async driver consumes this generator. TODO confirm.
    value = yield self.backend_structure().block_pop_front(timeout)
    if value is not None:
        yield self.value_pickler.loads(value)
Appends a copy of ``value`` to the beginning of the list.
def push_front(self, value):
    '''Appends a copy of ``value`` to the beginning of the list.'''
    serialized = self.value_pickler.dumps(value)
    self.cache.push_front(serialized)
The rank of a given *value*. This is the position of *value*
in the :class:`OrderedMixin` container.
def rank(self, value):
    '''The rank of a given *value*: its position in the
    :class:`OrderedMixin` container.'''
    serialized = self.value_pickler.dumps(value)
    return self.backend_structure().rank(serialized)
The rank of a given *dte* in the timeseries
def rank(self, dte):
    '''The rank of a given *dte* in the timeseries.'''
    key = self.pickler.dumps(dte)
    return self.backend_structure().rank(key)
Pop a value at *index* from the :class:`TS`. Return ``None`` if
*index* is out of bound.
def ipop(self, index):
    '''Pop a value at *index* from the :class:`TS`. Return ``None`` if
    *index* is out of bound.'''
    backend = self.backend
    raw = backend.structure(self).ipop(index)

    def _load(r):
        return self._load_get_data(r, index, None)
    return backend.execute(raw, _load)
The times between times *start* and *stop*.
def times(self, start, stop, callback=None, **kwargs):
    '''The times between times *start* and *stop*.'''
    lo = self.pickler.dumps(start)
    hi = self.pickler.dumps(stop)
    backend = self.read_backend
    result = backend.structure(self).times(lo, hi, **kwargs)
    return backend.execute(result, callback or self.load_keys)
The times between rank *start* and *stop*.
def itimes(self, start=0, stop=-1, callback=None, **kwargs):
    '''The times between rank *start* and *stop*.'''
    backend = self.read_backend
    return backend.execute(
        backend.structure(self).itimes(start, stop, **kwargs),
        callback or self.load_keys)
A :class:`Q` performs a series of operations and ultimately
generate of set of matched elements ``ids``. If on the other hand, a
different field is required, it can be specified with the :meth:`get_field`
method. For example, lets say a model has a field called ``object_id``
which contains ids of another model, we could use::
qs = session.query(MyModel).get_field('object_id')
to obtain a set containing the values of matched elements ``object_id``
fields.
:parameter field: the name of the field which will be used to obtained the
matched elements value. Must be an index.
:rtype: a new :class:`Q` instance.
def get_field(self, field):
    '''A :class:`Q` performs a series of operations and ultimately
    generates a set of matched elements ``ids``. If on the other hand a
    different field is required, it can be specified with the
    :meth:`get_field` method. For example, lets say a model has a field
    called ``object_id`` which contains ids of another model, we could
    use::

        qs = session.query(MyModel).get_field('object_id')

    to obtain a set containing the values of matched elements
    ``object_id`` fields.

    :parameter field: the name of the field which will be used to obtain
        the matched elements value. Must be an index.
    :rtype: a new :class:`Q` instance.
    '''
    if field == self._get_field:
        return self
    if field not in self._meta.dfields:
        raise QuerySetError('Model "{0}" has no field "{1}".'
                            .format(self._meta, field))
    q = self._clone()
    q.data['get_field'] = field
    return q
Create a new :class:`Query` with additional clauses corresponding to
``where`` or ``limit`` in a ``SQL SELECT`` statement.
:parameter kwargs: dictionary of limiting clauses.
:rtype: a new :class:`Query` instance.
For example::
qs = session.query(MyModel)
result = qs.filter(group = 'planet')
def filter(self, **kwargs):
    '''Create a new :class:`Query` with additional clauses corresponding to
    ``where`` or ``limit`` in a ``SQL SELECT`` statement.

    :parameter kwargs: dictionary of limiting clauses.
    :rtype: a new :class:`Query` instance.

    For example::

        qs = session.query(MyModel)
        result = qs.filter(group = 'planet')
    '''
    if not kwargs:
        return self
    q = self._clone()
    # Merge with any previously-set filter arguments
    if self.fargs:
        kwargs = update_dictionary(self.fargs.copy(), kwargs)
    q.fargs = kwargs
    return q
Returns a new :class:`Query` with additional clauses corresponding
to ``EXCEPT`` in a ``SQL SELECT`` statement.
:parameter kwargs: dictionary of limiting clauses.
:rtype: a new :class:`Query` instance.
Using an equivalent example to the :meth:`filter` method::
qs = session.query(MyModel)
result1 = qs.exclude(group = 'planet')
result2 = qs.exclude(group__in = ('planet','stars'))
def exclude(self, **kwargs):
    '''Returns a new :class:`Query` with additional clauses corresponding
    to ``EXCEPT`` in a ``SQL SELECT`` statement.

    :parameter kwargs: dictionary of limiting clauses.
    :rtype: a new :class:`Query` instance.

    Using an equivalent example to the :meth:`filter` method::

        qs = session.query(MyModel)
        result1 = qs.exclude(group = 'planet')
        result2 = qs.exclude(group__in = ('planet','stars'))
    '''
    if not kwargs:
        return self
    q = self._clone()
    # Merge with any previously-set exclude arguments
    if self.eargs:
        kwargs = update_dictionary(self.eargs.copy(), kwargs)
    q.eargs = kwargs
    return q
Return a new :class:`Query` obtained from the union of this
:class:`Query` with one or more *queries*.
For example, lets say we want to have the union
of two queries obtained from the :meth:`filter` method::
query = session.query(MyModel)
qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo'))
def union(self, *queries):
    '''Return a new :class:`Query` obtained from the union of this
    :class:`Query` with one or more *queries*.

    For example, lets say we want to have the union of two queries
    obtained from the :meth:`filter` method::

        query = session.query(MyModel)
        qs = query.filter(field1 = 'bla').union(query.filter(field2 = 'foo'))
    '''
    clone = self._clone()
    clone.unions = clone.unions + queries
    return clone
Return a new :class:`Query` obtained from the intersection of this
:class:`Query` with one or more *queries*. Works the same way as
the :meth:`union` method.
def intersect(self, *queries):
    '''Return a new :class:`Query` obtained from the intersection of this
    :class:`Query` with one or more *queries*. Works the same way as
    the :meth:`union` method.'''
    clone = self._clone()
    clone.intersections = clone.intersections + queries
    return clone
Sort the query by the given field
:parameter ordering: a string indicating the class:`Field` name to sort by.
If prefixed with ``-``, the sorting will be in descending order, otherwise
in ascending order.
:return type: a new :class:`Query` instance.
def sort_by(self, ordering):
    '''Sort the query by the given field.

    :parameter ordering: a string indicating the :class:`Field` name to
        sort by. If prefixed with ``-``, the sorting will be in descending
        order, otherwise in ascending order.
    :return type: a new :class:`Query` instance.
    '''
    if ordering:
        # Resolve the field name into a sorting specification
        ordering = self._meta.get_sorting(ordering, QuerySetError)
    clone = self._clone()
    clone.data['ordering'] = ordering
    return clone
Search *text* in model. A search engine needs to be installed
for this function to be available.
:parameter text: a string to search.
:return type: a new :class:`Query` instance.
def search(self, text, lookup=None):
    '''Search *text* in model. A search engine needs to be installed
    for this function to be available.

    :parameter text: a string to search.
    :return type: a new :class:`Query` instance.
    '''
    clone = self._clone()
    clone.text = (text, lookup)
    return clone
For :ref:`backend <db-index>` supporting scripting, it is possible
to construct complex queries which execute the scripting *code* against
each element in the query. The *coe* should reference an instance of
:attr:`model` by ``this`` keyword.
:parameter code: a valid expression in the scripting language of the database.
:parameter load_only: Load only the selected fields when performing the query
(this is different from the :meth:`load_only` method which is used when
fetching data from the database). This field is an optimization which is
used by the :ref:`redis backend <redis-server>` only and can be safely
ignored in most use-cases.
:return: a new :class:`Query`
def where(self, code, load_only=None):
    '''For :ref:`backend <db-index>` supporting scripting, it is possible
    to construct complex queries which execute the scripting *code*
    against each element in the query. The *code* should reference an
    instance of :attr:`model` by the ``this`` keyword.

    :parameter code: a valid expression in the scripting language of the
        database.
    :parameter load_only: Load only the selected fields when performing
        the query (this is different from the :meth:`load_only` method
        which is used when fetching data from the database). This field
        is an optimization which is used by the
        :ref:`redis backend <redis-server>` only and can be safely
        ignored in most use-cases.
    :return: a new :class:`Query`
    '''
    if not code:
        return self
    clone = self._clone()
    clone.data['where'] = (code, load_only)
    return clone
Return a new :class:`QueryElem` for *q* applying a text search.
def search_queries(self, q):
    '''Return a new :class:`QueryElem` for *q* applying a text search.'''
    if not self.text:
        return q
    engine = self.session.router.search_engine
    if not engine:
        raise QuerySetError('Search not available for %s' % self._meta)
    return engine.search_model(q, *self.text)
It returns a new :class:`Query` that automatically
follows the foreign-key relationship ``related``.
:parameter related: A field name corresponding to a :class:`ForeignKey`
in :attr:`Query.model`.
:parameter related_fields: optional :class:`Field` names for the ``related``
model to load. If not provided, all fields will be loaded.
This function is :ref:`performance boost <performance-loadrelated>` when
accessing the related fields of all (most) objects in your query.
If Your model contains more than one foreign key, you can use this function
in a generative way::
qs = myquery.load_related('rel1').load_related('rel2','field1','field2')
:rtype: a new :class:`Query`.
def load_related(self, related, *related_fields):
    '''Return a new :class:`Query` that automatically follows the
    foreign-key relationship ``related``.

    :parameter related: a field name corresponding to a :class:`ForeignKey`
        in :attr:`Query.model`.
    :parameter related_fields: optional :class:`Field` names of the
        ``related`` model to load. If not provided, all fields are loaded.

    This is a :ref:`performance boost <performance-loadrelated>` when
    accessing the related fields of all (or most) objects in your query.
    With several foreign keys, the method can be chained generatively::

        qs = myquery.load_related('rel1').load_related('rel2','field1','field2')

    :rtype: a new :class:`Query`.'''
    field = self._get_related_field(related)
    if not field:
        raise FieldError('"%s" is not a related field for "%s"' %
                         (related, self._meta))
    return self._clone()._add_to_load_related(field, *related_fields)
This provides a :ref:`performance boost <increase-performance>`
in cases when you need to load a subset of fields of your model. The boost
achieved is less than the one obtained when using
:meth:`Query.load_related`, since it does not reduce the number of requests
to the database. However, it can save you lots of bandwidth when excluding
data intensive fields you don't need.
def load_only(self, *fields):
    '''Return a new :class:`Query` which fetches only *fields*.

    This provides a :ref:`performance boost <increase-performance>`
    in cases when you need to load a subset of fields of your model. The
    boost achieved is less than the one obtained when using
    :meth:`Query.load_related`, since it does not reduce the number of
    requests to the database. However, it can save you lots of bandwidth
    when excluding data intensive fields you don't need.
    '''
    q = self._clone()
    new_fields = []
    for field in fields:
        if JSPLITTER in field:
            # Dotted name: when the leading bit is a related field, route
            # the remainder through the load-related machinery instead of
            # treating it as a plain scalar field.
            bits = field.split(JSPLITTER)
            related = self._get_related_field(bits[0])
            if related:
                q._add_to_load_related(related, JSPLITTER.join(bits[1:]))
                continue
        new_fields.append(field)
    if fields and not new_fields:
        # If we added fields to the load_related list and no plain fields
        # are left, we add the primary key so that other fields are not
        # loaded.
        new_fields.append(self._meta.pkname())
    fs = unique_tuple(q.fields, new_fields)
    q.data['fields'] = fs if fs else None
    return q
Works like :meth:`load_only` to provide a
:ref:`performance boost <increase-performance>` in cases when you need
to load all fields except a subset specified by *fields*.
def dont_load(self, *fields):
    '''Works like :meth:`load_only` to provide a
    :ref:`performance boost <increase-performance>` when you need to load
    all fields except the subset specified by *fields*.
    '''
    query = self._clone()
    excluded = unique_tuple(query.exclude_fields, fields)
    query.exclude_fields = excluded if excluded else None
    return query
Return an instance of a model matching the query. A special case is
the query on ``id`` which provides a direct access to the :attr:`session`
instances. If the given primary key is present in the session, the object
is returned directly without performing any query.
def get(self, **kwargs):
    '''Return an instance of a model matching the query. A special case is
    the query on ``id``, which provides direct access to the
    :attr:`session` instances: when the given primary key is present in
    the session, the object is returned directly without performing any
    query.'''
    filtered = self.filter(**kwargs)
    return filtered.items(callback=self.model.get_unique_instance)
Build the :class:`QueryElement` representing this query.
def construct(self):
    '''Build (once) and return the :class:`QueryElement` representing this
    query; subsequent calls return the cached element.'''
    cached = self.__construct
    if cached is None:
        cached = self.__construct = self._construct()
    return cached
Build and return the :class:`stdnet.utils.async.BackendQuery`.
This is a lazy method in the sense that it is evaluated once only and its
result stored for future retrieval.
def backend_query(self, **kwargs):
    '''Build and return the :class:`stdnet.utils.async.BackendQuery`.

    Lazy in the sense that the underlying query element is evaluated once
    only and its result stored for future retrieval.'''
    element = self.construct()
    if isinstance(element, EmptyQuery):
        return element
    return element.backend_query(**kwargs)
Aggregate lookup parameters.
def aggregate(self, kwargs):
    '''Aggregate lookup parameters into a sorted list of querysets.

    Each key in *kwargs* is a field name, optionally followed by
    ``JSPLITTER``-separated nested attribute names and/or a lookup suffix
    (``in`` or one of ``range_lookups`` such as ``gt``).

    :param kwargs: dictionary of lookup parameters.
    :return: a list of :func:`queryset` elements, one per looked-up field,
        sorted by field name.
    :raises QuerySetError: when a field does not exist or is not an index.
    '''
    meta = self._meta
    fields = meta.dfields
    field_lookups = {}
    for name, value in iteritems(kwargs):
        # Split "field__nested__lookup" into its components.
        bits = name.split(JSPLITTER)
        field_name = bits.pop(0)
        if field_name not in fields:
            raise QuerySetError('Could not filter on model "{0}".\
                Field "{1}" does not exist.'.format(meta, field_name))
        field = fields[field_name]
        attname = field.attname
        lookup = None
        if bits:
            bits = [n.lower() for n in bits]
            if bits[-1] == 'in':
                # "in" is implicit: values are always iterated below.
                bits.pop()
            elif bits[-1] in range_lookups:
                lookup = bits.pop()
            remaining = JSPLITTER.join(bits)
            if lookup:  # this is a range lookup
                attname, nested = field.get_lookup(remaining,
                                                   QuerySetError)
                lookups = get_lookups(attname, field_lookups)
                lookups.append(lookup_value(lookup, (value, nested)))
                continue
            elif remaining:  # Not a range lookup, must be a nested filter
                value = field.filter(self.session, remaining, value)
        lookups = get_lookups(attname, field_lookups)
        # If we are here the field must be an index
        if not field.index:
            raise QuerySetError("%s %s is not an index. Cannot query." %
                                (field.__class__.__name__, field_name))
        if not iterable(value):
            value = (value,)
        for v in value:
            if isinstance(v, Q):
                # Nested query: defer to its own construction.
                v = lookup_value('set', v.construct())
            else:
                v = lookup_value('value', field.serialise(v, lookup))
            lookups.append(v)
    #
    return [queryset(self, name=name, underlying=field_lookups[name])
            for name in sorted(field_lookups)]
Generator of all model in model.
def models_from_model(model, include_related=False, exclude=None):
    '''Generator of model classes reachable from *model*.

    :param model: a model class (or any class) to start from.
    :param include_related: when ``True``, recursively yield models
        reachable through foreign keys and many-to-many relationships.
    :param exclude: set of already-visited models, used internally to
        avoid infinite recursion on cyclic relationships.
    '''
    if exclude is None:
        exclude = set()
    if model and model not in exclude:
        exclude.add(model)
        if isinstance(model, ModelType) and not model._meta.abstract:
            yield model
            if include_related:
                exclude.add(model)
                for field in model._meta.fields:
                    if hasattr(field, 'relmodel'):
                        through = getattr(field, 'through', None)
                        for rmodel in (field.relmodel, field.model, through):
                            for m in models_from_model(
                                    rmodel, include_related=include_related,
                                    exclude=exclude):
                                yield m
                for manytomany in model._meta.manytomany:
                    related = getattr(model, manytomany)
                    for m in models_from_model(related.model,
                                               include_related=include_related,
                                               exclude=exclude):
                        yield m
        elif not isinstance(model, ModelType) and isclass(model):
            # This is a class which is not a ModelType
            yield model
A generator of :class:`StdModel` classes found in *application*.
:parameter application: A python dotted path or an iterable over python
dotted-paths where models are defined.
Only models defined in these paths are considered.
For example::
from stdnet.odm import model_iterator
APPS = ('stdnet.contrib.searchengine',
'stdnet.contrib.timeseries')
for model in model_iterator(APPS):
...
def model_iterator(application, include_related=True, exclude=None):
    '''A generator of :class:`StdModel` classes found in *application*.

    :parameter application: a python dotted path, a python module, or an
        iterable over dotted paths/modules where models are defined.
        Only models defined in these paths are considered.
    :parameter include_related: when ``True`` also yield models related to
        the ones found via foreign keys.
    :parameter exclude: optional set of models to skip (shared across the
        whole iteration to avoid duplicates).

    For example::

        from stdnet.odm import model_iterator

        APPS = ('stdnet.contrib.searchengine',
                'stdnet.contrib.timeseries')

        for model in model_iterator(APPS):
            ...
    '''
    if exclude is None:
        exclude = set()
    application = native_str(application)
    if ismodule(application) or isinstance(application, str):
        if ismodule(application):
            mod, application = application, application.__name__
        else:
            try:
                mod = import_module(application)
            except ImportError:
                # the module is not there
                mod = None
        if mod:
            label = application.split('.')[-1]
            # Prefer a `models` submodule; fall back to the module itself.
            try:
                mod_models = import_module('.models', application)
            except ImportError:
                mod_models = mod
            label = getattr(mod_models, 'app_label', label)
            models = set()
            for name in dir(mod_models):
                value = getattr(mod_models, name)
                meta = getattr(value, '_meta', None)
                if isinstance(value, ModelType) and meta:
                    for model in models_from_model(
                            value, include_related=include_related,
                            exclude=exclude):
                        if (model._meta.app_label == label
                                and model not in models):
                            models.add(model)
                            yield model
    else:
        for app in application:
            # BUG FIX: the recursive call previously dropped
            # ``include_related`` and ``exclude``, so the flag was reset
            # to its default and duplicates were not shared across apps.
            for m in model_iterator(app, include_related=include_related,
                                    exclude=exclude):
                yield m
Set the search ``engine`` for this :class:`Router`.
def set_search_engine(self, engine):
    '''Install the search ``engine`` on this :class:`Router` and point
    the engine back at the router.'''
    self._search_engine = engine
    engine.set_router(self)
Register a :class:`Model` with this :class:`Router`. If the
model was already registered it does nothing.
:param model: a :class:`Model` class.
:param backend: a :class:`stdnet.BackendDataServer` or a
:ref:`connection string <connection-string>`.
:param read_backend: Optional :class:`stdnet.BackendDataServer` for read
operations. This is useful when the server has a master/slave
configuration, where the master accept write and read operations
and the ``slave`` read only operations.
:param include_related: ``True`` if related models to ``model`` needs to be
registered. Default ``True``.
:param params: Additional parameters for the :func:`getdb` function.
:return: the number of models registered.
def register(self, model, backend=None, read_backend=None,
             include_related=True, **params):
    '''Register a :class:`Model` with this :class:`Router`. If the
    model was already registered it does nothing.

    :param model: a :class:`Model` class or a :class:`Structure`.
    :param backend: a :class:`stdnet.BackendDataServer` or a
        :ref:`connection string <connection-string>`.
    :param read_backend: optional :class:`stdnet.BackendDataServer` for
        read operations. Useful when the server has a master/slave
        configuration, where the master accepts write and read operations
        and the ``slave`` read-only operations.
    :param include_related: ``True`` if models related to ``model`` need
        to be registered. Default ``True``.
    :param params: additional parameters for the :func:`getdb` function.
    :return: the registered :class:`Structure` when ``model`` is a
        structure; otherwise the write backend when at least one model was
        registered, else ``None``.

        NOTE(review): an earlier docstring said "the number of models
        registered", which does not match the code below -- callers should
        not rely on a count being returned.
    '''
    backend = backend or self._default_backend
    backend = getdb(backend=backend, **params)
    if read_backend:
        read_backend = getdb(read_backend)
    registered = 0
    if isinstance(model, Structure):
        # Structures get a dedicated manager and are returned directly.
        self._structures[model] = StructureManager(model, backend,
                                                   read_backend, self)
        return model
    for model in models_from_model(model, include_related=include_related):
        if model in self._registered_models:
            continue
        registered += 1
        default_manager = backend.default_manager or Manager
        manager_class = getattr(model, 'manager_class', default_manager)
        manager = manager_class(model, backend, read_backend, self)
        self._registered_models[model] = manager
        if isinstance(model, ModelType):
            attr_name = model._meta.name
        else:
            attr_name = model.__name__.lower()
        if attr_name not in self._registered_names:
            self._registered_names[attr_name] = manager
        if self._install_global:
            # Expose the manager via the conventional `objects` attribute.
            model.objects = manager
    if registered:
        return backend
Retrieve a :class:`Model` from its universally unique identifier
``uuid``. If the ``uuid`` does not match any instance an exception will raise.
def from_uuid(self, uuid, session=None):
    '''Retrieve a :class:`Model` from its universally unique identifier
    ``uuid``. If ``uuid`` does not match any instance, an exception is
    raised.
    '''
    parts = uuid.split('.')
    if len(parts) != 2:
        raise Model.DoesNotExist('uuid "{0}" not recognized'.format(uuid))
    model = get_model_from_hash(parts[0])
    if not model:
        raise Model.DoesNotExist(
            'model id "{0}" not available'.format(parts[0]))
    # Fall back to a fresh session when none is given or the given one
    # belongs to a different router.
    if not session or session.router is not self:
        session = self.session()
    return session.query(model).get(id=parts[1])
Flush :attr:`registered_models`.
:param exclude: optional list of model names to exclude.
:param include: optional list of model names to include.
:param dryrun: Doesn't remove anything, simply collect managers
to flush.
:return:
def flush(self, exclude=None, include=None, dryrun=False):
    '''Flush :attr:`registered_models`.

    :param exclude: optional list of model names to exclude.
    :param include: optional list of model names to include.
    :param dryrun: don't remove anything, simply collect the managers
        that would be flushed.
    :return: list of managers (``dryrun``) or flush results.
    '''
    exclude = exclude or []
    flushed = []
    for manager in self._registered_models.values():
        meta = manager._meta
        keys = (meta.modelkey, meta.app_label)
        if include is not None and not any(k in include for k in keys):
            continue
        if any(k in exclude for k in keys):
            continue
        flushed.append(manager if dryrun else manager.flush())
    return flushed
Unregister a ``model`` if provided, otherwise it unregister all
registered models. Return a list of unregistered model managers or ``None``
if no managers were removed.
def unregister(self, model=None):
    '''Unregister ``model`` if provided, otherwise unregister all
    registered models. Return a list of unregistered model managers, or
    ``None`` if no manager was removed.'''
    if model is None:
        # Wholesale removal: hand back every manager and clear the map.
        managers = list(self._registered_models.values())
        self._registered_models.clear()
        return managers
    try:
        manager = self._registered_models.pop(model)
    except KeyError:
        return
    name = manager._meta.name
    if self._registered_names.get(name) == manager:
        self._registered_names.pop(name)
    return [manager]
A higher level registration functions for group of models located
on application modules.
It uses the :func:`model_iterator` function to iterate
through all :class:`Model` models available in ``applications``
and register them using the :func:`register` low level method.
:parameter applications: A String or a list of strings representing
python dotted paths where models are implemented.
:parameter models: Optional list of models to include. If not provided
all models found in *applications* will be included.
:parameter backends: optional dictionary which map a model or an
application to a backend :ref:`connection string <connection-string>`.
:rtype: A list of registered :class:`Model`.
For example::
mapper.register_application_models('mylib.myapp')
mapper.register_application_models(['mylib.myapp', 'another.path'])
mapper.register_application_models(pythonmodule)
mapper.register_application_models(['mylib.myapp',pythonmodule])
def register_applications(self, applications, models=None, backends=None):
    '''High-level registration of groups of models located in application
    modules.

    Uses the :func:`model_iterator` function to iterate through all
    :class:`Model` models available in ``applications`` and registers them
    via the :func:`register` low-level method.

    :parameter applications: a string, python module, or a list of
        strings/modules representing python dotted paths where models are
        implemented.
    :parameter models: optional list of models to include. If not
        provided, all models found in *applications* are included.
    :parameter backends: optional dictionary mapping a model or an
        application to a backend
        :ref:`connection string <connection-string>`.
    :rtype: a list of registered :class:`Model`.

    For example::

        mapper.register_application_models('mylib.myapp')
        mapper.register_application_models(['mylib.myapp', 'another.path'])
        mapper.register_application_models(pythonmodule)
        mapper.register_application_models(['mylib.myapp',pythonmodule])
    '''
    registered = self._register_applications(applications, models, backends)
    return list(registered)
Execute a script.
makes sure all required scripts are loaded.
def execute_script(self, name, keys, *args, **options):
    '''Execute the named server-side script, first making sure all
    required scripts are loaded on the server.

    :param name: name of a registered script (looked up via
        :func:`get_script`).
    :param keys: keys handed to the script.
    :param args: positional arguments for the script.
    :param options: extra options for the script call.
    :raises redis.RedisError: when *name* is not a registered script.

    NOTE(review): this is a generator -- each ``yield`` hands a command to
    the caller for execution (presumably an async/task runner; confirm
    against the backend's execution model). Also note the loop variable
    ``name`` shadows the parameter; harmless here since the parameter is
    no longer used after ``get_script(name)``.
    '''
    script = get_script(name)
    if not script:
        raise redis.RedisError('No such script "%s"' % name)
    address = self.address()
    if address not in all_loaded_scripts:
        all_loaded_scripts[address] = set()
    loaded = all_loaded_scripts[address]
    # Load any dependency scripts not yet loaded on this server.
    toload = script.required_scripts.difference(loaded)
    for name in toload:
        s = get_script(name)
        yield self.script_load(s.script)
    loaded.update(toload)
    yield script(self, keys, args, options)
Register a :class:`StdModel` with this search :class:`SearchEngine`.
When registering a model, every time an instance is created, it will be
indexed by the search engine.
:param model: a :class:`StdModel` class.
:param related: a list of related fields to include in the index.
def register(self, model, related=None):
    '''Register a :class:`StdModel` with this search :class:`SearchEngine`.
    Once registered, every time an instance of *model* is created it will
    be indexed by the search engine.

    :param model: a :class:`StdModel` class.
    :param related: a list of related fields to include in the index.
    '''
    updater = UpdateSE(self, related)
    self.REGISTERED_MODELS[model] = updater
    # Re-index on commit, de-index on delete.
    for signal in (self.router.post_commit, self.router.post_delete):
        signal.bind(updater, model)
Generator of indexable words in *text*.
This functions loop through the :attr:`word_middleware` attribute
to process the text.
:param text: string from which to extract words.
:param for_search: flag indicating if the words will be used for search
or to index the database. This flag is used in conjunction with the
middleware flag *for_search*. If this flag is ``True`` (i.e. we need to
search the database for the words in *text*), only the
middleware functions in :attr:`word_middleware` enabled for searching are
used.
Default: ``False``.
return a *list* of cleaned words.
def words_from_text(self, text, for_search=False):
    '''Return a list of indexable words extracted from *text*.

    The text is split and then piped through the functions in the
    :attr:`word_middleware` list.

    :param text: string from which to extract words.
    :param for_search: flag indicating whether the words will be used to
        search the database rather than to index it. When ``True``, only
        the middleware functions enabled for searching are applied.
        Default: ``False``.

    :return: a *list* of cleaned words.
    '''
    if not text:
        return []
    words = self.split_text(text)
    for middleware, search_enabled in self.word_middleware:
        # Skip index-only middleware when preparing a search.
        if not for_search or search_enabled:
            words = middleware(words)
    return list(words) if isgenerator(words) else words
Add a *middleware* function to the list of :attr:`word_middleware`,
for preprocessing words to be indexed.
:param middleware: a callable receving an iterable over words.
:param for_search: flag indicating if the *middleware* can be used for the
text to search. Default: ``True``.
def add_word_middleware(self, middleware, for_search=True):
    '''Append *middleware* to :attr:`word_middleware`, the list of
    functions used to preprocess words before indexing.

    :param middleware: a callable receiving an iterable over words.
    :param for_search: flag indicating if *middleware* can also be applied
        to the text to search. Default: ``True``.
    '''
    # Use the callable() builtin rather than hasattr(x, '__call__').
    # Non-callables are silently ignored, as before.
    if callable(middleware):
        self.word_middleware.append((middleware, for_search))
Return a query for ``model`` when it needs to be indexed.
def query(self, model):
    '''Return a query over the text fields of ``model``, used when the
    model needs to be indexed.
    '''
    text_fields = tuple(f.name for f in model._meta.scalarfields
                        if f.type == 'text')
    qs = self.router.session().query(model).load_only(*text_fields)
    for related in self.get_related_fields(model):
        qs = qs.load_related(related)
    return qs
Returns a PEP 386-compliant version number from *version*.
def get_version(version):
    '''Return a PEP 386-compliant version number from the 5-tuple
    *version* = (major, minor, micro, status, serial).'''
    assert len(version) == 5
    assert version[3] in ('alpha', 'beta', 'rc', 'final')
    major, minor, micro, status, serial = version
    # Drop a zero micro component: (1, 2, 0) -> "1.2".
    numbers = (major, minor) if micro == 0 else (major, minor, micro)
    main = '.'.join(str(n) for n in numbers)
    if status == 'alpha' and serial == 0:
        # Development build: tag with the git changeset when available.
        changeset = get_git_changeset()
        return '%s.dev%s' % (main, changeset) if changeset else main
    if status != 'final':
        abbrev = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}[status]
        return main + abbrev + str(serial)
    return main
Returns the value of the planet radius over the stellar radius
for a given depth :py:obj:`d`, given
the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
def Get_RpRs(d, **kwargs):
    '''
    Returns the value of the planet radius over the stellar radius
    for a given depth :py:obj:`d`, given
    the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
    '''
    if ps is None:
        raise Exception("Unable to import `pysyzygy`.")

    def model_depth(RpRs):
        # Transit depth of the model at the time of first transit.
        return 1 - ps.Transit(RpRs=RpRs, **kwargs)([kwargs.get('t0', 0.)])

    def loss(r):
        # Scaled squared residual between the target and model depth.
        return 1.e10 * (d - model_depth(r)) ** 2

    # sqrt(d) is the radius ratio of a box-shaped transit: a good seed.
    return fmin(loss, [np.sqrt(d)], disp=False)
Returns the value of the stellar density for a given transit
duration :py:obj:`dur`, given
the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
def Get_rhos(dur, **kwargs):
    '''
    Returns the value of the stellar density for a given transit
    duration :py:obj:`dur`, given
    the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.

    :raises AssertionError: if ``dur`` is outside ``[0.01, 0.5]`` days.
    '''
    if ps is None:
        raise Exception("Unable to import `pysyzygy`.")
    assert dur >= 0.01 and dur <= 0.5, "Invalid value for the duration."

    def Dur(rhos, **kwargs):
        # Numerically measure the transit duration for density `rhos` by
        # finding where the model flux drops below unity.
        t0 = kwargs.get('t0', 0.)
        time = np.linspace(t0 - 0.5, t0 + 0.5, 1000)
        try:
            t = time[np.where(ps.Transit(rhos=rhos, **kwargs)(time) < 1)]
        # BUG FIX: was a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit; catch Exception instead.
        except Exception:
            return 0.
        # BUG FIX: guard against an empty in-transit array, which would
        # previously raise an uncaught IndexError on t[-1].
        if len(t) == 0:
            return 0.
        return t[-1] - t[0]

    def DiffSq(rhos):
        return (dur - Dur(rhos, **kwargs)) ** 2

    return fmin(DiffSq, [0.2], disp=False)
A `Mandel-Agol <http://adsabs.harvard.edu/abs/2002ApJ...580L.171M>`_
transit model, but with the depth and the duration as primary
input variables.
:param numpy.ndarray time: The time array
:param float t0: The time of first transit in units of \
:py:obj:`BJD` - 2454833.
:param float dur: The transit duration in days. Don't go too crazy on \
this one -- very small or very large values will break the \
inverter. Default 0.1
:param float per: The orbital period in days. Default 3.56789
:param float depth: The fractional transit depth. Default 0.001
:param dict kwargs: Any additional keyword arguments, passed directly \
to :py:func:`pysyzygy.Transit`
:returns tmod: The transit model evaluated at the same times as the \
:py:obj:`time` array
def Transit(time, t0=0., dur=0.1, per=3.56789, depth=0.001, **kwargs):
    '''
    A `Mandel-Agol <http://adsabs.harvard.edu/abs/2002ApJ...580L.171M>`_
    transit model, but with the depth and the duration as primary
    input variables.

    :param numpy.ndarray time: The time array
    :param float t0: The time of first transit in units of \
           :py:obj:`BJD` - 2454833.
    :param float dur: The transit duration in days. Don't go too crazy on \
           this one -- very small or very large values will break the \
           inverter. Default 0.1
    :param float per: The orbital period in days. Default 3.56789
    :param float depth: The fractional transit depth. Default 0.001
    :param dict kwargs: Any additional keyword arguments, passed directly \
           to :py:func:`pysyzygy.Transit`
    :returns tmod: The transit model evaluated at the same times as the \
             :py:obj:`time` array
    '''
    if ps is None:
        raise Exception("Unable to import `pysyzygy`.")
    # Invert depth -> RpRs and duration -> rhos. Strictly speaking rhos
    # feeds back into RpRs, so this should be iterated, but the effect is
    # pretty negligible in practice.
    RpRs = Get_RpRs(depth, t0=t0, per=per, **kwargs)
    rhos = Get_rhos(dur, t0=t0, per=per, **kwargs)
    model = ps.Transit(t0=t0, per=per, RpRs=RpRs, rhos=rhos, **kwargs)
    return model(time)
Given a ``startdate`` and an ``enddate`` dates, evaluate the
date intervals from which data is not available. It return a list
of two-dimensional tuples containing start and end date for the
interval. The list could contain 0, 1 or 2 tuples.
def intervals(self, startdate, enddate, parseinterval=None):
    '''Given ``startdate`` and ``enddate``, evaluate the date intervals
    for which data is not available. Return a list of two-dimensional
    tuples containing start and end date for each interval; the list may
    contain 0, 1 or 2 tuples.'''
    return missing_intervals(startdate, enddate,
                             self.data_start, self.data_end,
                             dateconverter=self.todate,
                             parseinterval=parseinterval)
Return the front pair of the structure
def front(self, *fields):
    '''Return the front pair of the structure, or ``None`` when the
    structure is empty.'''
    values, field_data = tuple(self.irange(0, 0, fields=fields))
    if values:
        head = dict((name, field_data[name][0]) for name in field_data)
        return (values[0], head)
Perform a multivariate statistic calculation of this
:class:`ColumnTS` from *start* to *end*.
:param start: Optional index (rank) where to start the analysis.
:param end: Optional index (rank) where to end the analysis.
:param fields: Optional subset of :meth:`fields` to perform analysis on.
If not provided all fields are included in the analysis.
def istats(self, start=0, end=-1, fields=None):
    '''Perform a multivariate statistic calculation of this
    :class:`ColumnTS` from rank *start* to rank *end*.

    :param start: optional index (rank) where the analysis starts.
    :param end: optional index (rank) where the analysis ends.
    :param fields: optional subset of :meth:`fields` to analyse. When not
        provided, all fields are included.
    '''
    backend = self.read_backend
    query = backend.structure(self).istats(start, end, fields)
    return backend.execute(query, self._stats)
Perform a multivariate statistic calculation of this
:class:`ColumnTS` from a *start* date/datetime to an
*end* date/datetime.
:param start: Start date for analysis.
:param end: End date for analysis.
:param fields: Optional subset of :meth:`fields` to perform analysis on.
If not provided all fields are included in the analysis.
def stats(self, start, end, fields=None):
    '''Perform a multivariate statistic calculation of this
    :class:`ColumnTS` from a *start* date/datetime to an *end*
    date/datetime.

    :param start: start date for the analysis.
    :param end: end date for the analysis.
    :param fields: optional subset of :meth:`fields` to analyse. When not
        provided, all fields are included.
    '''
    # Serialise the date boundaries before handing them to the backend.
    dumps = self.pickler.dumps
    backend = self.read_backend
    query = backend.structure(self).stats(dumps(start), dumps(end), fields)
    return backend.execute(query, self._stats)
Perform cross multivariate statistics calculation of
this :class:`ColumnTS` and other optional *series* from *start*
to *end*.
:parameter start: the start rank.
:parameter start: the end rank
:parameter field: name of field to perform multivariate statistics.
:parameter series: a list of two elements tuple containing the id of the
a :class:`columnTS` and a field name.
:parameter stats: list of statistics to evaluate.
Default: ['covariance']
def imulti_stats(self, start=0, end=-1, series=None, fields=None,
                 stats=None):
    '''Perform cross multivariate statistics calculation of this
    :class:`ColumnTS` and other optional *series*, from rank *start* to
    rank *end*.

    :parameter start: the start rank.
    :parameter end: the end rank.
    :parameter fields: name of fields to perform multivariate statistics.
    :parameter series: a list of two-element tuples containing the id of a
        :class:`columnTS` and a field name.
    :parameter stats: list of statistics to evaluate.
        Default: ``['covariance']`` (via ``default_multi_stats``).
    '''
    stats = stats or self.default_multi_stats
    backend = self.read_backend
    query = backend.structure(self).imulti_stats(start, end, fields,
                                                 series, stats)
    return backend.execute(query, self._stats)
Merge this :class:`ColumnTS` with several other *series*.
:parameters series: a list of tuples where the nth element is a tuple
of the form::
(weight_n, ts_n1, ts_n2, ..., ts_nMn)
The result will be calculated using the formula::
ts = weight_1*ts_11*ts_12*...*ts_1M1 + weight_2*ts_21*ts_22*...*ts_2M2 +
...
def merge(self, *series, **kwargs):
    '''Merge this :class:`ColumnTS` with several other *series*.

    :parameter series: a list of tuples where the nth element is a tuple
        of the form::

            (weight_n, ts_n1, ts_n2, ..., ts_nMn)

    The result will be calculated using the formula::

        ts = weight_1*ts_11*ts_12*...*ts_1M1 +
             weight_2*ts_21*ts_22*...*ts_2M2 + ...
    '''
    current_session = self.session
    if not current_session:
        raise SessionNotAvailable('No session available')
    # All series must live on the same router before merging.
    self.check_router(current_session.router, *series)
    return self._merge(*series, **kwargs)
Merge ``series`` and return the results without storing data
in the backend server.
def merged_series(cls, *series, **kwargs):
    '''Merge ``series`` and return the results without storing data
    in the backend server.

    NOTE(review): this assumes :meth:`check_router` returns a truthy
    ``backend``; if it did not, ``target`` would be unbound when used
    below -- confirm against check_router's contract.
    '''
    router, backend = cls.check_router(None, *series)
    if backend:
        # Register a throw-away target structure to merge into, then read
        # the merged range back while deleting it server-side.
        target = router.register(cls(), backend)
        router.session().add(target)
        target._merge(*series, **kwargs)
        backend = target.backend
    return backend.execute(
        backend.structure(target).irange_and_delete(),
        target.load_data)
Return the 0-based index (rank) of ``score``. If the score is not
available it returns a negative integer which absolute score is the
left most closest index with score less than *score*.
def rank(self, score):
    '''Return the 0-based index (rank) of ``score``. If the score is not
    available it returns a negative integer whose absolute value is the
    left-most closest index with score less than *score*.'''
    node = self.__head
    position = 0
    # Walk down the skiplist levels, moving right while the next node's
    # score does not exceed the target, accumulating span widths.
    for level in range(self.__level - 1, -1, -1):
        while node.next[level] and node.next[level].score <= score:
            position += node.width[level]
            node = node.next[level]
    # `position` counts the head node, hence the -1 adjustments.
    return position - 1 if node.score == score else -1 - position
Create a new instance of :attr:`model` from a *state* tuple.
def make_object(self, state=None, backend=None):
    '''Create a new instance of :attr:`model` from a *state* tuple,
    bypassing the model constructor.'''
    instance = self.model.__new__(self.model)
    self.load_state(instance, state, backend)
    return instance
Perform validation for *instance* and stores serialized data,
indexes and errors into local cache.
Return ``True`` if the instance is ready to be saved to database.
def is_valid(self, instance):
    '''Perform validation for *instance* and store serialised data,
    indexes and errors into the instance local cache (``dbdata``).

    :param instance: the model instance to validate.
    :return: ``True`` if the instance is ready to be saved to database.
    '''
    dbdata = instance.dbdata
    data = dbdata['cleaned_data'] = {}
    errors = dbdata['errors'] = {}
    # Loop over scalar fields first
    for field, value in instance.fieldvalue_pairs():
        name = field.attname
        try:
            svalue = field.set_get_value(instance, value)
        except Exception as e:
            errors[name] = str(e)
        else:
            # BUG FIX: was `svalue is ''` -- an identity comparison with a
            # string literal, which is unreliable (and a SyntaxWarning on
            # modern CPython). Use equality instead.
            if (svalue is None or svalue == '') and field.required:
                errors[name] = ("Field '{0}' is required for '{1}'."
                                .format(name, self))
            else:
                if isinstance(svalue, dict):
                    data.update(svalue)
                elif svalue is not None:
                    data[name] = svalue
    return len(errors) == 0
Return a two elements tuple containing a list
of fields names and a list of field attribute names.
def backend_fields(self, fields):
    '''Return a two-element tuple containing a list of field names and a
    list of field attribute names for the subset of *fields* known to this
    model. The primary key and duplicate entries are skipped.'''
    dfields = self.dfields
    pkname = self.pkname()
    seen = set()
    names, attnames = [], []
    for name in fields:
        if name == pkname or name in seen:
            continue
        if name in dfields:
            seen.add(name)
            field = dfields[name]
            names.append(field.name)
            attnames.append(field.attname)
        else:
            # Dotted name: keep it verbatim when its base field is a
            # json or related object.
            base = name.split(JSPLITTER)[0]
            if base in dfields and \
                    dfields[base].type in ('json object', 'related object'):
                seen.add(name)
                names.append(name)
                attnames.append(name)
    return names, attnames
Model metadata in a dictionary
def as_dict(self):
    '''Return the model metadata as a dictionary.'''
    pk = self.pk
    ordering = self.ordering
    return {
        'id_name': pk.name,
        # 1 -> auto-generated primary key, 3 -> anything else.
        'id_type': 1 if pk.type == 'auto' else 3,
        'sorted': bool(ordering),
        'autoincr': ordering and ordering.auto,
        'multi_fields': [f.name for f in self.multifields],
        'indices': {idx.attname: idx.unique for idx in self.indices},
    }
Return the current :class:`ModelState` for this :class:`Model`.
If ``kwargs`` parameters are passed a new :class:`ModelState` is created,
otherwise it returns the cached value.
def get_state(self, **kwargs):
    '''Return the current :class:`ModelState` for this :class:`Model`.
    When ``kwargs`` parameters are passed, a fresh :class:`ModelState` is
    created and cached; otherwise the cached value is returned.'''
    dbdata = self.dbdata
    if kwargs or 'state' not in dbdata:
        dbdata['state'] = ModelState(self, **kwargs)
    return dbdata['state']
return dbdata['state'] |
Universally unique identifier for an instance of a :class:`Model`.
def uuid(self):
    '''Universally unique identifier for an instance of a :class:`Model`.

    :raises DoesNotExist: when the instance has no primary key yet.
    '''
    primary_key = self.pkvalue()
    if not primary_key:
        raise self.DoesNotExist(
            'Object not saved. Cannot obtain universally unique id')
    return self.get_uuid(primary_key)
The :class:`stdnet.BackendDatServer` for this instance.
It can be ``None``.
def backend(self, client=None):
    '''The :class:`stdnet.BackendDatServer` for this instance.
    It can be ``None``.
    '''
    sess = self.session
    # No session means no backend is available.
    return sess.model(self).backend if sess else None
The read :class:`stdnet.BackendDatServer` for this instance.
It can be ``None``.
def read_backend(self, client=None):
    '''The read :class:`stdnet.BackendDatServer` for this instance.
    It can be ``None``.
    '''
    sess = self.session
    # No session means no read backend is available.
    return sess.model(self).read_backend if sess else None
Create a :class:`Model` class for objects requiring
an interface similar to :class:`StdModel`. We refer to this type
of models as :ref:`local models <local-models>` since instances of such
models are not persistent on a :class:`stdnet.BackendDataServer`.
:param name: Name of the model class.
:param attributes: positional attribute names. These are the only attributes
available to the model during the default constructor.
:param params: key-valued parameters to pass to the :class:`ModelMeta`
constructor.
:return: a local :class:`Model` class.
def create_model(name, *attributes, **params):
    '''Create a :class:`Model` class for objects requiring
    an interface similar to :class:`StdModel`. We refer to this type
    of models as :ref:`local models <local-models>` since instances of
    such models are not persistent on a
    :class:`stdnet.BackendDataServer`.

    :param name: Name of the model class.
    :param attributes: positional attribute names. These are the only
        attributes available to the model during the default constructor.
    :param params: key-valued parameters to pass to the
        :class:`ModelMeta` constructor.
    :return: a local :class:`Model` class.
    '''
    # Local models are never registered with a backend.
    params['register'] = False
    params['attributes'] = attributes
    meta_kwargs = {'manager_class': params.pop('manager_class', Manager),
                   'Meta': params}
    return ModelType(name, (StdModel,), meta_kwargs)
Generator of fields loaded from database
def loadedfields(self):
    '''Generator of fields loaded from database.

    When no field restriction was recorded (``_loadedfields`` is
    ``None``) every scalar field is yielded.  Otherwise only the loaded
    names are resolved against ``dfields``; a name not found there is
    split on ``JSPLITTER`` and yields its base field when that field is
    a json object.  Each field is yielded at most once.
    '''
    loaded = self._loadedfields
    if loaded is None:
        for field in self._meta.scalarfields:
            yield field
        return
    dfields = self._meta.dfields
    seen = set()
    for name in loaded:
        if name in seen:
            continue
        field = dfields.get(name)
        if field is not None:
            seen.add(name)
            yield field
            continue
        base = name.split(JSPLITTER)[0]
        if base in dfields and base not in seen:
            field = dfields[base]
            if field.type == 'json object':
                seen.add(base)
                yield field
Generator of fields,values pairs. Fields correspond to
the ones which have been loaded (usually all of them) or
not loaded but modified.
Check the :ref:`load_only <performance-loadonly>` query function for more
details.
If *exclude_cache* evaluates to ``True``, fields with :attr:`Field.as_cache`
attribute set to ``True`` won't be included.
:rtype: a generator of two-elements tuples
def fieldvalue_pairs(self, exclude_cache=False):
    '''Generator of fields,values pairs. Fields correspond to
    the ones which have been loaded (usually all of them) or
    not loaded but modified.
    Check the :ref:`load_only <performance-loadonly>` query function for
    more details.
    If *exclude_cache* evaluates to ``True``, fields with
    :attr:`Field.as_cache` attribute set to ``True`` won't be included.
    :rtype: a generator of two-elements tuples'''
    for field in self._meta.scalarfields:
        if exclude_cache and field.as_cache:
            continue
        # Only fields whose attribute is actually set are yielded.
        if hasattr(self, field.attname):
            yield field, getattr(self, field.attname)
Set cache fields to ``None``. Check :attr:`Field.as_cache`
for information regarding fields which are considered cache.
def clear_cache_fields(self):
    '''Set cache fields to ``None``. Check :attr:`Field.as_cache`
    for information regarding fields which are considered cache.'''
    cache_fields = (f for f in self._meta.scalarfields if f.as_cache)
    for field in cache_fields:
        setattr(self, field.name, None)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.