_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def diffusion(diffusion_constant=0.2, exposure_time=0.05, samples=200):
    """
    Simulate a diffusing particle imaged with a finite exposure time by
    averaging many model images over random Brownian offsets.

    See `diffusion_correlated` for information related to units, etc

    Parameters
    ----------
    diffusion_constant : float, optional
        Diffusion constant D of the particle. Default is 0.2.
    exposure_time : float, optional
        Exposure time t over which the motion is averaged. Default is 0.05.
    samples : int, optional
        Number of random trajectory samples to average. Default is 200.

    Returns
    -------
    s : state
        A fresh single-particle state at the expected (mean) parameters.
    finalimage : numpy.ndarray
        The sample-averaged model image (inner region only).
    position : numpy.ndarray
        The sample-averaged particle position.
    """
    radius = 5
    psfsize = np.array([2.0, 1.0, 3.0])

    # create a base image of one particle
    s0 = init.create_single_particle_state(imsize=4*radius,
            radius=radius, psfargs={'params': psfsize, 'error': 1e-6})

    # add up a bunch of trajectories
    finalimage = 0*s0.get_model_image()[s0.inner]
    position = 0*s0.obj.pos[0]

    # FIX: use range() instead of the Python-2-only xrange()
    for i in range(samples):
        # 3D mean-squared displacement over one exposure is 6*D*t
        offset = np.sqrt(6*diffusion_constant*exposure_time)*np.random.randn(3)
        s0.obj.pos[0] = np.array(s0.image.shape)/2 + offset
        s0.reset()

        finalimage += s0.get_model_image()[s0.inner]
        position += s0.obj.pos[0]

    finalimage /= float(samples)
    position /= float(samples)

    # place that into a new image at the expected parameters
    s = init.create_single_particle_state(imsize=4*radius, sigma=0.05,
            radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
    s.reset()

    # measure the true inferred parameters
    return s, finalimage, position
def feature_guess(st, rad, invert='guess', minmass=None, use_tp=False,
                  trim_edge=False, **kwargs):
    """
    Makes a guess at particle positions using heuristic centroid methods.

    Parameters
    ----------
    st : :class:`peri.states.State`
        The state to check adding particles to.
    rad : Float
        The feature size for featuring.
    invert : {'guess', True, False}, optional
        Whether to invert the image; set to True for there are dark
        particles on a bright background, False for bright particles.
        The default is to guess from the state's current particles.
    minmass : Float or None, optional
        The minimum mass/masscut of a particle. Default is ``None`` =
        calculated internally.
    use_tp : Bool, optional
        Whether or not to use trackpy. Default is ``False``, since trackpy
        cuts out particles at the edge.
    trim_edge : Bool, optional
        Whether to trim particles at the edge pixels of the image. Can be
        useful for initial featuring but is bad for adding missing particles
        as they are frequently at the edge. Default is ``False``.

    Returns
    -------
    guess : [N,3] numpy.ndarray
        The featured positions of the particles, sorted in order of decreasing
        feature mass.
    npart : Int
        The number of added particles.
    """
    # FIXME does not use the **kwargs, but needs b/c called with wrong kwargs
    do_invert = guess_invert(st) if invert == 'guess' else invert
    # feature on the (possibly inverted) residuals of the current fit
    image = (1 - st.residuals) if do_invert else st.residuals
    return _feature_guess(image, rad, minmass=minmass, use_tp=use_tp,
                          trim_edge=trim_edge)
def _feature_guess(im, rad, minmass=None, use_tp=False, trim_edge=False):
    """Workhorse of feature_guess"""
    if minmass is None:
        # we use 1% of the feature size mass as a cutoff;
        # it's easier to remove than to add
        minmass = rad**3 * 4/3.*np.pi * 0.01
        # 0.03 is a magic number; works well
    if use_tp:
        # trackpy requires an odd integer diameter
        diameter = np.ceil(2*rad)
        diameter += 1-(diameter % 2)
        df = peri.trackpy.locate(im, int(diameter), minmass=minmass)
        npart = np.array(df['mass']).size
        guess = np.zeros([npart, 3])
        for axis, column in enumerate(['z', 'y', 'x']):
            guess[:, axis] = df[column]
        mass = df['mass']
    else:
        guess, mass = initializers.local_max_featuring(
            im, radius=rad, minmass=minmass, trim_edge=trim_edge)
        npart = guess.shape[0]
    # return the positions sorted by decreasing feature mass
    order = np.argsort(mass)[::-1]
    return guess[order].copy(), npart
def check_add_particles(st, guess, rad='calc', do_opt=True, im_change_frac=0.2,
                        min_derr='3sig', **kwargs):
    """
    Checks whether to add particles at a given position by seeing if adding
    the particle improves the fit of the state.

    Parameters
    ----------
    st : :class:`peri.states.State`
        The state to check adding particles to.
    guess : [N,3] list-like
        The positions of particles to check to add.
    rad : {Float, ``'calc'``}, optional.
        The radius of the newly-added particles. Default is ``'calc'``,
        which uses the states current radii's median.
    do_opt : Bool, optional
        Whether to optimize the particle position before checking if it
        should be kept. Default is True (optimizes position).
    im_change_frac : Float
        How good the change in error needs to be relative to the change in
        the difference image. Default is 0.2; i.e. if the error does not
        decrease by 20% of the change in the difference image, do not add
        the particle.
    min_derr : Float or '3sig'
        The minimal improvement in error to add a particle. Default
        is ``'3sig' = 3*st.sigma``.

    Returns
    -------
    accepts : Int
        The number of added particles
    new_poses : [N,3] list
        List of the positions of the added particles. If ``do_opt==True``,
        then these positions will differ from the input 'guess'.
    """
    # FIXME does not use the **kwargs, but needs b/c called with wrong kwargs
    if min_derr == '3sig':
        min_derr = 3 * st.sigma
    accepts = 0
    new_poses = []
    if rad == 'calc':
        rad = guess_add_radii(st)
    # header for the log table of attempted adds below
    message = ('-'*30 + 'ADDING' + '-'*30 +
               '\n Z\t Y\t X\t R\t|\t ERR0\t\t ERR1')
    with log.noformat():
        CLOG.info(message)
    for a in range(guess.shape[0]):
        p0 = guess[a]
        # snapshot the error/residuals of the state *without* this particle
        absent_err = st.error
        absent_d = st.residuals.copy()
        ind = st.obj_add_particle(p0, rad)
        if do_opt:
            # the slowest part of this
            opt.do_levmarq_particles(
                st, ind, damping=1.0, max_iter=1, run_length=3,
                eig_update=False, include_rad=False)
        present_err = st.error
        present_d = st.residuals.copy()
        dont_kill = should_particle_exist(
            absent_err, present_err, absent_d, present_d,
            im_change_frac=im_change_frac, min_derr=min_derr)
        if dont_kill:
            accepts += 1
            p = tuple(st.obj_get_positions()[ind].ravel())
            r = tuple(st.obj_get_radii()[ind].ravel())
            new_poses.append(p)
            part_msg = '%2.2f\t%3.2f\t%3.2f\t%3.2f\t|\t%4.3f \t%4.3f' % (
                p + r + (absent_err, st.error))
            with log.noformat():
                CLOG.info(part_msg)
        else:
            # the particle did not help enough; undo the add
            st.obj_remove_particle(ind)
            # sanity check: removing it must restore the previous error
            if np.abs(absent_err - st.error) > 1e-4:
                raise RuntimeError('updates not exact?')
    return accepts, new_poses
def should_particle_exist(absent_err, present_err, absent_d, present_d,
                          im_change_frac=0.2, min_derr=0.1):
    """
    Checks whether or not adding a particle should be present.

    Parameters
    ----------
    absent_err : Float
        The state error without the particle.
    present_err : Float
        The state error with the particle.
    absent_d : numpy.ndarray
        The state residuals without the particle.
    present_d : numpy.ndarray
        The state residuals with the particle.
    im_change_frac : Float, optional
        How good the change in error needs to be relative to the change in
        the residuals. Default is 0.2; i.e. return False if the error does
        not decrease by 0.2 x the change in the residuals.
    min_derr : Float, optional
        The minimal improvement in error. Default is 0.1

    Returns
    -------
    Bool
        True if the errors is better with the particle present.
    """
    # squared L2 norm of the change in the residuals caused by the particle
    residual_shift = np.ravel(present_d - absent_d)
    im_change = np.dot(residual_shift, residual_shift)
    # the improvement must beat the larger of the two thresholds
    err_cutoff = max(im_change_frac * im_change, min_derr)
    improvement = absent_err - present_err
    return improvement >= err_cutoff
def add_missing_particles(st, rad='calc', tries=50, **kwargs):
    """
    Attempts to add missing particles to the state.

    Operates by:
    (1) featuring the difference image using feature_guess,
    (2) attempting to add the featured positions using check_add_particles.

    Parameters
    ----------
    st : :class:`peri.states.State`
        The state to check adding particles to.
    rad : Float or 'calc', optional
        The radius of the newly-added particles and of the feature size for
        featuring. Default is 'calc', which uses the median of the state's
        current radii.
    tries : Int, optional
        How many particles to attempt to add. Only tries to add the first
        ``tries`` particles, in order of mass. Default is 50.

    Other Parameters
    ----------------
    invert : Bool, optional
        Whether to invert the image. Default is ``True``, i.e. dark particles
    minmass : Float or None, optionals
        The minimum mass/masscut of a particle. Default is ``None``=calcualted
        by ``feature_guess``.
    use_tp : Bool, optional
        Whether to use trackpy in feature_guess. Default is False, since
        trackpy cuts out particles at the edge.
    do_opt : Bool, optional
        Whether to optimize the particle position before checking if it
        should be kept. Default is True (optimizes position).
    im_change_frac : Float, optional
        How good the change in error needs to be relative to the change
        in the difference image. Default is 0.2; i.e. if the error does
        not decrease by 20% of the change in the difference image, do
        not add the particle.
    min_derr : Float or '3sig', optional
        The minimal improvement in error to add a particle. Default
        is ``'3sig' = 3*st.sigma``.

    Returns
    -------
    accepts : Int
        The number of added particles
    new_poses : [N,3] list
        List of the positions of the added particles. If ``do_opt==True``,
        then these positions will differ from the input 'guess'.
    """
    if rad == 'calc':
        rad = guess_add_radii(st)
    guess, npart = feature_guess(st, rad, **kwargs)
    # never attempt more adds than there are featured candidates
    n_check = min(tries, npart)
    return check_add_particles(st, guess[:n_check], rad=rad, **kwargs)
def add_subtract(st, max_iter=7, max_npart='calc', max_mem=2e8,
                 always_check_remove=False, **kwargs):
    """
    Automatically adds and subtracts missing & extra particles.

    Operates by removing bad particles then adding missing particles on
    repeat, until either no particles are added/removed or after `max_iter`
    attempts.

    Parameters
    ----------
    st: :class:`peri.states.State`
        The state to add and subtract particles to.
    max_iter : Int, optional
        The maximum number of add-subtract loops to use. Default is 7.
        Terminates after either max_iter loops or when nothing has changed.
    max_npart : Int or 'calc', optional
        The maximum number of particles to add before optimizing the non-psf
        globals. Default is ``'calc'``, which uses 5% of the initial number
        of particles.
    max_mem : Int, optional
        The maximum memory to use for optimization after adding max_npart
        particles. Default is 2e8.
    always_check_remove : Bool, optional
        Set to True to always check whether to remove particles. If ``False``,
        only checks for removal while particles were removed on the previous
        attempt. Default is False.

    Other Parameters
    ----------------
    invert : Bool, optional
        ``True`` if the particles are dark on a bright background, ``False``
        if they are bright on a dark background. Default is ``True``.
    min_rad : Float, optional
        Particles with radius below ``min_rad`` are automatically deleted.
        Default is ``'calc'`` = median rad - 25* radius std.
    max_rad : Float, optional
        Particles with radius above ``max_rad`` are automatically deleted.
        Default is ``'calc'`` = median rad + 15* radius std, but you should
        change this for your particle sizes.
    min_edge_dist : Float, optional
        Particles closer to the edge of the padded image than this are
        automatically deleted. Default is 2.0.
    check_rad_cutoff : 2-element float list.
        Particles with ``radii < check_rad_cutoff[0]`` or ``> check...[1]``
        are checked if they should be deleted (not automatic). Default is
        ``[3.5, 15]``.
    check_outside_im : Bool, optional
        Set to True to check whether to delete particles whose positions are
        outside the un-padded image.
    rad : Float, optional
        The initial radius for added particles; added particles radii are
        not fit until the end of ``add_subtract``. Default is ``'calc'``,
        which uses the median radii of active particles.
    tries : Int, optional
        The number of particles to attempt to remove or add, per iteration.
        Default is 50.
    im_change_frac : Float, optional
        How good the change in error needs to be relative to the change in
        the difference image. Default is 0.2; i.e. if the error does not
        decrease by 20% of the change in the difference image, do not add
        the particle.
    min_derr : Float, optional
        The minimum change in the state's error to keep a particle in the
        image. Default is ``'3sig'`` which uses ``3*st.sigma``.
    do_opt : Bool, optional
        Set to False to avoid optimizing particle positions after adding.
    minmass : Float, optional
        The minimum mass for a particle to be identified as a feature,
        as used by trackpy. Defaults to a decent guess.
    use_tp : Bool, optional
        Set to True to use trackpy to find missing particles inside the
        image. Not recommended since trackpy deliberately cuts out particles
        at the edge of the image. Default is ``False``.

    Returns
    -------
    total_changed : Int
        The total number of adds and subtracts done on the data. Not the
        same as ``changed_inds.size`` since the same particle or particle
        index can be added/subtracted multiple times.
    added_positions : [N_added,3] numpy.ndarray
        The positions of particles that have been added at any point in the
        add-subtract cycle.
    removed_positions : [N_added,3] numpy.ndarray
        The positions of particles that have been removed at any point in
        the add-subtract cycle.

    Notes
    ------
    Occasionally after the intial featuring a cluster of particles is
    featured as 1 big particle. To fix these mistakes, it helps to set
    max_rad to a physical value. This removes the big particle and allows
    it to be re-featured by (several passes of) the adds.

    The added/removed positions returned are whether or not the position
    has been added or removed ever. It's possible that a position is
    added, then removed during a later iteration.
    """
    if max_npart == 'calc':
        max_npart = 0.05 * st.obj_get_positions().shape[0]

    total_changed = 0
    # number of changes since the last global optimization pass
    _change_since_opt = 0
    removed_poses = []
    # positions as first added (before the final radius optimization)
    added_poses0 = []
    # positions after the final per-particle optimization below
    added_poses = []

    nr = 1  # Check removal on the first loop
    for _ in range(max_iter):
        # skip the removal check once a pass removes nothing (unless forced)
        if (nr != 0) or (always_check_remove):
            nr, rposes = remove_bad_particles(st, **kwargs)
        na, aposes = add_missing_particles(st, **kwargs)
        current_changed = na + nr
        removed_poses.extend(rposes)
        added_poses0.extend(aposes)
        total_changed += current_changed
        _change_since_opt += current_changed
        if current_changed == 0:
            break
        elif _change_since_opt > max_npart:
            # too many changes since last optimization; re-fit the globals
            _change_since_opt *= 0
            CLOG.info('Start add_subtract optimization.')
            opt.do_levmarq(st, opt.name_globals(st, remove_params=st.get(
                'psf').params), max_iter=1, run_length=4, num_eig_dirs=3,
                max_mem=max_mem, eig_update_frequency=2, rz_order=0,
                use_accel=True)
            CLOG.info('After optimization:\t{:.6}'.format(st.error))

    # Optimize the added particles' radii:
    for p in added_poses0:
        i = st.obj_closest_particle(p)
        opt.do_levmarq_particles(st, np.array([i]), max_iter=2, damping=0.3)
        added_poses.append(st.obj_get_positions()[i])
    return total_changed, np.array(removed_poses), np.array(added_poses)
def add_subtract_misfeatured_tile(
        st, tile, rad='calc', max_iter=3, invert='guess', max_allowed_remove=20,
        minmass=None, use_tp=False, **kwargs):
    """
    Automatically adds and subtracts missing & extra particles in a region
    of poor fit.

    Parameters
    ----------
    st: :class:`peri.states.State`
        The state to add and subtract particles to.
    tile : :class:`peri.util.Tile`
        The poorly-fit region to examine.
    rad : Float or 'calc', optional
        The initial radius for added particles; added particles radii are
        not fit until the end of add_subtract. Default is ``'calc'``, which
        uses the median radii of active particles.
    max_iter : Int, optional
        The maximum number of loops for attempted adds at one tile location.
        Default is 3.
    invert : {'guess', True, False}, optional
        Whether to invert the image for feature_guess -- True for dark
        particles on a bright background, False for bright particles. The
        default is to guess from the state's current particles.
    max_allowed_remove : Int, optional
        The maximum number of particles to remove. If the misfeatured tile
        contains more than this many particles, raises an error. If it
        contains more than half as many particles, logs a warning. If more
        than this many particles are added, they are optimized in blocks of
        ``max_allowed_remove``. Default is 20.

    Other Parameters
    ----------------
    im_change_frac : Float on [0, 1], optional.
        If adding or removing a particle decreases the error less than
        ``im_change_frac``*the change in the image, the particle is deleted.
        Default is 0.2.
    min_derr : {Float, ``'3sig'``}, optional
        The minimum change in the state's error to keep a particle in the
        image. Default is ``'3sig'`` which uses ``3*st.sigma``.
    do_opt : Bool, optional
        Set to False to avoid optimizing particle positions after adding
        them. Default is True.
    minmass : Float, optional
        The minimum mass for a particle to be identified as a feature, as
        used by trackpy. Defaults to a decent guess.
    use_tp : Bool, optional
        Set to True to use trackpy to find missing particles inside the
        image. Not recommended since trackpy deliberately cuts out particles
        at the edge of the image. Default is False.

    Outputs
    -------
    n_added : Int
        The change in the number of particles, i.e. ``n_added-n_subtracted``
    ainds: List of ints
        The indices of the added particles.

    Notes
    --------
    The added/removed positions returned are whether or not the
    position has been added or removed ever. It's possible/probably that
    a position is added, then removed during a later iteration.

    Algorithm is:
    1.  Remove all particles within the tile.
    2.  Feature and add particles to the tile.
    3.  Optimize the added particles positions only.
    4.  Run 2-3 until no particles have been added.
    5.  Optimize added particle radii

    Because all the particles are removed within a tile, it is important
    to set max_allowed_remove to a reasonable value. Otherwise, if the
    tile is the size of the image it can take a long time to remove all
    the particles and re-add them.
    """
    if rad == 'calc':
        rad = guess_add_radii(st)
    if invert == 'guess':
        invert = guess_invert(st)

    # 1. Remove all possibly bad particles within the tile.
    initial_error = np.copy(st.error)
    rinds = np.nonzero(tile.contains(st.obj_get_positions()))[0]
    if rinds.size >= max_allowed_remove:
        CLOG.fatal('Misfeatured region too large!')
        raise RuntimeError
    if rinds.size >= max_allowed_remove/2:
        # FIX: this branch used to be an ``elif`` that only warned and
        # skipped the removal, leaving ``rpos``/``rrad`` undefined for the
        # revert path below (NameError) and debiting ``n_added`` for
        # particles that were never removed. Now it warns and falls
        # through to the removal.
        CLOG.warn('Large misfeatured regions.')
    if rinds.size > 0:
        rpos, rrad = st.obj_remove_particle(rinds)

    # 2-4. Feature & add particles to the tile, optimize, run until none added
    n_added = -rinds.size
    added_poses = []
    for _ in range(max_iter):
        if invert:
            im = 1 - st.residuals[tile.slicer]
        else:
            im = st.residuals[tile.slicer]
        guess, _ = _feature_guess(im, rad, minmass=minmass, use_tp=use_tp)
        accepts, poses = check_add_particles(
            st, guess+tile.l, rad=rad, do_opt=True, **kwargs)
        added_poses.extend(poses)
        n_added += accepts
        if accepts == 0:
            break
    else:  # for-break-else
        CLOG.warn('Runaway adds or insufficient max_iter')

    # 5. Optimize added pos + rad:
    ainds = []
    for p in added_poses:
        ainds.append(st.obj_closest_particle(p))
    if len(ainds) > max_allowed_remove:
        # optimize in blocks to bound memory usage
        for i in range(0, len(ainds), max_allowed_remove):
            opt.do_levmarq_particles(
                st, np.array(ainds[i:i + max_allowed_remove]),
                include_rad=True, max_iter=3)
    elif len(ainds) > 0:
        opt.do_levmarq_particles(st, ainds, include_rad=True, max_iter=3)

    # 6. Ensure that current error after add-subtracting is lower than initial
    did_something = (rinds.size > 0) or (len(ainds) > 0)
    # FIX: use logical ``and`` rather than bitwise ``&`` on booleans
    if did_something and (st.error > initial_error):
        CLOG.info('Failed addsub, Tile {} -> {}'.format(
            tile.l.tolist(), tile.r.tolist()))
        if len(ainds) > 0:
            _ = st.obj_remove_particle(ainds)
        if rinds.size > 0:
            # restore the particles we removed in step 1
            for p, r in zip(rpos.reshape(-1, 3), rrad.reshape(-1)):
                _ = st.obj_add_particle(p, r)
        n_added = 0
        ainds = []
    return n_added, ainds
def add_subtract_locally(st, region_depth=3, filter_size=5, sigma_cutoff=8,
                         **kwargs):
    """
    Automatically adds and subtracts missing particles based on local
    regions of poor fit.

    Calls identify_misfeatured_regions to identify regions, then
    add_subtract_misfeatured_tile on the tiles in order of size until
    region_depth tiles have been checked without adding any particles.

    Parameters
    ----------
    st: :class:`peri.states.State`
        The state to add and subtract particles to.
    region_depth : Int
        The minimum amount of regions to try; the algorithm terminates if
        region_depth regions have been tried without adding particles.

    Other Parameters
    ----------------
    filter_size : Int, optional
        The size of the filter for calculating the local standard deviation;
        should approximately be the size of a poorly featured region in each
        dimension. Best if odd. Default is 5.
    sigma_cutoff : Float, optional
        The max allowed deviation of the residuals from what is expected,
        in units of the residuals' standard deviation. Lower means more
        sensitive, higher = less sensitive. Default is 8.0, i.e. one pixel
        out of every ``7*10^11`` is mis-identified randomly. In practice the
        noise is not Gaussian so there are still some regions mis-
        identified as improperly featured.
    rad : Float or 'calc', optional
        The initial radius for added particles; added particles radii are
        not fit until the end of add_subtract. Default is ``'calc'``, which
        uses the median radii of active particles.
    max_iter : Int, optional
        The maximum number of loops for attempted adds at one tile location.
        Default is 3.
    invert : Bool, optional
        Whether to invert the image for feature_guess. Default is ``True``,
        i.e. dark particles on bright background.
    max_allowed_remove : Int, optional
        The maximum number of particles to remove. If the misfeatured tile
        contains more than this many particles, raises an error. If it
        contains more than half as many particles, throws a warning. If more
        than this many particles are added, they are optimized in blocks of
        ``max_allowed_remove``. Default is 20.
    im_change_frac : Float, between 0 and 1.
        If adding or removing a particle decreases the error less than
        ``im_change_frac *`` the change in the image, the particle is deleted.
        Default is 0.2.
    min_derr : Float
        The minimum change in the state's error to keep a particle in the
        image. Default is ``'3sig'`` which uses ``3*st.sigma``.
    do_opt : Bool, optional
        Set to False to avoid optimizing particle positions after adding
        them. Default is True
    minmass : Float, optional
        The minimum mass for a particle to be identified as a feature, as
        used by trackpy. Defaults to a decent guess.
    use_tp : Bool, optional
        Set to True to use trackpy to find missing particles inside the
        image. Not recommended since trackpy deliberately cuts out
        particles at the edge of the image. Default is False.

    Returns
    -------
    n_added : Int
        The change in the number of particles; i.e the number added - number
        removed.
    new_poses : List
        [N,3] element list of the added particle positions.

    Notes
    -----
    Algorithm Description

    1.  Identify mis-featured regions by how much the local residuals
        deviate from the global residuals, as measured by the standard
        deviation of both.
    2.  Loop over each of those regions, and:
        a.  Remove every particle in the current region.
        b.  Try to add particles in the current region until no more
            can be added while adequately decreasing the error.
        c.  Terminate if at least region_depth regions have been
            checked without successfully adding a particle.

    Because this algorithm is more judicious about chooosing regions to
    check, and more aggressive about removing particles in those regions,
    it runs faster and does a better job than the (global) add_subtract.
    However, this function usually does not work better as an initial add-
    subtract on an image, since (1) it doesn't check for removing small/big
    particles per se, and (2) when the poorly-featured regions of the image
    are large or when the fit is bad, it will remove essentially all of the
    particles, taking a long time. As a result, it's usually best to do a
    normal add_subtract first and using this function for tough missing or
    double-featured particles.
    """
    # 1. Find regions of poor tiles:
    tiles = identify_misfeatured_regions(
        st, filter_size=filter_size, sigma_cutoff=sigma_cutoff)

    # 2. Add and subtract in the regions:
    n_empty = 0
    n_added = 0
    new_poses = []
    for tile in tiles:
        delta_n, added_inds = add_subtract_misfeatured_tile(st, tile, **kwargs)
        if delta_n == 0:
            n_empty += 1
            # stop once enough regions in a row yielded nothing
            if n_empty > region_depth:
                break
        else:
            n_added += delta_n
            new_poses.extend(st.obj_get_positions()[added_inds])
    return n_added, new_poses
def guess_invert(st):
    """Guesses whether particles are bright on a dark bkg or vice-versa

    Works by checking whether the intensity at the particle centers is
    brighter or darker than the average intensity of the image, by
    comparing the median intensities of each.

    Parameters
    ----------
    st : :class:`peri.states.ImageState`

    Returns
    -------
    invert : bool
        Whether to invert the image for featuring.
    """
    positions = st.obj_get_positions()
    pixel_inds = np.round(positions).astype('int')
    # keep only particle centers that land inside the un-padded image
    in_image = st.ishape.translate(-st.pad).contains(pixel_inds)
    center_values = st.data[tuple(pixel_inds[in_image].T)]
    # dark particles -> centers dimmer than the typical pixel -> invert
    return np.median(center_values) < np.median(st.data)
def load_wisdom(wisdomfile):
    """
    Prime FFTW with knowledge of which FFTs are best on this machine by
    loading 'wisdom' from the file ``wisdomfile``. If the file is absent
    or unreadable, fresh wisdom is generated and saved there instead.
    """
    if wisdomfile is None:
        return

    try:
        # FIX: close the file handle deterministically (was a bare
        # ``pickle.load(open(...))`` that leaked the handle)
        with open(wisdomfile, 'rb') as f:
            pyfftw.import_wisdom(pickle.load(f))
    except (IOError, TypeError):
        log.warn("No wisdom present, generating some at %r" % wisdomfile)
        save_wisdom(wisdomfile)
def save_wisdom(wisdomfile):
    """
    Save the acquired 'wisdom' generated by FFTW to file so that future
    initializations of FFTW will be faster.
    """
    # FIX: collapse the redundant ``is None`` + truthiness double-check;
    # any falsy path (None, '') is still a no-op as before
    if not wisdomfile:
        return
    # FIX: close the file handle deterministically with a context manager
    with open(wisdomfile, 'wb') as f:
        pickle.dump(pyfftw.export_wisdom(), f, protocol=2)
def tile_overlap(inner, outer, norm=False):
    """ How much of inner is in outer by volume """
    # volume of ``inner`` that falls outside ``outer``
    scale = 1.0/inner.volume if norm else 1.0
    uncovered = inner.volume - util.Tile.intersection(inner, outer).volume
    return scale * uncovered
def separate_particles_into_groups(s, region_size=40, bounds=None):
    """
    Given a state, returns a list of groups of particles. Each group of
    particles are located near each other in the image. Every particle
    located in the desired region is contained in exactly 1 group.

    Parameters:
    -----------
    s : state
        The PERI state to find particles in.
    region_size: int or list of ints
        The size of the box. Groups particles into boxes of shape region_size.
        If region_size is a scalar, the box is a cube of length region_size.
        Default is 40.
    bounds: 2-element list-like of 3-element lists.
        The sub-region of the image over which to look for particles.
            bounds[0]: The lower-left  corner of the image region.
            bounds[1]: The upper-right corner of the image region.
        Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
        image size, i.e. the default places every particle in the image
        somewhere in the groups.

    Returns:
    -----------
    particle_groups: list
        Each element of particle_groups is an int numpy.ndarray of the
        group of nearby particles. Only contains groups with a nonzero
        number of particles, so the elements don't necessarily correspond
        to a given image region.
    """
    if bounds is None:
        imtile = s.oshape.translate(-s.pad)
    else:
        imtile = util.Tile(bounds[0], bounds[1])

    # does all particle including out of image, is that correct?
    region = util.Tile(region_size, dim=s.dim)
    # number of regions needed to tile the bounded image in each dimension
    n_regions = np.ceil(imtile.shape.astype('float') / region.shape)
    offsets = util.Tile(n_regions).coords(form='vector')
    offsets = offsets.reshape(-1, offsets.shape[-1])

    positions = s.obj_get_positions()
    groups = []
    for offset in offsets:
        box = region.copy().translate(region.shape * offset - s.pad)
        members = find_particles_in_tile(positions, box)
        # keep only non-empty groups
        if len(members) > 0:
            groups.append(members)
    return groups
def create_comparison_state(image, position, radius=5.0, snr=20,
        method='constrained-cubic', extrapad=2, zscale=1.0):
    """
    Take a platonic image and position and create a state which we can
    use to sample the error for peri. Also return the blurred platonic
    image so we can vary the noise on it later
    """
    # first pad the image slightly since they are pretty small
    padded = common.pad(image, extrapad, 0)

    # place that into a new image at the expected parameters
    s = init.create_single_particle_state(
        imsize=np.array(padded.shape), sigma=1.0/snr, radius=radius,
        psfargs={'params': np.array([2.0, 1.0, 3.0]), 'error': 1e-6,
                 'threads': 2},
        objargs={'method': method},
        stateargs={'sigmapad': False, 'pad': 4, 'zscale': zscale})
    s.obj.pos[0] = position + s.pad + extrapad
    s.reset()
    s.model_to_true_image()

    # blur the (inverted) platonic image with the state's psf
    blurred = 1 - np.pad(padded, s.pad, mode='constant', constant_values=0)
    blurred = s.psf.execute(blurred)
    return s, blurred[s.inner]
def perfect_platonic_per_pixel(N, R, scale=11, pos=None, zscale=1.0, returnpix=None):
    """
    Create a perfect platonic sphere of a given radius R by supersampling by a
    factor scale on a grid of size N. Scale must be odd.

    We are able to perfectly position these particles up to 1/scale. Therefore,
    let's only allow those types of shifts for now, but return the actual position
    used for the placement.

    Parameters
    ----------
    N : int
        Linear size of the cubic output image.
    R : float
        Sphere radius, in pixels.
    scale : int, optional
        Supersampling factor per dimension; forced to be odd. Default is 11.
    pos : 3-element numpy.ndarray or None, optional
        Desired center; snapped to the nearest multiple of 1/scale. Default
        None places the sphere at the center of the grid.
    zscale : float, optional
        Anisotropic scaling applied to the first axis. Default is 1.0.
    returnpix : 3-element list or None, optional
        If given, additionally return the supersampled indicator of that pixel.

    Returns
    -------
    image : numpy.ndarray
        The (N, N, N) volume-fraction image.
    pos : numpy.ndarray
        The actual position used for the placement.
    outpix : numpy.ndarray, only if ``returnpix`` is given
        Supersampled indicator for the requested pixel (None if that pixel
        was never visited).
    """
    # enforce odd scale size
    if scale % 2 != 1:
        scale += 1

    if pos is None:
        # place the default position in the center of the grid
        pos = np.array([(N-1)/2.0]*3)

    # limit positions to those that are exact on the size 1./scale
    # positions have the form (d = divisions):
    #    p = N + m/d
    s = 1.0/scale
    f = zscale**2

    i = pos.astype('int')
    p = i + s*((pos - i)/s).astype('int')
    pos = p + 1e-10 # unfortunately needed to break ties

    # make the output arrays
    image = np.zeros((N,)*3)
    # FIX: use range() instead of the Python-2-only xrange()
    x, y, z = np.meshgrid(*(range(N),)*3, indexing='ij')

    # FIX: define outpix up front so a truthy ``returnpix`` that never
    # matches a visited pixel no longer raises NameError at the return
    outpix = None

    # for each real pixel in the image, integrate a bunch of superres pixels
    for x0, y0, z0 in zip(x.flatten(), y.flatten(), z.flatten()):
        # short-circuit things that are just too far away!
        ddd = np.sqrt(f*(x0-pos[0])**2 + (y0-pos[1])**2 + (z0-pos[2])**2)
        if ddd > R + 4:
            image[x0, y0, z0] = 0.0
            continue

        # otherwise, build the local mesh and count the volume
        xp, yp, zp = np.meshgrid(
            *(np.linspace(c-0.5+s/2, c+0.5-s/2, scale, endpoint=True)
              for c in (x0, y0, z0)),
            indexing='ij'
        )
        ddd = np.sqrt(f*(xp-pos[0])**2 + (yp-pos[1])**2 + (zp-pos[2])**2)

        if returnpix is not None and returnpix == [x0, y0, z0]:
            outpix = 1.0 * (ddd < R)

        vol = (1.0*(ddd < R) + 0.0*(ddd == R)).sum()
        image[x0, y0, z0] = vol / float(scale**3)

    if returnpix:
        return image, pos, outpix
    return image, pos
def translate_fourier(image, dx):
    """ Translate an image in fourier-space with plane waves """
    N = image.shape[0]
    freqs = 2*np.pi*np.fft.fftfreq(N)
    # phase[i, j, k] = freqs[i]*dx[0] + freqs[j]*dx[1] + freqs[k]*dx[2]
    phase = (freqs[:, None, None]*dx[0] + freqs[None, :, None]*dx[1]
             + freqs[None, None, :]*dx[2])
    shifted = np.fft.fftn(image) * np.exp(-1.j*phase)
    return np.real(np.fft.ifftn(shifted))
def users():
    """Load default users and groups."""
    from invenio_groups.models import Group, Membership, \
        PrivacyPolicy, SubscriptionPolicy

    # two demo accounts: an administrator and a plain reader
    # NOTE(review): passwords are demo fixtures only; never reuse in production
    admin = accounts.datastore.create_user(
        email='admin@inveniosoftware.org',
        password=encrypt_password('123456'),
        active=True,
    )
    reader = accounts.datastore.create_user(
        email='reader@inveniosoftware.org',
        password=encrypt_password('123456'),
        active=True,
    )

    # one 'admins' group plus ten numbered demo groups, all administered
    # by the admin account
    admins = Group.create(name='admins', admins=[admin])
    for i in range(10):
        Group.create(name='group-{0}'.format(i), admins=[admin])

    # the reader is an ordinary member of the admins group
    Membership.create(admins, reader)
    db.session.commit()
q262618 | BarnesInterpolation1D._weight | validation | def _weight(self, rsq, sigma=None):
"""weighting function for Barnes"""
sigma = sigma or self.filter_size
if not self.clip:
o = np.exp(-rsq / (2*sigma**2))
else:
o = np.zeros(rsq.shape, dtype='float')
m = (rsq < self.clipsize**2)
o[m] = np.exp(-rsq[m] / (2*sigma**2))
return o | python | {
"resource": ""
} |
q262619 | BarnesInterpolation1D._eval_firstorder | validation | def _eval_firstorder(self, rvecs, data, sigma):
"""The first-order Barnes approximation"""
if not self.blocksize:
dist_between_points = self._distance_matrix(rvecs, self.x)
gaussian_weights = self._weight(dist_between_points, sigma=sigma)
return gaussian_weights.dot(data) / gaussian_weights.sum(axis=1)
else:
# Now rather than calculating the distance matrix all at once,
# we do it in chunks over rvecs
ans = np.zeros(rvecs.shape[0], dtype='float')
bs = self.blocksize
for a in range(0, rvecs.shape[0], bs):
dist = self._distance_matrix(rvecs[a:a+bs], self.x)
weights = self._weight(dist, sigma=sigma)
ans[a:a+bs] += weights.dot(data) / weights.sum(axis=1)
return ans | python | {
"resource": ""
} |
q262620 | BarnesInterpolation1D._newcall | validation | def _newcall(self, rvecs):
"""Correct, normalized version of Barnes"""
# 1. Initial guess for output:
sigma = 1*self.filter_size
out = self._eval_firstorder(rvecs, self.d, sigma)
# 2. There are differences between 0th order at the points and
# the passed data, so we iterate to remove:
ondata = self._eval_firstorder(self.x, self.d, sigma)
for i in range(self.iterations):
out += self._eval_firstorder(rvecs, self.d-ondata, sigma)
ondata += self._eval_firstorder(self.x, self.d-ondata, sigma)
sigma *= self.damp
return out | python | {
"resource": ""
} |
q262621 | BarnesInterpolationND._distance_matrix | validation | def _distance_matrix(self, a, b):
"""Pairwise distance between each point in `a` and each point in `b`"""
def sq(x): return (x * x)
# matrix = np.sum(map(lambda a,b: sq(a[:,None] - b[None,:]), a.T,
# b.T), axis=0)
# A faster version than above:
matrix = sq(a[:, 0][:, None] - b[:, 0][None, :])
for x, y in zip(a.T[1:], b.T[1:]):
matrix += sq(x[:, None] - y[None, :])
return matrix | python | {
"resource": ""
} |
q262622 | ChebyshevInterpolation1D._c2x | validation | def _c2x(self, c):
""" Convert cheb coordinates to windowdow coordinates """
return 0.5 * (self.window[0] + self.window[1] +
c * (self.window[1] - self.window[0])) | python | {
"resource": ""
} |
def tk(self, k, x):
    """Evaluate the k-th Chebyshev polynomial at window coordinate(s) `x`.

    The coordinate is first mapped into [-1, 1] via ``self._x2c``.
    """
    # Unit coefficient vector e_k selects exactly T_k.
    coeffs = np.zeros(k + 1)
    coeffs[k] = 1.0
    return np.polynomial.chebyshev.chebval(self._x2c(x), coeffs)
def resolve_admin_type(admin):
    """Return 'User' for user-like admins, else the admin's class name."""
    if admin is current_user or isinstance(admin, UserMixin):
        return 'User'
    return admin.__class__.__name__
def validate(cls, policy):
    """Return True if `policy` is a recognized subscription policy."""
    return policy in (cls.OPEN, cls.APPROVAL, cls.CLOSED)
def validate(cls, policy):
    """Return True if `policy` is a recognized privacy policy."""
    return policy in (cls.PUBLIC, cls.MEMBERS, cls.ADMINS)
def validate(cls, state):
    """Return True if `state` is a recognized membership state."""
    return state in (cls.ACTIVE, cls.PENDING_ADMIN, cls.PENDING_USER)
def delete(self):
    """Delete this group together with its memberships and admin links."""
    with db.session.begin_nested():
        # Clear dependent rows first so no orphans survive the delete.
        for query in (Membership.query_by_group(self),
                      GroupAdmin.query_by_group(self),
                      GroupAdmin.query_by_admin(self)):
            query.delete()
        db.session.delete(self)
def update(self, name=None, description=None, privacy_policy=None,
           subscription_policy=None, is_managed=None):
    """Update this group's editable fields; ``None`` leaves a field alone.

    :param name: Name of group.
    :param description: Description of group.
    :param privacy_policy: PrivacyPolicy (applied only when valid).
    :param subscription_policy: SubscriptionPolicy (applied only when valid).
    :returns: Updated group.
    """
    with db.session.begin_nested():
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
        privacy_ok = (privacy_policy is not None and
                      PrivacyPolicy.validate(privacy_policy))
        if privacy_ok:
            self.privacy_policy = privacy_policy
        subscription_ok = (subscription_policy is not None and
                           SubscriptionPolicy.validate(subscription_policy))
        if subscription_ok:
            self.subscription_policy = subscription_policy
        if is_managed is not None:
            self.is_managed = is_managed
        db.session.merge(self)
    return self
def get_by_name(cls, name):
    """Return the group named `name`, or None when no such group exists."""
    query = cls.query.filter_by(name=name)
    try:
        return query.one()
    except NoResultFound:
        return None
def query_by_names(cls, names):
    """Build a query selecting groups whose name is in `names` (a list)."""
    assert isinstance(names, list)
    name_filter = cls.name.in_(names)
    return cls.query.filter(name_filter)
def query_by_user(cls, user, with_pending=False, eager=False):
    """Query the groups `user` belongs to or administers.

    :param user: User object.
    :param bool with_pending: Include pending memberships too.
    :param bool eager: Eagerly fetch group members.
    :returns: Query object.
    """
    uid = user.get_id()
    # Groups reached through membership ...
    member_q = Group.query.join(Membership).filter_by(user_id=uid)
    if not with_pending:
        member_q = member_q.filter_by(state=MembershipState.ACTIVE)
    # ... plus groups reached through adminship.
    admin_q = Group.query.join(GroupAdmin).filter_by(
        admin_id=uid, admin_type=resolve_admin_type(user))
    if eager:
        member_q = member_q.options(joinedload(Group.members))
        admin_q = admin_q.options(joinedload(Group.members))
    ids = member_q.union(admin_q).with_entities(Group.id)
    return Group.query.filter(Group.id.in_(ids))
def search(cls, query, q):
    """Restrict `query` to groups whose name contains `q`."""
    pattern = '%{0}%'.format(q)
    return query.filter(Group.name.like(pattern))
def add_member(self, user, state=MembershipState.ACTIVE):
    """Add `user` to this group with the given membership state.

    :param user: User to be added as a group member.
    :param state: MembershipState. Default: MembershipState.ACTIVE.
    :returns: The new Membership object, or None.
    """
    membership = Membership.create(self, user, state)
    return membership
def invite_by_emails(self, emails):
    """Invite the user matching each address in `emails`.

    Addresses without a matching account yield None in the result list.
    :param list emails: Emails of users that shall be invited.
    :returns list: Newly created Memberships or Nones.
    """
    assert emails is None or isinstance(emails, list)

    def _invite_one(address):
        try:
            user = User.query.filter_by(email=address).one()
            return self.invite(user)
        except NoResultFound:
            return None

    return [_invite_one(address) for address in emails]
def is_member(self, user, with_pending=False):
    """Tell whether `user` belongs to this group.

    :param user: User to be checked.
    :param bool with_pending: Count pending memberships as membership.
    :returns: True or False.
    """
    membership = Membership.get(self, user)
    if membership is None:
        return False
    return with_pending or membership.state == MembershipState.ACTIVE
def can_see_members(self, user):
    """Tell whether `user` may view this group's member list.

    Implicitly returns None for an unrecognized privacy policy.
    """
    policy = self.privacy_policy
    if policy == PrivacyPolicy.PUBLIC:
        return True
    if policy == PrivacyPolicy.MEMBERS:
        return self.is_member(user) or self.is_admin(user)
    if policy == PrivacyPolicy.ADMINS:
        return self.is_admin(user)
def can_invite_others(self, user):
    """Tell whether `user` may invite people into this group.

    The check concerns only the inviter, never the invitees. Managed
    groups accept no invitations at all; admins may always invite;
    otherwise any non-CLOSED subscription policy allows inviting.
    """
    if self.is_managed:
        return False
    return bool(self.is_admin(user) or
                self.subscription_policy != SubscriptionPolicy.CLOSED)
def get(cls, group, user):
    """Return the Membership linking `user` and `group`, or None."""
    try:
        return cls.query.filter_by(
            user_id=user.get_id(), group=group).one()
    except Exception:
        # Mirrors the historical broad catch: any lookup failure -> None.
        return None
def _filter(cls, query, state=MembershipState.ACTIVE, eager=None):
    """Restrict `query` to `state` and eagerly load the given fields."""
    filtered = query.filter_by(state=state)
    for field in (eager or []):
        filtered = filtered.options(joinedload(field))
    return filtered
def query_by_user(cls, user, **kwargs):
    """Query `user`'s memberships; extra kwargs are passed to ``_filter``."""
    base = cls.query.filter_by(user_id=user.get_id())
    return cls._filter(base, **kwargs)
def query_invitations(cls, user, eager=False):
    """Query memberships still awaiting `user`'s confirmation."""
    eager_fields = [Membership.group] if eager else False
    return cls.query_by_user(user, state=MembershipState.PENDING_USER,
                             eager=eager_fields)
def query_requests(cls, admin, eager=False):
    """Query membership requests awaiting a decision from `admin`."""
    # Groups directly administered (superadmins see every group).
    if hasattr(admin, 'is_superadmin') and admin.is_superadmin:
        admin_groups = GroupAdmin.query.with_entities(GroupAdmin.group_id)
    else:
        admin_groups = GroupAdmin.query_by_admin(admin).with_entities(
            GroupAdmin.group_id)
    direct = Membership.query.filter(
        Membership.state == MembershipState.PENDING_ADMIN,
        Membership.id_group.in_(admin_groups),
    )
    # Requests on groups whose admin is a group the user is a member of.
    member_of = Membership.query_by_user(
        user=admin, state=MembershipState.ACTIVE
    ).with_entities(Membership.id_group)
    via_groups = GroupAdmin.query.filter(
        GroupAdmin.admin_type == 'Group',
        GroupAdmin.admin_id.in_(member_of),
    ).with_entities(GroupAdmin.group_id)
    indirect = Membership.query.filter(
        Membership.state == MembershipState.PENDING_ADMIN,
        Membership.id_group.in_(via_groups))
    return direct.union(indirect)
def query_by_group(cls, group_or_id, with_invitations=False, **kwargs):
    """Query the members of a group (given as a Group object or raw id)."""
    if isinstance(group_or_id, Group):
        group_id = group_or_id.id
    else:
        group_id = group_or_id
    if with_invitations:
        # Active members plus outstanding invitations.
        return cls.query.filter(
            Membership.id_group == group_id,
            db.or_(
                Membership.state == MembershipState.PENDING_USER,
                Membership.state == MembershipState.ACTIVE
            )
        )
    return cls._filter(cls.query.filter_by(id_group=group_id), **kwargs)
def search(cls, query, q):
    """Restrict `query` to members whose email contains `q`."""
    pattern = '%{0}%'.format(q)
    return query.join(User).filter(User.email.like(pattern))
def order(cls, query, field, s):
    """Order `query` by `field`; `s` is 'asc' or 'desc' (else unchanged)."""
    directions = {'asc': asc, 'desc': desc}
    if s in directions:
        query = query.order_by(directions[s](field))
    return query
def create(cls, group, user, state=MembershipState.ACTIVE):
    """Insert and return a membership of `user` in `group`."""
    with db.session.begin_nested():
        membership = cls(
            user_id=user.get_id(),
            id_group=group.id,
            state=state,
        )
        db.session.add(membership)
    return membership
def delete(cls, group, user):
    """Remove the membership of `user` in `group`, if any."""
    with db.session.begin_nested():
        target = cls.query.filter_by(group=group, user_id=user.get_id())
        target.delete()
def accept(self):
    """Flip this membership into the ACTIVE state."""
    with db.session.begin_nested():
        self.state = MembershipState.ACTIVE
        db.session.merge(self)
def create(cls, group, admin):
    """Insert and return a GroupAdmin row linking `group` and `admin`.

    :param group: Group object.
    :param admin: Admin object.
    :raises: IntegrityError on duplicates.
    """
    with db.session.begin_nested():
        group_admin = cls(group=group, admin=admin)
        db.session.add(group_admin)
    return group_admin
def get(cls, group, admin):
    """Return the GroupAdmin row for (`group`, `admin`), or None."""
    try:
        return cls.query.filter_by(
            group=group,
            admin_id=admin.get_id(),
            admin_type=resolve_admin_type(admin),
        ).one()
    except Exception:
        # Broad on purpose: any lookup failure maps to None.
        return None
def delete(cls, group, admin):
    """Remove `admin` from `group`'s administrators.

    Raises when no such admin row exists (``.one()`` semantics).
    """
    with db.session.begin_nested():
        row = cls.query.filter(
            cls.admin == admin, cls.group == group).one()
        db.session.delete(row)
def query_by_admin(cls, admin):
    """Query every GroupAdmin row held by `admin`."""
    return cls.query.filter_by(
        admin_id=admin.get_id(), admin_type=resolve_admin_type(admin))
def query_admins_by_group_ids(cls, groups_ids=None):
    """Count admins per group, optionally restricted to `groups_ids`."""
    assert groups_ids is None or isinstance(groups_ids, list)
    query = db.session.query(
        Group.id, func.count(GroupAdmin.id)
    ).join(GroupAdmin).group_by(Group.id)
    if groups_ids:
        query = query.filter(Group.id.in_(groups_ids))
    return query
def all(self):
    """Fetch every social-network profile and append it to this manager."""
    for raw_profile in self.api.get(url=PATHS['GET_PROFILES']):
        self.append(Profile(self.api, raw_profile))
    return self
def filter(self, **kwargs):
    """Return a new Profiles manager holding items matching the criteria.

    An item is kept when at least one keyword equals the item's value
    for that key. If this manager is empty, all profiles are fetched
    from Buffer first.
    """
    if not len(self):
        self.all()
    kept = [item for item in self
            if any(item[key] == value for key, value in kwargs.items())]
    return Profiles(self.api, kept)
def zjitter(jitter=0.0, radius=5):
    """Simulate z-scan jitter for a single-particle image.

    `jitter` is the fractional-pixel laser offset applied per z scan
    plane. Returns (reference_state, jittered_image, mean_position).
    """
    psfsize = np.array([2.0, 1.0, 3.0])
    # Base state holding one particle.
    base = init.create_single_particle_state(imsize=4*radius,
            radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
    crop = np.s_[base.pad:-base.pad, base.pad:-base.pad, base.pad:-base.pad]
    # Build the image one z-plane at a time, each with its own jitter.
    jittered = 0*base.get_model_image()[crop]
    mean_pos = 0*base.obj.pos[0]
    for plane in xrange(jittered.shape[0]):
        offset = jitter*np.random.randn(3)*np.array([1, 0, 0])
        base.obj.pos[0] = np.array(base.image.shape)/2 + offset
        base.reset()
        jittered[plane] = base.get_model_image()[crop][plane]
        mean_pos += base.obj.pos[0]
    mean_pos /= float(jittered.shape[0])
    # Fresh reference state at the expected (unjittered) parameters.
    ref = init.create_single_particle_state(imsize=4*radius, sigma=0.05,
            radius=radius, psfargs={'params': psfsize, 'error': 1e-6})
    ref.reset()
    return ref, jittered, mean_pos
def interactions(self):
    """Fetch detailed interaction records (favorites, retweets, likes)
    for this social media update."""
    url = PATHS['GET_INTERACTIONS'] % self.id
    response = self.api.get(url=url)
    self.__interactions = [ResponseObject(item)
                           for item in response['interactions']]
    return self.__interactions
def edit(self, text, media=None, utc=None, now=None):
    """Edit this status update in place and return the new Update."""
    url = PATHS['EDIT'] % self.id
    parts = ["text=%s&" % text]
    if now:
        parts.append("now=%s&" % now)
    if utc:
        parts.append("utc=%s&" % utc)
    if media:
        for media_type, media_item in media.iteritems():
            parts.append("media[%s]=%s&" % (media_type, media_item))
    response = self.api.post(url=url, data="".join(parts))
    return Update(api=self.api, raw_response=response['update'])
def publish(self):
    """Share this pending update immediately; remaining queue times are
    recalculated by the service."""
    return self.api.post(url=PATHS['PUBLISH'] % self.id)
def delete(self):
    """Permanently delete this status update."""
    return self.api.post(url=PATHS['DELETE'] % self.id)
def move_to_top(self):
    """Move this update to the head of the queue.

    Queue times are recalculated; returns the update carrying its new
    posting time.
    """
    response = self.api.post(url=PATHS['MOVE_TO_TOP'] % self.id)
    return Update(api=self.api, raw_response=response)
def pending(self):
    """Fetch the updates currently buffered for this profile."""
    url = PATHS['GET_PENDING'] % self.profile_id
    response = self.api.get(url=url)
    self.__pending = [Update(api=self.api, raw_response=item)
                      for item in response['updates']]
    return self.__pending
def sent(self):
    """Fetch the updates already sent from the buffer for this profile."""
    url = PATHS['GET_SENT'] % self.profile_id
    response = self.api.get(url=url)
    self.__sent = [Update(api=self.api, raw_response=item)
                   for item in response['updates']]
    return self.__sent
def shuffle(self, count=None, utc=None):
    """Randomize the send order of this profile's queued updates."""
    url = PATHS['SHUFFLE'] % self.profile_id
    parts = []
    if count:
        parts.append('count=%s&' % count)
    if utc:
        parts.append('utc=%s' % utc)
    return self.api.post(url=url, data=''.join(parts))
def reorder(self, updates_ids, offset=None, utc=None):
    """Reorder this profile's queued updates to match `updates_ids`."""
    url = PATHS['REORDER'] % self.profile_id
    parts = []
    if offset:
        parts.append('offset=%s&' % offset)
    if utc:
        parts.append('utc=%s&' % utc)
    parts.extend("order[]=%s&" % update for update in updates_ids)
    return self.api.post(url=url, data=''.join(parts))
def new(self, text, shorten=None, now=None, top=None, media=None, when=None):
    """Create a new status update for this profile and return it."""
    parts = ["text=%s&" % text, "profile_ids[]=%s&" % self.profile_id]
    if shorten:
        parts.append("shorten=%s&" % shorten)
    if now:
        parts.append("now=%s&" % now)
    if top:
        parts.append("top=%s&" % top)
    if when:
        parts.append("scheduled_at=%s&" % str(when))
    if media:
        for media_type, media_item in media.iteritems():
            parts.append("media[%s]=%s&" % (media_type, media_item))
    response = self.api.post(url=PATHS['CREATE'], data="".join(parts))
    new_update = Update(api=self.api, raw_response=response['updates'][0])
    self.append(new_update)
    return new_update
q262668 | Logger.noformat | validation | def noformat(self):
""" Temporarily do not use any formatter so that text printed is raw """
try:
formats = {}
for h in self.get_handlers():
formats[h] = h.formatter
self.set_formatter(formatter='quiet')
yield
except Exception as e:
raise
finally:
for k,v in iteritems(formats):
k.formatter = v | python | {
"resource": ""
} |
def set_verbosity(self, verbosity='vvv', handlers=None):
    """
    Set the verbosity level of a certain log handler or of all handlers.

    Parameters
    ----------
    verbosity : 'v' to 'vvvvv'
        the level of verbosity, more v's is more verbose
    handlers : string, or list of strings
        handler names can be found in ``peri.logger.types.keys()``
        Current set is::

            ['console-bw', 'console-color', 'rotating-log']
    """
    # Sanitize once and use the sanitized value everywhere. Previously
    # the sanitized value was stored but the RAW string indexed v2l/v2f,
    # so inputs that sanitize() would have corrected raised KeyError.
    verbosity = sanitize(verbosity)
    self.verbosity = verbosity
    self.set_level(v2l[verbosity], handlers=handlers)
    self.set_formatter(v2f[verbosity], handlers=handlers)
def generate_sphere(radius):
    """Return a centered boolean mask of a 3D sphere of the given radius."""
    half = int(np.ceil(radius))
    axis = np.arange(-half, half + 1)
    zz, yy, xx = np.meshgrid(axis, axis, axis, indexing='ij')
    dist = np.sqrt(zz**2 + yy**2 + xx**2)
    # Strictly inside the radius counts as sphere.
    return dist < radius
def local_max_featuring(im, radius=2.5, noise_size=1., bkg_size=None,
        minmass=1., trim_edge=False):
    """Local max featuring: bright spherical particles on a dark background.

    Parameters
    ----------
    im : numpy.ndarray
        The image to identify particles in.
    radius : Float > 0, optional
        Featuring radius of the particles. Default is 2.5
    noise_size : Float, optional
        Gaussian kernel size for smoothing out noise. Default is 1.
    bkg_size : Float or None, optional
        Gaussian kernel size for removing long-wavelength background.
        Default is None, which gives ``2 * radius``.
    minmass : Float, optional
        Keep only particles with ``mass > minmass``. Default is 1.
    trim_edge : Bool, optional
        If True, drop particles found exactly on the image edge, where
        the reflected bandpass frequently produces false positives.
        Default is False.

    Returns
    -------
    pos, mass : numpy.ndarray
        Particle positions and masses.
    """
    if radius <= 0:
        raise ValueError('`radius` must be > 0')
    # 1. Remove noise, then 2. subtract the long-wavelength background.
    smoothed = nd.gaussian_filter(im, noise_size, mode='mirror')
    if bkg_size is None:
        bkg_size = 2*radius
    smoothed -= nd.gaussian_filter(smoothed, bkg_size, mode='mirror')
    # 3. A pixel is a candidate when it is the maximum of its spherical
    # neighborhood and carries enough integrated mass.
    footprint = generate_sphere(radius)
    local_max = nd.maximum_filter(smoothed, footprint=footprint)
    mass_im = nd.convolve(smoothed, footprint, mode='mirror')
    candidates = (local_max == smoothed) & (mass_im > minmass)
    pos = np.transpose(np.nonzero(candidates))
    if trim_edge:
        interior = np.all(pos > 0, axis=1) & np.all(pos + 1 < im.shape, axis=1)
        pos = pos[interior, :].copy()
    masses = mass_im[pos[:, 0], pos[:, 1], pos[:, 2]].copy()
    return pos, masses
def otsu_threshold(data, bins=255):
    """
    Otsu threshold on data.

    Otsu thresholding [1]_ is a method for selecting an intensity value
    for thresholding an image into foreground and background. The
    selected intensity threshold maximizes the inter-class variance.

    Parameters
    ----------
    data : numpy.ndarray
        The data to threshold
    bins : Int or numpy.ndarray, optional
        Bin edges, as passed to numpy.histogram

    Returns
    -------
    numpy.float
        The value of the threshold which maximizes the inter-class
        variance.

    Notes
    -----
    This could be generalized to more than 2 classes.

    References
    ----------
    ..[1] N. Otsu, "A Threshold Selection Method from Gray-level
        Histograms," IEEE Trans. Syst., Man, Cybern., Syst., 9, 1,
        62-66 (1979)
    """
    h0, x0 = np.histogram(data.ravel(), bins=bins)
    h = h0.astype('float') / h0.sum()  # normalize
    x = 0.5*(x0[1:] + x0[:-1])  # bin center
    # Cumulative class probability and cumulative mean. The original
    # per-index Python list comprehensions were O(bins^2); np.cumsum
    # computes the same running sums in O(bins).
    wk = np.cumsum(h)  # omega_k
    mk = np.cumsum(x*h)  # mu_k
    mt = mk[-1]  # mu_T
    # Inter-class variance; the epsilon guards the empty-class limits.
    sb = (mt*wk - mk)**2 / (wk*(1-wk) + 1e-15)  # sigma_b
    ind = sb.argmax()
    return 0.5*(x0[ind] + x0[ind+1])
def harris_feature(im, region_size=5, to_return='harris', scale=0.05):
    """
    Harris-motivated feature detection on a d-dimensional image.

    Parameters
    ----------
    im : numpy.ndarray
        Image to detect features in.
    region_size : Int, optional
        Gaussian smoothing scale for the structure matrix. Default 5.
    to_return : {'harris','matrix','trace-determinant'}
        What to return; 'harris' gives the scalar corner response.
    scale : Float, optional
        Trace-squared weight in the Harris response. Default 0.05.
    """
    ndim = im.ndim
    # 1. Gradient of the image along each axis.
    grads = [nd.sobel(im, axis=i) for i in range(ndim)]
    # 2. Corner-response (structure) matrix, smoothed over region_size.
    # `nd.gaussian_filter` replaces the deprecated (and since removed)
    # `nd.filters.gaussian_filter` namespace; it is the same function.
    matrix = np.zeros((ndim, ndim) + im.shape)
    for a in range(ndim):
        for b in range(ndim):
            matrix[a, b] = nd.gaussian_filter(grads[a]*grads[b], region_size)
    if to_return == 'matrix':
        return matrix
    # 3. Trace and determinant over the leading (ndim, ndim) axes.
    trc = np.trace(matrix, axis1=0, axis2=1)
    det = np.linalg.det(matrix.T).T
    if to_return == 'trace-determinant':
        return trc, det
    # 4. Harris detector response.
    harris = det - scale*trc*trc
    return harris
def sphere_triangle_cdf(dr, a, alpha):
    """Cumulative distribution function for the triangle distribution.

    `a` is accepted for interface compatibility with the other sphere
    profiles but is unused here.
    """
    # Quadratic ramp on the inner half-width, mirrored ramp on the outer.
    lower = (dr + alpha)**2/(2*alpha**2) * ((dr < 0) & (dr > -alpha))
    upper = 1.*(dr > 0) - (alpha - dr)**2/(2*alpha**2) * ((dr > 0) & (dr < alpha))
    return 1 - np.clip(lower + upper, 0, 1)
def sphere_analytical_gaussian_trim(dr, a, alpha=0.2765, cut=1.6):
    """
    See sphere_analytical_gaussian_exact.

    Functional-form terms that are essentially zero (1e-8) for
    |dr| > cut (~1.5) are trimmed, a fine approximation for these
    platonics anyway.
    """
    inside = np.abs(dr) <= cut
    # Evaluate the profile only on the relevant scales.
    rr = dr[inside]
    t = -rr/(alpha*np.sqrt(2))
    q = 0.5*(1 + erf(t)) - np.sqrt(0.5/np.pi)*(alpha/(rr + a + 1e-10))*np.exp(-t*t)
    # Fill the grid: interpolated values inside, constants outside.
    ans = 0*dr
    ans[inside] = q
    ans[dr > cut] = 0
    ans[dr < -cut] = 1
    return ans
def _tile(self, n):
    """Update tile surrounding particle `n`, padded by the support size."""
    center = self._trans(self.pos[n])
    return Tile(center, center).pad(self.support_pad)
q262677 | PlatonicParticlesCollection._i2p | validation | def _i2p(self, ind, coord):
""" Translate index info to parameter name """
return '-'.join([self.param_prefix, str(ind), coord]) | python | {
"resource": ""
} |
def get_update_tile(self, params, values):
    """Support tile required to apply the update (`params`, `values`)."""
    do_global, particles = self._update_type(params)
    if do_global:
        return self.shape.copy()
    # Union of every touched particle's tile before and after the move:
    # remember old values, take tiles, apply the new values, take tiles
    # again, then restore the originals.
    old_values = self.get_values(params)
    tiles = [self._tile(n) for n in particles]
    self.set_values(params, values)
    tiles += [self._tile(n) for n in particles]
    self.set_values(params, old_values)
    return Tile.boundingtile(tiles)
def update(self, params, values):
    """Redraw the particles field to reflect new parameter values."""
    do_global, particles = self._update_type(params)
    # Global changes touch everything, so redrawing from scratch is
    # faster than add/subtract updates.
    if do_global:
        self.set_values(params, values)
        self.initialize()
        return
    # Local change: erase the affected particles, apply the new values,
    # then draw them back at their updated positions/arguments.
    old_args = self._drawargs()
    for n in particles:
        self._draw_particle(self.pos[n], *listify(old_args[n]), sign=-1)
    self.set_values(params, values)
    new_args = self._drawargs()
    for n in particles:
        self._draw_particle(self.pos[n], *listify(new_args[n]), sign=+1)
def param_particle(self, ind):
    """Parameter names (position and radius) for the given particle(s)."""
    inds = self._vps(listify(ind))
    return [self._i2p(i, c) for i in inds for c in ('z', 'y', 'x', 'a')]
"resource": ""
} |
def param_particle_pos(self, ind):
    """Parameter names for the position (z, y, x) of the given particle(s)."""
    inds = self._vps(listify(ind))
    return [self._i2p(i, c) for i in inds for c in ('z', 'y', 'x')]
"resource": ""
} |
def param_particle_rad(self, ind):
    """Parameter names for the radius of the given particle(s)."""
    inds = self._vps(listify(ind))
    return [self._i2p(i, 'a') for i in inds]
"resource": ""
} |
def add_particle(self, pos, rad):
    """
    Add one or more particles to the collection.

    Parameters
    ----------
    pos : array-like [N, 3]
        Positions of the new particles.
    rad : array-like [N]
        Corresponding radii of the new particles.

    Returns
    -------
    inds : numpy.ndarray
        Indices assigned to the added particles.
    """
    rad = listify(rad)
    inds = np.arange(self.N, self.N + len(rad))
    # append the new positions but give the particles zero radius for
    # now -- a zero-radius particle draws nothing, matching the image
    self.pos = np.vstack([self.pos, pos])
    self.rad = np.hstack([self.rad, np.zeros(len(rad))])
    # make the new parameters visible globally
    self.setup_variables()
    self.trigger_parameter_change()
    # grow the radii to their requested values, which draws the particles
    rad_params = self.param_particle_rad(inds)
    self.trigger_update(rad_params, rad)
    return inds
"resource": ""
} |
def _update_type(self, params):
    """Classify an update: (zscale changed?, set of affected particle indices)."""
    dozscale = False
    indices = set()
    for p in listify(params):
        typ, ind = self._p2i(p)
        indices.add(ind)
        if typ == 'zscale':
            dozscale = True
    return dozscale, indices
"resource": ""
} |
def _tile(self, n):
    """Bounding tile of particle `n`, stretched along z by 1/zscale."""
    scale = np.array([1.0/self.zscale, 1, 1])
    center = self._trans(self.pos[n])
    extent = scale*self.rad[n]
    return Tile(center - extent, center + extent).pad(self.support_pad)
"resource": ""
} |
def update(self, params, values):
    """Forward an update, clamping any negative radius values to zero."""
    params = listify(params)
    values = listify(values)
    # a '-a' suffix marks a radius parameter; radii cannot go negative
    clipped = [0.0 if (p[-2:] == '-a' and v < 0) else v
               for p, v in zip(params, values)]
    super(PlatonicSpheresCollection, self).update(params, clipped)
"resource": ""
} |
def rmatrix(self):
    """
    Composite rotation matrix applied to the slab normal.

    Composes a rotation by `theta` about the z-axis with a rotation by
    `phi` about the y-axis; the z-rotation acts first (the returned
    matrix is R_y(phi) @ R_z(theta)).
    """
    t = self.param_dict[self.lbl_theta]
    rot_z = np.array([[np.cos(t), -np.sin(t), 0],
                      [np.sin(t),  np.cos(t), 0],
                      [0,          0,         1]])
    p = self.param_dict[self.lbl_phi]
    rot_y = np.array([[ np.cos(p), 0, np.sin(p)],
                      [ 0,         1, 0        ],
                      [-np.sin(p), 0, np.cos(p)]])
    return rot_y.dot(rot_z)
"resource": ""
} |
def j2(x):
    """Fast Bessel J2 via the recurrence J2(x) = (2/x) J1(x) - J0(x)."""
    # the 1e-15 offset avoids a divide-by-zero warning at the origin
    out = (2.0/(x + 1e-15))*j1(x) - j0(x)
    # the recurrence is 0/0 at x = 0, where J2 is exactly zero
    out[x == 0] = 0
    return out
"resource": ""
} |
def calc_pts_hg(npts=20):
    """Hermite-Gauss quadrature points/weights on [0, inf) for even integrands."""
    all_pts, all_wts = np.polynomial.hermite.hermgauss(2*npts)
    # keep only the positive half of the symmetric rule
    pts = all_pts[npts:]
    # divide out the e^{-x^2} Hermite weight so the rule integrates f directly
    wts = all_wts[npts:]*np.exp(pts*pts)
    return pts, wts
"resource": ""
} |
def calc_pts_lag(npts=20):
    """
    Gauss-Laguerre quadrature points rescaled for line scan integration.

    Parameters
    ----------
    npts : {15, 20, 25}, optional
        The number of quadrature points; any other value raises KeyError.

    Returns
    -------
    pts, wts : numpy.ndarray
        Points and weights such that sum(wts * f(pts)) approximates the
        integral of f over [0, inf).

    Notes
    -----
    The scale was fixed internally as the best rescaling for a line-scan
    integral, checked numerically for the allowed npts:
        (15, 0.072144) : err 0.002193
        (20, 0.051532) : err 0.001498
        (25, 0.043266) : err 0.001209
    The previous HG(20) error was ~0.13ish.
    """
    scales = {15: 0.072144, 20: 0.051532, 25: 0.043266}
    scl = scales[npts]
    pts0, wts0 = np.polynomial.laguerre.laggauss(npts)
    # substitute x = sinh(scl*t), which stretches the nodes; the cosh
    # Jacobian and the exp(t) factor (removing the Laguerre e^{-t}
    # weight) fold into the new weights
    pts = np.sinh(scl*pts0)
    wts = scl*np.cosh(scl*pts0)*wts0*np.exp(pts0)
    return pts, wts
"resource": ""
} |
def f_theta(cos_theta, zint, z, n2n1=0.95, sph6_ab=None, **kwargs):
    """
    Wavefront aberration of an aberrated, defocused lens, vs theta and z.

    Computes the z- and theta-dependent part of the wavefront distortion
    for a lens with defocus and coverslip-mismatch spherical aberration.
    (The rho portion integrates analytically to Bessel functions.)

    Parameters
    ----------
    cos_theta : numpy.ndarray
        The N values of cos(theta) at which to compute the wavefront.
    zint : Float
        The position of the lens relative to the interface.
    z : numpy.ndarray
        The M z-values to evaluate at; independent of `cos_theta.size`.
    n2n1 : Float, optional
        Ratio of the immersed medium's index to the optics'. Default 0.95.
    sph6_ab : Float or None, optional
        If not None (and not NaN), adds residual 6th-order spherical
        aberration proportional to this value. Default is None.

    Returns
    -------
    numpy.ndarray
        The aberrated wavefront, shape [z.size, cos_theta.size].
    """
    defocus = np.outer(np.ones_like(z)*zint, cos_theta)
    mismatch = np.outer(zint + z, csqrt(n2n1**2 - 1 + cos_theta**2))
    front = defocus - mismatch
    # optional residual 6th-order spherical aberration term
    if (sph6_ab is not None) and (not np.isnan(sph6_ab)):
        sec2 = 1.0/(cos_theta*cos_theta)
        front = front + sph6_ab*(sec2 - 1)*(sec2 - 2)*cos_theta
    # force evanescent components to decay rather than grow
    if front.dtype == np.dtype('complex128'):
        front.imag = -np.abs(front.imag)
    return front
"resource": ""
} |
def get_Kprefactor(z, cos_theta, zint=100.0, n2n1=0.95, get_hdet=False,
        **kwargs):
    """
    Shared prefactor of the electric-field integrals used by get_K.

    Combines the phase factor exp(-1j * wavefront) with (for the
    illumination path) the sqrt(cos theta) apodization; it is the part
    of the integrand that does not depend on which K integral is taken.

    Parameters
    ----------
    z : numpy.ndarray
        Distances along the optical axis; size unrelated to `cos_theta`.
    cos_theta : numpy.ndarray
        Positions on the incoming spherical wavefront; size unrelated
        to `z`.
    zint : Float, optional
        Position of the optical interface, in units of 1/k. Default 100.
    n2n1 : Float, optional
        Index mismatch ratio between optics and sample. Default 0.95.
    get_hdet : Bool, optional
        True for the detection prefactor; False (default) includes the
        illumination apodization.

    Returns
    -------
    numpy.ndarray
        Prefactor of shape [z.size, cos_theta.size].
    """
    phase = f_theta(cos_theta, zint, z, n2n1=n2n1, **kwargs)
    prefactor = np.exp(-1j*phase)
    # the illumination path carries an extra sqrt(cos theta) apodization
    if not get_hdet:
        prefactor = prefactor*np.outer(np.ones_like(z), np.sqrt(cos_theta))
    return prefactor
"resource": ""
} |
def get_K(rho, z, alpha=1.0, zint=100.0, n2n1=0.95, get_hdet=False, K=1,
        Kprefactor=None, return_Kprefactor=False, npts=20, **kwargs):
    """
    Calculates one of three electric field integrals.

    Internal function for calculating point spread functions. Returns
    one of three electric field integrals that describe the electric
    field near the focus of a lens; these integrals appear in Hell's psf
    calculation.

    Parameters
    ----------
    rho : numpy.ndarray
        Rho in cylindrical coordinates, in units of 1/k.
    z : numpy.ndarray
        Z in cylindrical coordinates, in units of 1/k. `rho` and
        `z` must be the same shape.
    alpha : Float, optional
        The acceptance angle of the lens, on (0,pi/2). Default is 1.
    zint : Float, optional
        The distance of the lens's unaberrated focal point from the
        optical interface, in units of 1/k. Default is 100.
    n2n1 : Float, optional
        The ratio n2/n1 of the index mismatch between the sample
        (index n2) and the optical train (index n1). Must be on
        [0,inf) but should be near 1. Default is 0.95.
    get_hdet : Bool, optional
        Set to True to get the detection portion of the psf; False
        to get the illumination portion. Default is False.
    K : {1, 2, 3}, optional
        Which of the 3 integrals to evaluate. Default is 1.
    Kprefactor : numpy.ndarray or None
        Calculated internally and optionally returned; pass it back
        to avoid recalculation and increase speed. Default is None.
    return_Kprefactor : Bool, optional
        Set to True to also return the Kprefactor to speed up the
        calculation for the next values of K. Default is False.
    npts : Int, optional
        Number of Gauss-Legendre quadrature points. Default is 20,
        good for x,y,z less than 100 or so.

    Returns
    -------
    kint : numpy.ndarray
        The integral K_i; rho.shape numpy.array.
    [, Kprefactor] : numpy.ndarray
        The K-independent prefactor, returned if `return_Kprefactor`.

    Raises
    ------
    ValueError
        If rho/z are not same-shape ndarrays, or K is not 1, 2, or 3.

    Notes
    -----
    npts=20 gives double precision (no difference between 20, 30, and
    doing all the integrals with scipy.quad). The integrals are only
    over the acceptance angle of the lens, so for moderate x,y,z they
    don't vary too rapidly. For x,y,z, zint large compared to 100, a
    higher npts might be necessary.
    """
    if (not isinstance(rho, np.ndarray)) or (not isinstance(z, np.ndarray)) \
            or (rho.shape != z.shape):
        raise ValueError('rho and z must be np.arrays of same shape.')
    pts, wts = np.polynomial.legendre.leggauss(npts)
    n1n2 = 1.0/n2n1
    # work on the raveled coordinates; reshape back at the end
    rr = np.ravel(rho)
    # map the Legendre nodes from [-1,1] onto [cos(alpha), 1]
    cos_theta = 0.5*(1 - np.cos(alpha))*pts + 0.5*(1 + np.cos(alpha))
    # sin(theta) is shared by all three integrands, so hoist it
    sin_theta = np.sqrt(1 - cos_theta**2)
    if Kprefactor is None:
        Kprefactor = get_Kprefactor(z, cos_theta, zint=zint,
                n2n1=n2n1, get_hdet=get_hdet, **kwargs)
    # each K differs only in its Bessel order and Fresnel-coefficient mix;
    # use the raveled `rr` consistently in all three branches
    if K == 1:
        part = j0(np.outer(rr, sin_theta)) * np.outer(np.ones_like(rr),
                0.5*(get_taus(cos_theta, n2n1=n2n1) +
                     get_taup(cos_theta, n2n1=n2n1) *
                     csqrt(1 - n1n2**2*(1 - cos_theta**2))))
    elif K == 2:
        part = j2(np.outer(rr, sin_theta)) * np.outer(np.ones_like(rr),
                0.5*(get_taus(cos_theta, n2n1=n2n1) -
                     get_taup(cos_theta, n2n1=n2n1) *
                     csqrt(1 - n1n2**2*(1 - cos_theta**2))))
    elif K == 3:
        part = j1(np.outer(rr, sin_theta)) * np.outer(np.ones_like(rr),
                n1n2*get_taup(cos_theta, n2n1=n2n1)*sin_theta)
    else:
        raise ValueError('K=1,2,3 only...')
    integrand = Kprefactor * part
    # Gauss-Legendre sum over theta, rescaled by the interval half-width
    big_wts = np.outer(np.ones_like(rr), wts)
    kint = (big_wts*integrand).sum(axis=1) * 0.5*(1 - np.cos(alpha))
    if return_Kprefactor:
        return kint.reshape(rho.shape), Kprefactor
    else:
        return kint.reshape(rho.shape)
"resource": ""
} |
def get_hsym_asym(rho, z, get_hdet=False, include_K3_det=True, **kwargs):
    """
    Calculates the symmetric and asymmetric portions of a confocal PSF.

    Parameters
    ----------
    rho : numpy.ndarray
        Rho in cylindrical coordinates, in units of 1/k.
    z : numpy.ndarray
        Z in cylindrical coordinates, in units of 1/k. Must be the
        same shape as `rho`
    get_hdet : Bool, optional
        Set to True to get the detection portion of the psf; False
        to get the illumination portion of the psf. Default is True
    include_K3_det : Bool, optional.
        Flag to not calculate the `K3' component for the detection
        PSF, corresponding to (I think) a low-aperature focusing
        lens and no z-polarization of the focused light. Default
        is True, i.e. calculates the K3 component as if the focusing
        lens is high-aperture

    Other Parameters
    ----------------
    alpha : Float, optional
        The acceptance angle of the lens, on (0,pi/2). Default is 1.
    zint : Float, optional
        The distance of the len's unaberrated focal point from the
        optical interface, in units of 1/k. Default is 100.
    n2n1 : Float, optional
        The ratio n2/n1 of the index mismatch between the sample
        (index n2) and the optical train (index n1). Must be on
        [0,inf) but should be near 1. Default is 0.95

    Returns
    -------
    hsym : numpy.ndarray
        `rho`.shape numpy.array of the symmetric portion of the PSF
    hasym : numpy.ndarray
        `rho`.shape numpy.array of the asymmetric portion of the PSF
    """
    # K1 also returns the K-independent prefactor, which K2 and K3
    # reuse to avoid recomputing the phase/apodization factor
    K1, Kprefactor = get_K(rho, z, K=1, get_hdet=get_hdet, Kprefactor=None,
            return_Kprefactor=True, **kwargs)
    K2 = get_K(rho, z, K=2, get_hdet=get_hdet, Kprefactor=Kprefactor,
            return_Kprefactor=False, **kwargs)
    if get_hdet and not include_K3_det:
        # low-aperture detection: drop the z-polarization (K3) component
        K3 = 0*K1
    else:
        K3 = get_K(rho, z, K=3, get_hdet=get_hdet, Kprefactor=Kprefactor,
            return_Kprefactor=False, **kwargs)
    # NOTE(review): hsym and hasym carry the same +0.5*|K3|^2 term;
    # confirm the sign of the K3 contribution to hasym against the
    # PSF reference (Hell's calculation).
    hsym = K1*K1.conj() + K2*K2.conj() + 0.5*(K3*K3.conj())
    hasym = K1*K2.conj() + K2*K1.conj() + 0.5*(K3*K3.conj())
    # the K_i K_j^* sums are real by construction; .real drops the
    # zero imaginary residue
    return hsym.real, hasym.real
"resource": ""
} |
def get_polydisp_pts_wts(kfki, sigkf, dist_type='gaussian', nkpts=3):
    """
    Gauss-quadrature points and weights for polydisperse emitted light.

    Returns points and normalized weights sampling the distribution of
    the final wavevector, in units of the initial wavevector.

    Parameters
    ----------
    kfki : Float
        Mean of the polydisperse outgoing wavevectors.
    sigkf : Float
        Standard deviation of the outgoing wavevectors.
    dist_type : {`gaussian`, `gamma`}, optional
        Distribution of the wavevectors (`laguerre` is accepted as a
        synonym for `gamma`). Default is `gaussian`.
    nkpts : Int, optional
        Number of quadrature points. Default is 3.

    Returns
    -------
    kfkipts : numpy.ndarray
        Quadrature points at which to evaluate kfki.
    wts : numpy.ndarray
        Associated weights, normalized to sum to 1.
    """
    kind = dist_type.lower()
    if kind == 'gaussian':
        pts, wts = np.polynomial.hermite.hermgauss(nkpts)
        # abs() guards against negative wavevectors when sigkf/kfki is large
        kfkipts = np.abs(kfki + sigkf*np.sqrt(2)*pts)
    elif kind in ('laguerre', 'gamma'):
        k_scale = sigkf**2/kfki
        associated_order = kfki**2/sigkf**2 - 1
        # associated Laguerre quadrature is numerically unstable for
        # order >~170, so clip (and warn) rather than fail
        max_order = 150
        if associated_order > max_order or associated_order < (-1 + 1e-3):
            warnings.warn('Numerically unstable sigk, clipping', RuntimeWarning)
            associated_order = np.clip(associated_order, -1 + 1e-3, max_order)
        kfkipts, wts = la_roots(nkpts, associated_order)
        kfkipts = kfkipts*k_scale
    else:
        raise ValueError('dist_type must be either gaussian or laguerre')
    return kfkipts, wts/wts.sum()
"resource": ""
} |
def calculate_linescan_ilm_psf(y,z, polar_angle=0., nlpts=1,
        pinhole_width=1, use_laggauss=False, **kwargs):
    """
    Calculates the illumination PSF for a line-scanning confocal with the
    confocal line oriented along the x direction.

    Parameters
    ----------
    y : numpy.ndarray
        The y points (in-plane, perpendicular to the line direction)
        at which to evaluate the illumination PSF, in units of 1/k.
        Arbitrary shape.
    z : numpy.ndarray
        The z points (optical axis) at which to evaluate the illum-
        ination PSF, in units of 1/k. Must be the same shape as `y`
    polar_angle : Float, optional
        The angle of the illuminating light's polarization with
        respect to the line's orientation along x. Default is 0.
    pinhole_width : Float, optional
        The width of the geometric image of the line projected onto
        the sample, in units of 1/k. Default is 1. The perfect line
        image is assumed to be a Gaussian. If `nlpts` is set to 1,
        the line will always be of zero width.
    nlpts : Int, optional
        The number of points to use for Hermite-gauss quadrature over
        the line's width. Default is 1, corresponding to a zero-width
        line.
    use_laggauss : Bool, optional
        Set to True to use a more-accurate sinh'd Laguerre-Gauss
        quadrature for integration over the line's length (more accurate
        in the same amount of time). Default is False for backwards
        compatibility.

    Other Parameters
    ----------------
    alpha : Float, optional
        The acceptance angle of the lens, on (0,pi/2). Default is 1.
    zint : Float, optional
        The distance of the len's unaberrated focal point from the
        optical interface, in units of 1/k. Default is 100.
    n2n1 : Float, optional
        The ratio n2/n1 of the index mismatch between the sample
        (index n2) and the optical train (index n1). Must be on
        [0,inf) but should be near 1. Default is 0.95

    Returns
    -------
    hilm : numpy.ndarray
        The line illumination, of the same shape as y and z.
    """
    # quadrature points along the line's length (the x direction)
    if use_laggauss:
        x_vals, wts = calc_pts_lag()
    else:
        x_vals, wts = calc_pts_hg()

    #I'm assuming that y,z are already some sort of meshgrid
    # broadcast (y, z) against the x quadrature points: the last axis
    # indexes position along the line
    xg, yg, zg = [np.zeros( list(y.shape) + [x_vals.size] ) for a in range(3)]
    hilm = np.zeros(xg.shape)

    for a in range(x_vals.size):
        xg[...,a] = x_vals[a]
        yg[...,a] = y.copy()
        zg[...,a] = z.copy()

    # Hermite-Gauss points across the line's (Gaussian) width
    y_pinhole, wts_pinhole = np.polynomial.hermite.hermgauss(nlpts)
    y_pinhole *= np.sqrt(2)*pinhole_width
    wts_pinhole /= np.sqrt(np.pi)

    #Pinhole hermgauss first:
    for yp, wp in zip(y_pinhole, wts_pinhole):
        rho = np.sqrt(xg*xg + (yg-yp)*(yg-yp))
        phi = np.arctan2(yg,xg)
        hsym, hasym = get_hsym_asym(rho,zg,get_hdet = False, **kwargs)
        # the asymmetric part couples to the polarization direction
        hilm += wp*(hsym + np.cos(2*(phi-polar_angle))*hasym)

    #Now line hermgauss
    # weight each slice along the line and sum out the line axis;
    # the factor of 2 accounts for the line extending to x < 0
    for a in range(x_vals.size):
        hilm[...,a] *= wts[a]

    return hilm.sum(axis=-1)*2.
"resource": ""
} |
def calculate_linescan_psf(x, y, z, normalize=False, kfki=0.889, zint=100.,
        polar_angle=0., wrap=True, **kwargs):
    """
    Calculates the point spread function of a line-scanning confocal.

    Make x,y,z __1D__ numpy.arrays, with x the direction along the
    scan line. (to make the calculation faster since I dont' need the line
    ilm for each x).

    Parameters
    ----------
    x : numpy.ndarray
        _One_dimensional_ array of the x grid points (along the line
        illumination) at which to evaluate the psf. In units of
        1/k_incoming.
    y : numpy.ndarray
        _One_dimensional_ array of the y grid points (in plane,
        perpendicular to the line illumination) at which to evaluate
        the psf. In units of 1/k_incoming.
    z : numpy.ndarray
        _One_dimensional_ array of the z grid points (along the
        optical axis) at which to evaluate the psf. In units of
        1/k_incoming.
    normalize : Bool, optional
        Set to True to include the effects of PSF normalization on
        the image intensity. Default is False.
    kfki : Float, optional
        The ratio of the final light's wavevector to the incoming.
        Default is 0.889
    zint : Float, optional
        The position of the optical interface, in units of 1/k_incoming
        Default is 100.
    wrap : Bool, optional
        If True, wraps the psf calculation for speed, assuming that
        the input x, y are regularly-spaced points. If x,y are not
        regularly spaced then `wrap` must be set to False. Default is True.
    polar_angle : Float, optional
        The polarization angle of the light (radians) with respect to
        the line direction (x). Default is 0.

    Other Parameters
    ----------------
    alpha : Float
        The opening angle of the lens. Default is 1.
    n2n1 : Float
        The ratio of the index in the 2nd medium to that in the first.
        Default is 0.95

    Returns
    -------
    numpy.ndarray
        A 3D- numpy.array of the point-spread function. Indexing is
        psf[x,y,z]; shape is [x.size, y,size, z.size]
    """
    #0. Set up vecs
    if wrap:
        # keep only the nonnegative half of each axis; the mirror
        # halves are reconstructed from symmetry below
        xpts = vec_to_halfvec(x)
        ypts = vec_to_halfvec(y)
        x3, y3, z3 = np.meshgrid(xpts, ypts, z, indexing='ij')
    else:
        x3,y3,z3 = np.meshgrid(x, y, z, indexing='ij')
    rho3 = np.sqrt(x3*x3 + y3*y3)
    #1. Hilm
    # the line illumination is independent of x, so it is computed on
    # a (y, z) grid only
    if wrap:
        y2,z2 = np.meshgrid(ypts, z, indexing='ij')
        hilm0 = calculate_linescan_ilm_psf(y2, z2, zint=zint,
                polar_angle=polar_angle, **kwargs)
        # mirror y>0 back to y<0, skipping a duplicate y=0 row if present
        if ypts[0] == 0:
            hilm = np.append(hilm0[-1:0:-1], hilm0, axis=0)
        else:
            hilm = np.append(hilm0[::-1], hilm0, axis=0)
    else:
        y2,z2 = np.meshgrid(y, z, indexing='ij')
        hilm = calculate_linescan_ilm_psf(y2, z2, zint=zint,
                polar_angle=polar_angle, **kwargs)
    #2. Hdet
    # detection coordinates are rescaled by kfki (the detected light's
    # wavevector relative to the incoming)
    if wrap:
        #Lambda function that ignores its args but still returns correct values
        func = lambda *args: get_hsym_asym(rho3*kfki, z3*kfki, zint=kfki*zint,
                get_hdet=True, **kwargs)[0]
        hdet = wrap_and_calc_psf(xpts, ypts, z, func)
    else:
        hdet, toss = get_hsym_asym(rho3*kfki, z3*kfki, zint=kfki*zint,
                get_hdet=True, **kwargs)

    # normalize=True: illumination and detection are each normalized to
    # unit sum separately and their product returned as-is;
    # normalize=False: the combined psf is scaled to unit sum instead
    if normalize:
        hilm /= hilm.sum()
        hdet /= hdet.sum()

    # combine: psf[x, y, z] = hdet[x, y, z] * hilm[y, z]
    for a in range(x.size):
        hdet[a] *= hilm

    return hdet if normalize else hdet / hdet.sum()
"resource": ""
} |
def calculate_polychrome_linescan_psf(x, y, z, normalize=False, kfki=0.889,
        sigkf=0.1, zint=100., nkpts=3, dist_type='gaussian', wrap=True,
        **kwargs):
    """
    Calculates the point spread function of a line-scanning confocal with
    polydisperse dye emission.

    Make x,y,z __1D__ numpy.arrays, with x the direction along the
    scan line. (to make the calculation faster since I dont' need the line
    ilm for each x).

    Parameters
    ----------
    x : numpy.ndarray
        _One_dimensional_ array of the x grid points (along the line
        illumination) at which to evaluate the psf. In units of
        1/k_incoming.
    y : numpy.ndarray
        _One_dimensional_ array of the y grid points (in plane,
        perpendicular to the line illumination) at which to evaluate
        the psf. In units of 1/k_incoming.
    z : numpy.ndarray
        _One_dimensional_ array of the z grid points (along the
        optical axis) at which to evaluate the psf. In units of
        1/k_incoming.
    normalize : Bool, optional
        Set to True to include the effects of PSF normalization on
        the image intensity. Default is False.
    kfki : Float, optional
        The mean of the ratio of the final light's wavevector to the
        incoming. Default is 0.889
    sigkf : Float, optional
        The standard deviation of the ratio of the final light's
        wavevector to the incoming. Default is 0.1
    zint : Float, optional
        The position of the optical interface, in units of 1/k_incoming
        Default is 100.
    dist_type : {`gaussian`, `gamma`}, optional
        The distribution of the outgoing light. If 'gaussian' the
        resulting k-values are taken in absolute value. Default
        is `gaussian`
    wrap : Bool, optional
        If True, wraps the psf calculation for speed, assuming that
        the input x, y are regularly-spaced points. If x,y are not
        regularly spaced then `wrap` must be set to False. Default is True.

    Other Parameters
    ----------------
    polar_angle : Float, optional
        The polarization angle of the light (radians) with respect to
        the line direction (x). Default is 0.
    alpha : Float
        The opening angle of the lens. Default is 1.
    n2n1 : Float
        The ratio of the index in the 2nd medium to that in the first.
        Default is 0.95

    Returns
    -------
    numpy.ndarray
        A 3D- numpy.array of the point-spread function. Indexing is
        psf[x,y,z]; shape is [x.size, y,size, z.size]

    Notes
    -----
    Neither distribution type is perfect. If sigkf/k0 is big (>0.5ish)
    then part of the Gaussian is negative. To avoid issues an abs() is
    taken, but then the actual mean and variance are not what is
    supplied. Conversely, if sigkf/k0 is small (<0.0815), then the
    requisite associated Laguerre quadrature becomes unstable. To
    prevent this sigkf/k0 is effectively clipped to be > 0.0815.
    """
    # quadrature points sampling the emission wavevector distribution
    kfkipts, wts = get_polydisp_pts_wts(kfki, sigkf, dist_type=dist_type,
            nkpts=nkpts)

    #0. Set up vecs
    if wrap:
        # keep only the nonnegative half of each axis; the mirror
        # halves are reconstructed from symmetry below
        xpts = vec_to_halfvec(x)
        ypts = vec_to_halfvec(y)
        x3, y3, z3 = np.meshgrid(xpts, ypts, z, indexing='ij')
    else:
        x3,y3,z3 = np.meshgrid(x, y, z, indexing='ij')
    rho3 = np.sqrt(x3*x3 + y3*y3)

    #1. Hilm
    # the illumination is at the (monochromatic) incoming wavevector
    # and is independent of x, so compute it on a (y, z) grid only
    if wrap:
        y2,z2 = np.meshgrid(ypts, z, indexing='ij')
        hilm0 = calculate_linescan_ilm_psf(y2, z2, zint=zint, **kwargs)
        # mirror y>0 back to y<0, skipping a duplicate y=0 row if present
        if ypts[0] == 0:
            hilm = np.append(hilm0[-1:0:-1], hilm0, axis=0)
        else:
            hilm = np.append(hilm0[::-1], hilm0, axis=0)
    else:
        y2,z2 = np.meshgrid(y, z, indexing='ij')
        hilm = calculate_linescan_ilm_psf(y2, z2, zint=zint, **kwargs)

    #2. Hdet
    # detection coordinates scale with each sampled emission wavevector
    if wrap:
        #Lambda function that ignores its args but still returns correct values
        func = lambda x,y,z, kfki=1.: get_hsym_asym(rho3*kfki, z3*kfki,
                zint=kfki*zint, get_hdet=True, **kwargs)[0]
        hdet_func = lambda kfki: wrap_and_calc_psf(xpts,ypts,z, func, kfki=kfki)
    else:
        hdet_func = lambda kfki: get_hsym_asym(rho3*kfki, z3*kfki,
                zint=kfki*zint, get_hdet=True, **kwargs)[0]
    #####
    # weighted sum of the detection psf over the emission distribution
    inner = [wts[a] * hdet_func(kfkipts[a]) for a in range(nkpts)]
    hdet = np.sum(inner, axis=0)

    # normalize=True: illumination and detection are each normalized to
    # unit sum separately and their product returned as-is;
    # normalize=False: the combined psf is scaled to unit sum instead
    if normalize:
        hilm /= hilm.sum()
        hdet /= hdet.sum()

    # combine: psf[x, y, z] = hdet[x, y, z] * hilm[y, z]
    for a in range(x.size):
        hdet[a] *= hilm

    return hdet if normalize else hdet / hdet.sum()
"resource": ""
} |
def wrap_and_calc_psf(xpts, ypts, zpts, func, **kwargs):
    """
    Evaluate `func` on the positive-x, positive-y octant and mirror it.

    Exploits the x -> -x, y -> -y symmetry of a psf for a factor-of-4
    speedup: `func` is evaluated only at the supplied x, y values and
    the result is reflected onto the negative coordinates. The psf is
    not wrapped in z.

    Parameters
    ----------
    xpts : numpy.ndarray
        1D N-element array of the x-points to evaluate func at.
    ypts : numpy.ndarray
        y-points to evaluate func at.
    zpts : numpy.ndarray
        z-points to evaluate func at.
    func : function
        The function to evaluate and wrap around. Syntax must be
        func(x, y, z, **kwargs)
    **kwargs : Any parameters passed to the function.

    Returns
    -------
    to_return : numpy.ndarray
        The wrapped and calculated psf, of shape
        [2*x.size - x0, 2*y.size - y0, z.size], where x0=1 if x[0]=0,
        etc.

    Notes
    -----
    The coordinates should be something like numpy.arange(start, stop,
    diff), with start near 0. If x[0]==0, all of x is calculated but the
    x=0 plane is only written once (it works whether or not x[0]=0).

    This doesn't work directly for a linescan psf because the
    illumination portion is not like a grid. However, the illumination
    and detection are already combined with wrap_and_calc in
    calculate_linescan_psf etc.
    """
    #1. Checking that everything is hunky-dory:
    for axis_pts in (xpts, ypts, zpts):
        if axis_pts.ndim != 1:
            raise ValueError('xpts,ypts,zpts must be 1D.')
    # when an axis starts at 0, the mirrored half drops one plane so
    # the zero plane is not duplicated
    dx = 1 if xpts[0] == 0 else 0
    dy = 1 if ypts[0] == 0 else 0
    xg, yg, zg = np.meshgrid(xpts, ypts, zpts, indexing='ij')
    xs, ys, zs = xpts.size, ypts.size, zpts.size
    to_return = np.zeros([2*xs - dx, 2*ys - dy, zs])

    #2. Calculate:
    corner = func(xg, yg, zg, **kwargs)  # the x>0, y>0 octant
    # flip-then-slice handles both the x[0]==0 and x[0]!=0 cases:
    # [:xs-dx] keeps the full flipped half when dx==0 and drops the
    # trailing (zero-plane) entry when dx==1; likewise for y
    to_return[xs-dx:, ys-dy:, :] = corner                                 # x>0, y>0
    to_return[:xs-dx, ys-dy:, :] = corner[::-1, :, :][:xs-dx]             # x<0, y>0
    to_return[xs-dx:, :ys-dy, :] = corner[:, ::-1, :][:, :ys-dy]          # x>0, y<0
    to_return[:xs-dx, :ys-dy, :] = corner[::-1, ::-1, :][:xs-dx, :ys-dy]  # x<0, y<0
    return to_return
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.