_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q262700 | listify | validation | def listify(a):
"""
Convert a scalar ``a`` to a list and all iterables to list as well.
Examples
--------
>>> listify(0)
[0]
>>> listify([1,2,3])
[1, 2, 3]
>>> listify('a')
['a']
>>> listify(np.array([1,2,3]))
[1, 2, 3]
>>> listify('string')
| python | {
"resource": ""
} |
q262701 | delistify | validation | def delistify(a, b=None):
"""
If a single element list, extract the element as an object, otherwise
leave as it is.
Examples
--------
>>> delistify('string')
'string'
>>> delistify(['string'])
'string'
>>> delistify(['string', 'other'])
['string', 'other']
>>> delistify(np.array([1.0]))
1.0
>>> delistify([1, 2, 3])
[1, 2, 3]
| python | {
"resource": ""
} |
q262702 | aN | validation | def aN(a, dim=3, dtype='int'):
"""
Convert an integer or iterable list to numpy array of length dim. This func
is used to allow other methods to take both scalars and non-numpy arrays with
flexibility.
Parameters
----------
a : number, iterable, array-like
The object to convert to numpy array
dim : integer
The length of the resulting array
dtype : string or np.dtype
Type which the resulting array should be, e.g. 'float', np.int8
Returns
-------
arr : numpy array
Resulting numpy array of length ``dim`` and type ``dtype``
Examples
| python | {
"resource": ""
} |
q262703 | patch_docs | validation | def patch_docs(subclass, superclass):
"""
Apply the documentation from ``superclass`` to ``subclass`` by filling
in all overridden member function docstrings with those from the
parent class
"""
funcs0 = inspect.getmembers(subclass, predicate=inspect.ismethod)
funcs1 = inspect.getmembers(superclass, predicate=inspect.ismethod)
funcs1 = [f[0] for f in funcs1]
for name, func in funcs0:
| python | {
"resource": ""
} |
q262704 | Tile.slicer | validation | def slicer(self):
"""
Array slicer object for this tile
>>> Tile((2,3)).slicer
(slice(0, 2, None), slice(0, 3, None))
>>> np.arange(10)[Tile((4,)).slicer]
| python | {
"resource": ""
} |
q262705 | Tile.oslicer | validation | def oslicer(self, tile):
""" Opposite slicer, the outer part wrt to a field """
mask = None
vecs = tile.coords(form='meshed')
for v in vecs:
v[self.slicer] = -1
| python | {
"resource": ""
} |
q262706 | Tile.corners | validation | def corners(self):
"""
Iterate the vector of all corners of the hyperrectangles
>>> Tile(3, dim=2).corners
array([[0, 0],
[0, 3],
| python | {
"resource": ""
} |
q262707 | Tile._format_vector | validation | def _format_vector(self, vecs, form='broadcast'):
"""
Format a 3d vector field in certain ways, see `coords` for a description
of each formatting method.
"""
if form == 'meshed':
return np.meshgrid(*vecs, indexing='ij')
elif form == 'vector':
vecs = np.meshgrid(*vecs, indexing='ij')
| python | {
"resource": ""
} |
q262708 | Tile.coords | validation | def coords(self, norm=False, form='broadcast'):
"""
Returns the coordinate vectors associated with the tile.
Parameters
-----------
norm : boolean
can rescale the coordinates for you. False is no rescaling, True is
rescaling so that all coordinates are from 0 -> 1. If a scalar,
the same norm is applied uniformly while if an iterable, each
scale is applied to each dimension.
form : string
In what form to return the vector array. Can be one of:
'broadcast' -- return 1D arrays that are broadcasted to be 3D
'flat' -- return array without broadcasting so each component
is 1D and the appropriate length as the tile
'meshed' -- arrays are explicitly broadcasted and so all have
a 3D shape, each the size of the tile.
'vector' -- array is meshed and combined into one array with
the vector components along last dimension [Nz, Ny, Nx, 3]
Examples
--------
>>> | python | {
"resource": ""
} |
q262709 | Tile.kvectors | validation | def kvectors(self, norm=False, form='broadcast', real=False, shift=False):
"""
Return the kvectors associated with this tile, given the standard form
of -0.5 to 0.5. `norm` and `form` arguments are the same as those passed to
`Tile.coords`.
Parameters
-----------
real : boolean
whether to return kvectors associated with the real fft instead
"""
if norm is False:
norm = 1
if norm is True:
| python | {
"resource": ""
} |
q262710 | Tile.contains | validation | def contains(self, items, pad=0):
"""
Test whether coordinates are contained within this tile.
Parameters
----------
items : ndarray [3] or [N, 3]
N coordinates to check are within the bounds of the tile
pad : integer or ndarray [3]
| python | {
"resource": ""
} |
q262711 | Tile.intersection | validation | def intersection(tiles, *args):
"""
Intersection of tiles, returned as a tile
>>> Tile.intersection(Tile([0, 1], [5, 4]), Tile([1, 0], [4, 5]))
Tile [1, 1] -> [4, 4] ([3, 3])
| python | {
"resource": ""
} |
q262712 | Tile.translate | validation | def translate(self, dr):
"""
Translate a tile by an amount dr
>>> Tile(5).translate(1)
Tile [1, 1, 1] -> [6, 6, 6] ([5, 5, 5])
"""
| python | {
"resource": ""
} |
q262713 | Tile.pad | validation | def pad(self, pad):
"""
Pad this tile by an equal amount on each side as specified by pad
>>> Tile(10).pad(2)
Tile [-2, -2, -2] -> | python | {
"resource": ""
} |
q262714 | Image.filtered_image | validation | def filtered_image(self, im):
"""Returns a filtered image after applying the Fourier-space filters"""
q = np.fft.fftn(im)
| python | {
"resource": ""
} |
q262715 | Image.set_filter | validation | def set_filter(self, slices, values):
"""
Sets Fourier-space filters for the image. The image is filtered by
subtracting values from the image at slices.
Parameters
----------
slices : List of indices or slice objects.
The q-values in Fourier space to filter.
values : np.ndarray
The complete array of Fourier space peaks to subtract off. values
should be the same size as the FFT of the image; only the portions
of values at slices will be removed.
Examples
| python | {
"resource": ""
} |
q262716 | RawImage.load_image | validation | def load_image(self):
""" Read the file and perform any transforms to get a loaded image """
try:
image = initializers.load_tiff(self.filename)
image = initializers.normalize(
image, invert=self.invert, scale=self.exposure,
| python | {
"resource": ""
} |
q262717 | RawImage.get_scale_from_raw | validation | def get_scale_from_raw(raw, scaled):
"""
When given a raw image and the scaled version of the same image, it
extracts the ``exposure`` parameters associated with those images.
This is useful when
Parameters
----------
raw : array_like
The image loaded fresh from a file
scaled : array_like
Image scaled using :func:`peri.initializers.normalize`
Returns
-------
exposure : tuple of numbers
Returns the exposure parameters (emin, emax) which get mapped to
| python | {
"resource": ""
} |
q262718 | ProgressBar._draw | validation | def _draw(self):
""" Interal draw method, simply prints to screen """
if self.display:
| python | {
"resource": ""
} |
q262719 | ProgressBar.update | validation | def update(self, value=0):
"""
Update the value of the progress and update progress bar.
Parameters
-----------
value : integer
The current iteration of the progress
"""
self._deltas.append(time.time())
self.value = value
self._percent = 100.0 * self.value / self.num
| python | {
"resource": ""
} |
q262720 | Model.check_consistency | validation | def check_consistency(self):
"""
Make sure that the required comps are included in the list of
components supplied by the user. Also check that the parameters are
consistent across the many components.
"""
error = False
regex = re.compile('([a-zA-Z_][a-zA-Z0-9_]*)')
# there at least must be the full model, not necessarily partial updates
if 'full' not in self.modelstr:
raise ModelError(
'Model must contain a `full` key describing '
'the entire image formation'
)
# Check that the two model descriptors are consistent
for name, eq in iteritems(self.modelstr):
var = regex.findall(eq)
for v in var:
# remove the derivative signs if there (dP -> P)
v = | python | {
"resource": ""
} |
q262721 | Model.check_inputs | validation | def check_inputs(self, comps):
"""
Check that the list of components `comp` is compatible with both the
varmap and modelstr for this Model
"""
error = False
compcats = [c.category for c in comps]
# Check that the components are all provided, given the categories
for k, v in iteritems(self.varmap):
if k not in self.modelstr['full']:
log.warn('Component (%s : %s) not used in model.' % (k,v))
if | python | {
"resource": ""
} |
q262722 | lbl | validation | def lbl(axis, label, size=22):
""" Put a figure label in an axis """
at = AnchoredText(label, loc=2, prop=dict(size=size), frameon=True)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.0")
#bb = axis.get_yaxis_transform()
#at = AnchoredText(label,
| python | {
"resource": ""
} |
q262723 | examine_unexplained_noise | validation | def examine_unexplained_noise(state, bins=1000, xlim=(-10,10)):
"""
Compares a state's residuals in real and Fourier space with a Gaussian.
Point out that Fourier space should always be Gaussian and white
Parameters
----------
state : `peri.states.State`
The state to examine.
bins : int or sequence of scalars or str, optional
The number of bins in the histogram, as passed to numpy.histogram
Default is 1000
xlim : 2-element tuple, optional
The range, in sigma, of the x-axis on the plot. Default (-10,10).
Returns
-------
list
The axes handles for the real and Fourier space subplots.
"""
r = state.residuals
q = np.fft.fftn(r)
#Get the expected values of `sigma`:
calc_sig = lambda x: np.sqrt(np.dot(x,x) / x.size)
rh, xr = np.histogram(r.ravel() / calc_sig(r.ravel()), bins=bins,
density=True)
bigq = np.append(q.real.ravel(), q.imag.ravel())
| python | {
"resource": ""
} |
q262724 | compare_data_model_residuals | validation | def compare_data_model_residuals(s, tile, data_vmin='calc', data_vmax='calc',
res_vmin=-0.1, res_vmax=0.1, edgepts='calc', do_imshow=True,
data_cmap=plt.cm.bone, res_cmap=plt.cm.RdBu):
"""
Compare the data, model, and residuals of a state.
Makes an image of any 2D slice of a state that compares the data,
model, and residuals. The upper left portion of the image is the raw
data, the central portion the model, and the lower right portion the
residuals. Either plots the image using plt.imshow() or returns a
np.ndarray of the image pixels for later use.
Parameters
----------
st : peri.ImageState object
The state to plot.
tile : peri.util.Tile object
The slice of the image to plot. Can be any xy, xz, or yz
projection, but it must return a valid 2D slice (the slice is
squeezed internally).
data_vmin : {Float, `calc`}, optional
vmin for the imshow for the data and generative model (shared).
Default is 'calc' = 0.5(data.min() + model.min())
data_vmax : {Float, `calc`}, optional
vmax for the imshow for the data and generative model (shared).
Default is 'calc' = 0.5(data.max() + model.max())
res_vmin : Float, optional
vmin for the imshow for the residuals. Default is -0.1
Default is 'calc' = 0.5(data.min() + model.min())
res_vmax : Float, optional
vmax for the imshow for the residuals. Default is +0.1
edgepts : {Nested list-like, Float, 'calc'}, optional.
The vertices of the triangles which determine the splitting of
the image. The vertices are at (image corner, (edge, y), and
(x,edge), where edge is the appropriate edge of the image.
edgepts[0] : (x,y) points for the upper edge
edgepts[1] : (x,y) points for the lower edge
where `x` is the coordinate along the image's 0th axis and `y`
along the images 1st axis. Default is 'calc,' which calculates
edge points by splitting the image into 3 regions of equal
area. If edgepts is a float scalar, calculates the edge points
based on a constant fraction of distance from the edge.
do_imshow : Bool
If True, imshow's and returns the returned handle.
If False, returns the array as a [M,N,4] array.
data_cmap : matplotlib colormap instance
| python | {
"resource": ""
} |
q262725 | trisect_image | validation | def trisect_image(imshape, edgepts='calc'):
"""
Returns 3 masks that trisect an image into 3 triangular portions.
Parameters
----------
imshape : 2-element list-like of ints
The shape of the image. Elements after the first 2 are ignored.
edgepts : Nested list-like, float, or `calc`, optional.
The vertices of the triangles which determine the splitting of
the image. The vertices are at (image corner, (edge, y), and
(x,edge), where edge is the appropriate edge of the image.
edgepts[0] : (x,y) points for the upper edge
edgepts[1] : (x,y) points for the lower edge
where `x` is the coordinate along the image's 0th axis and `y`
along the images 1st axis. Default is 'calc,' which calculates
edge points by splitting the image into 3 regions of equal
area. If edgepts is a float scalar, calculates the edge points
based on a constant fraction of distance from the edge.
Returns
-------
upper_mask : numpy.ndarray
Boolean array; True in the image's upper region.
center_mask : numpy.ndarray
Boolean array; True in the image's center region.
lower_mask : numpy.ndarray
Boolean array; True in the image's lower region.
"""
im_x, im_y = np.meshgrid(np.arange(imshape[0]), | python | {
"resource": ""
} |
q262726 | sim_crb_diff | validation | def sim_crb_diff(std0, std1, N=10000):
""" each element of std0 should correspond with the element of std1 """
| python | {
"resource": ""
} |
q262727 | twoslice | validation | def twoslice(field, center=None, size=6.0, cmap='bone_r', vmin=0, vmax=1,
orientation='vertical', figpad=1.09, off=0.01):
"""
Plot two parts of the ortho view, the two sections given by ``orientation``.
"""
center = center or [i//2 for i in field.shape]
slices = []
for i,c in enumerate(center):
blank = [np.s_[:]]*len(center)
blank[i] = c
slices.append(tuple(blank))
z,y,x = [float(i) for i in field.shape]
w = float(x + z)
h = float(y + z)
def show(field, ax, slicer, transpose=False):
tmp = field[slicer] if not transpose else field[slicer].T
ax.imshow(
tmp, cmap=cmap, interpolation='nearest',
vmin=vmin, vmax=vmax
)
ax.set_xticks([])
ax.set_yticks([])
ax.grid('off')
if orientation.startswith('v'):
# rect = l,b,w,h
log.info('{} {} {} {} {} {}'.format(x, y, z, w, h, x/h))
r = x/h
q = y/h
f = 1 / (1 + 3*off)
| python | {
"resource": ""
} |
q262728 | circles | validation | def circles(st, layer, axis, ax=None, talpha=1.0, cedge='white', cface='white'):
"""
Plots a set of circles corresponding to a slice through the platonic
structure. Copied from twoslice_overlay with comments, standaloneness.
Inputs
------
pos : array of particle positions; [N,3]
rad : array of particle radii; [N]
ax : plt.axis instance
layer : Which layer of the slice to use.
axis : The slice of the image, 0, 1, or 2.
cedge : edge color
cface : face color
talpha : Alpha of the thing
"""
pos = st.obj_get_positions()
rad = st.obj_get_radii()
shape = st.ishape.shape.tolist()
shape.pop(axis) #shape is now the shape of the image
if | python | {
"resource": ""
} |
q262729 | missing_particle | validation | def missing_particle(separation=0.0, radius=RADIUS, SNR=20):
""" create a two particle state and compare it to featuring using a single particle guess """
# create a base image of one particle
s = init.create_two_particle_state(imsize=6*radius+4, axis='x', sigma=1.0/SNR,
| python | {
"resource": ""
} |
q262730 | name_globals | validation | def name_globals(s, remove_params=None):
"""
Returns a list of the global parameter names.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to name the globals of.
remove_params : Set or None
A set of unique additional parameters to remove from the globals
list.
Returns
-------
all_params : list
The list of the global parameter names, with each of
| python | {
"resource": ""
} |
q262731 | get_num_px_jtj | validation | def get_num_px_jtj(s, nparams, decimate=1, max_mem=1e9, min_redundant=20):
"""
Calculates the number of pixels to use for J at a given memory usage.
Tries to pick a number of pixels as (size of image / `decimate`).
However, clips this to a maximum size and minimum size to ensure that
(1) too much memory isn't used and (2) J has enough elements so that
the inverse of JTJ will be well-conditioned.
Parameters
----------
s : :class:`peri.states.State`
The state on which to calculate J.
nparams : Int
The number of parameters that will be included in J.
decimate : Int, optional
The amount to decimate the number of pixels in the image by,
i.e. tries to pick num_px = size of image / decimate.
Default is 1
max_mem : Numeric, optional
The maximum allowed memory, in bytes, for J to occupy at
double-precision. Default is 1e9.
min_redundant : Int, optional
The number of pixels must be at least `min_redundant` *
| python | {
"resource": ""
} |
q262732 | vectorize_damping | validation | def vectorize_damping(params, damping=1.0, increase_list=[['psf-', 1e4]]):
"""
Returns a non-constant damping vector, allowing certain parameters to be
more strongly damped than others.
Parameters
----------
params : List
The list of parameter names, in order.
damping : Float
The default value of the damping.
increase_list: List
A nested 2-element list of the params to increase and their
scale factors. All parameters containing the string
increase_list[i][0] are increased by a factor increase_list[i][1].
| python | {
"resource": ""
} |
q262733 | find_particles_in_tile | validation | def find_particles_in_tile(positions, tile):
"""
Finds the particles in a tile, as numpy.ndarray of ints.
Parameters
----------
positions : `numpy.ndarray`
[N,3] array of the particle positions to check in the tile
tile : :class:`peri.util.Tile` instance
Tile of the region inside which to | python | {
"resource": ""
} |
q262734 | separate_particles_into_groups | validation | def separate_particles_into_groups(s, region_size=40, bounds=None,
doshift=False):
"""
Separates particles into convenient groups for optimization.
Given a state, returns a list of groups of particles. Each group of
particles are located near each other in the image. Every particle
located in the desired region is contained in exactly 1 group.
Parameters
----------
s : :class:`peri.states.ImageState`
The peri state to find particles in.
region_size : Int or 3-element list-like of ints, optional
The size of the box. Groups particles into boxes of shape
(region_size[0], region_size[1], region_size[2]). If region_size
is a scalar, the box is a cube of length region_size.
Default is 40.
bounds : 2-element list-like of 3-element lists, optional
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
doshift : {True, False, `'rand'`}, optional
Whether or not to shift the tile boxes by half a region size, to
prevent the same particles to be chosen every time. If `'rand'`,
randomly chooses either True or False. Default is False
Returns
-------
particle_groups : List
Each element of particle_groups is an int numpy.ndarray of the
group of nearby particles. Only contains groups with a nonzero
number of particles, so the elements don't necessarily correspond
to a given image region.
"""
imtile = s.oshape.translate(-s.pad)
| python | {
"resource": ""
} |
q262735 | _check_groups | validation | def _check_groups(s, groups):
"""Ensures that all particles are included in exactly 1 group"""
ans = []
for g in groups:
ans.extend(g)
if np.unique(ans).size != np.size(ans):
return False | python | {
"resource": ""
} |
q262736 | calc_particle_group_region_size | validation | def calc_particle_group_region_size(s, region_size=40, max_mem=1e9, **kwargs):
"""
Finds the biggest region size for LM particle optimization with a
given memory constraint.
Input Parameters
----------------
s : :class:`peri.states.ImageState`
The state with the particles
region_size : Int or 3-element list-like of ints, optional.
The initial guess for the region size. Default is 40
max_mem : Numeric, optional
The maximum memory for the optimizer to take. Default is 1e9
Other Parameters
----------------
bounds: 2-element list-like of 3-element lists.
The sub-region of the image over which to look for particles.
bounds[0]: The lower-left corner of the image region.
bounds[1]: The upper-right corner of the image region.
Default (None -> ([0,0,0], s.oshape.shape)) is a box of the entire
image size, i.e. the default places every particle in the image
somewhere in the groups.
Returns
-------
region_size : numpy.ndarray of ints of the region size.
"""
region_size = np.array(region_size).astype('int')
def calc_mem_usage(region_size):
rs = np.array(region_size)
particle_groups = separate_particles_into_groups(s, region_size=
rs.tolist(), **kwargs)
# The actual mem usage is the max of the memory usage of all the
# particle groups. However this is too slow. So instead we use | python | {
"resource": ""
} |
q262737 | get_residuals_update_tile | validation | def get_residuals_update_tile(st, padded_tile):
"""
Translates a tile in the padded image to the unpadded image.
Given a state and a tile that corresponds to the padded image, returns
a tile that corresponds to the the corresponding pixels of the difference
image
Parameters
----------
st : :class:`peri.states.State`
The state
padded_tile : :class:`peri.util.Tile`
The tile in the padded image.
Returns
| python | {
"resource": ""
} |
q262738 | find_best_step | validation | def find_best_step(err_vals):
"""
Returns the index of the lowest of the passed values. Catches nans etc.
"""
if np.all(np.isnan(err_vals)):
| python | {
"resource": ""
} |
q262739 | do_levmarq | validation | def do_levmarq(s, param_names, damping=0.1, decrease_damp_factor=10.,
run_length=6, eig_update=True, collect_stats=False, rz_order=0,
run_type=2, **kwargs):
"""
Runs Levenberg-Marquardt optimization on a state.
Convenience wrapper for LMGlobals. Same keyword args, but the defaults
have been set to useful values for optimizing globals.
See LMGlobals and LMEngine for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
LMGlobals : Optimizer object; the workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
"""
if rz_order > 0:
| python | {
"resource": ""
} |
q262740 | do_levmarq_particles | validation | def do_levmarq_particles(s, particles, damping=1.0, decrease_damp_factor=10.,
run_length=4, collect_stats=False, max_iter=2, **kwargs):
"""
Levenberg-Marquardt optimization on a set of particles.
Convenience wrapper for LMParticles. Same keyword args, but the
defaults have been set to useful values for optimizing particles.
See LMParticles and LMEngine for documentation.
See Also
--------
do_levmarq_all_particle_groups : Levenberg-Marquardt optimization
of all the particles in the state.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticles : | python | {
"resource": ""
} |
q262741 | do_levmarq_all_particle_groups | validation | def do_levmarq_all_particle_groups(s, region_size=40, max_iter=2, damping=1.0,
decrease_damp_factor=10., run_length=4, collect_stats=False, **kwargs):
"""
Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for | python | {
"resource": ""
} |
q262742 | do_levmarq_n_directions | validation | def do_levmarq_n_directions(s, directions, max_iter=2, run_length=2,
damping=1e-3, collect_stats=False, marquardt_damping=True, **kwargs):
"""
Optimization of a state along a specific set of directions in parameter
space.
Parameters
----------
s : :class:`peri.states.State`
The state to optimize
directions : np.ndarray
[n,d] element numpy.ndarray of the n directions in the d-
dimensional space to optimize along. `directions` is trans-
formed to a unit vector internally
Other Parameters
----------------
Any parameters passed to LMEngine.
"""
# normal = direction / np.sqrt(np.dot(direction, direction))
normals = np.array([d/np.sqrt(np.dot(d,d)) for | python | {
"resource": ""
} |
q262743 | finish | validation | def finish(s, desc='finish', n_loop=4, max_mem=1e9, separate_psf=True,
fractol=1e-7, errtol=1e-3, dowarn=True):
"""
Crawls slowly to the minimum-cost state.
Blocks the global parameters into small enough sections such that each
can be optimized separately while including all the pixels (i.e. no
decimation). Optimizes the globals, then the psf separately if desired,
then particles, then a line minimization along the step direction to
speed up convergence.
Parameters
----------
s : :class:`peri.states.ImageState`
The state to optimize
desc : string, optional
Description to append to the states.save() call every loop.
Set to `None` to avoid saving. Default is `'finish'`.
n_loop : Int, optional
The number of times to loop over in the optimizer. Default is 4.
max_mem : Numeric, optional
The maximum amount of memory allowed for the optimizers' J's,
for both particles & globals. Default is 1e9.
separate_psf : Bool, optional
If True, does the psf optimization separately from the rest of
the globals, since the psf has a more tortuous fit landscape.
Default is True.
fractol : Float, optional
Fractional change in error at which to terminate. Default 1e-7
errtol : Float, optional
Absolute change in error at which to terminate. Default 1e-3
dowarn : Bool, optional
Whether to log a warning if termination results from finishing
loops rather than from convergence. Default is True.
Returns
-------
dictionary
Information about the optimization. Has two keys: ``'converged'``,
a Bool which of whether optimization stopped due to convergence
(True) or due to max number of iterations (False), and
``'loop_values'``, a [n_loop+1, N] ``numpy.ndarray`` of the
state's values, at the start of optimization and at the end of
each loop, before the line minimization.
"""
values = [np.copy(s.state[s.params])]
remove_params = s.get('psf').params if separate_psf else None
# FIXME explicit params
global_params = name_globals(s, remove_params=remove_params)
#FIXME this could be done much better, since much of the globals such
#as the ilm are local. Could be done with sparse matrices and/or taking
#nearby globals in a group and using the update tile only as the slicer,
#rather than the full residuals.
gs = np.floor(max_mem / s.residuals.nbytes).astype('int')
groups = [global_params[a:a+gs] for a in range(0, len(global_params), gs)]
CLOG.info('Start ``finish``:\t{}'.format(s.error))
for a in range(n_loop):
start_err = s.error
#1. Min globals:
| python | {
"resource": ""
} |
q262744 | fit_comp | validation | def fit_comp(new_comp, old_comp, **kwargs):
"""
Fits a new component to an old component
Calls do_levmarq to match the .get() fields of the two objects. The
parameters of new_comp are modified in place.
Parameters
----------
new_comp : :class:`peri.comps.comp`
The new object, whose parameters to update to fit the field of
`old_comp`. Must have a .get() attribute which returns an ndarray
old_comp : peri.comp
The old ilm to match to.
Other Parameters
----------------
Any keyword arguments to be passed to the optimizer LMGlobals
through do_levmarq.
See Also
-------- | python | {
"resource": ""
} |
q262745 | LMEngine.reset | validation | def reset(self, new_damping=None):
"""
Keeps all user supplied options the same, but resets counters etc.
"""
self._num_iter = 0
self._inner_run_counter = 0 | python | {
"resource": ""
} |
q262746 | LMEngine.do_run_1 | validation | def do_run_1(self):
"""
LM run, evaluating 1 step at a time.
Broyden or eigendirection updates replace full-J updates until
a full-J update occurs. Does not run with the calculated J (no
internal run).
"""
| python | {
"resource": ""
} |
q262747 | LMEngine._run1 | validation | def _run1(self):
"""workhorse for do_run_1"""
if self.check_update_J():
self.update_J()
else:
if self.check_Broyden_J():
self.update_Broyden_J()
if self.check_update_eig_J():
self.update_eig_J()
#1. Assuming that J starts updated:
delta_vals = self.find_LM_updates(self.calc_grad())
#2. Increase damping until we get a good step:
er1 = self.update_function(self.param_vals + delta_vals)
good_step = (find_best_step([self.error, er1]) == 1)
if not good_step:
er0 = self.update_function(self.param_vals)
if np.abs(er0 -self.error)/er0 > 1e-7:
raise RuntimeError('Function updates are not exact.')
CLOG.debug('Bad step, increasing damping')
CLOG.debug('\t\t%f\t%f' % (self.error, er1))
grad = self.calc_grad()
for _try in range(self._max_inner_loop):
self.increase_damping()
delta_vals = self.find_LM_updates(grad)
er1 = self.update_function(self.param_vals + delta_vals)
good_step = (find_best_step([self.error, er1]) == 1) | python | {
"resource": ""
} |
q262748 | LMEngine._run2 | validation | def _run2(self):
"""Workhorse for do_run_2"""
if self.check_update_J():
self.update_J()
else:
if self.check_Broyden_J():
self.update_Broyden_J()
if self.check_update_eig_J():
self.update_eig_J()
#0. Find _last_residuals, _last_error, etc:
_last_residuals = self.calc_residuals().copy()
_last_error = 1*self.error
_last_vals = self.param_vals.copy()
#1. Calculate 2 possible steps
delta_params_1 = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False)
self.decrease_damping()
delta_params_2 = self.find_LM_updates(self.calc_grad(),
do_correct_damping=False)
self.decrease_damping(undo_decrease=True)
#2. Check which step is best:
er1 = self.update_function(self.param_vals + delta_params_1)
er2 = self.update_function(self.param_vals + delta_params_2)
triplet = (self.error, er1, er2)
best_step = find_best_step(triplet)
if best_step == 0:
#Both bad steps, put back & increase damping:
_ = self.update_function(self.param_vals.copy())
grad = self.calc_grad()
CLOG.debug('Bad step, increasing damping')
CLOG.debug('%f\t%f\t%f' % triplet)
for _try in range(self._max_inner_loop):
self.increase_damping()
delta_vals = self.find_LM_updates(grad)
er_new = self.update_function(self.param_vals + delta_vals)
good_step = er_new < self.error
if good_step:
#Update params, error, break:
self.update_param_vals(delta_vals, incremental=True)
self.error = er_new
CLOG.debug('Sufficiently increased damping')
CLOG.debug('%f\t%f' % (triplet[0], self.error))
break
else: #for-break-else
#Throw a warning, put back the parameters
| python | {
"resource": ""
} |
q262749 | LMEngine.do_internal_run | validation | def do_internal_run(self, initial_count=0, subblock=None, update_derr=True):
"""
Takes more steps without calculating J again.
Given a fixed damping, J, JTJ, iterates calculating steps, with
optional Broyden or eigendirection updates. Iterates either until
a bad step is taken or for self.run_length times.
Called internally by do_run_2() but is also useful on its own.
Parameters
----------
initial_count : Int, optional
The initial count of the run. Default is 0. Increasing from
0 effectively temporarily decreases run_length.
subblock : None or np.ndarray of bools, optional
If not None, a boolean mask which determines which sub-
block of parameters to run over. Default is None, i.e.
all the parameters.
update_derr : Bool, optional
Set to False to not update the variable that determines
delta_err, preventing premature termination through errtol.
Notes
-----
It might be good to do something similar to update_derr with the
parameter values, but this is trickier | python | {
"resource": ""
} |
q262750 | LMEngine.find_LM_updates | validation | def find_LM_updates(self, grad, do_correct_damping=True, subblock=None):
"""
Calculates LM updates, with or without the acceleration correction.
Parameters
----------
grad : numpy.ndarray
The gradient of the model cost.
do_correct_damping : Bool, optional
If `self.use_accel`, then set to True to correct damping
if the acceleration correction is too big. Default is True
Does nothing is `self.use_accel` is False
subblock : slice, numpy.ndarray, or None, optional
Set to a slice or a valid numpy.ndarray to use only a
certain subset of the parameters. Default is None, i.e.
use all the parameters.
Returns
-------
delta : numpy.ndarray
The Levenberg-Marquadt step, relative to the old
parameters. Size is always self.param_vals.size.
"""
if subblock is not None:
if (subblock.sum() == 0) or (subblock.size == 0):
CLOG.fatal('Empty subblock in find_LM_updates')
raise ValueError('Empty sub-block')
j = self.J[subblock]
JTJ = np.dot(j, j.T)
damped_JTJ = self._calc_damped_jtj(JTJ, subblock=subblock)
grad = grad[subblock] #select the subblock of | python | {
"resource": ""
} |
q262751 | LMEngine.update_param_vals | validation | def update_param_vals(self, new_vals, incremental=False):
"""
Updates the current set of parameter values and previous values,
sets a flag to re-calculate J.
Parameters
----------
new_vals : numpy.ndarray
The new values to update to
incremental : Bool, optional
Set to True to make it an incremental update relative
to the old parameters. Default is False
| python | {
"resource": ""
} |
q262752 | LMEngine.get_termination_stats | validation | def get_termination_stats(self, get_cos=True):
"""
Returns a dict of termination statistics
Parameters
----------
get_cos : Bool, optional
Whether or not to calcualte the cosine of the residuals
with the tangent plane of the model using the current J.
The calculation may take some time. Default is True
Returns
-------
dict
Has keys
delta_vals : The last change in parameter values.
delta_err : The last change in the error.
exp_err : The expected (last) change in the error.
frac_err : The fractional change in the error.
num_iter : The number of iterations completed.
error : The current error.
| python | {
"resource": ""
} |
q262753 | LMEngine.check_completion | validation | def check_completion(self):
"""
Returns a Bool of whether the algorithm has found a satisfactory minimum
"""
terminate = False
term_dict = self.get_termination_stats(get_cos=self.costol is not None)
terminate |= np.all(np.abs(term_dict['delta_vals']) < self.paramtol)
terminate |= (term_dict['delta_err'] < self.errtol) | python | {
"resource": ""
} |
q262754 | LMEngine.check_terminate | validation | def check_terminate(self):
"""
Returns a Bool of whether to terminate.
Checks whether a satisfactory minimum has been found or whether
too many iterations have occurred.
""" | python | {
"resource": ""
} |
q262755 | LMEngine.check_update_J | validation | def check_update_J(self):
"""
Checks if the full J should be updated.
Right now, just updates after update_J_frequency loops | python | {
"resource": ""
} |
q262756 | LMEngine.update_J | validation | def update_J(self):
"""Updates J, JTJ, and internal counters."""
self.calc_J()
# np.dot(j, j.T) is slightly faster but 2x as much mem
step = np.ceil(1e-2 * self.J.shape[1]).astype('int') # 1% more mem...
self.JTJ = low_mem_sq(self.J, step=step)
| python | {
"resource": ""
} |
q262757 | LMEngine.update_Broyden_J | validation | def update_Broyden_J(self):
"""Execute a Broyden update of J"""
CLOG.debug('Broyden update.')
delta_vals = self.param_vals - self._last_vals
delta_residuals = self.calc_residuals() - self._last_residuals
nrm = np.sqrt(np.dot(delta_vals, delta_vals))
| python | {
"resource": ""
} |
q262758 | LMEngine.update_eig_J | validation | def update_eig_J(self):
"""Execute an eigen update of J"""
CLOG.debug('Eigen update.')
vls, vcs = np.linalg.eigh(self.JTJ)
res0 = self.calc_residuals()
for a in range(min([self.num_eig_dirs, vls.size])):
#1. Finding stiff directions
stif_dir = vcs[-(a+1)] #already normalized
#2. Evaluating derivative along that direction, we'll use dl=5e-4:
dl = self.eig_dl #1e-5
_ = self.update_function(self.param_vals + dl*stif_dir)
res1 = self.calc_residuals()
| python | {
"resource": ""
} |
q262759 | LMEngine.calc_accel_correction | validation | def calc_accel_correction(self, damped_JTJ, delta0):
"""
Geodesic acceleration correction to the LM step.
Parameters
----------
damped_JTJ : numpy.ndarray
The damped JTJ used to calculate the initial step.
delta0 : numpy.ndarray
The initial LM step.
Returns
-------
corr : numpy.ndarray
The correction to the original LM step.
"""
#Get the derivative:
_ = self.update_function(self.param_vals)
rm0 = self.calc_residuals().copy()
| python | {
"resource": ""
} |
q262760 | LMFunction.calc_J | validation | def calc_J(self):
"""Updates self.J, returns nothing"""
del self.J
self.J = np.zeros([self.param_vals.size, self.data.size])
dp = np.zeros_like(self.param_vals)
f0 = self.model.copy()
for a in range(self.param_vals.size):
dp *= 0
| python | {
"resource": ""
} |
q262761 | LMFunction.update_function | validation | def update_function(self, param_vals):
"""Takes an array param_vals, updates function, returns the new error"""
| python | {
"resource": ""
} |
q262762 | LMOptObj.update_function | validation | def update_function(self, param_vals):
"""Updates the opt_obj, returns new error."""
| python | {
"resource": ""
} |
q262763 | OptState.calc_J | validation | def calc_J(self):
"""Calculates J along the direction."""
r0 = self.state.residuals.copy().ravel()
dl = np.zeros(self.param_vals.size)
p0 = self.param_vals.copy()
J | python | {
"resource": ""
} |
q262764 | LMParticleGroupCollection.reset | validation | def reset(self, new_region_size=None, do_calc_size=True, new_damping=None,
new_max_mem=None):
"""
Resets the particle groups and optionally the region size and damping.
Parameters
----------
new_region_size : : Int or 3-element list-like of ints, optional
The region size for sub-blocking particles. Default is 40
do_calc_size : Bool, optional
If True, calculates the region size internally based on
the maximum allowed memory. Default is True
new_damping : Float or None, optional
The new damping of the optimizer. Set to None to leave
as the default for LMParticles. Default is None.
new_max_mem : Numeric, optional
The maximum allowed memory for J to occupy. Default is 1e9
"""
if new_region_size is not None:
self.region_size = new_region_size
if new_max_mem != None:
self.max_mem = new_max_mem
if do_calc_size:
self.region_size = calc_particle_group_region_size(self.state,
region_size=self.region_size, max_mem=self.max_mem)
self.stats = []
| python | {
"resource": ""
} |
q262765 | LMParticleGroupCollection._do_run | validation | def _do_run(self, mode='1'):
"""workhorse for the self.do_run_xx methods."""
for a in range(len(self.particle_groups)):
group = self.particle_groups[a]
lp = LMParticles(self.state, group, **self._kwargs)
if mode == 'internal':
lp.J, lp.JTJ, lp._dif_tile = self._load_j_diftile(a)
if mode == '1':
lp.do_run_1()
if mode == '2':
lp.do_run_2()
| python | {
"resource": ""
} |
q262766 | LMParticleGroupCollection.do_internal_run | validation | def do_internal_run(self):
"""Calls LMParticles.do_internal_run for each group of particles."""
if not self.save_J:
raise RuntimeError('self.save_J=True required for do_internal_run()')
if not np.all(self._has_saved_J):
| python | {
"resource": ""
} |
q262767 | AugmentedState.reset | validation | def reset(self):
"""
Resets the initial radii used for updating the particles. Call
if any of the particle radii or positions have been changed
external to the augmented state.
"""
inds = list(range(self.state.obj_get_positions().shape[0]))
self._rad_nms = self.state.param_particle_rad(inds)
| python | {
"resource": ""
} |
q262768 | LMAugmentedState.reset | validation | def reset(self, **kwargs):
"""Resets the aug_state and the LMEngine""" | python | {
"resource": ""
} |
q262769 | Link.get_shares | validation | def get_shares(self):
'''
Returns an object with a the numbers of shares a link has had using
Buffer.
www will be stripped, but other subdomains will not.
| python | {
"resource": ""
} |
q262770 | sample | validation | def sample(field, inds=None, slicer=None, flat=True):
"""
Take a sample from a field given flat indices or a shaped slice
Parameters
-----------
inds : list of indices
One dimensional (raveled) indices to return from the field
slicer : slice object
A shaped (3D) slicer that returns a section of image
flat : boolean
| python | {
"resource": ""
} |
q262771 | State.update | validation | def update(self, params, values):
"""
Update a single parameter or group of parameters ``params``
with ``values``.
Parameters
----------
params : string or list of strings
Parameter names which to update
| python | {
"resource": ""
} |
q262772 | State.build_funcs | validation | def build_funcs(self):
"""
Here, we build gradient and hessian functions based on the properties
of a state that are generally wanted. For each one, we fill in _grad or
_hess with a function that takes care of various options such as
slicing and flattening. For example, `m` below takes the model, selects
different indices from it, maybe flattens it and copies it. This is
then used in the fisherinformation, gradmodel, and hessmodel functions.
"""
# create essentially lambda functions, but with a nice signature
def m(inds=None, slicer=None, flat=True):
return sample(self.model, inds=inds, slicer=slicer, flat=flat).copy()
def r(inds=None, slicer=None, flat=True):
return sample(self.residuals, inds=inds, slicer=slicer, flat=flat).copy()
def l():
return self.loglikelihood
def r_e(**kwargs):
"""sliced etc residuals, with state.error appended on"""
return r(**kwargs), np.copy(self.error)
def m_e(**kwargs):
"""sliced etc residuals, with state.error appended on"""
return m(**kwargs), np.copy(self.error)
# set the member functions using partial
self.fisherinformation = partial(self._jtj, funct=m)
self.gradloglikelihood = partial(self._grad, funct=l)
self.hessloglikelihood = partial(self._hess, funct=l)
self.gradmodel = partial(self._grad, funct=m)
self.hessmodel = partial(self._hess, funct=m)
self.JTJ = partial(self._jtj, funct=r)
self.J = partial(self._grad, funct=r)
self.J_e = partial(self._grad, funct=r_e, nout=2)
self.gradmodel_e = partial(self._grad, funct=m_e, nout=2)
# add the appropriate documentation to the following functions
| python | {
"resource": ""
} |
q262773 | ImageState.set_model | validation | def set_model(self, mdl):
"""
Setup the image model formation equation and corresponding objects into
their various objects. `mdl` | python | {
"resource": ""
} |
q262774 | ImageState.model_to_data | validation | def model_to_data(self, sigma=0.0):
""" Switch out the data for the model's recreation of the data. """
im = self.model.copy()
| python | {
"resource": ""
} |
q262775 | ImageState.get_update_io_tiles | validation | def get_update_io_tiles(self, params, values):
"""
Get the tiles corresponding to a particular section of image needed to
be updated. Inputs are the parameters and values. Returned is the
padded tile, inner tile, and slicer to go between, but accounting for
wrap with the edge of the image as necessary.
"""
# get the affected area of the model image
otile = self.get_update_tile(params, values)
if otile is None:
return [None]*3
ptile = self.get_padding_size(otile) or util.Tile(0, dim=otile.dim)
otile = util.Tile.intersection(otile, self.oshape)
if (otile.shape <= 0).any():
raise UpdateError("update triggered invalid tile size")
if (ptile.shape < 0).any() or (ptile.shape > self.oshape.shape).any():
| python | {
"resource": ""
} |
q262776 | ImageState.get | validation | def get(self, name):
""" Return component by category name """
for c in self.comps:
| python | {
"resource": ""
} |
q262777 | ImageState._calc_loglikelihood | validation | def _calc_loglikelihood(self, model=None, tile=None):
"""Allows for fast local updates of log-likelihood"""
if model is None:
res = self.residuals
else:
res = model - self._data[tile.slicer]
sig, | python | {
"resource": ""
} |
q262778 | ImageState.update_from_model_change | validation | def update_from_model_change(self, oldmodel, newmodel, tile):
"""
Update various internal variables from a model update from oldmodel to
newmodel for the tile | python | {
"resource": ""
} |
q262779 | ImageState.set_mem_level | validation | def set_mem_level(self, mem_level='hi'):
"""
Sets the memory usage level of the state.
Parameters
----------
mem_level : string
Can be set to one of:
* hi : all mem's are np.float64
* med-hi : image, platonic are float32, rest are float64
* med : all mem's are float32
* med-lo : image, platonic are float16, rest float32
* lo : all are float16, which is bad for accuracy.
Notes
-----
Right now the PSF is not affected by the mem-level changes, which is
OK for mem but it means that self._model, self._residuals are always
float64, which can be a chunk of mem.
"""
#A little thing to parse strings for convenience:
key = ''.join([c if c in 'mlh' else '' for c in mem_level])
| python | {
"resource": ""
} |
q262780 | scramble_positions | validation | def scramble_positions(p, delete_frac=0.1):
"""randomly deletes particles and adds 1-px noise for a realistic
initial featuring | python | {
"resource": ""
} |
q262781 | create_img | validation | def create_img():
"""Creates an image, as a `peri.util.Image`, which is similar
to the image in the tutorial"""
# 1. particles + coverslip
rad = 0.5 * np.random.randn(POS.shape[0]) + 4.5 # 4.5 +- 0.5 px particles
part = objs.PlatonicSpheresCollection(POS, rad, zscale=0.89)
slab = objs.Slab(zpos=4.92, angles=(-4.7e-3, -7.3e-4))
objects = comp.ComponentCollection([part, slab], category='obj')
# 2. psf, ilm
p = exactpsf.FixedSSChebLinePSF(kfki=1.07, zslab=-29.3, alpha=1.17,
n2n1=0.98, sigkf=-0.33, zscale=0.89, laser_wavelength=0.45)
i = ilms.BarnesStreakLegPoly2P1D(npts=(16,10,8,4), zorder=8)
| python | {
"resource": ""
} |
q262782 | ParameterGroup.get_values | validation | def get_values(self, params):
"""
Get the value of a list or single parameter.
Parameters
----------
params : string, list of string
name of parameters which to retrieve
""" | python | {
"resource": ""
} |
q262783 | Component.set_shape | validation | def set_shape(self, shape, inner):
"""
Set the overall shape of the calculation area. The total shape of that
the calculation can possibly occupy, in pixels. The second, inner, is
the region of interest within the image.
"""
| python | {
"resource": ""
} |
q262784 | Component.trigger_update | validation | def trigger_update(self, params, values):
""" Notify parent of a parameter change """
if self._parent:
| python | {
"resource": ""
} |
q262785 | ComponentCollection.get | validation | def get(self):
""" Combine the fields from all components """ | python | {
"resource": ""
} |
q262786 | ComponentCollection.set_shape | validation | def set_shape(self, shape, inner):
""" Set the shape for | python | {
"resource": ""
} |
q262787 | ComponentCollection.sync_params | validation | def sync_params(self):
""" Ensure that shared parameters are the same value everywhere """
def _normalize(comps, param):
vals = [c.get_values(param) for c in comps]
diff = any([vals[i] != vals[i+1] | python | {
"resource": ""
} |
q262788 | ComponentCollection.setup_passthroughs | validation | def setup_passthroughs(self):
"""
Inherit some functions from the components that we own. In particular,
let's grab all functions that begin with `param_` so the super class
knows how to get parameter groups. Also, take anything that is listed
under Component.exports and rename with the category type, i.e.,
SphereCollection.add_particle -> Component.obj_add_particle
"""
self._nopickle = []
for c in self.comps:
# take all member functions that start with 'param_'
funcs = inspect.getmembers(c, predicate=inspect.ismethod)
| python | {
"resource": ""
} |
q262789 | read_environment | validation | def read_environment():
""" Read all environment variables to see if they contain PERI """
out = {}
for k,v in iteritems(os.environ):
| python | {
"resource": ""
} |
q262790 | get_group_name | validation | def get_group_name(id_group):
"""Used for breadcrumb dynamic_list_constructor."""
| python | {
"resource": ""
} |
q262791 | index | validation | def index():
"""List all user memberships."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
q = request.args.get('q', '')
groups = Group.query_by_user(current_user, eager=True)
if q:
groups = Group.search(groups, q)
groups = groups.paginate(page, per_page=per_page)
requests = Membership.query_requests(current_user).count()
| python | {
"resource": ""
} |
q262792 | requests | validation | def requests():
"""List all pending memberships, listed only for group admins."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
memberships = Membership.query_requests(current_user, eager=True).all()
return render_template(
| python | {
"resource": ""
} |
q262793 | invitations | validation | def invitations():
"""List all user pending memberships."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
memberships = Membership.query_invitations(current_user, eager=True).all()
return render_template(
| python | {
"resource": ""
} |
q262794 | new | validation | def new():
"""Create new group."""
form = GroupForm(request.form)
if form.validate_on_submit():
try:
group = Group.create(admins=[current_user], **form.data)
| python | {
"resource": ""
} |
q262795 | manage | validation | def manage(group_id):
"""Manage your group."""
group = Group.query.get_or_404(group_id)
form = GroupForm(request.form, obj=group)
if form.validate_on_submit():
if group.can_edit(current_user):
try:
group.update(**form.data)
flash(_('Group "%(name)s" was updated', name=group.name),
'success')
except Exception as e:
flash(str(e), 'error')
return render_template(
"invenio_groups/new.html",
form=form,
group=group,
)
else:
| python | {
"resource": ""
} |
q262796 | delete | validation | def delete(group_id):
"""Delete group."""
group = Group.query.get_or_404(group_id)
if group.can_edit(current_user):
try:
group.delete()
except Exception as e:
flash(str(e), "error")
return redirect(url_for(".index"))
flash(_('Successfully removed group "%(group_name)s"',
group_name=group.name), 'success')
| python | {
"resource": ""
} |
q262797 | members | validation | def members(group_id):
"""List user group members."""
page = request.args.get('page', 1, type=int)
per_page = request.args.get('per_page', 5, type=int)
q = request.args.get('q', '')
s = request.args.get('s', '')
group = Group.query.get_or_404(group_id)
if group.can_see_members(current_user):
members = Membership.query_by_group(group_id, with_invitations=True)
if q:
members = Membership.search(members, q)
if s:
members = Membership.order(members, Membership.state, s)
members = members.paginate(page, per_page=per_page)
return render_template(
| python | {
"resource": ""
} |
q262798 | leave | validation | def leave(group_id):
"""Leave group."""
group = Group.query.get_or_404(group_id)
if group.can_leave(current_user):
try:
group.remove_member(current_user)
except Exception as e:
flash(str(e), "error")
return redirect(url_for('.index'))
flash(
_(
'You have successfully left %(group_name)s group.',
group_name=group.name
| python | {
"resource": ""
} |
q262799 | approve | validation | def approve(group_id, user_id):
"""Approve a user."""
membership = Membership.query.get_or_404((user_id, group_id))
group = membership.group
if group.can_edit(current_user):
try:
membership.accept()
except Exception as e:
flash(str(e), 'error')
return redirect(url_for('.requests', group_id=membership.group.id))
flash(_('%(user)s accepted to %(name)s group.',
user=membership.user.email,
| python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.