index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
59,634 | zhuokaizhao/artifice | refs/heads/master | /artifice/cutils/__init__.py | from .cutils import detect_peaks
__all__ = [detect_peaks]
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,635 | zhuokaizhao/artifice | refs/heads/master | /artifice/utils.py | """Generic utils."""
import os
import json
import shutil
import numpy as np
from artifice.log import logger
def divup(a, b):
    """Ceiling of a / b using integer arithmetic (for positive b)."""
    numerator = a + b - 1
    return numerator // b
def listwrap(val):
    """Wrap `val` as a list.

    :param val: iterable or constant
    :returns: `val` itself if it is a list, `list(val)` if it is a tuple,
      else `[val]`
    """
    if isinstance(val, list):
        return val
    return list(val) if isinstance(val, tuple) else [val]
def listify(val, length):
    """Ensure `val` is a list of size `length`.

    :param val: iterable or constant (strings count as constants)
    :param length: integer length
    :returns: listified `val`; a singleton iterable or a constant is
      repeated `length` times
    :raises: RuntimeError if 1 < len(val) != length
    """
    is_iterable = hasattr(val, '__iter__') and not isinstance(val, str)
    if not is_iterable:
        return [val] * length
    val = list(val)
    if len(val) == 1:
        return val * length
    if len(val) != length:
        raise RuntimeError("mismatched length")
    return val
def jsonable(hist):
    """Make a history dictionary json-serializable.

    :param hist: dictionary of lists of float-like numbers.
    :returns: new dict with the same keys and values coerced to lists of float
    """
    return {key: [float(x) for x in values] for key, values in hist.items()}
def json_save(fname, obj):
    """Saves obj to fname as JSON."""
    with open(fname, 'w') as f:
        json.dump(obj, f)
def json_load(fname):
    """Load and return the JSON object stored in file `fname`."""
    with open(fname, 'r') as f:
        return json.load(f)
def atleast_4d(image):
    """Expand a numpy array (typically an image) to 4d.

    Inserts a leading batch dim, then a trailing channel dim as needed.

    :param image: numpy array with ndim >= 1
    :returns: `image` unchanged if already >= 4d, else a 4d view
    :raises ValueError: if `image.ndim` is 0
    """
    ndim = image.ndim
    if ndim >= 4:
        return image
    if ndim == 3:
        return image[np.newaxis]
    if ndim == 2:
        return image[np.newaxis, ..., np.newaxis]
    if ndim == 1:
        return image[np.newaxis, :, np.newaxis, np.newaxis]
    raise ValueError(f"incompatible image dimension: {ndim}")
def rm(path):
    """Delete `path`, whether a file or a directory tree.

    No-op when `path` does not exist; raises RuntimeError for paths that are
    neither regular files nor directories (e.g. broken special files).
    """
    if not os.path.exists(path):
        return
    if os.path.isdir(path):
        shutil.rmtree(path)
    elif os.path.isfile(path):
        os.remove(path)
    else:
        raise RuntimeError(f"bad path: {path}")
    logger.info(f"removed {path}.")
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,636 | zhuokaizhao/artifice | refs/heads/master | /test_utils/experiment.py | """experiments is a tool for creating large, labeled training sets for semantic
segmentation and/or object detection, with the ray-tracing tool POV-Ray.
Dependencies:
* numpy
* POV-Ray
* vapory
On masks and annotations:
A "mask" is a tuple of arrays, such as those returned by skimage.draw functions,
which index into the experiment's image space.
An "annotation" is an array with the same height and width as the experiment's
image, containing information about an image and the objects in it. A shallow
annotation contains just class labels at every pixel. A deeper annotation
contains scalar information about an object at every pixel, such as the distance
to its center.
"""
import logging
# Module-level logger with its own stream handler and format so messages
# appear even when the root logger is unconfigured.
logger = logging.getLogger('experiment')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(levelname)s:experiment:%(message)s'))
logger.addHandler(handler)
import numpy as np
import vapory
import os
import matplotlib.pyplot as plt
from skimage import draw
from inspect import signature
import subprocess as sp
import tensorflow as tf
from artifice.utils import img, vid
from artifice import dat
# Sentinel distance for "ray misses the object"; effectively infinite (1e10).
INFINITY = 10e9
def normalize(X):
    """Return the unit vector pointing along `X`.

    :param X: 1-D numpy vector (or any sequence convertible to one)
    :returns: `X` scaled to unit length
    :rtype: numpy array
    """
    vec = np.array(X)
    assert vec.ndim == 1
    return vec / np.linalg.norm(vec)
def perpendicular(X):
    """Return a unit vector perpendicular to X in R^3."""
    vec = np.array(X)
    assert vec.ndim == 1
    # Cross X with (1,1,1) — orthogonal to X for any X not parallel to it —
    # then scale to unit length.
    ortho = np.array([vec[1] - vec[2],
                      vec[2] - vec[0],
                      vec[0] - vec[1]])
    return ortho / np.linalg.norm(ortho)
def quadratic_formula(a, b, c):
    """Return the two solutions according to the quadratic formula.

    :param a: quadratic coefficient
    :param b: linear coefficient
    :param c: constant coefficient
    :returns: the two real roots (plus-branch first), or None when the
      discriminant is negative
    """
    discriminant = b**2 - 4*a*c
    if discriminant < 0:
        return None
    root = np.sqrt(discriminant)
    denom = 2*a
    return (-b + root) / denom, (-b - root) / denom
class DynamicObject:
    """A wrapper around a vapory object that changes from frame to frame.

    Used for objects in a scene which change from image to image but are not
    being tracked. The vapory object itself is created on every __call__().

    * vapory_object: the vapory class this DynamicObject represents.
    * object_args: either a tuple, containing all required arguments for
      creating vapory_object, or a function which creates this tuple (allowing
      for non-determinism), as well as any others that should change on each
      call. This function may optionally take an argument (such as a time
      step).
    * args: any additional args, passed on as vapory_object(*object_args + args)
    * kwargs: additional keyword arguments are flattened into the positional
      argument list, the same on every call.

    self.args stores the most recent args as a list. It is instantiated as []
    and is used by Experiment.annotate_and_label() to generate object masks.
    """

    def __init__(self, vapory_object, object_args, *args, **kwargs):
        """Store the vapory class and the source of its per-call arguments.

        :param vapory_object: vapory class to instantiate on each call
        :param object_args: tuple of args, or a callable returning such a
          tuple; a callable with exactly one parameter receives the time step
        :raises RuntimeError: if `object_args` is neither a tuple nor callable
        """
        self.vapory_object = vapory_object
        if callable(object_args):
            self.get_args = object_args
            # A one-parameter callable is assumed to take the time step.
            self.do_time_step = len(signature(object_args).parameters) == 1
        elif isinstance(object_args, tuple):
            self.get_args = lambda: object_args
            self.do_time_step = False
        else:
            raise RuntimeError("`object_args` is not a tuple or function")
        # kwargs are flattened to "key, value, key, value, ..." positional
        # modifiers, as vapory expects.
        self.other_args = list(args) + sum([list(t) for t in kwargs.items()], [])
        self.args = []

    def __call__(self, t=None):
        """Return an instance of the represented vapory object, ready to be
        inserted into a scene.

        :param t: time step; required when `object_args` was a one-parameter
          callable
        TODO: allow arguments to be passed in, overriding get_args. Optional.
        """
        if self.do_time_step:
            assert t is not None
            self.args = list(self.get_args(t)) + self.other_args
        else:
            self.args = list(self.get_args()) + self.other_args
        return self.vapory_object(*self.args)
class ExperimentObject(DynamicObject):
    """A tracked vapory object carrying a semantic class.

    An ExperimentObject is a wrapper around a Vapory object, representing a
    marker for detection. Use an ExperimentObject, rather than a mere
    DynamicObject or vapory object, whenever the object is being tracked
    (needs a mask). Unlike a DynamicObject, every ExperimentObject has a class
    associated with it: an integer > 0, since 0 is the background class.

    args:
    vapory_object: the vapory class this ExperimentObject represents.
    object_args: either a tuple, containing all required arguments for
      creating vapory_object, or a function which creates this tuple (allowing
      for non-determinism), as well as any others that should change on each
      call.
    semantic_label: numerical class of the object, used for generating an
      annotation. default=1.
    args: any additional args, passed on as vapory_object(*object_args + args)
    kwargs: additional keyword arguments are passed onto the vapory object,
      the same on every call.
    """

    def __init__(self, vapory_object, object_args, *args, semantic_label=1,
                 **kwargs):
        super().__init__(vapory_object, object_args, *args, **kwargs)
        assert semantic_label > 0
        self.semantic_label = int(semantic_label)

    def compute_mask(self, experiment):
        """Compute the mask of this object, given Experiment `experiment`.

        Overridden by subclasses for each type of vapory object. Each object
        returns the indices of the experiment scene that it contains (as in
        skimage.draw). It is up to the Experiment to decide, in the case of
        occlusions, which object is in front of the other.
        """
        raise NotImplementedError("compute_mask is specific to each vapory object.")

    def compute_location(self, experiment):
        """Compute the unambiguous image-space location, such as the center."""
        raise NotImplementedError()

    def compute_label(self, experiment):
        """Compute the label: usually just location, sometimes orientation."""
        raise NotImplementedError()
class ExperimentSphere(ExperimentObject):
    """An ExperimentSphere, representing a vapory.Sphere.

    args:
    object_args: tuple `(center, radius, ...)` required by vapory.Sphere, or a
      function which creates this tuple (allowing for non-determinism).
    semantic_label: numerical class of the object, used for generating an
      annotation. default=1.
    args: any additional args, passed on as vapory.Sphere(*object_args + args)
    kwargs: additional keyword arguments are passed onto the vapory object,
      the same on every call.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(vapory.Sphere, *args, **kwargs)
        # Populated on each __call__ from the most recent args.
        self.center = self.radius = None

    def __call__(self, t=None):
        """Create the vapory sphere, recording its center and radius."""
        vapory_object = super().__call__(t)
        self.center = np.array(self.args[0])
        self.radius = self.args[1]
        return vapory_object

    def distance_to_surface(self, Xi, experiment):
        """Given a point Xi = [x,y] in image-space, compute the distance from
        experiment.camera_location to the near surface of the sphere.

        If the ray through Xi misses the sphere, return INFINITY.
        """
        assert len(self.args) != 0
        const = experiment.camera_location - self.center
        v = experiment.unproject(Xi)
        # Solve |camera + t*v - center|^2 = radius^2 for t.
        a = np.linalg.norm(v)**2
        b = 2*np.dot(const, v)
        c = np.linalg.norm(const)**2 - self.radius**2
        ts = quadratic_formula(a, b, c)
        # Fix: compare against None with `is`, not `==` (idiomatic, and avoids
        # comparing a tuple to None).
        if ts is None:
            return INFINITY
        t1, t2 = ts
        d1 = np.linalg.norm(t1*v)
        d2 = np.linalg.norm(t2*v)
        return min(d1, d2)

    def compute_mask(self, experiment):
        """Compute the mask for this sphere, placed in `experiment`.

        :returns: `(rr, cc, dd)` where `rr, cc` are indices into the image (as
          from skimage.draw) and `dd` is the camera-to-surface distance at
          each masked pixel.
        """
        assert len(self.args) != 0
        center = experiment.project(self.center)
        # Project a point on the silhouette edge to recover the image-space
        # radius of the sphere.
        center_to_edge = self.radius * perpendicular(
            experiment.camera_to(self.center))
        radius_vector = (experiment.project(self.center + center_to_edge)
                         - experiment.project(self.center))
        radius = np.linalg.norm(radius_vector)
        rr, cc = draw.circle(center[0], center[1], radius,
                             shape=experiment.image_shape[:2])
        dd = np.empty(rr.shape[0], dtype=np.float64)
        for i in range(dd.shape[0]):
            dd[i] = self.distance_to_surface([rr[i], cc[i]], experiment)
        return rr, cc, dd

    def compute_location(self, experiment):
        """Project the sphere's center into image-space."""
        assert len(self.args) != 0
        return experiment.project(self.center)

    def compute_label(self, experiment):
        """Computes the label for the object.

        :param experiment: the Experiment containing this object
        :returns: `[semantic_label, x pos, y pos, theta, x_scale, y_scale]`
        """
        label = np.empty((experiment.label_dimension,), dtype=np.float32)
        label[0] = float(self.semantic_label)
        label[1:3] = self.compute_location(experiment)
        label[3] = 0  # theta: spheres have no orientation
        label[4] = 1  # x_scale
        label[5] = 1  # y_scale
        return label
class Experiment:
    """An Experiment contains information for generating a dataset, which is
    done using self.run(). It has variations that affect the output labels.

    :param image_shape: (rows, cols) shape of the output images, determines
      the aspect ratio of the camera, default=(512,512). Number of channels
      determined by `mode`
    :param mode: image mode to generate, default='L' (8-bit grayscale)
    :param num_classes: number of classes to be detected, INCLUDING the
      background class.
    :param N: number of images to generate, default=1000
    :param output_format: filetype to write, default='tfrecord'. Can be a list
      of filetypes, in which case the same data will be written to each.
    :param data_root: directory into which all output files are written.
    :param camera_multiplier: controls how far out the camera is positioned,
      as a multiple of image_shape[1] (vertical pixels), default=4 (far away)
    :param fps: frame rate used for mp4 output.
    :param noisify: add Poisson noise to each frame depending on the frame
      rate.

    Image `mode` is according to PIL.Image. Valid inputs are:
    * L (8-bit pixels, black and white)
    * RGB (3x8-bit pixels, true colour)

    The camera will be placed in each experiment such that the <x,y,0> plane
    is the image plane, with one unit of distance corresponding to ~1 pixel on
    that plane.

    self.experiment_objects holds tracked ExperimentObjects,
    self.dynamic_objects holds untracked per-frame DynamicObjects, and
    self.static_objects is a list of vapory objects ready to be inserted in
    the scene, as is.
    """
    supported_modes = {'L', 'RGB'}
    # PIL image mode -> ffmpeg pixel format (used for video output).
    pix_fmts = {'L': 'gray', 'RGB': 'rgb8'}
    supported_formats = {'tfrecord', 'mp4', 'png'}
    # POV-Ray include files added to every scene.
    included = ["colors.inc", "textures.inc"]
    # Per-object label: [semantic_label, x, y, theta, x_scale, y_scale].
    label_dimension = 6

    def __init__(self, image_shape=(512, 512), mode='L', num_classes=2, N=1000,
                 output_format='tfrecord', data_root="data/tmp",
                 camera_multiplier=4, fps=1, noisify=True):
        self.N = int(N)
        self.noisify = noisify
        self.image_shape = tuple(image_shape)
        assert len(self.image_shape) == 2
        assert mode in self.supported_modes
        self.mode = mode
        # Normalize output_format to a set of format strings.
        if isinstance(output_format, (list, set)):
            self.output_formats = set(output_format)
        else:
            assert isinstance(output_format, str)
            self.output_formats = {output_format}
        assert all(f in self.supported_formats for f in self.output_formats)
        # Output location; created eagerly so run() can write immediately.
        self.data_root = data_root
        if not os.path.exists(self.data_root):
            os.makedirs(self.data_root)
        assert camera_multiplier > 0
        self.camera_multiplier = camera_multiplier
        assert num_classes > 0
        self.num_classes = num_classes  # TODO: unused
        self.fps = int(fps)
        self._set_camera()
        # The objects in the scene should be added to by the subclass.
        self.experiment_objects = []  # ExperimentObject instances
        self.dynamic_objects = []     # DynamicObject instances
        self.static_objects = []      # vapory object instances

    def add_object(self, obj):
        """Adds obj to the appropriate list, according to the type of the
        object.

        Anything that is neither an ExperimentObject nor a DynamicObject is
        treated as a static vapory object.
        """
        if issubclass(type(obj), ExperimentObject):
            self.experiment_objects.append(obj)
        elif isinstance(obj, DynamicObject):
            # Fix: this branch previously reset self.dynamic_objects to [],
            # silently discarding every dynamic object added so far.
            self.dynamic_objects.append(obj)
        else:
            self.static_objects.append(obj)

    def _set_camera(self):
        """Sets the camera dimensions of the Experiment so that the output
        image has `image_shape`. Also sets the camera projection matrix.
        Should only be called by __init__().
        """
        camera_distance = self.image_shape[0]*self.camera_multiplier
        location = [0, 0, -camera_distance]
        direction = [0, 0, 1]  # POV-Ray direction vector
        aspect_ratio = self.image_shape[0] / self.image_shape[1]
        right = [aspect_ratio, 0, 0]  # POV-Ray vector
        half_angle_radians = np.arctan(1 / (2*self.camera_multiplier))
        # (Szeliski 53)
        focal_length = self.image_shape[1] / (2*np.tan(half_angle_radians))
        # Set the camera projection matrix.
        K = np.array(
            [[focal_length, 0, self.image_shape[0]/2],
             [0, aspect_ratio*focal_length, self.image_shape[1]/2],
             [0, 0, 1]])
        T = np.array(
            [[0],
             [0],
             [camera_distance]])
        R = np.array(
            [[0, -1, 0],
             [1, 0, 0],
             [0, 0, 1]])
        P = K @ np.concatenate((R, T), axis=1)
        # Homogeneous world-to-image matrix and its inverse.
        self._camera_WtoI = np.concatenate((P, [[0, 0, 0, 1]]), axis=0)
        self._camera_ItoW = np.linalg.inv(self._camera_WtoI)
        self.camera_location = np.array(location)
        self.camera = vapory.Camera('location', location,
                                    'direction', direction,
                                    'right', right,
                                    'angle', 2*np.degrees(half_angle_radians))

    def camera_to(self, X):
        """Get the world-space vector from the camera to X."""
        assert len(X) == 3
        return np.array(X) - self.camera_location

    def project(self, X):
        """Project the world-space POINT X = [x,y,z] to image-space.

        :returns: the [i,j] point in image-space (as a numpy array).
        """
        assert len(X) == 3
        Xi = self._camera_WtoI @ np.concatenate((np.array(X), [1]))
        return np.array([Xi[0]/Xi[2], Xi[1]/Xi[2]])

    def unproject_point(self, Xi, disparity=1):
        """From index space point Xi = [x,y], unproject back into world-space.

        Since an unambiguous 3D point cannot be recovered, this should be used
        only to recover a ray associated with a given pixel in image-space.
        The "disparity" argument controls this ambiguity: different
        disparities yield different points along the same ray.
        """
        assert len(Xi) == 2
        Xi = np.array(Xi)
        X = self._camera_ItoW @ np.array([Xi[0], Xi[1], 1, disparity])
        return (X / X[3])[:3]

    def unproject(self, Xi):
        """From index space point Xi = [x,y], unproject back into world-space.

        Due to 3D-2D ambiguity, an image-space point corresponds to a ray in
        world-space. Returns a unit-vector along this ray. Together with the
        camera location, this can recover any point along the ray.
        """
        Xi = np.array(Xi)
        # Two disparities give two points on the ray; their difference is its
        # direction.
        a = self.unproject_point(Xi)
        b = self.unproject_point(Xi, disparity=2)
        V = normalize(a - b)
        if V[2] == 0:
            return V
        else:
            return V * V[2] / abs(V[2])  # ensure V points toward +z

    def unproject_to_image_plane(self, Xi):
        """Unproject back to the world-space point which lies on the image
        plane.

        :param Xi: [i,j] index-space point
        :returns: 3-vector world-space position
        :rtype: numpy array
        """
        Xi = np.array(Xi)
        u_hat = self.unproject(Xi)
        v = self.camera_location
        mag_v = np.linalg.norm(v)
        # Scale the ray direction so it reaches the z=0 plane.
        cos_th = np.dot(u_hat, v) / mag_v
        u = (mag_v / cos_th) * u_hat
        return v + u

    def annotate_and_label(self):
        """Computes the annotation and label for the scene.

        Based on the most recent vapory objects created.

        The first channel of the annotation always marks class labels. The
        label has a row for every object in the image, whose first element is
        the object's semantic label.

        The nearest object wins each pixel, so occlusion between tracked
        objects is handled; occlusion by static or untracked objects is not
        (TODO).
        """
        label = np.zeros((len(self.experiment_objects), self.label_dimension),
                         dtype=np.float32)
        annotation = np.zeros((self.image_shape[0], self.image_shape[1], 1),
                              dtype=np.int64)
        # Per-pixel distance of the closest object seen so far.
        object_distance = INFINITY * np.ones(annotation.shape[:2],
                                             dtype=np.float64)
        for i, obj in enumerate(self.experiment_objects):
            label[i] = obj.compute_label(self)
            rr, cc, dd = obj.compute_mask(self)
            for r, c, d in zip(rr, cc, dd):
                # TODO: overwrite objects in the background, if they're not
                # visible.
                if d < object_distance[r, c]:
                    object_distance[r, c] = d
                    annotation[r, c, 0] = obj.semantic_label
        return annotation, label

    def render_scene(self, t=None):
        """Renders a single scene, applying the various perturbations on each
        object/light source in the Experiment.

        :param t: time step, forwarded to time-dependent objects
        :returns: `((image, label), annotation)`
        """
        dynamic_objects = [obj(t) for obj in self.dynamic_objects]
        experiment_objects = [obj(t) for obj in self.experiment_objects]
        all_objects = self.static_objects + dynamic_objects + experiment_objects
        vap_scene = vapory.Scene(self.camera, all_objects,
                                 included=self.included)
        # image: ndarray of np.uint8s.
        image = vap_scene.render(height=self.image_shape[0],
                                 width=self.image_shape[1])
        if self.mode == 'L':
            image = img.grayscale(image)
        # Add Poisson noise, simulating photon shot noise.
        if self.noisify:
            peak = 5000  # TODO: make fps dependent.
            image = np.random.poisson(image.astype(np.float64) / 255. * peak)
            image = (image / peak * 255.).astype(np.uint8)
        # Compute annotation, label using the most recent args, produced by
        # the object calls above.
        annotation, label = self.annotate_and_label()
        return (image, label), annotation

    def run(self, verbose=None):
        """Generate the dataset in each requested output format."""
        if verbose is not None:
            logger.warning("verbose is deprecated")
        if len(self.output_formats) == 0:
            # TODO: raise error?
            return
        # Instantiate writers and fnames for each format.
        if 'png' in self.output_formats:
            image_dir = os.path.join(self.data_root, 'images/')
            annotation_dir = os.path.join(self.data_root, 'annotations/')
            if not os.path.exists(image_dir):
                os.makedirs(image_dir)
            if not os.path.exists(annotation_dir):
                os.makedirs(annotation_dir)
            label_path = os.path.join(self.data_root, 'labels.npy')
            labels = None  # allocated on the first frame, once shape is known
            logger.info("writing images to {}".format(image_dir))
        if 'tfrecord' in self.output_formats:
            tfrecord_name = os.path.join(self.data_root, 'data.tfrecord')
            tfrecord_writer = tf.python_io.TFRecordWriter(tfrecord_name)
            logger.info("writing tfrecord to {}".format(tfrecord_name))
        if 'mp4' in self.output_formats:
            mp4_image_name = os.path.join(self.data_root, 'data.mp4')
            mp4_image_writer = vid.MP4Writer(
                mp4_image_name, self.image_shape[:2], fps=self.fps)
            logger.info("writing video to {}".format(mp4_image_name))
        # Step through all the frames, rendering each scene with
        # time-dependence if necessary.
        for t in range(self.N):
            logger.info("Rendering scene {} of {}...".format(t, self.N))
            example, annotation = self.render_scene(t)
            image, label = example
            logger.debug(f"label: {label}")
            if 'png' in self.output_formats:
                fname = f"{str(t).zfill(5)}"
                img.save(os.path.join(image_dir, fname + '.png'),
                         np.squeeze(image))
                np.save(os.path.join(annotation_dir, fname + '.npy'),
                        annotation)
                if labels is None:
                    labels = np.empty((self.N,) + label.shape)
                labels[t] = label
            if 'tfrecord' in self.output_formats:
                # Fix: `scene` was referenced here but never defined
                # (NameError). Pass the rendered (example, annotation) pair —
                # TODO confirm against dat.proto_from_scene's signature.
                e = dat.proto_from_scene((example, annotation))
                tfrecord_writer.write(e)
            if 'mp4' in self.output_formats:
                mp4_image_writer.write(image)
        if 'png' in self.output_formats:
            logger.info("Finished writing images.")
            np.save(label_path, labels)
        if 'tfrecord' in self.output_formats:
            tfrecord_writer.close()
            logger.info("Finished writing tfrecord.")
        if 'mp4' in self.output_formats:
            mp4_image_writer.close()
            logger.info("Finished writing video.")
def main():
    """Entry point for ad-hoc testing; currently a no-op."""
    return None
if __name__ == '__main__': main()
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,637 | zhuokaizhao/artifice | refs/heads/master | /artifice/conversions.py | """Module for converting data to standard form expected by Artifice.
To create custom conversions, create a new function in the style of
`png_dir_and_npy_file`, the name of which specifies that the images have a
'.png' extension and the labels be in a single 'labels.npy' file in the
data_root. A conversion function should return nothing, merely saving the
resulting dataset to a location expected by Artifice, usually
data_root/labeled_set.tfrecord or data_root/unlabeled_set.tfrecord. It should
accept the directory where data is/will be stored, as well as the number of
examples to withold for a potentially labeled test set, if applicable (as a
keyword arg).
"""
from os.path import join, splitext
from glob import glob
import logging
from itertools import islice
import numpy as np
from artifice import img, dat
logger = logging.getLogger('artifice')
def _get_paths(dirpath, ext):
paths = sorted(glob(join(dirpath, f'*.{ext}')))
if not paths:
raise FileNotFoundError(f"no '.{ext}' files in {dirpath}")
return paths
def _load_single_labels(labels_path):
    """Load labels from a single file, dispatching on its extension.

    :param labels_path: path to a labels file ('.npy' or '.txt')
    :returns: numpy array of labels
    :raises NotImplementedError: if the extension has no registered loader
    """
    # Strip the leading '.' so the extension matches _label_loaders keys.
    ext = splitext(labels_path)[1].lstrip('.')
    loader = _label_loaders.get(ext)
    if loader is None:
        raise NotImplementedError(f"no loader for '.{ext}' label files")
    return loader(labels_path)
def _image_dir_and_label_file(data_root, record_name='labeled_set.tfrecord',
                              image_dirname='images', image_ext='png',
                              labels_filename='labels.npy', test_size=0):
    """Helper function to perform the conversion when labels are in one file.

    Assumes fully labeled data. The tfrecord-writing step is not yet
    implemented.

    :param data_root: root directory
    :param record_name: name of the output tfrecord
    :param image_dirname: name of directory containing images
    :param image_ext: extension of images, e.g. 'png', 'jpeg'
    :param labels_filename: name of the single labels file under data_root
    :param test_size: number of examples to withhold for a test set
    :raises NotImplementedError: always, for now
    """
    image_paths = _get_paths(join(data_root, image_dirname), image_ext)
    # Fix: labels_path was previously referenced without ever being defined,
    # raising NameError before the intended NotImplementedError.
    labels_path = join(data_root, labels_filename)
    labels = _load_single_labels(labels_path)
    raise NotImplementedError
# Map label-file extension -> loader callable, used by the conversion
# helpers to read per-image label files.
_label_loaders = {'npy': np.load,
'txt': np.loadtxt}
def _image_dir_and_label_dir(data_root, record_name='labeled_set.tfrecord',
                             image_dirname='images', image_ext='png',
                             label_dirname='labels', label_ext='npy',
                             test_size=0, test_name='test_set.tfrecord'):
    """Perform the conversion when each image has a corresponding label file.

    :param data_root: root directory of the dataset
    :param record_name: name of the labeled-set tfrecord
    :param image_dirname: directory (under data_root) of image files
    :param image_ext: image extension, e.g. 'png'
    :param label_dirname: directory (under data_root) of label files
    :param label_ext: label extension with a registered loader ('npy', 'txt')
    :param test_size: number of examples to place in a separate tfrecord
      test set
    :param test_name: name of the test set
    """
    image_paths = _get_paths(join(data_root, image_dirname), image_ext)
    label_paths = _get_paths(join(data_root, label_dirname), label_ext)
    # Pair images with labels one-to-one; truncate whichever list is longer.
    num_images, num_labels = len(image_paths), len(label_paths)
    if num_images > num_labels:
        logger.warning(f"labeled_set: using the first {len(label_paths)} images "
                       f"(for which labels exist)")
        del image_paths[num_labels:]
    elif num_images < num_labels:
        logger.warning(f"labeled_set: using the first {len(image_paths)} labels "
                       f"(for which images exist)")
        del label_paths[num_images:]
    assert len(image_paths) >= test_size
    def gen():
        for image_path, label_path in zip(image_paths, label_paths):
            image = img.open_as_float(image_path)
            label = _label_loaders[label_ext](label_path)
            # swap x,y for some reason
            label[:, [0, 1]] = label[:, [1, 0]]
            yield dat.proto_from_example((image, label))
    # First test_size examples go to the test set; the rest to the labeled set
    # (gen() is re-run for the second pass).
    dat.write_set(islice(gen(), test_size), join(data_root, test_name))
    dat.write_set(islice(gen(), test_size, None), join(data_root, record_name))
def _image_dir(data_root, record_name='unlabeled_set.tfrecord',
               image_dirname='images', image_ext='png', test_size=0):
    """Write an unlabeled tfrecord from a directory of images.

    `test_size` denotes a number of leading examples to skip in the sorted
    list of path names. No actual test set is created, however, as that
    requires labels.

    :param data_root: root directory of the dataset
    :param record_name: name of the output tfrecord
    :param image_dirname: directory (under data_root) of image files
    :param image_ext: image extension, e.g. 'png'
    :param test_size: number of leading examples to skip
    """
    image_paths = _get_paths(join(data_root, image_dirname), image_ext)
    assert len(image_paths) >= test_size
    protos = (dat.proto_from_image(img.open_as_float(path))
              for path in image_paths)
    dat.write_set(islice(protos, test_size, None), join(data_root, record_name))
def png_dir_and_txt_dir(data_root, test_size=0):
    """Convert a directory of '.png' images plus a directory of '.txt' labels.

    Expects DATA_ROOT/images/ containing loadable images all of the same form
    and DATA_ROOT/labels/ with corresponding labels in text files loadable by
    np.loadtxt.
    """
    _image_dir_and_label_dir(data_root, test_size=test_size,
                             image_ext='png', label_ext='txt')
def png_dir_and_txt_file(data_root, test_size=0):
    """Convert a directory of '.png' images plus a single `labels.txt` file.

    Expects DATA_ROOT/images/ containing loadable images all of the same form
    and DATA_ROOT/labels.txt with corresponding labels loadable by
    np.loadtxt(). Not yet implemented.
    """
    raise NotImplementedError
def png_dir_and_npy_dir(data_root, test_size=0):
    """Convert a directory of '.png' images plus a directory of '.npy' labels.

    Expects DATA_ROOT/images/ containing loadable images all of the same form
    and DATA_ROOT/labels/ with corresponding per-image '.npy' label files.
    """
    _image_dir_and_label_dir(data_root, test_size=test_size,
                             image_ext='png', label_ext='npy')
def png_dir_and_npy_file(data_root, test_size=0):
    """Convert a directory of '.png' images plus a single `labels.npy` file.

    Expects DATA_ROOT/images/ containing loadable images all of the same form
    and DATA_ROOT/labels.npy with corresponding labels in one numpy array.
    Not yet implemented.
    """
    raise NotImplementedError
def png_dir(data_root, test_size=0):
    """Convert a directory of '.png' images into an unlabeled tfrecord."""
    _image_dir(data_root, test_size=test_size, image_ext='png')
# list of all the conversion functions here.
# Registry mapping an integer conversion mode to its conversion function;
# presumably selected by a numeric command-line option — TODO confirm
# against the caller.
conversions = {0: png_dir_and_txt_dir,
1: png_dir_and_txt_file,
2: png_dir_and_npy_dir,
3: png_dir_and_npy_file,
4: png_dir}
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,638 | zhuokaizhao/artifice | refs/heads/master | /test_utils/springs.py | """Analayze positions of two spheres to try to gauge the spring constant
connecting the two."""
import numpy as np
from artifice import vis
def find_constant(labels, tethered=None):
    """Compute per-example distances between objects 0 and 1.

    NOTE: despite the original docstring, this returns the separation
    distances, not a fitted spring constant; callers currently receive the
    raw lengths.

    :param labels: `(num_examples, num_objects, label_dim)` array of labels or
      detections, with index-space positions in columns 1:3.
    :param tethered: list of object IDs which are tethered to the center of
      the image. Currently unused; kept for interface compatibility.
    :returns: `(num_examples,)` array of distances between objects 0 and 1.
    :rtype: np.ndarray
    """
    # Fix: a mutable default argument ([]) is shared across calls; use None
    # as the sentinel and build a fresh list per call instead.
    if tethered is None:
        tethered = []
    positions = labels[:, :, 1:3]
    ls = np.linalg.norm(positions[:, 1] - positions[:, 0], axis=-1)
    return ls
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,639 | zhuokaizhao/artifice | refs/heads/master | /artifice/conv_utils.py | """Borrowed largely from Tensorflow source.
"""
import tensorflow as tf
from artifice.log import logger # noqa
def _dim_val(dim):
    """Return the integer value of a shape dimension.

    In eager mode dimensions are already plain ints; in graph mode they are
    `Dimension` objects whose value lives in `.value`.
    """
    return dim if tf.executing_eagerly() else dim.value
def divup(a, b):
    """Ceiling integer division of `a` by `b`."""
    numerator = a + b - 1
    return numerator // b
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
    """Determine the spatial output length of a convolution.

    Arguments:
      input_length: integer, or None for an unknown dimension.
      filter_size: integer.
      padding: one of "same", "valid", "full", "causal".
      stride: integer.
      dilation: dilation rate, integer.
    Returns:
      The output length (integer), or None if input_length is None.
    """
    if input_length is None:
        return None
    assert padding in {'same', 'valid', 'full', 'causal'}
    # Effective kernel extent once dilation gaps are included.
    effective_filter = filter_size + (filter_size - 1) * (dilation - 1)
    if padding == 'valid':
        length = input_length - effective_filter + 1
    elif padding == 'full':
        length = input_length + effective_filter - 1
    else:  # 'same' and 'causal' preserve the input length before striding
        length = input_length
    return (length + stride - 1) // stride
def conv_output_shape(input_shape, filters, kernel_size, padding, strides):
    """Compute the NHWC output shape of a convolutional layer.

    :param input_shape: `[N, H, W, C]` input shape (dims may be Dimension
      objects in graph mode).
    :param filters: number of output channels.
    :param kernel_size: `[KH, KW]` kernel size.
    :param padding: one of "same", "valid", "full", "causal".
    :param strides: `[SH, SW]` strides.
    :returns: `[N, H', W', filters]` list; spatial dims come from
      `conv_output_length`.
    :rtype: list
    """
    output_h = conv_output_length(
        input_shape[1],
        kernel_size[0],
        padding,
        strides[0])
    output_w = conv_output_length(
        input_shape[2],
        kernel_size[1],
        padding,
        strides[1])
    # _dim_val unwraps graph-mode Dimension objects to plain ints.
    return [_dim_val(input_shape[0]),
            _dim_val(output_h),
            _dim_val(output_w),
            filters]
def deconv_output_length(input_length, filter_size, padding,
                         output_padding=None, stride=0, dilation=1):
    """Determine the output length of a transposed convolution.

    Original source can be found [here]
    (https://github.com/tensorflow/tensorflow/blob/5912f51d580551e5cee2cfde4cb882594b4d3e60/tensorflow/python/keras/utils/conv_utils.py#L140). # noqa

    :param input_length: Integer, or None for an unknown dimension.
    :param filter_size: Integer.
    :param padding: one of `"same"`, `"valid"`, `"full"`.
    :param output_padding: Integer, amount of padding along the output
      dimension. When `None` the output length is inferred.
    :param stride: Integer.
    :param dilation: Integer.
    :returns: The output length (integer), or None.
    """
    assert padding in {'same', 'valid', 'full'}
    if input_length is None:
        return None
    # Dilated kernel extent.
    filter_size = filter_size + (filter_size - 1) * (dilation - 1)
    if output_padding is None:
        # Infer the length from the padding mode alone.
        inferred = {
            'valid': input_length * stride + max(filter_size - stride, 0),
            'full': input_length * stride - (stride + filter_size - 2),
            'same': input_length * stride,
        }
        return inferred[padding]
    # Exact length given an explicit output padding.
    pad_amount = {
        'same': filter_size // 2,
        'valid': 0,
        'full': filter_size - 1,
    }[padding]
    return ((input_length - 1) * stride + filter_size - 2 * pad_amount
            + output_padding)
def deconv_output_shape(input_shape, filters, kernel_size, padding, strides):
    """Compute the NHWC output shape of a transposed-convolution layer.

    :param input_shape: `[N, H, W, C]` input shape (dims may be Dimension
      objects in graph mode).
    :param filters: number of output channels.
    :param kernel_size: `[KH, KW]` kernel size.
    :param padding: one of "same", "valid", "full".
    :param strides: `[SH, SW]` strides.
    :returns: `[N, H', W', filters]` list; spatial dims come from
      `deconv_output_length` with output padding inferred.
    :rtype: list
    """
    output_h = deconv_output_length(
        input_shape[1],
        kernel_size[0],
        padding,
        stride=strides[0])
    output_w = deconv_output_length(
        input_shape[2],
        kernel_size[1],
        padding,
        stride=strides[1])
    # _dim_val unwraps graph-mode Dimension objects to plain ints.
    return [_dim_val(input_shape[0]),
            _dim_val(output_h),
            _dim_val(output_w),
            filters]
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,640 | zhuokaizhao/artifice | refs/heads/master | /test_utils/annotate.py | """Messing around with annotating the gyros data. These annotators are highly
ad-hoc and not what would eventually be used, in all likelihood. They're more
the sort of thing we want to avoid."""
import numpy as np
from artifice.utils import img, vis
from glob import glob
from skimage.feature import canny
from skimage.draw import circle
import matplotlib.pyplot as plt
class Annotator():
    """Base class for ad-hoc image annotators.

    Subclasses implement `annotate_object`; `annotate_image` then paints
    each object's pixels with its id from the label.
    """
    # gaussian width passed to the canny edge detector
    sigma = 1.0

    def annotate_object(self, image, obj_label, edges=None):
        """Annotate the object at index-space position.

        Fix: `annotate_image` always calls this with an `edges` keyword, so
        the base signature must accept it; previously a subclass written
        against the documented signature would crash with a TypeError.

        :param image: numpy image, float32
        :param obj_label: 1D object label with index-space position at
          obj_label[1:3]
        :param edges: optional precomputed edge map for `image`; subclasses
          may recompute it if None.
        :returns: `(xs,ys)` lists of indices belonging to the object at
          position
        """
        raise NotImplementedError

    def annotate_image(self, image, label):
        """Annotate all the objects in the image.

        :param image: the numpy image
        :param label: `(num_objects, label_dim)` where `label_dim >= 3`
        :returns: array like `image` with each object's pixels set to its id
          `obj_label[0]` (background stays 0).
        """
        annotation = np.zeros_like(image)
        # compute edges once per image and share across objects
        edges = canny(image, sigma=self.sigma)
        for obj_label in label:
            xs, ys = self.annotate_object(image, obj_label, edges=edges)
            annotation[xs, ys] = obj_label[0]
        return annotation

    def __call__(self, *args, **kwargs):
        return self.annotate_image(*args, **kwargs)
class GyroAnnotator(Annotator):
    """Annotator for the gyros dataset: estimates each gyro's radius from
    edge pixels near the labeled center, then fills that disk."""
    # search radius (pixels) around the labeled center for edge pixels
    initial_radius = 10

    def annotate_object(self, image, obj_label, edges=None):
        """Annotate the gyro at `obj_label`.

        Run a canny edge detector on the image, if needed

        :param image: 2D numpy image.
        :param obj_label: 1D label; obj_label[1:3] is the (row, col) center.
        :param edges: optional precomputed canny edge map for `image`.
        :returns: `(rr, cc)` index arrays of the filled disk.
        """
        if edges is None:
            edges = canny(image, sigma=self.sigma)
        """
        Grab the edge pixels within 10 pixels, calculate distance to center, take
        their median as the true radius and return pixels within that range.
        """
        # disk of candidate pixels around the labeled center
        rr, cc = circle(obj_label[1], obj_label[2], self.initial_radius,
                        shape=image.shape)
        mask = np.zeros_like(image, dtype=bool)
        mask[rr, cc] = True
        # edge pixels inside the candidate disk
        xs, ys = np.where(np.logical_and(edges, mask))
        distances = np.linalg.norm(
            np.stack((xs - obj_label[1], ys - obj_label[2]), axis=1), axis=1)
        # median edge distance as the radius; +1.5 presumably a margin to
        # cover the full rim -- NOTE(review): confirm the fudge factor.
        r = np.median(distances) + 1.5
        return circle(obj_label[1], obj_label[2], r, shape=image.shape)
def main():
    """Annotate every frame of the gyros dataset and save one annotation
    array per frame under data/gyros/annotations/."""
    labels = np.load('data/gyros/labels.npy')
    image_paths = sorted(glob('data/gyros/images/*.png'))
    annotator = GyroAnnotator()
    for i, (image_path, label) in enumerate(zip(image_paths, labels)):
        if i % 100 == 0:
            print(f"{i} / {labels.shape[0]}")
        image = img.open_as_float(image_path)
        # Fix: annotate with this frame's label; previously labels[0] was
        # used for every frame, so all annotations came from frame 0.
        annotation = annotator(image, label)
        np.save(f'data/gyros/annotations/{str(i).zfill(4)}.npy', annotation)
    # fig, axes = vis.plot_image(image, scale=80)
    # xs, ys = np.where(annotation)
    # plt.plot(ys, xs, 'r.')
    # plt.show()
    # plt.savefig('docs/gyro_annotation.png')
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,641 | zhuokaizhao/artifice | refs/heads/master | /artifice/sparse/sparse_lib.py | """Sparse ops implementation using tf primitives.
"""
import numpy as np
from collections import namedtuple
import tensorflow as tf
from artifice.log import logger # noqa: unused
from artifice import utils
def _compute_bcount(size, bstride):
    """Number of blocks along each spatial dimension (ceil division of the
    image size by the block stride)."""
    return [utils.divup(size[d], bstride[d]) for d in (0, 1)]
def _compute_input_padding(size, bcount, bsize, boffset, bstride):
"""Computes the padding for the operation.
:param size: `[SZH, SZW]` list-like of ints, size of image
:param bcount: `[BCH, BCW]` list of ints
:param bsize:
:param boffset:
:param bstride:
:returns: `pad_h, pad_w` for _pad_inputs function, possibly negative.
:rtype:
"""
pad_h = [boffset[0],
boffset[0] + bstride[0] * bcount[0] + bsize[0] - size[0]]
pad_w = [boffset[1],
boffset[1] + bstride[1] * bcount[1] + bsize[1] - size[1]]
return pad_h, pad_w
def _pad_inputs(mask, bcount, bsize, boffset, bstride):
    """Pad (or crop) the input so the block grid tiles it exactly.

    Negative entries from `_compute_input_padding` are applied as crops
    before the remaining positive padding is applied with `tf.pad`.

    :param mask: 4D `[N, H, W, C]` tensor containing the inputs.
    :param bcount: `[BCH, BCW]` number of blocks per dim.
    :param bsize: `[BSZH, BSZW]` block size.
    :param boffset: `[BOFFH, BOFFW]` block offset.
    :param bstride: `[BSTRH, BSTRW]` block stride.
    :returns: Padded (or cropped) mask
    :rtype: tf.Tensor
    """
    pad_h, pad_w = _compute_input_padding(
        mask.shape[1:3], bcount, bsize, boffset, bstride)
    # A negative pad amount means the grid is smaller than the image:
    # crop that side instead of padding, then zero out the pad entry.
    if pad_h[0] < 0:
        mask = mask[:, -pad_h[0]:, :, :]
        pad_h[0] = 0
    if pad_h[1] < 0:
        mask = mask[:, :-pad_h[1], :, :]
        pad_h[1] = 0
    if pad_w[0] < 0:
        mask = mask[:, :, -pad_w[0]:, :]
        pad_w[0] = 0
    if pad_w[1] < 0:
        mask = mask[:, :, :-pad_w[1], :]
        pad_w[1] = 0
    # batch and channel dims are never padded
    pad_n = pad_c = [0, 0]
    return tf.pad(mask, [pad_n, pad_h, pad_w, pad_c])
def _compute_upsample_offsets(bsize):
    """Compute per-pixel `[0, i, j]` index offsets for a block of size `bsize`.

    Assumes that the given coordinate is at the top left of the block. Each
    entry is an `[n, i, j]` offset with the batch offset `n` always 0. For
    example, for `bsize == [2, 3]` the offsets are::

        [[[0, 0, 0], [0, 0, 1], [0, 0, 2]],
         [[0, 1, 0], [0, 1, 1], [0, 1, 2]]]

    plus a leading broadcast dimension. (The original docstring claimed
    shape `[1, bsize[0], bsize[1], 1]`; the actual last dim is 3.)

    :param bsize: `[BSZH, BSZW]` size of the blocks.
    :returns: `[1, bsize[0], bsize[1], 3]` tensor of offsets used to upsample
      a set of block_indices.
    :rtype: tf.Tensor
    """
    offsets = np.array([[[0, i, j] for j in range(bsize[1])]
                        for i in range(bsize[0])], np.int32)
    # todo: fix this so it isn't crazy
    offsets = tf.constant(offsets, tf.int32)
    # leading dim broadcasts against the [M, 1, 1, 3] block indices
    offsets = tf.expand_dims(offsets, 0)
    return offsets
def _upsample_block_indices(active_block_indices, bsize, boffset, bstride):
    """Upsamples the indices to have all indices in a rectangle.

    Each `[n, bh, bw]` block index is offset, scaled by the block stride,
    and then expanded to the full grid of pixel indices inside the block.

    :param active_block_indices: [M,3] Tensor. Corresponds to top left
      coordinate after offset and scaling.
    :param bsize: block size
    :param boffset:
    :param bstride:
    :returns: [M, bsize[0], bsize[1], 3] locations of all pixels in the blocks.
    :rtype:
    """
    # ops stays empty unless the debug prints below are re-enabled, so the
    # control_dependencies block is currently a no-op.
    ops = []
    logger.debug(f"bsize: {bsize}")
    logger.debug(f"bstride: {bstride}")
    # ops.append(tf.print(active_block_indices, summarize=-1))
    # batch coordinate is neither offset nor scaled
    offset = tf.constant([0, boffset[0], boffset[1]], dtype=tf.int32)
    scale = tf.constant([1, bstride[0], bstride[1]], dtype=tf.int32)
    indices = tf.cast(active_block_indices, tf.int32) + offset
    indices *= scale  # [M, 3]
    indices = tf.expand_dims(indices, 1)
    indices = tf.expand_dims(indices, 2)  # [M, 1, 1, 3]
    upsample_offsets = _compute_upsample_offsets(
        bsize)  # [1, bsize[0], bsize[1], 3]
    logger.debug(f"indices: {indices.shape}")
    logger.debug(f"upsample_offsets: {upsample_offsets.shape}")
    # ops.append(tf.print(indices, summarize=-1))
    # ops.append(tf.print(upsample_offsets, summarize=-1))
    with tf.control_dependencies(ops):
        # broadcast add: every block corner fans out to its pixel grid
        indices += upsample_offsets  # [M, bsize[0], bsize[1], 3]
    return indices
"""
TensorFlow primitive implementations.
"""
def reduce_mask(mask,
                bcount, *,
                bsize,
                boffset,
                bstride,
                tol=0.5,
                avgpool=False):
    """Reduce the mask to namedtuple `(active_block_indices, bin_counts)`.

    Pools the mask over the block grid and keeps the blocks whose pooled
    value exceeds `tol`.

    :param mask: 4D `[N, H, W, 1]` mask tensor.
    :param bcount: `[BCH, BCW]` number of blocks per dim.
    :param bsize: `[BSZH, BSZW]` block size.
    :param boffset: `[BOFFH, BOFFW]` block offset.
    :param bstride: `[BSTRH, BSTRW]` block stride.
    :param tol: pooled-activation threshold above which a block is active.
    :param avgpool: use average pooling instead of max pooling.
    :returns: namedtuple with `active_block_indices` ([M,3] int32) and
      `bin_counts` (scalar number of active blocks).
    :rtype: namedtuple
    """
    logger.debug(f"mask: {mask.shape}")
    mask = _pad_inputs(mask, bcount, bsize, boffset, bstride)
    logger.debug(f"padded mask: {mask.shape}")
    mask = tf.nn.pool(
        mask,
        window_shape=bsize,
        pooling_type='AVG' if avgpool else 'MAX',
        padding='SAME',
        strides=bstride)
    # drop the singleton channel dim so tf.where yields [n, bh, bw] triples
    mask = tf.squeeze(mask, axis=3)
    active_block_indices = tf.where(mask > tf.constant(tol, mask.dtype))
    active_block_indices = tf.cast(active_block_indices, tf.int32)
    bin_counts = tf.shape(active_block_indices)[0]
    Indices = namedtuple('Indices', ['active_block_indices', 'bin_counts'])
    return Indices(active_block_indices, bin_counts)
def gather(
        inputs,
        bin_counts,
        active_block_indices, *,
        bsize,
        boffset,
        bstride):
    """Gather the active blocks of `inputs` into one dense block tensor.

    Pads the inputs to the block grid, expands each active block index to
    its full pixel grid, and gathers those pixels.

    :param inputs: 4D `[N, H, W, C]` tensor to gather from.
    :param bin_counts: scalar number of active blocks (M).
    :param active_block_indices: `[M, 3]` block coordinates from `reduce_mask`.
    :param bsize: `[BSZH, BSZW]` block size.
    :param boffset: `[BOFFH, BOFFW]` block offset.
    :param bstride: `[BSTRH, BSTRW]` block stride.
    :returns: `[M, bsize[0], bsize[1], C]` tensor of gathered blocks.
    :rtype: tf.Tensor
    """
    logger.debug(f"inputs: {inputs.shape}")
    size = inputs.shape[1:3]
    bcount = _compute_bcount(size, bstride)
    inputs = _pad_inputs(inputs, bcount, bsize, boffset, bstride)
    logger.debug(f"padded inputs: {inputs.shape}")
    indices = _upsample_block_indices(
        active_block_indices,
        bsize,
        boffset,
        bstride)
    # ops stays empty unless the debug print is re-enabled
    ops = []
    # ops.append(tf.print(indices, summarize=-1))
    logger.debug(f"gather indices: {indices.shape}")
    with tf.control_dependencies(ops):
        blocks = tf.gather_nd(inputs, indices)  # todo: fix index error
    blocks = tf.reshape(
        blocks, [bin_counts, bsize[0], bsize[1], tf.shape(inputs)[3]])
    return blocks
def scatter(
        blocks,
        bin_counts,  # pylint: disable=unused-argument
        active_block_indices,
        outputs, *,
        bsize,
        boffset,
        bstride,
        add=False):
    """Scatter the blocks back onto outputs.

    Note that currently this only uses `outputs.shape` to scatter onto a
    tensor of zeros.

    In tf >= 1.14, the functions tf.tensor_scatter_nd_update and
    tf.tensor_scatter_nd_add would overcome this barrier.

    :param blocks: [M, bsize[0], bsize[1], C]
    :param bin_counts: scalar number of blocks (unused here).
    :param active_block_indices: [M, 3] block coordinates.
    :param outputs: [N, H, W, C] tensor whose shape is scattered onto.
    :param bsize: block size.
    :param boffset: block offset.
    :param bstride: block stride.
    :returns: tensor of `outputs` shape with blocks scattered in.
    :rtype: tf.Tensor
    """
    size = outputs.shape[1:3]
    bcount = _compute_bcount(size, bstride)
    outputs = _pad_inputs(outputs, bcount, bsize, boffset, bstride)
    indices = _upsample_block_indices(
        active_block_indices,
        bsize,
        boffset,
        bstride)  # [M, bsize[0], bsize[1], 3]
    if add:
        raise NotImplementedError
    else:
        # tf.case guards the zero-block case: tf.scatter_nd with an empty
        # indices tensor would be used otherwise, so pass outputs through.
        outputs = tf.case(
            [(tf.equal(tf.shape(blocks)[0], tf.constant(0, tf.int32)),
              (lambda: outputs))],
            default=lambda: tf.scatter_nd(indices, blocks, tf.shape(outputs)))
    # crop any padding back off to restore the original spatial size
    return outputs[:, :size[0], :size[1], :]
def scatter_var(
        blocks,
        bin_counts,  # pylint: disable=unused-argument
        active_block_indices,
        outputs, *,
        bsize,
        boffset,
        bstride,
        add=False):
    """Scatter blocks into a tf *variable* `outputs` (unimplemented).

    The intended implementation (kept below as a commented sketch) would
    use `tf.scatter_nd_add` / `tf.scatter_nd_update` on the upsampled block
    indices, but those variable ops have no gradient here, so this always
    raises.

    :raises NotImplementedError: always.
    """
    raise NotImplementedError("no gradient for sparse_lib.scatter_var")
    # Fix: the statements below were live (but unreachable) code after the
    # raise; keep the sketch as comments so linters don't flag dead code.
    # indices = _upsample_block_indices(
    #     active_block_indices,
    #     bsize,
    #     boffset,
    #     bstride)  # [M, bsize[0], bsize[1], 3]
    # if add:
    #     outputs = tf.scatter_nd_add(outputs, indices, blocks)
    # else:
    #     outputs = tf.scatter_nd_update(outputs, indices, blocks)
    # return outputs
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,642 | zhuokaizhao/artifice | refs/heads/master | /artifice/windows/__init__.py | from .annotator_window import *
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,643 | zhuokaizhao/artifice | refs/heads/master | /artifice/dat.py | """
Functions for reading and writing datasets in tfrecords, as needed by
artifice and test_utils.
Data class for feeding data into models, possibly with augmentation.
We expect labels to be of the form:
|-------|-------|--------------------------------------|
| x pos | y pos | object pose parameters |
|-------|-------|--------------------------------------|
"""
import os
from glob import glob
import numpy as np
from skimage.feature import peak_local_max
import tensorflow as tf
from artifice.log import logger
from artifice import utils
from artifice import img
from artifice import vis
def _bytes_feature(value):
    """Returns a bytes_list from a string / byte."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _float_feature(value):
    """Returns a float_list from a float / double."""
    float_list = tf.train.FloatList(value=[value])
    return tf.train.Feature(float_list=float_list)
def _int64_feature(value):
    """Returns an int64_list from a bool / enum / int / uint."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _serialize_feature(feature):
    """Wrap a feature dict in an Example proto and serialize it to bytes."""
    features = tf.train.Features(feature=feature)
    return tf.train.Example(features=features).SerializeToString()
"""
Parsing and serialization functions.
"""
def proto_from_image(image):
    """Serialize a single image to an Example proto string.

    The float32 bytes plus the three shape dims are stored so the image can
    be reconstructed by `image_from_proto`.
    """
    image = img.as_float(image)
    feature = {'image': _bytes_feature(image.tostring()),
               'image_dim0': _int64_feature(image.shape[0]),
               'image_dim1': _int64_feature(image.shape[1]),
               'image_dim2': _int64_feature(image.shape[2])}
    return _serialize_feature(feature)
def image_from_proto(proto):
    """Parse a proto written by `proto_from_image` back into an image tensor.

    Note: returns a 1-tuple `(image,)`, matching the tuple-of-tensors shape
    of the other `*_from_proto` parsers.
    """
    feature_description = {
        'image': tf.FixedLenFeature([], tf.string),
        'image_dim0': tf.FixedLenFeature([], tf.int64),
        'image_dim1': tf.FixedLenFeature([], tf.int64),
        'image_dim2': tf.FixedLenFeature([], tf.int64)}
    features = tf.parse_single_example(proto, feature_description)
    image = tf.decode_raw(features['image'], tf.float32)
    return (tf.reshape(image, [features['image_dim0'],
                               features['image_dim1'],
                               features['image_dim2']]),)
def proto_from_example(example):
    """Serialize an `(image, label)` pair to an Example proto string.

    Both arrays are stored as float32 bytes with their shape dims so
    `example_from_proto` can reconstruct them.
    """
    image, label = example
    image = img.as_float(image)
    label = label.astype(np.float32)
    feature = {'image': _bytes_feature(image.tostring()),
               'image_dim0': _int64_feature(image.shape[0]),
               'image_dim1': _int64_feature(image.shape[1]),
               'image_dim2': _int64_feature(image.shape[2]),
               'label': _bytes_feature(label.tostring()),
               'label_dim0': _int64_feature(label.shape[0]),
               'label_dim1': _int64_feature(label.shape[1])}
    return _serialize_feature(feature)
def example_from_proto(proto):
    """Parse a proto written by `proto_from_example` into `(image, label)`
    tensors, restoring the recorded shapes."""
    feature_description = {
        'image': tf.FixedLenFeature([], tf.string),
        'image_dim0': tf.FixedLenFeature([], tf.int64),
        'image_dim1': tf.FixedLenFeature([], tf.int64),
        'image_dim2': tf.FixedLenFeature([], tf.int64),
        'label': tf.FixedLenFeature([], tf.string),
        'label_dim0': tf.FixedLenFeature([], tf.int64),
        'label_dim1': tf.FixedLenFeature([], tf.int64)}
    features = tf.parse_single_example(proto, feature_description)
    image = tf.decode_raw(features['image'], tf.float32)
    image = tf.reshape(image, (features['image_dim0'],
                               features['image_dim1'],
                               features['image_dim2']))
    label = tf.decode_raw(features['label'], tf.float32)
    label = tf.reshape(label, [features['label_dim0'], features['label_dim1']])
    return image, label
def proto_from_annotated_example(example):
    """Serialize an `(image, label, annotation)` triple to an Example proto
    string; all three arrays are stored as float32 bytes with their shapes."""
    image, label, annotation = example
    image = img.as_float(image)
    label = label.astype(np.float32)
    annotation = img.as_float(annotation)
    feature = {'image': _bytes_feature(image.tostring()),
               'image_dim0': _int64_feature(image.shape[0]),
               'image_dim1': _int64_feature(image.shape[1]),
               'image_dim2': _int64_feature(image.shape[2]),
               'label': _bytes_feature(label.tostring()),
               'label_dim0': _int64_feature(label.shape[0]),
               'label_dim1': _int64_feature(label.shape[1]),
               'annotation': _bytes_feature(annotation.tostring()),
               'annotation_dim0': _int64_feature(annotation.shape[0]),
               'annotation_dim1': _int64_feature(annotation.shape[1]),
               'annotation_dim2': _int64_feature(annotation.shape[2])}
    return _serialize_feature(feature)
def annotated_example_from_proto(proto):
    """Parse a proto written by `proto_from_annotated_example` into
    `(image, label, annotation)` tensors, restoring the recorded shapes."""
    feature_description = {
        'image': tf.FixedLenFeature([], tf.string),
        'image_dim0': tf.FixedLenFeature([], tf.int64),
        'image_dim1': tf.FixedLenFeature([], tf.int64),
        'image_dim2': tf.FixedLenFeature([], tf.int64),
        'label': tf.FixedLenFeature([], tf.string),
        'label_dim0': tf.FixedLenFeature([], tf.int64),
        'label_dim1': tf.FixedLenFeature([], tf.int64),
        'annotation': tf.FixedLenFeature([], tf.string),
        'annotation_dim0': tf.FixedLenFeature([], tf.int64),
        'annotation_dim1': tf.FixedLenFeature([], tf.int64),
        'annotation_dim2': tf.FixedLenFeature([], tf.int64)}
    features = tf.parse_single_example(proto, feature_description)
    image = tf.decode_raw(features['image'], tf.float32)
    image = tf.reshape(image, (features['image_dim0'],
                               features['image_dim1'],
                               features['image_dim2']))
    label = tf.decode_raw(features['label'], tf.float32)
    label = tf.reshape(label, [features['label_dim0'], features['label_dim1']])
    annotation = tf.decode_raw(features['annotation'], tf.float32)
    annotation = tf.reshape(annotation, (features['annotation_dim0'],
                                         features['annotation_dim1'],
                                         features['annotation_dim2']))
    return image, label, annotation
"""
loading and saving tf datasets
"""
def load_dataset(record_name, parse, num_parallel_calls=None):
    """Load a tfrecord dataset.

    :param record_name: File name(s) to load.
    :param parse: function mapping one serialized proto to tensors.
    :param num_parallel_calls: passed through to `map`.
    :returns: parsed tf.data dataset.
    """
    raw = tf.data.TFRecordDataset(record_name)
    parsed = raw.map(parse, num_parallel_calls=num_parallel_calls)
    return parsed
def save_dataset(record_name, dataset, serialize=None,
                 num_parallel_calls=None):
    """Write a tf.data.Dataset to a file.

    :param record_name: output tfrecord path.
    :param dataset: dataset to write.
    :param serialize: function to serialize examples. If None, assumes
      dataset already serialized.
    :param num_parallel_calls: only used if serialize() is not None.
    :returns: None; the write happens as a side effect (run eagerly, or via
      a session in graph mode).
    """
    if serialize is not None:
        dataset = dataset.map(serialize, num_parallel_calls)
    writer = tf.data.experimental.TFRecordWriter(record_name)
    write_op = writer.write(dataset)
    # in graph mode the write op must be run explicitly
    if not tf.executing_eagerly():
        with tf.Session() as sess:
            sess.run(write_op)
def write_set(protos, record_path):
    """Write an iterable of serialized Example protos to one tfrecord file,
    logging progress every 100 examples."""
    logger.info(f"writing {record_path}...")
    with tf.python_io.TFRecordWriter(record_path) as writer:
        for i, proto in enumerate(protos):
            if i % 100 == 0:
                logger.info(f"writing example {i}")
            writer.write(proto)
"""
ArtificeData classes.
"""
class ArtificeData(object):
"""Abstract class for data wrappers in artifice, which are distinguished by the
type of examples they hold (unlabeled images, (image, label) pairs
(examples), etc.).
Subclasses should implement the process() and serialize() functions to
complete. Serialize is used for saving the dataset.
"""
# dataset modes:
TRAINING = "TRAINING" # `(image, proxy)` tensor tuple
PREDICTION = "PREDICTION" # single `image` tensor
EVALUATION = "EVALUATION" # `(image, label)` tensor tuple
ENUMERATED_PREDICTION = "ENUMERATED_PREDICTION"
def __init__(self, record_path, *, size, image_shape, input_tile_shape,
             output_tile_shapes, batch_size, num_parallel_calls=None,
             num_shuffle=10000, cache_dir='cache', **kwargs):
    """Initialize the data, loading it if necessary..

    kwargs is there only to allow extraneous keyword arguments. It is not
    used.

    :param record_path: path or paths containing tfrecord files. If a
      directory, then grabs all .tfrecord files in that directory *at
      runtime*.
    :param size: size of an epoch. If not a multiple of batch_size, the
      remainder examples are dropped.
    :param image_shape: 3-element `(H, W, C)` shape of the full images.
    :param input_tile_shape: spatial shape of tiles fed to the model.
    :param output_tile_shapes: list of output shapes, bottom to top; the
      last entry is used as the final output tile shape.
    :param batch_size: examples per batch.
    :param num_parallel_calls: parallelism for dataset map calls.
    :param num_shuffle: shuffle buffer size used in TRAINING mode.
    :param cache_dir: directory for tf.data on-disk caching.
    """
    # inherent
    self.record_paths = utils.listwrap(record_path)
    # truncate the epoch to a whole number of batches
    self.size = size - size % batch_size  # size of an epoch
    self.image_shape = image_shape
    assert len(self.image_shape) == 3
    self.input_tile_shape = input_tile_shape
    self.output_tile_shapes = output_tile_shapes
    self.batch_size = batch_size
    self.num_parallel_calls = num_parallel_calls
    self.num_shuffle = num_shuffle
    self.cache_dir = os.path.abspath(cache_dir)
    # derived
    self.output_tile_shape = output_tile_shapes[-1]  # topmost output shape
    self.num_tiles = self.compute_num_tiles(self.image_shape,
                                            self.output_tile_shape)
    self.prefetch_buffer_size = self.batch_size
    self.block_length = self.num_tiles
@property
def record_names(self):
record_names = []
for path in self.record_paths:
if os.path.isdir(path):
record_names += glob(os.path.join(path, "*.tfrecord"))
else:
record_names.append(path)
return record_names
@staticmethod
def serialize(entry):
raise NotImplementedError("subclass should implement")
def process(self, dataset, mode):
    """Process the dataset of serialized examples into tensors ready for
    input.

    The full data processing pipeline is:
    * deserialize example
    * augment (if applicable)
    * convert to proxy
    * tile
    * shuffle (if mode is TRAINING)
    * repeat
    * batch

    `process()` does the deserialize/augment/proxy/tile steps; shuffling,
    repeating, and batching happen in `postprocess()`. It MUST return an
    unbatched dataset suitable for `postprocess()` under the given mode.
    (todo: update this documentation for modes.)

    :param dataset: dataset of serialized example protos.
    :param mode: one of the mode constants on `ArtificeData`.
    :returns: processed, still-unbatched dataset.
    """
    raise NotImplementedError("subclasses should implement")
def postprocess(self, dataset, mode, cache=False):
    """Batch, shuffle, and repeat a processed dataset according to `mode`.

    :param dataset: output of `process()`.
    :param mode: one of the mode constants on `ArtificeData`.
    :param cache: if True, cache one epoch's worth of entries on disk at
      `self.cache_dir`.
    :returns: batched (and possibly enumerated) dataset ready for input.
    """
    if "ENUMERATED" in mode:
        dataset = dataset.apply(tf.data.experimental.enumerate_dataset())
    if cache:
        logger.info("caching this epoch...")
        dataset = dataset.repeat(-1).take(self.size).cache(self.cache_dir)
    dataset = dataset.batch(self.batch_size, drop_remainder=True)
    if mode == ArtificeData.TRAINING:
        # NOTE(review): shuffle is applied after batch, so whole batches
        # (not individual examples) are shuffled -- confirm this is intended.
        dataset = dataset.shuffle(self.num_shuffle)
    dataset = dataset.repeat(-1)
    if mode != ArtificeData.TRAINING:
        # non-training modes see exactly one epoch's worth of batches
        dataset = dataset.take(self.steps_per_epoch)
    dataset = dataset.prefetch(self.prefetch_buffer_size)
    return dataset
def get_input(self, mode, cache=False):
    """Build the full input pipeline for `mode` from the current record
    files: read raw protos, process, then batch/shuffle/repeat."""
    raw = tf.data.TFRecordDataset(self.record_names)
    processed = self.process(raw, mode)
    return self.postprocess(processed, mode, cache=cache)
def training_input(self, cache=False):
    """Input pipeline of shuffled `(image, proxy)` batches for training."""
    return self.get_input(ArtificeData.TRAINING, cache=cache)
def prediction_input(self):
    """Input pipeline of image-only batches for prediction."""
    mode = ArtificeData.PREDICTION
    return self.get_input(mode)
def evaluation_input(self):
    """Input pipeline of `(image, label)` batches for evaluation."""
    mode = ArtificeData.EVALUATION
    return self.get_input(mode)
def enumerated_prediction_input(self):
    """Input pipeline of `(index, image)` batches for prediction."""
    mode = ArtificeData.ENUMERATED_PREDICTION
    return self.get_input(mode)
@property
def dataset(self):
    """Parsed dataset of the raw records.

    Uses `self.parse`, which is not defined in this base class as visible
    here -- presumably supplied by subclasses; verify before relying on it.
    """
    return load_dataset(self.record_names, self.parse, self.num_parallel_calls)
@property
def steps_per_epoch(self):
return int(self.size // self.batch_size)
@staticmethod
def compute_num_tiles(image_shape, output_tile_shape):
return int(np.ceil(image_shape[0] / output_tile_shape[0])
* np.ceil(image_shape[1] / output_tile_shape[1]))
def __len__(self):
return self.size
def save(self, record_name):
    """Save the dataset to record_name."""
    save_dataset(record_name, self.dataset,
                 serialize=self.serialize,
                 num_parallel_calls=self.num_parallel_calls)
def get_entry(self, i):
    """Get the i'th entry of the original dataset, in numpy form.

    Eager mode only. Returns a single array when the entry is one tensor,
    otherwise a tuple of arrays.
    """
    if tf.executing_eagerly():
        entry = next(iter(self.dataset.skip(i).take(1)))
    else:
        raise NotImplementedError
    if issubclass(type(entry), tf.Tensor):
        return entry.numpy()
    return tuple(e.numpy() for e in entry)
"""
Generic functions for proxies/tiling
"""
def image_padding(self):
    """`tf.pad` paddings that grow an image for overlapped input tiling.

    diff* is the input/output tile size difference, split between the two
    sides; rem* extends the bottom/right so the tiling loop can always cut
    a full input tile.

    NOTE(review): rem is computed as `image - (image % output_tile)`, i.e.
    the largest tile multiple <= the image size, which pads far more than
    the usual `(-image) % output_tile` remainder -- confirm whether this
    over-padding is intentional.

    :returns: `[[top, bottom], [left, right], [0, 0]]` paddings.
    """
    diff0 = self.input_tile_shape[0] - self.output_tile_shape[0]
    diff1 = self.input_tile_shape[1] - self.output_tile_shape[1]
    rem0 = self.image_shape[0] - \
        (self.image_shape[0] % self.output_tile_shape[0])
    rem1 = self.image_shape[1] - \
        (self.image_shape[1] % self.output_tile_shape[1])
    pad_top = int(np.floor(diff0 / 2))
    pad_bottom = int(np.ceil(diff0 / 2)) + rem0
    pad_left = int(np.floor(diff1 / 2))
    pad_right = int(np.ceil(diff1 / 2)) + rem1
    return [[pad_top, pad_bottom], [pad_left, pad_right], [0, 0]]
def proxy_padding(self):
    """`tf.pad` paddings for proxy arrays: bottom/right only, no tile
    overlap.

    NOTE(review): rem uses the same `image - (image % output_tile)` formula
    as `image_padding` -- see the over-padding note there.

    :returns: `[[0, bottom], [0, right], [0, 0]]` paddings.
    """
    rem0 = self.image_shape[0] - \
        (self.image_shape[0] % self.output_tile_shape[0])
    rem1 = self.image_shape[1] - \
        (self.image_shape[1] % self.output_tile_shape[1])
    return [[0, rem0], [0, rem1], [0, 0]]
# todo: determine whether to use tf.image.extract_image_patches instead
def tile_image(self, image):
    """Cut a padded image into `num_tiles` input-sized tiles.

    Tiles step by `output_tile_shape` but extend `input_tile_shape`, so
    neighboring tiles overlap by the input/output size difference.

    :param image: 3D image tensor of `image_shape`.
    :returns: dataset of input tiles, row-major order.
    """
    image = tf.pad(image, self.image_padding(), 'CONSTANT')
    tiles = []
    for i in range(0, self.image_shape[0], self.output_tile_shape[0]):
        for j in range(0, self.image_shape[1], self.output_tile_shape[1]):
            tiles.append(image[i:i + self.input_tile_shape[0],
                               j:j + self.input_tile_shape[1]])
    return tf.data.Dataset.from_tensor_slices(tiles)
def tile_image_label(self, image, label):
    """Cut a padded image into tiles, translating labels into tile space.

    Each tile gets the full label set with positions shifted by the tile's
    top-left corner `(i, j)`; objects outside the tile keep (possibly
    negative) coordinates.

    :param image: 3D image tensor of `image_shape`.
    :param label: `(num_objects, label_dim)` tensor; columns 0:2 are
      positions.
    :returns: dataset of `(tile, tile_label)` pairs, row-major order.
    """
    image = tf.pad(image, self.image_padding(), 'CONSTANT')
    tiles = []
    labels = []
    for i in range(0, self.image_shape[0], self.output_tile_shape[0]):
        for j in range(0, self.image_shape[1], self.output_tile_shape[1]):
            tiles.append(image[i:i + self.input_tile_shape[0],
                               j:j + self.input_tile_shape[1]])
            # shift positions into this tile's coordinate frame
            tile_space_positions = label[:, :2] - tf.constant([[i, j]], tf.float32)
            labels.append(tf.concat((tile_space_positions, label[:, 2:]), axis=1))
    return tf.data.Dataset.from_tensor_slices((tiles, labels))
@property
def make_proxies_map_func(self):
    """Map over a (tile, label) dataset to convert it to
    (tile, [pose, proxy1,...]) form.

    Each level's proxy is an [H, W, 1] map of 1 / (d^2 + 1), where d is the
    distance from each pixel center to the nearest object, measured in that
    level's (downscaled) coordinate frame. The pose output concatenates the
    finest-level proxy with a per-pixel copy of the nearest object's pose.

    todo: remember, label could be empty.
    """
    def map_func(tile, label):
        proxy_set = []
        positions = tf.cast(label[:, :2], tf.float32)  # [num_objects, 2]
        for level, tile_shape in enumerate(self.output_tile_shapes):
            # Coarsest level first; each subsequent level doubles resolution.
            scale_factor = 2**(len(self.output_tile_shapes) - level - 1)
            # Offset aligning this level's field of view with the final
            # output tile so positions agree across levels.
            dx = (scale_factor * tile_shape[0] - self.output_tile_shape[0]) / 2
            dy = (scale_factor * tile_shape[1] - self.output_tile_shape[1]) / 2
            translation = tf.constant([[dx, dy]], dtype=tf.float32)
            level_positions = (positions + translation) / scale_factor
            # Pixel-center coordinates of every cell in this level.
            points = tf.constant(np.array(  # [H*W,2]
                [np.array([i + 0.5, j + 0.5]) for i in range(tile_shape[0])
                 for j in range(tile_shape[1])], dtype=np.float32), tf.float32)
            points = tf.expand_dims(points, axis=1)  # [H*W,1,2]
            level_positions = tf.expand_dims(level_positions, axis=0)
            object_distances = tf.norm(
                points - level_positions, axis=-1)  # [H*W,num_objects]
            # make distance proxy function: 1 / (d^2 + 1)
            distances = tf.reduce_min(object_distances, axis=-1)
            # NOTE(review): tf.reciprocal is the TF1 name (tf.math.reciprocal
            # in TF2) -- consistent with this file's TF1-era API usage.
            flat = tf.reciprocal(tf.square(distances) + tf.constant(1, tf.float32))
            proxy_set.append(tf.reshape(flat, [tile_shape[0], tile_shape[1], 1]))
        # make pose map, assumes object_distances at top of U (i.e. uses the
        # finest level's object_distances/tile_shape from the last iteration)
        pose = label[:, 2:]
        # For each pixel, the index of its nearest object.
        regions = tf.expand_dims(tf.argmin(object_distances, axis=-1), axis=-1)
        pose_field = tf.reshape(tf.gather_nd(pose, regions),  # [H,W,pose_dim]
                                [tile_shape[0], tile_shape[1], -1])
        pose = tf.concat((proxy_set[-1], pose_field), axis=-1)
        return tile, (pose,) + tuple(proxy_set)
    return map_func
"""
Output analysis functions.
"""
def untile(self, tiles):
    """Reassemble `num_tiles` output tiles into a single image.

    :param tiles: length-`num_tiles` list of 2D or 3D arrays, in the same
      row-major order produced by the tiling functions.
    :returns: reconstructed array with spatial shape `image_shape`.
    :raises RuntimeError: if `tiles` has the wrong length.
    :raises ValueError: if tiles are neither 2D nor 3D.
    """
    if len(tiles) != self.num_tiles:
        raise RuntimeError("Ensure tiles is same length as num_tiles.")
    height, width = self.image_shape[0], self.image_shape[1]
    if self.num_tiles == 1:
        # Single tile: just crop away any padding.
        return tiles[0][:height, :width]
    first = tiles[0]
    if first.ndim == 3:
        out_shape = (height, width, first.shape[2])
    elif first.ndim == 2:
        out_shape = (height, width)
    else:
        raise ValueError
    out = np.empty(out_shape, dtype=np.float32)
    tile_iter = iter(tiles)
    step0 = self.output_tile_shape[0]
    step1 = self.output_tile_shape[1]
    for row in range(0, height, step0):
        rows = min(step0, height - row)  # clip the last partial row of tiles
        for col in range(0, width, step1):
            cols = min(step1, width - col)
            out[row:row + rows, col:col + cols] = next(tile_iter)[:rows, :cols]
    return out
def untile_points(self, points):
    """Translate per-tile point coordinates into image-space coordinates.

    :param points: length-`num_tiles` list of 2D arrays of shape [?, >=2],
      one per tile, in row-major tile order. Columns 0-1 are tile-space
      coordinates; any further columns are passed through unchanged.
    :returns: a single array of all points in image space.
    :raises RuntimeError: if `points` has the wrong length.
    """
    if len(points) != self.num_tiles:
        raise RuntimeError("Ensure points is same length as num_tiles.")
    if self.num_tiles == 1:
        return points[0]
    tile_points = iter(points)
    collected = []
    for row in range(0, self.image_shape[0], self.output_tile_shape[0]):
        for col in range(0, self.image_shape[1], self.output_tile_shape[1]):
            tile = next(tile_points)
            # Shift only the coordinate columns; zero-pad the offset so any
            # extra columns are left untouched.
            offset = np.array([[row, col] + [0] * (tile.shape[1] - 2)],
                              dtype=np.float32)
            collected += list(tile + offset)
    return np.array(collected)
def analyze_outputs(self, outputs, multiscale=False):
    """Analyze the model outputs, return predictions like original labels.

    :param outputs: a list of lists, containing outputs from at least num_tiles
    examples, so that tiles can be reassembled into full images. Only uses the
    first num_tiles elements. This contains:
    [[pose 0, level_ouput_0 0, level_output_1 0, ...],
    [pose 1, level_ouput_0 1, level_output_1 1, ...],
    ...]
    :param multiscale: if True, seed the peak search with peaks tracked
    down through the coarser scale levels.
    :returns: prediction in the same shape as original labels
    :rtype: np.ndarray
    """
    if multiscale:
        # Track peaks through the levels of each tile, then map the
        # tile-space peaks into image space.
        peaks = self.untile_points([multiscale_detect_peaks(output[1:])
                                    for output in outputs[:self.num_tiles]])
    else:
        peaks = None
    # check peaks, possible conflicts at edges:
    # todo: limit checks to edges AND regions
    dist_image = self.untile([output[-1][:, :, 0] for output in
                              outputs[:self.num_tiles]])
    peaks = detect_peaks(dist_image, pois=peaks)
    pose_image = self.untile([output[0]
                              for output in outputs[:self.num_tiles]])
    # One row per detected peak: (i, j) position, then pose channels.
    # Channel 0 of the pose map is the proxy itself, hence the `1:` below.
    prediction = np.empty((peaks.shape[0], 1 + pose_image.shape[-1]),
                          dtype=np.float32)
    for i, peak in enumerate(peaks):
        prediction[i, :2] = peak
        prediction[i, 2:] = pose_image[int(peak[0]), int(peak[1]), 1:]
    return prediction
def accumulate(self, accumulator):
    """Runs the accumulators across the dataset.

    An accumulator function should take a `entry` and an `aggregate` object. On
    the first call, `aggregate` will be None. Afterward, each accumulator will
    be passed the output from its previous call as `aggregate`, as well as the
    next entry in the data as 'entry'. On the final call, `entry` will be None,
    allowing for post-processing.

    If the accumulator returns None for aggregate, the accumulation is
    terminated early.

    :param accumulator: an accumulator function OR a dictionary mapping names
    to accumulator functions
    :returns: aggregate from `accumulator` OR a dictionary of aggregates with
    the same keys as `accumulators`.
    :rtype: dict
    """
    if type(accumulator) == dict:
        accumulators = accumulator
    else:
        # Single function: wrap in a dict so both cases share one code path.
        accumulators = {0: accumulator}
    aggregates = dict.fromkeys(accumulators.keys())
    finished = dict([(k, False) for k in accumulators.keys()])
    if tf.executing_eagerly():
        for entry in self.dataset:
            if all(finished.values()):
                break
            for k, acc in accumulators.items():
                if finished[k]:
                    continue
                agg = acc(tuple(t.numpy() for t in entry), aggregates[k])
                if agg is None:
                    # Early termination: keep the last good aggregate for the
                    # final post-processing call below.
                    finished[k] = True
                else:
                    aggregates[k] = agg
    else:
        raise NotImplementedError
    logger.info("finished accumulation")
    # Final call with entry=None lets each accumulator post-process.
    for k, acc in accumulators.items():
        aggregates[k] = acc(None, aggregates[k])
    if type(accumulator) == dict:
        return aggregates
    else:
        return aggregates[0]
"""
Subclasses of ArtificeData.
"""
class UnlabeledData(ArtificeData):
    """Dataset of bare images (no labels); supports only prediction modes."""

    @staticmethod
    def serialize(entry):
        # Entry is just an image.
        return proto_from_image(entry)

    @staticmethod
    def parse(proto):
        return image_from_proto(proto)

    def process(self, dataset, mode):
        """Tile each parsed image for prediction; other modes are invalid."""
        def map_func(proto):
            image = self.parse(proto)[0]
            if mode in [ArtificeData.PREDICTION, ArtificeData.ENUMERATED_PREDICTION]:
                return self.tile_image(image)
            raise ValueError(f"{mode} mode invalid for UnlabeledData")
        return dataset.interleave(map_func, cycle_length=self.num_parallel_calls,
                                  block_length=self.block_length,
                                  num_parallel_calls=self.num_parallel_calls)
class LabeledData(ArtificeData):
    """Dataset of (image, label) examples."""

    @staticmethod
    def serialize(entry):
        return proto_from_example(entry)

    @staticmethod
    def parse(proto):
        return example_from_proto(proto)

    @staticmethod
    def label_accumulator(entry, labels):
        # Accumulator (see ArtificeData.accumulate): collects every label;
        # the final entry=None call stacks them into one array.
        if labels is None:
            labels = []
        if entry is None:
            return np.array(labels)
        labels.append(entry[1])
        return labels

    def get_labels(self):
        """Return all labels in the dataset as a single stacked array."""
        return self.accumulate(self.label_accumulator)

    def process(self, dataset, mode):
        """Parse and tile each example according to `mode`."""
        def map_func(proto):
            image, label = self.parse(proto)[:2]
            if mode in [ArtificeData.PREDICTION, ArtificeData.ENUMERATED_PREDICTION]:
                return self.tile_image(image)
            if mode == ArtificeData.EVALUATION:
                return self.tile_image_label(image, label)
            if mode == ArtificeData.TRAINING:
                # Training additionally converts labels to proxy targets.
                tiled_set = self.tile_image_label(image, label)
                return tiled_set.map(self.make_proxies_map_func)
            raise ValueError(f"{mode} mode invalid for LabeledData")
        return dataset.interleave(map_func, cycle_length=self.num_parallel_calls,
                                  block_length=self.block_length,
                                  num_parallel_calls=self.num_parallel_calls)
class AnnotatedData(LabeledData):
    """Class for annotated data, which can be augmented. Annotated data consists of
    an `(image, label, annotation)` tuple, which we call an "annotated example".

    AnnotatedData inherits from LabeledData, since get_labels is a perfectly
    legal operation on `(image, label, annotation)` tuples as written.

    This data can technically be used to regular prediction, evaluation, and
    training modes, but note that in this case, unless the number of examples is
    known precisely, then some examples may be repeated.

    Annotations are essentially instance segmentations with object ids given by
    that object's index in the label. Background pixels should be filled with
    -1. (This off-by-one from convention, where the background is given ID 0 and
    objects are > 0).
    """

    def __init__(self, *args, transformation=None, identity_prob=0.01, **kwargs):
        """Create an augmentable annotated dataset.

        :param transformation: a single or list of transformations that are applied
        during augmentation. If multiple, then each augmented example has a
        randomly selected transformation applied to it.
        :param identity_prob: probability that no augmentations are applied to an
        example.
        """
        self.transformation = transformation
        self.identity_prob = identity_prob
        super().__init__(*args, **kwargs)

    @staticmethod
    def serialize(entry):
        return proto_from_annotated_example(entry)

    @staticmethod
    def parse(proto):
        return annotated_example_from_proto(proto)

    def process(self, dataset, mode):
        """Parse, optionally augment, and tile each example for `mode`."""
        if self.transformation is not None:
            # Augmentation needs a background estimate to paint over moved
            # objects; compute it once, up front.
            background = self.get_background()
        def map_func(proto):
            image, label, annotation = self.parse(proto)[:3]
            if self.transformation is not None:
                image, label = self.augment(image, label, annotation, background)
            if mode == ArtificeData.PREDICTION:
                return self.tile_image(image)
            if mode == ArtificeData.EVALUATION:
                return self.tile_image_label(image, label)
            if mode == ArtificeData.TRAINING:
                tiled_set = self.tile_image_label(image, label)
                return tiled_set.map(self.make_proxies_map_func)
            raise ValueError(f"{mode} mode invalid for AnnotatedData")
        return dataset.interleave(map_func, cycle_length=self.num_parallel_calls,
                                  block_length=self.block_length,
                                  num_parallel_calls=self.num_parallel_calls)

    def augment(self, image, label, annotation, background):
        """Augment an example using self.transformation.

        A transformation takes in an image, a label, and an annotation and returns
        an `(image, label)` pair. If more than one transformation is listed, then a
        random one is selected on each call. If the pose dimensions of the label
        are affected by the transformation, then it should know how to deal with
        those as well.

        Of course, some transformations may require additional information. This
        could be encoded in the annotation, which could be a nested tensor if
        handled correctly.

        Artifice includes several builtin transformations, all of which are in the
        `tform` module. For now, only one of these may be selected, but the
        function in question could randomly apply different transformations within
        its body.

        :param image:
        :param label:
        :param annotation:
        :param background: background image used to in-paint moved objects.
        :returns: new `(image, label)` example
        """
        if self.transformation is None:
            return image, label
        def fn():
            # Wrap the (python) transformation for use inside the graph.
            return tf.py_function(self.transformation,
                                  inp=[image, label, annotation, background],
                                  Tout=[tf.float32, tf.float32])
        # Apply the transformation with probability 1 - identity_prob.
        return tf.case(
            {tf.greater(tf.random.uniform([], 0, 1, tf.float32),
                        tf.constant(self.identity_prob, tf.float32)): fn},
            default=lambda: [image, label], exclusive=True)

    @staticmethod
    def mean_background_accumulator(entry, agg):
        """Take a running average of the pixels where no objects exist.

        Fills pixels with no values at the end of the accumulation with gaussian
        noise.
        """
        if agg is None:
            # First call: everything uninitialized, marked with -1.
            assert entry is not None
            image = entry[0]
            background = -np.ones_like(image, dtype=np.float32)
            ns = np.zeros_like(background, dtype=np.int64)  # per-pixel counts
        else:
            background, ns = agg
        if entry is None:
            # Final call: fill pixels never observed as background.
            return img.fill_negatives(background)
        image = entry[0]
        annotation = entry[2]
        # Update the elements with a running average
        bg_indices = np.atleast_3d(np.less(annotation[:, :, 0], 0))
        indices = np.logical_and(background >= 0, bg_indices)
        ns[indices] += 1
        background[indices] = (background[indices]
                               + (image[indices] - background[indices])
                               / ns[indices])
        # initialize the new background elements
        indices = np.logical_and(background < 0, bg_indices)
        background[indices] = image[indices]
        ns[indices] += 1
        return background, ns

    @staticmethod
    def greedy_background_accumulator(entry, background):
        """Grabs the first non-object value for each pixel in the dataset.

        Terminates accumulation when finished by returning None.
        """
        if background is None:
            assert entry is not None
            image = entry[0]
            background = -np.ones_like(image, dtype=np.float32)
        if entry is None:
            # Final call: fill any still-unseen pixels.
            return img.fill_negatives(background)
        image = entry[0]
        annotation = entry[2]
        unfilled = background < 0
        if unfilled.sum() == 0:
            # Every pixel filled: returning None stops the accumulation early.
            return None
        bg_indices = annotation[:, :, 0:1] < 0
        indices = np.logical_and(unfilled, bg_indices)
        background[indices] = image[indices]
        return background

    def get_background(self):
        """Estimate the object-free background image of the dataset."""
        return self.accumulate(self.greedy_background_accumulator)
"""
Independent data analysis functions.
"""
def make_regions(points, shape, radius=3):
    """Make a boolean footprint around each point, for `labels` kw.

    For efficiency's sake, each region is a simple square with half-width
    `radius`, clipped at the image borders.

    :param points: array of (i, j) points to make footprints around.
    :param shape: shape of the output mask.
    :param radius: half-width of each footprint.
    :returns: boolean mask of `shape`, True inside every footprint.
    """
    # `np.bool` (an alias of the builtin) was removed in NumPy 1.24;
    # the builtin `bool` is the documented replacement.
    regions = np.zeros(shape, dtype=bool)
    for point in points:
        top = max(int(np.floor(point[0] - radius)), 0)
        bottom = min(int(np.ceil(point[0] + radius + 1)), regions.shape[0])
        left = max(int(np.floor(point[1] - radius)), 0)
        right = min(int(np.ceil(point[1] + radius + 1)), regions.shape[1])
        regions[top: bottom, left: right] = True
    return regions
def detect_peaks(image, threshold_abs=0.1, min_distance=1, pois=None):
    """Analyze the predicted distance proxy for detections.

    TODO: make more sophisticated, and also fix footprinting behavior

    :param image: image, or usually predicted distance proxy
    :param threshold_abs: minimum intensity for a peak.
    :param min_distance: minimum spacing between peaks (only used when
      `pois` is not given).
    :param pois: points of interest to search around
    :returns: detected peaks as a [num_peaks, 2] array of (i, j) indices
    :rtype:
    """
    assert image.ndim == 2
    if pois is not None:
        if pois.shape[0] == 0:
            # No candidate regions at all: nothing to detect.
            return np.empty((0, 2), np.float32)
        # Restrict the search to rectangles around the points of interest.
        regions = make_regions(pois, image.shape)
        # NOTE(review): `indices=True` was deprecated and later removed in
        # scikit-image (peak_local_max now always returns coordinates) --
        # confirm the pinned skimage version supports it.
        peaks = peak_local_max(image, threshold_abs=threshold_abs, indices=True,
                               labels=regions, exclude_border=False)
    else:
        peaks = peak_local_max(image, threshold_abs=threshold_abs,
                               min_distance=min_distance, indices=True,
                               exclude_border=False)
    return peaks
def multiscale_detect_peaks(images):
    """Use the images at lower scales to track peaks more efficiently.

    :param images: proxy maps ordered coarsest to finest; assumes each
      level roughly doubles the resolution of the previous one (matching
      make_proxies_map_func) -- TODO confirm for other configurations.
    :returns: peaks detected in the finest image.
    """
    peaks = detect_peaks(images[0][:, :, 0])
    for i in range(1, len(images)):
        # Offset between the upscaled previous level and this level's field
        # of view (the levels are centered on the same region).
        translation = (2 * np.array(images[i - 1].shape[:2])
                       - np.array(images[i].shape[:2])) / 2
        peaks = 2 * peaks - translation  # transform peaks to proper coordinates
        peaks = detect_peaks(images[i][:, :, 0], pois=peaks)
    return peaks
def evaluate_prediction(label, prediction, distance_threshold=10):
    """Evaluate a prediction against a label, object by object.

    Greedily matches each labeled object to its nearest remaining
    detection. The error array has the same ordering as objects in the
    label; rows left at -1 mean that object was not detected within
    `distance_threshold`.

    :param label: [num_objects, 2 + pose_dim] ground-truth array.
    :param prediction: [num_detections, 2 + pose_dim] prediction array.
    :param distance_threshold: maximum matching distance.
    :returns: (error, num_failed), where error is a
      [num_objects, 1 + pose_dim] array (position error, then absolute
      pose errors) and num_failed counts undetected objects.
    """
    error = -np.ones((label.shape[0], label.shape[1] - 1))
    if prediction.shape[0] == 0:
        # Nothing was detected at all.
        return error, label.shape[0]
    remaining = prediction.copy()
    num_failed = 0
    for obj in range(label.shape[0]):
        distances = np.linalg.norm(remaining[:, :2] - label[obj:obj + 1, :2],
                                   axis=1)
        match = np.argmin(distances)
        if distances[match] >= distance_threshold:
            num_failed += 1
            continue
        error[obj, 0] = np.linalg.norm(remaining[match, :2] - label[obj, :2])
        error[obj, 1:] = np.abs(remaining[match, 2:] - label[obj, 2:])
        # Consume this detection so it cannot match a second object.
        remaining[match, :2] = np.inf
    return error, num_failed
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,644 | zhuokaizhao/artifice | refs/heads/master | /scripts/waltzing_spheres.py | """Create a dataset of two spheres that walk regularly along the image plane,
separated, with some step size between each one. Useful for a test-set.
Each "time step" is just a shift over. "steps_per_frame" is useful for skipping
by a few pixels at a time.
"""
import vapory
import numpy as np
import matplotlib.pyplot as plt
from test_utils import experiment
import logging
logger = logging.getLogger('experiment')
# Main parameters
debug = False  # if True, render and show a single frame instead of running
# dataset parameters
root = "data/harper_waltzing_spheres/" # root dir for fname
fps = 30 # frame rate of the video
frame_step = 1/float(fps) # time per frame (DERIVED)
separation = 4 # separation between cell-centered samples
output_formats = {'png', 'mp4'} # output formats
image_shape = (196, 196) # image shape
num_classes = 3 # including background
# grid of sample positions covering the image (DERIVED)
num_rows = image_shape[0] // separation
num_cols = image_shape[1] // separation
N = num_rows * num_cols  # total steps: one per grid cell
# Configure initial parameters. 1 povray unit = 1 cm
# ball 1 in povray units
r1 = 5 # radius (cm)
# ball 2
r2 = 15
# the large sphere starts halfway around the cycle from the small one
big_sphere_offset = N // 2
#################### CONFIGURABLE OPTIONS ABOVE ####################
"""
Given the global index, which is stepping over the spots separated by
steps_per_frame pixels, calculate the index at which it would be in the image
and convert that to world-space.
"""
def compute_position(n, offset=0):
    """Return the x,y world-space position at step (n + offset) % N.

    :param n: the global step
    :param offset: offset for the sphere's starting position.
    :returns: world-space position of sphere.
    """
    global exp
    idx = (n + offset) % N
    # Convert the flat index into a grid cell, then to pixel-center coords.
    row, col = divmod(idx, num_cols)
    i = separation * row + separation / 2. + 0.5
    j = separation * col + separation / 2. + 0.5
    return list(exp.unproject_to_image_plane([i, j]))[:2]
def argsf1(n):
    """Position and radius of the small sphere at step `n`."""
    center = compute_position(n)
    # logger.info(f"x1: {center}")
    return [center[0], center[1], 0], r1
def argsf2(n):
    """Position and radius of the large sphere at step `n` (half a cycle
    ahead of the small one)."""
    center = compute_position(n, offset=big_sphere_offset)
    # logger.info(f"x2: {center}")
    return [center[0], center[1], 0], r2
def main():
    """Build the scene and run the experiment (or render one debug frame)."""
    # helpers
    color = lambda col : vapory.Texture(vapory.Pigment('color', col))
    texture = lambda text : vapory.Texture(text)
    # Begin setup
    s1 = experiment.ExperimentSphere(argsf1, texture('White_Wood'),
                                     semantic_label=1)
    s2 = experiment.ExperimentSphere(argsf2, texture('White_Wood'),
                                     semantic_label=2)
    # experiment
    global exp
    exp = experiment.Experiment(image_shape=image_shape,
                                num_classes=num_classes,
                                N=N, data_root=root,
                                output_format=output_formats,
                                fps=fps, mode='L')
    # Two light sources placed well outside the frame.
    exp.add_object(vapory.LightSource([0, 5*image_shape[0], 0],
                                      'color', [1,1,1]))
    exp.add_object(vapory.LightSource([5*image_shape[0], 0, 0],
                                      'color', [1,1,1]))
    # Background
    exp.add_object(vapory.Plane(
        [0,0,1], 10*max(r1, r2), vapory.Texture(
            vapory.Pigment(vapory.ImageMap('png', '"scripts/images/harper.png"')),
            'scale', '300', 'translate', [image_shape[0] // 2, 2*image_shape[1] // 3, 0])))
    exp.add_object(s1)
    exp.add_object(s2)
    if debug:
        # Render just the first frame and display it.
        (image, _) , _ = exp.render_scene(0)
        plt.imshow(image[:,:,0], cmap='gray')
        plt.show()
    else:
        exp.run()


if __name__ == "__main__":
    main()
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,645 | zhuokaizhao/artifice | refs/heads/master | /artifice/sharedobjects/shared.py | from os.path import exists
import pickle
from filelock import FileLock
class SharedDict(dict):
    """Maintain a dict. Doesn't allow reads or writes unless the dict is
    acquired. If path already exists, does not clear it unless clear() method is
    called.

    The dict is persisted by pickling it to `path`; cross-process mutual
    exclusion uses a file lock at `path + ".lockfile"`.
    """

    def __init__(self, path):
        # Path of the pickled dict on disk.
        self.path = path
        if not exists(path):
            # First user persists an empty dict so later _load()s succeed.
            # NOTE(review): this runs before the lock exists, so two
            # processes starting simultaneously could race here.
            self._save()
        self.lock = FileLock(self.path + ".lockfile")

    def _check_acquired(self):
        # Guard for item access; see acquire()/release().
        if not self.lock.is_locked:
            raise RuntimeError("SharedDict: call acquire() before accessing dict.")

    def __getitem__(self, key):
        self._check_acquired()
        return super().__getitem__(key)

    def __setitem__(self, key, val):
        self._check_acquired()
        return super().__setitem__(key, val)

    # NOTE(review): other mutators (update, pop, del, clear) are not
    # guarded; _load() itself relies on unguarded clear()/update().

    def _save(self):
        # Persist a plain-dict copy (avoids pickling the lock attribute).
        with open(self.path, 'wb') as f:
            pickle.dump(super().copy(), f)

    def _load(self):
        # Replace in-memory contents with the on-disk state.
        self.clear()
        with open(self.path, 'rb') as f:
            self.update(pickle.load(f))

    def acquire(self, *args, **kwargs):
        """Acquire this dictionary in order to modify it."""
        self.lock.acquire(*args, **kwargs)
        self._load()

    def release(self, *args, **kwargs):
        """Release this dictionary, allowing other processes to acquire it."""
        self._save()
        self.lock.release(*args, **kwargs)
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,646 | zhuokaizhao/artifice | refs/heads/master | /artifice/log/logger.py | import logging
# Package-wide logger: DEBUG by default, writing "LEVEL:artifice:msg" lines
# to stderr. Use set_verbosity() to adjust the level at runtime.
logger = logging.getLogger('artifice')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(levelname)s:artifice:%(message)s'))
logger.addHandler(handler)


def set_verbosity(verbose):
    """Map a numeric verbosity flag onto the artifice logger's level.

    0 -> WARNING, 1 -> INFO, anything else -> DEBUG.
    """
    levels = {0: logging.WARNING, 1: logging.INFO}
    logger.setLevel(levels.get(verbose, logging.DEBUG))
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,647 | zhuokaizhao/artifice | refs/heads/master | /artifice/prio.py | """Module for artifice's selector.
"""
from time import sleep, time
import numpy as np
import tensorflow as tf
from artifice.log import logger
from artifice import ann
class Prioritizer:
    """Assigns annotation priorities to dataset examples and pushes them to
    the shared annotation queue."""

    def __init__(self, data_set, *, info_path):
        """
        :param data_set: dataset providing `enumerated_prediction_input()`.
        :param info_path: path for the shared AnnotationInfo store; existing
          priorities are cleared, limbo entries are kept.
        """
        self.data_set = data_set
        self.info = ann.AnnotationInfo(info_path, clear_priorities=True,
                                       clear_limbo=False)

    def run(self, seconds=-1):
        """Run for at most `seconds`. If `seconds` is negative, run forever."""
        start_time = time()
        if tf.executing_eagerly():
            # Cycle through the dataset indefinitely; the timeout (if any)
            # is checked after each batch.
            dataset = self.data_set.enumerated_prediction_input().repeat(-1)
            for indices, images in dataset:
                logger.info(f"evaluating priorities for {indices}...")
                priorities = list(self.prioritize(images))
                self.info.push(list(zip(list(indices), priorities)))
                logger.info(f"pushed {indices} with priorities {priorities}.")
                if time() - start_time > seconds > 0:
                    logger.info(f"finished after {seconds}s.")
                    break
        else:
            raise NotImplementedError("patient execution")

    def prioritize(self, images):
        """Assign a priority to a batch of images for labeling.

        The meaning of "priority" is flexible. In the context of active learning,
        it could be the measure of uncertainty. Higher priority examples will be
        annotated first.

        :param images: batch of images
        :returns: batch of priorities, in numpy or list form
        """
        raise NotImplementedError("subclasses should implement")
class SimulatedPrioritizer(Prioritizer):
    """Base for prioritizers used in simulated runs; adds a configurable
    per-batch delay mimicking a slow selection step."""

    def __init__(self, *args, selection_delay=1, **kwargs):
        # Seconds that subclasses sleep for each prioritized batch.
        self.selection_delay = selection_delay
        super().__init__(*args, **kwargs)
class RandomPrioritizer(SimulatedPrioritizer):
    """Assigns uniform-random priorities after the simulated delay."""

    def prioritize(self, images):
        sleep(self.selection_delay)
        batch_size = images.shape[0]
        return np.random.uniform(0, 1, size=batch_size)
class ModelUncertaintyPrioritizer(Prioritizer):
    """Uses the `uncertainty_on_batch` method of ArtificeModel to prioritize each
    image."""

    def __init__(self, *args, model, load_freq=200, **kwargs):
        """
        :param model: model to use
        :param load_freq: how frequently (in batches) to reload the weights
        """
        self.model = model
        self.load_freq = load_freq
        self.count = 0  # number of batches prioritized so far
        super().__init__(*args, **kwargs)

    def prioritize(self, images):
        # Periodically refresh weights so priorities track the live model.
        refresh_due = self.count % self.load_freq == 0
        if refresh_due:
            self.model.load_weights()
        self.count += 1
        return self.model.uncertainty_on_batch(images)
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,648 | zhuokaizhao/artifice | refs/heads/master | /artifice/vis.py | """Utils for visualizing artifice output. (Mostly for testing).
TODO: make `show` functions wrappers around `plot` functions, which can be
called without clearing the matplotlib buffer.
"""
from glob import glob
from os.path import join, basename
from stringcase import pascalcase, titlecase
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from artifice.log import logger
from artifice import utils
# Module-wide flag: whether figures should be displayed interactively.
_show = True


def set_show(val):
    """Globally enable or disable interactive figure display.

    When disabled, switch matplotlib to the non-interactive Agg backend so
    figures can still be rendered and saved headlessly.
    """
    global _show
    _show = val
    if val:
        return
    mpl.use('Agg')
    plt.ioff()
def show(fname=None, save=False):
    """Show the figure currently in matplotlib or save it, if not _show.

    If no fname provided, and _show is False, then closes the figure. If save
    is True, figure is saved regardless of _show.

    :param fname: name of the file to save to.
    :param save: whether to save the file.
    """
    if _show and not save:
        logger.info("showing figure...")
        plt.show()
    elif fname is None:
        # Headless with nowhere to save: drop the figure with a warning.
        logger.warning("Cannot save figure. Did you forget to set --show?")
        plt.close()
    else:
        plt.savefig(fname)
        logger.info(f"saved figure to {fname}.")
def plot_image(*images, columns=10, ticks=True, scale=20, colorbar=False,
               cmap='gray', cram=False, **kwargs):
    """Plot a set of images on a grid of subplots.

    :param images: images to plot; a None entry leaves its cell blank.
    :param columns: maximum number of grid columns.
    :param ticks: if False, hide the axes entirely.
    :param scale: figure width in inches (height follows the grid ratio).
    :param colorbar: attach a horizontal colorbar to each image.
    :param cmap: color map, a single value or one per image.
    :param cram: remove all spacing between subplots.
    :returns: (fig, axes) as produced by `plt.subplots`.
    """
    cmaps = utils.listify(cmap, len(images))
    columns = min(columns, len(images))
    # Round up: the old `len(images) // columns` dropped the final partial
    # row, so e.g. 15 images in 10 columns raised IndexError below.
    rows = max(1, utils.divup(len(images), columns))
    fig, axes = plt.subplots(rows, columns, squeeze=False,
                             figsize=(scale, scale * rows / columns))
    for i, image in enumerate(images):
        ax = axes[i // columns, i % columns]
        if image is None:
            # Placeholder cell: leave it blank.
            ax.axis('off')
            continue
        im = ax.imshow(np.squeeze(image), cmap=cmaps[i], **kwargs)
        if colorbar:
            fig.colorbar(im, ax=ax, orientation='horizontal',
                         fraction=0.046, pad=0.04)
    for ax in axes.ravel():
        if not ticks:
            ax.axis('off')
        ax.set_aspect('equal')
    if cram:
        fig.subplots_adjust(wspace=0, hspace=0)
    return fig, axes
def plot_hists_from_dir(model_root, columns=10, scale=20):
    """Plot all the histories in `model_root`.

    For each named property, creates a subplot with the curves of every
    model history that recorded that property (loss or metric).

    :param model_root: directory containing `*history.json` files.
    :param columns: maximum number of grid columns.
    :param scale: figure width in inches (height follows the grid ratio).
    :returns: (fig, axes), or (None, None) if no histories were found.
    """
    history_fnames = glob(join(model_root, '*history.json'))
    logger.debug(f"history_fnames: {history_fnames}")
    if not history_fnames:
        logger.warning(f"no history saved at {model_root}")
        return None, None
    hist_data = {}  # {property_name -> {model_name -> [values]}}
    for fname in history_fnames:
        hist = utils.json_load(fname)
        model_name = pascalcase(basename(fname).replace('_history.json', ''))
        for prop_name, values in hist.items():
            if not isinstance(values, list):
                # Skip scalar metadata entries.
                continue
            if hist_data.get(prop_name) is None:
                hist_data[prop_name] = {}
            hist_data[prop_name][model_name] = values
    columns = min(columns, len(hist_data))
    # Round up: the old `len // columns` dropped the final partial row and
    # made the axes indexing below raise IndexError.
    rows = max(1, utils.divup(len(hist_data), columns))
    fig, axes = plt.subplots(rows, columns, squeeze=False,
                             figsize=(scale, scale * rows / columns))
    for i, (prop_name, prop_data) in enumerate(hist_data.items()):
        ax = axes[i // columns, i % columns]
        for model_name, values in prop_data.items():
            ax.plot(values, '-', label=model_name)
        ax.set_title(titlecase(prop_name))
        ax.set_xlabel('Epoch')
        ax.set_ylabel('Loss')
    fig.suptitle("Training")
    plt.legend()
    return fig, axes
if __name__ == '__main__':
    # Standalone smoke test: confirms the module imports and runs.
    print('Hello world!')
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,649 | zhuokaizhao/artifice | refs/heads/master | /artifice/main.py | """The main script for running artifice.
"""
import os
from os.path import join, exists
import sys
from time import time, asctime
from glob import glob
import argparse
import numpy as np
import tensorflow as tf
from artifice import log
from artifice.log import logger
from artifice import dat
from artifice import mod
from artifice import docs
from artifice import vis
from artifice import conversions
from artifice import utils
from artifice import ann
from artifice import prio
from artifice import tform
def _system_checks():
if sys.version_info < (3, 6):
logger.error("Required: Python3.6 or higher.")
exit()
if tuple(map(int, tf.__version__.split('.'))) < (1, 13, 1):
logger.error("Required: TensorFlow 1.13.1 or higher.")
exit()
def _set_eager(eager):
if eager:
tf.enable_eager_execution()
def _ensure_dirs_exist(dirs):
for path in dirs:
if not exists(path):
logger.info(f"creating '{path}'")
os.makedirs(path)
class Artifice:
    """Bag of state or Main() class that directs a single `artifice` run.

    All arguments are required keyword arguments, for the sake of
    correctness. Defaults are specified in the command-line defaults for this
    script. Run `python artifice.py -h` for more info.

    # todo: copy docs here
    """

    def __init__(self, *,  # pylint: disable=too-many-statements
                 commands,
                 data_root,
                 model_root,
                 overwrite,
                 deep,
                 figs_dir,
                 convert_mode,
                 transformation,
                 identity_prob,
                 priority_mode,
                 labeled,
                 annotation_mode,
                 record_size,
                 annotation_delay,
                 image_shape,
                 data_size,
                 test_size,
                 batch_size,
                 num_objects,
                 pose_dim,
                 num_shuffle,
                 base_shape,
                 level_filters,
                 level_depth,
                 model,
                 multiscale,
                 use_var,
                 dropout,
                 initial_epoch,
                 epochs,
                 learning_rate,
                 tol,
                 num_parallel_calls,
                 verbose,
                 keras_verbose,
                 eager,
                 show,
                 cache,
                 seconds):
        # main
        self.commands = commands
        # file settings
        self.data_root = data_root
        self.model_root = model_root
        self.overwrite = overwrite
        self.deep = deep
        self.figs_dir = figs_dir
        # data settings
        self.convert_modes = utils.listwrap(convert_mode)
        self.transformation = transformation
        self.identity_prob = identity_prob
        self.priority_mode = priority_mode
        self.labeled = labeled
        # annotation settings
        self.annotation_mode = annotation_mode
        self.record_size = record_size
        self.annotation_delay = annotation_delay
        # data sizes/settings
        self.image_shape = image_shape
        self.data_size = data_size
        self.test_size = test_size
        self.batch_size = batch_size
        self.num_objects = num_objects
        self.pose_dim = pose_dim
        self.num_shuffle = num_shuffle
        # model architecture
        self.base_shape = utils.listify(base_shape, 2)
        self.level_filters = level_filters
        self.level_depth = level_depth
        # model type settings
        self.model = model
        self.multiscale = multiscale
        self.use_var = use_var
        # hyperparameters
        self.dropout = dropout
        self.initial_epoch = initial_epoch
        self.epochs = epochs
        self.learning_rate = learning_rate
        self.tol = tol
        # runtime settings
        self.num_parallel_calls = num_parallel_calls
        self.verbose = verbose
        self.keras_verbose = keras_verbose
        self.eager = eager
        self.show = show
        self.cache = cache
        self.seconds = seconds
        # globals
        log.set_verbosity(self.verbose)
        _set_eager(self.eager)
        vis.set_show(self.show)
        self._set_num_parallel_calls()
        # derived sizes/shapes
        self.num_levels = len(self.level_filters)
        self.input_tile_shape = mod.UNet.compute_input_tile_shape_(
            self.base_shape, self.num_levels, self.level_depth)
        self.output_tile_shapes = mod.UNet.compute_output_tile_shapes_(
            self.base_shape, self.num_levels, self.level_depth)
        self.output_tile_shape = self.output_tile_shapes[-1]
        self.num_tiles = dat.ArtificeData.compute_num_tiles(
            self.image_shape, self.output_tile_shape)
        # derived model subdirs/paths
        self.cache_dir = join(self.model_root, 'cache')
        self.annotation_info_path = join(self.model_root, 'annotation_info.pkl')
        self.annotated_dir = join(self.model_root, 'annotated')  # model-dependent
        # ensure directories exist
        _ensure_dirs_exist([self.data_root, self.model_root, self.figs_dir,
                            self.cache_dir, self.annotated_dir])

    #
    # Helper functions.
    #

    def __str__(self):
        """Human-readable run summary, logged at startup."""
        return f"""{asctime()}:
data_root: {self.data_root}
model_root: {self.model_root}
figs_dir: {self.figs_dir}
----
labeled: {self.labeled}
num_parallel_calls: {self.num_parallel_calls}
----
input tile shape: {self.input_tile_shape}
output shapes: {self.output_tile_shapes}
todo: other attributes"""

    def __call__(self):
        """Execute each requested command in order.

        Commands are public method names on this class; leading-underscore
        names and non-callables are rejected so the CLI cannot reach private
        helpers or plain attributes.
        """
        for command in self.commands:
            if (command[0] == '_'
                    or not hasattr(self, command)
                    or not callable(getattr(self, command))):
                raise RuntimeError(f"bad command: {command}")
            getattr(self, command)()

    def _set_num_parallel_calls(self):
        # a non-positive value means "use every available core"
        if self.num_parallel_calls <= 0:
            self.num_parallel_calls = os.cpu_count()

    #
    # Loading datasets and models.
    #

    @property
    def _data_kwargs(self):
        """Keyword arguments shared by every dataset constructor."""
        return {'image_shape': self.image_shape,
                'input_tile_shape': self.input_tile_shape,
                'output_tile_shapes': self.output_tile_shapes,
                'batch_size': self.batch_size,
                'num_parallel_calls': self.num_parallel_calls,
                'num_shuffle': min(self.data_size, self.num_shuffle),
                'cache_dir': self.cache_dir}

    def _load_labeled(self):
        """Load the labeled training set."""
        return dat.LabeledData(join(self.data_root, 'labeled_set.tfrecord'),
                               size=self.data_size, **self._data_kwargs)

    def _load_unlabeled(self):
        """Load the unlabeled set used for prioritization/prediction."""
        return dat.UnlabeledData(join(self.data_root, 'unlabeled_set.tfrecord'),
                                 size=self.data_size, **self._data_kwargs)

    def _load_annotated(self):
        """Load annotated examples, optionally with a data-augmenting transformation."""
        transformation = (None if self.transformation is None else
                          tform.transformations[self.transformation])
        return dat.AnnotatedData(self.annotated_dir, transformation=transformation,
                                 size=self.data_size, **self._data_kwargs)

    def _load_test(self):
        """Load the held-out test set."""
        return dat.LabeledData(join(self.data_root, 'test_set.tfrecord'),
                               size=self.test_size, **self._data_kwargs)

    def _load_train(self):
        """Training data: the labeled set if --labeled, else annotated examples."""
        if self.labeled:
            return self._load_labeled()
        return self._load_annotated()

    @property
    def _model_kwargs(self):
        """Keyword arguments for the model constructor selected by --model."""
        kwargs = {'base_shape': self.base_shape,
                  'level_filters': self.level_filters,
                  'num_channels': self.image_shape[2],
                  'pose_dim': self.pose_dim,
                  'level_depth': self.level_depth,
                  'dropout': self.dropout,
                  'model_dir': self.model_root,
                  'learning_rate': self.learning_rate,
                  'overwrite': self.overwrite}
        # NOTE(review): 'and' binds tighter than 'or', so use_var only gates
        # the plain 'sparse' case; 'better-sparse'/'auto-sparse' always get a
        # batch_size. Confirm whether that grouping is intended.
        if (self.use_var and self.model == 'sparse'
                or self.model == 'better-sparse'
                or self.model == 'auto-sparse'):
            kwargs['batch_size'] = self.batch_size
        if 'sparse' in self.model:
            kwargs['tol'] = self.tol
        return kwargs

    def _load_model(self):
        """Instantiate the model type named by --model."""
        kwargs = self._model_kwargs
        if self.model == 'unet':
            return mod.UNet(**kwargs)
        elif self.model == 'sparse':
            return mod.SparseUNet(**kwargs)
        elif self.model == 'better-sparse':
            return mod.BetterSparseUNet(**kwargs)
        elif self.model == 'auto-sparse':
            return mod.AutoSparseUNet(**kwargs)
        else:
            raise RuntimeError(f"No '{self.model}' model type.")

    #
    # Methods implementing Commands.
    #

    def convert(self):
        """Convert raw data under data_root using each requested conversion mode."""
        for mode in self.convert_modes:
            conversions.conversions[mode](
                self.data_root, test_size=self.test_size)

    def uncache(self):
        """Clean up the cache files."""
        for path in glob(join(self.model_root, "cache*")):
            utils.rm(path)

    def clean(self):
        """Clean up the files associated with this model for a future run.

        Removes the annotation info file and lock, annotation records, and
        cache.

        If --deep is specified, also removes the saved model and checkpoints. Does
        not remove data.
        """
        if self.deep:
            utils.rm(self.model_root)
        else:
            utils.rm(self.annotation_info_path)
            utils.rm(self.annotation_info_path + '.lockfile')
            utils.rm(self.annotated_dir)

    def prioritize(self):
        """Prioritize images for annotation using an active learning or other strategy.

        Note that this does not perform any labeling. It simply maintains a queue
        of the indices for examples most recently desired for labeling. This queue
        contains no repeats. The queue is saved to disk, and a file lock should be
        created whenever it is altered, ensuring that the annotator does not make a
        bad access.
        """
        kwargs = {'info_path': self.annotation_info_path}
        if self.priority_mode == 'random':
            prioritizer = prio.RandomPrioritizer(self._load_unlabeled(), **kwargs)
        elif self.priority_mode == 'uncertainty':
            prioritizer = prio.ModelUncertaintyPrioritizer(
                self._load_unlabeled(), model=self._load_model(), **kwargs)
        else:
            raise NotImplementedError(f"{self.priority_mode} priority mode")
        prioritizer.run(seconds=self.seconds)

    def annotate(self):
        """Continually annotate new examples.

        Continually access the selection queue, pop off the most recent, and
        annotate it, either with a human annotator, or automatically using prepared
        labels (and a sleep timer). Needs to keep a list of examples already
        annotated, since they will be strewn throughout different files, as well as
        respect the file lock on the queue.
        """
        kwargs = {'info_path': self.annotation_info_path,
                  'annotated_dir': self.annotated_dir,
                  'record_size': self.record_size}
        if self.annotation_mode == 'disks':
            annotator = ann.DiskAnnotator(self._load_labeled(),
                                          annotation_delay=self.annotation_delay,
                                          **kwargs)
        else:
            raise NotImplementedError(f"{self.annotation_mode} annotation mode")
        annotator.run(seconds=self.seconds)

    def train(self):
        """Train the model using augmented examples from the annotated set."""
        train_set = self._load_train()
        model = self._load_model()
        model.train(train_set, epochs=self.epochs,
                    initial_epoch=self.initial_epoch,
                    verbose=self.keras_verbose,
                    seconds=self.seconds,
                    cache=self.cache)

    def predict(self):
        """Run prediction on the unlabeled set and save the results to .npy."""
        unlabeled_set = self._load_unlabeled()
        model = self._load_model()
        start_time = time()
        predictions = list(model.predict(
            unlabeled_set, multiscale=self.multiscale))
        logger.info(f"ran prediction in {time() - start_time}s.")
        logger.debug(f"prediction:\n{predictions}")
        fname = join(self.model_root, 'predictions.npy')
        np.save(fname, predictions)
        logger.info(f"saved {len(predictions)} predictions to {fname}.")

    def evaluate(self):
        """Evaluate detection/pose error on the held-out test set."""
        test_set = self._load_test()
        model = self._load_model()
        errors, num_failed = model.evaluate(test_set)
        # fix: `if not errors:` raises ValueError when errors is a numpy array
        # with more than one element; test emptiness explicitly instead
        if errors is None or len(errors) == 0:
            logger.warning(f"found ZERO objects, num_failed: {num_failed}")
            return
        avg_error = errors.mean(axis=0)
        total_num_objects = self.test_size * self.num_objects
        num_detected = total_num_objects - num_failed
        logger.info(f"objects detected: {num_detected} / "
                    f"{total_num_objects}")
        logger.info(f"avg (euclidean) detection error: {avg_error[0]}")
        logger.info(f"avg (absolute) pose error: {avg_error[1:]}")
        logger.info(
            "note: some objects may be occluded, making detection impossible")
        logger.info(f"avg: {errors.mean(axis=0)}")
        logger.info(f"std: {errors.std(axis=0)}")
        logger.info(f"min: {errors.min(axis=0)}")
        logger.info(f"max: {errors.max(axis=0)}")

    def vis_train(self):
        """Visualize the training set. (Mostly for debugging.)"""
        train_set = self._load_train()
        for batch in train_set.training_input():
            # assumes every batch holds exactly batch_size examples -- a short
            # final batch would raise IndexError; TODO confirm upstream padding
            for b in range(self.batch_size):
                image = batch[0][b]
                targets = batch[1]
                pose = targets[0][b]
                vis.plot_image(image, None, None,
                               pose[:, :, 1], pose[:, :, 2], None,
                               targets[1][b], targets[2][b], targets[3][b],
                               columns=3)
                vis.show()

    def vis_history(self):
        """Plot the training histories saved under model_root."""
        # todo: fix this for multiple models in the same model_dir
        vis.plot_hists_from_dir(self.model_root)
        vis.show(join(self.figs_dir, 'history.pdf'))

    def vis_predict(self):
        """Run prediction on the test set and visualize the output."""
        # fix: was `self.model_dir`, an attribute never set on this class
        # (only `model_root` exists), which raised AttributeError at runtime
        history_files = glob(join(self.model_root, '*history.json'))
        hists = dict((fname, utils.json_load(fname)) for fname in history_files)
        vis.plot_hist(hists)
        test_set = self._load_test()
        model = self._load_model()
        for image, dist_image, prediction in model.predict_visualization(test_set):
            fig, axes = vis.plot_image(image, image, dist_image, colorbar=True)
            axes[0, 1].plot(prediction[:, 1], prediction[:, 0], 'rx')
            axes[0, 2].plot(prediction[:, 1], prediction[:, 0], 'rx')
            logger.info(f"prediction:\n{prediction}")
            vis.show(join(self.figs_dir, 'prediction.pdf'))
            if not self.show:
                break

    def vis_outputs(self):
        """Run prediction on the test set and visualize the output."""
        test_set = self._load_test()
        model = self._load_model()
        for image, outputs in model.predict_outputs(test_set):
            pose_image = outputs[0]
            level_outputs = outputs[1:]
            columns = max(model.num_levels, model.pose_dim + 1)
            fig, axes = vis.plot_image(
                image, *[None for _ in range(columns - 1)],
                *[pose_image[:, :, i] for i in range(model.pose_dim + 1)],
                *[None for _ in range(columns - model.pose_dim - 1)],
                *level_outputs,
                *[None for _ in range(columns - model.num_levels)],
                colorbar=True, columns=columns)
            vis.show(join(self.figs_dir, 'model_outputs.pdf'))
            if not self.show:
                break
def main():
    """Parse CLI arguments, construct an Artifice run, and execute it.

    Note: options declared with `nargs=1` parse to one-element lists, which
    is why their values are indexed with `[0]` when handed to Artifice below.
    """
    _system_checks()
    parser = argparse.ArgumentParser(description=docs.description)
    parser.add_argument('commands', nargs='+', help=docs.commands)
    # file settings
    parser.add_argument('--data-root', '--input', '-i', nargs=1,
                        default=['data/default'],
                        help=docs.data_root)
    parser.add_argument('--model-root', '--model-dir', '-m', nargs=1,
                        default=['models/tmp'],
                        help=docs.model_root)
    parser.add_argument('--overwrite', '-f', action='store_true',
                        help=docs.overwrite)
    parser.add_argument('--deep', action='store_true',
                        help=docs.deep)
    parser.add_argument('--figs-dir', '--figures', nargs=1,
                        default=['figs'],
                        help=docs.figs_dir)
    # data settings
    parser.add_argument('--convert-mode', nargs='+', default=[0, 4], type=int,
                        help=docs.convert_mode)
    parser.add_argument('--transformation', '--augment', '-a', nargs='?',
                        default=None, const=0, type=int,
                        help=docs.transformation)
    parser.add_argument('--identity-prob', nargs=1, default=[0.01], type=float,
                        help=docs.identity_prob)
    parser.add_argument('--priority-mode', '--priority', nargs=1,
                        default=['random'], help=docs.priority_mode)
    parser.add_argument('--labeled', action='store_true', help=docs.labeled)
    # annotation settings
    parser.add_argument('--annotation-mode', '--annotate', nargs=1,
                        default=['disks'], help=docs.annotation_mode)
    parser.add_argument('--record-size', nargs=1, default=[10], type=int,
                        help=docs.record_size)
    parser.add_argument('--annotation-delay', nargs=1, default=[60], type=float,
                        help=docs.annotation_delay)
    # sizes relating to data
    parser.add_argument('--image-shape', '--shape', '-s', nargs=3, type=int,
                        default=[500, 500, 1], help=docs.image_shape)
    parser.add_argument('--data-size', '-N', nargs=1, default=[10000], type=int,
                        help=docs.data_size)
    parser.add_argument('--test-size', '-T', nargs=1, default=[1000], type=int,
                        help=docs.test_size)
    parser.add_argument('--batch-size', '-b', nargs=1, default=[16], type=int,
                        help=docs.batch_size)
    parser.add_argument('--num-objects', '-n', nargs=1, default=[40], type=int,
                        help=docs.num_objects)
    parser.add_argument('--pose-dim', '-p', nargs=1, default=[2], type=int,
                        help=docs.pose_dim)
    parser.add_argument('--num-shuffle', nargs=1, default=[1000], type=int,
                        help=docs.num_shuffle)
    # model architecture
    parser.add_argument('--base-shape', nargs='+', default=[28], type=int,
                        help=docs.base_shape)
    parser.add_argument('--level-filters', nargs='+', default=[128, 64, 32],
                        type=int, help=docs.level_filters)
    parser.add_argument('--level-depth', nargs='+', default=[2], type=int,
                        help=docs.level_depth)
    # sparse eval and other optimization settings
    parser.add_argument('--model', '-M', nargs='?', default='unet',
                        help=docs.model)
    parser.add_argument('--multiscale', action='store_true',
                        help=docs.multiscale)
    parser.add_argument('--use-var', action='store_true', help=docs.use_var)
    # model hyperparameters
    parser.add_argument('--dropout', nargs=1, default=[0.5], type=float,
                        help=docs.dropout)
    parser.add_argument('--initial-epoch', nargs=1, default=[0], type=int,
                        help=docs.initial_epoch)  # todo: get from ckpt
    parser.add_argument('--epochs', '-e', nargs=1, default=[1], type=int,
                        help=docs.epochs)
    parser.add_argument('--learning-rate', '-l', nargs=1, default=[0.1],
                        type=float, help=docs.learning_rate)
    parser.add_argument('--tol', nargs=1, default=[0.1], type=float,
                        help=docs.tol)
    # runtime settings
    parser.add_argument('--num-parallel-calls', '--cores', nargs=1, default=[-1],
                        type=int, help=docs.num_parallel_calls)
    parser.add_argument('--verbose', '-v', nargs='?', const=1, default=2,
                        type=int, help=docs.verbose)
    parser.add_argument('--keras-verbose', nargs='?', const=2, default=1,
                        type=int, help=docs.keras_verbose)
    parser.add_argument('--patient', action='store_true', help=docs.patient)
    parser.add_argument('--show', action='store_true', help=docs.show)
    parser.add_argument('--cache', action='store_true', help=docs.cache)
    parser.add_argument('--seconds', '--time', '--reload', '-t', '-r', nargs='?',
                        default=0, const=-1, type=int, help=docs.seconds)
    args = parser.parse_args()
    art = Artifice(commands=args.commands,
                   convert_mode=args.convert_mode,
                   transformation=args.transformation,
                   identity_prob=args.identity_prob[0],
                   priority_mode=args.priority_mode[0],
                   labeled=args.labeled,
                   annotation_mode=args.annotation_mode[0],
                   record_size=args.record_size[0],
                   annotation_delay=args.annotation_delay[0],
                   data_root=args.data_root[0],
                   model_root=args.model_root[0],
                   overwrite=args.overwrite,
                   deep=args.deep,
                   figs_dir=args.figs_dir[0],
                   image_shape=args.image_shape,
                   data_size=args.data_size[0],
                   test_size=args.test_size[0],
                   batch_size=args.batch_size[0],
                   num_objects=args.num_objects[0],
                   pose_dim=args.pose_dim[0],
                   num_shuffle=args.num_shuffle[0],
                   base_shape=args.base_shape,
                   level_filters=args.level_filters,
                   level_depth=args.level_depth[0],
                   model=args.model,
                   multiscale=args.multiscale,
                   use_var=args.use_var,
                   dropout=args.dropout[0],
                   initial_epoch=args.initial_epoch[0],
                   epochs=args.epochs[0],
                   learning_rate=args.learning_rate[0],
                   tol=args.tol[0],
                   num_parallel_calls=args.num_parallel_calls[0],
                   verbose=args.verbose,
                   keras_verbose=args.keras_verbose,
                   eager=(not args.patient),  # --patient opts out of eager mode
                   show=args.show,
                   cache=args.cache,
                   seconds=args.seconds)
    logger.info(art)
    art()
# script entry point
if __name__ == "__main__":
    main()
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,650 | zhuokaizhao/artifice | refs/heads/master | /artifice/sparse/__init__.py | from .sparse import reduce_mask, gather, scatter # noqa
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,651 | zhuokaizhao/artifice | refs/heads/master | /artifice/ann.py | """Module for artifice's annotator, probably labelimg.
"""
import os
from time import sleep, strftime, time
import itertools
from operator import itemgetter
import numpy as np
from sortedcontainers import SortedList
from skimage.draw import circle
from artifice.log import logger
from artifice.sharedobjects import SharedDict
from artifice import dat
from artifice import utils
class AnnotationInfo(SharedDict):
    """Maintain a sorted list or heap of (index, priority) pairs, as well as a
    list of annotated indices.

    'priorities' and 'sorted_priorities' are (idx, priority) pairs guaranteed to
    not have an annotation yet.

    'limbo' is a set of indices which have been selected for annotation but don't
    yet have an annotation. These cannot be added to priorities, but they cannot
    belong to 'annotated' yet, in case the annotator is killed before it saves
    their annotation. A new prioritizer should not clear limbo, but a new
    annotator should.

    'annotated' is a set of annotated indices. These are guaranteed to have
    annotations.

    Under this scheme, there may exist annotations for indices not yet added to
    'annotated' (in case the annotator is killed just after it saves the
    annotation but before it can move the indices out of limbo), but there will
    never be an index in 'annotated' that does not have an annotation.
    """

    def __init__(self, path, *, clear_priorities, clear_limbo):
        """Create a new annotation info dict.

        :param path: path to save this dict.
        :param clear_priorities: start with a fresh set of priorities. Typically,
          will be True for the prioritizer, must be False for the Annotator.
        :param clear_limbo: clear the limbo index set. Annotator should call with
          clear_limbo=True. The Prioritizer should always set clear_limbo=False.
        """
        super().__init__(path)
        # hold the shared lock while (re)initializing keys so a concurrent
        # prioritizer/annotator process never observes a half-built dict
        self.acquire()
        if self.get('annotated') is None:
            self['annotated'] = set()
        if (clear_limbo or self.get('limbo') is None):
            self['limbo'] = set()
        if (clear_priorities or self.get('priorities') is None
                or self.get('sorted_priorities') is None):
            self['priorities'] = dict()
            # sorted ascending by priority, so pop() yields the highest priority
            self['sorted_priorities'] = SortedList(key=itemgetter(1))
        self.release()

    def push(self, item):
        """Update sorted priorities with (idx, priority) item (or items).

        If item already present, updates it. No-op if idx already in 'annotated' or
        'limbo'.
        """
        items = utils.listwrap(item)
        self.acquire()
        for idx, priority in items:
            if idx in self['annotated'] or idx in self['limbo']:
                continue
            # drop any stale entry first so the sorted list holds no duplicates
            old_priority = self['priorities'].get(idx)
            if old_priority is not None:
                self['sorted_priorities'].remove((idx, old_priority))
            self['priorities'][idx] = priority
            self['sorted_priorities'].add((idx, priority))
        self.release()

    def pop(self):
        """Pop an example idx off the stack and add it to the 'limbo' set.

        :returns: the popped idx, or None if stack is empty
        """
        self.acquire()
        if self['priorities']:
            # pop() removes the last (highest-priority) element of the SortedList
            idx, _ = self['sorted_priorities'].pop()
            del self['priorities'][idx]
            self['limbo'].add(idx)
        else:
            idx = None
        self.release()
        return idx

    def finalize(self, idx):
        """Discard idx or idxs from 'limbo' and add to 'annotated'.

        Note that these need not be in limbo (could have multiple annotators, or
        multiple prioritizers). Caller is responsible for making sure that all of
        these indices have actually been annotated.
        """
        idxs = utils.listwrap(idx)
        self.acquire()
        for idx in idxs:
            self['limbo'].discard(idx)
            self['annotated'].add(idx)
        self.release()
class Annotator:
    """The annotator takes the examples off the annotation stack and annotates
    them, if they have not already been annotated.

    Subclasses should implement the annotate method, which takes an index or list
    of indices and returns the (image, label, annotation) annotated example in
    numpy form.
    """

    def __init__(self, data_set, *, info_path, annotated_dir,
                 record_size=10, sleep_duration=15):
        """Annotator abstract class.

        :param data_set: ArtificeData object to annotate.
        :param info_path: path of the shared AnnotationInfo file.
        :param annotated_dir: directory to store annotation tfrecords in
        :param record_size: number of examples to save in each
          tfrecord. Must be small enough for `record_size` annotated examples to fit
          in memory.
        :param sleep_duration: seconds to sleep if no examples on the annotation
          stack, before checking again.
        """
        self.data_set = data_set
        # the annotator owns 'limbo': clear indices a killed annotator left behind
        self.info = AnnotationInfo(info_path, clear_priorities=False,
                                   clear_limbo=True)
        self.annotated_dir = annotated_dir
        self.record_size = record_size
        self.sleep_duration = sleep_duration

    def _generate_record_name(self):
        """Return a unique, timestamped tfrecord path in `annotated_dir`."""
        # fix: minutes are %M -- the original '%m' repeated the month, making
        # name collisions within the same hour far more likely
        return os.path.join(self.annotated_dir, strftime(
            f"%Y%m%d%H%M%S_size-{self.record_size}.tfrecord"))

    def run(self, seconds=-1):
        """Run for at most `seconds`. If `seconds` is negative, run forever."""
        start_time = time()
        for i in itertools.count():
            examples = []
            idxs = []
            for _ in range(self.record_size):
                idx = self.info.pop()
                while idx is None:
                    # fix: the f-prefix was missing, so the sleep duration was
                    # never interpolated into the log message
                    logger.info(f"waiting {self.sleep_duration}s for more selections...")
                    sleep(self.sleep_duration)
                    idx = self.info.pop()
                entry = self.data_set.get_entry(idx)
                logger.info(f"annotating example {idx}...")
                examples.append(self.annotate(entry))
                idxs.append(idx)
            record_name = self._generate_record_name()
            dat.write_set(map(dat.proto_from_annotated_example, examples),
                          record_name)
            # only mark indices annotated after the record is safely on disk
            self.info.finalize(idxs)
            logger.info(f"saved {i}'th annotated set to {record_name}.")
            if time() - start_time > seconds > 0:
                logger.info(f"finished after {seconds}s.")
                break

    def annotate(self, entry):
        """Abstract method for annotating an example.

        :param entry: numpy-form entry in the dataset
        :returns: `(image, label, annotation)` tuple of numpy arrays
        """
        raise NotImplementedError("subclasses should implement")
class SimulatedAnnotator(Annotator):
    """Simulate a human annotator.

    Expects self.data_set to be a subclass of artifice LabeledData. Abstract
    method `annotation_from_label()` used to convert a label to an annotation.
    Loads all the labels in the dataset, which must fit in memory.
    """

    def __init__(self, *args, annotation_delay=60, **kwargs):
        """Simulate a human annotator.

        :param annotation_delay: seconds the simulated annotator spends on each
          example.
        """
        self.annotation_delay = annotation_delay
        super().__init__(*args, **kwargs)
        # idiomatic form of issubclass(type(...), ...); still an assert so
        # behaviour under `python -O` is unchanged
        assert isinstance(self.data_set, dat.LabeledData)
class DiskAnnotator(SimulatedAnnotator):
    """Simulated annotator that labels a small disk around each object."""

    def annotate(self, entry):
        """Mark a radius-8 disk of on-object pixels around each labeled position.

        :param entry: numpy-form dataset entry; entry[0] is the image,
          entry[1] the per-object label array whose first two columns are the
          object's (row, col) position
        :returns: (image, label, annotation) where annotation is (H, W, 1)
          float32, -1 for background and the object index i for object i
        """
        sleep(self.annotation_delay)  # simulate the human's annotation time
        image, label = entry[:2]
        annotation = -1 * np.ones((image.shape[0], image.shape[1], 1), np.float32)
        for i in range(label.shape[0]):
            # NOTE(review): skimage.draw.circle was removed in scikit-image 0.19
            # in favor of skimage.draw.disk -- confirm the pinned version.
            rr, cc = circle(label[i, 0], label[i, 1], 8, shape=image.shape[:2])
            # vectorized replacement for the per-pixel python loop: keep only
            # disk pixels lying on the object (0.1 is an arbitrary threshold)
            keep = (image[rr, cc] >= 0.1).reshape(-1)
            annotation[rr[keep], cc[keep]] = i
        return image, label, annotation
class HumanAnnotator(Annotator):
    """Annotator backed by a real human; the labeling UI is not implemented."""

    def annotate(self, entry):
        # stub: returns None, so running this annotator would record empty
        # examples -- placeholder for a future human-in-the-loop interface
        pass
| {"/artifice/sharedobjects/__init__.py": ["/artifice/sharedobjects/shared.py"], "/artifice/tform.py": ["/artifice/__init__.py"], "/artifice/mod.py": ["/artifice/__init__.py"], "/artifice/lay.py": ["/artifice/__init__.py"], "/artifice/sparse/sparse.py": ["/artifice/__init__.py", "/artifice/sparse/__init__.py"], "/scripts/arcing_sphere.py": ["/artifice/utils.py"], "/scripts/two_spheres.py": ["/artifice/utils.py"], "/test_utils/experiment.py": ["/artifice/utils.py", "/artifice/__init__.py"], "/artifice/conversions.py": ["/artifice/__init__.py"], "/test_utils/springs.py": ["/artifice/__init__.py"], "/test_utils/annotate.py": ["/artifice/utils.py"], "/artifice/sparse/sparse_lib.py": ["/artifice/__init__.py"], "/artifice/windows/__init__.py": ["/artifice/windows/annotator_window.py"], "/artifice/dat.py": ["/artifice/__init__.py"], "/artifice/prio.py": ["/artifice/__init__.py"], "/artifice/vis.py": ["/artifice/__init__.py"], "/artifice/main.py": ["/artifice/__init__.py"], "/artifice/sparse/__init__.py": ["/artifice/sparse/sparse.py"], "/artifice/ann.py": ["/artifice/sharedobjects/__init__.py", "/artifice/__init__.py"]} |
59,665 | Mesratya/Projet_Domino | refs/heads/master | /test.py | """
Quelques notes...
"""""
# Images
pixmap = QtGui.QPixmap("images/4/orange.png")
"""Méthodo pour communiquer avec l'IHM' :
Utiliser des signaux """
"1/ créer le signal sous la class ThreadGame:"
signalGameOk = QtCore.pyqtSignal()
"2/ le connecter à la méthode voulu dans la class UI"
self.thread.signalGameOk.connect(self.gameOk)
"3/ créeer la méthode si necessaire"
"4/ emettre le signal depuis le game en fesant referance au thread"
self.thread.signalGameOk.emit()
"générer une boite de dialogue qui récupère un texte"
text, ok = QInputDialog.getText(self, 'Input Dialog',
'Enter your name:')
"générer une message box toute simple --> Comment améliorer le rendu graphique ?"
QtWidgets.QMessageBox.about(self, "orientation", "Choisissez une orientation") | {"/IHM.py": ["/interface.py", "/Main.py"], "/Main.py": ["/Dominos.py", "/Plateau.py"], "/Tests_unitaires.py": ["/Main.py"]} |
59,666 | Mesratya/Projet_Domino | refs/heads/master | /Plateau.py | import numpy as np
"""
[Module secondaire]
Ce module est destiné à la définition de la classe plateau
"""
class Plateau(list):
"""La classe plateau modélise un plateau de jeu. Elle hérite de list. Ainsi la liste plateau est la chaîne de dominos posés.
La gestion de l'espace de jeu et des contraintes topologique se fait à l'aide du tableau numpy grid contenant des caractères(cf. attribut grid dans l'init)
"""
def __init__(self,game):
"""
On crée la grille de jeu. La grille de jeu possède les dimensions maximales atteignables par un joueur qui alignerait volontairement tout les dominos
Ainsi, aucun gestion des effets de bords n'est nécessaire.Initialement On remplit la grille de "x" signifiant que la case n'est pas jouable.
:param game: référence au jeu en cours
"""
self.Nb_ligne = game.Nb_ligne
self.Nb_colonne = game.Nb_colonne
self.grid = np.array([["x"]*(self.Nb_colonne+2)]*(self.Nb_ligne+2)) # cette grille permet de lier les valeurs des demi-domino à leurs position réel sur le plateau, elle ne sert pas à l'IHM mais à la recherche de "contrainte topologique locale"
for i in range(1,self.Nb_ligne+1): # on choisit une dimension jouables qui puisse être affiché dans la console python
for j in range(1,self.Nb_colonne+1):
self.grid[i,j] = " " # une case vide " " est une case jouable sur laquel un demi-domino peut se poser, tout autre caractère est un obstacle
self.extr_a = None # valeurs des extrémitées de la chaîne de dominos
self.extr_b = None
self.pos_extr_a = None # position des extremitées de la chaine sur le plateau
self.pos_extr_b = None
self.orientation_extr_a = None # orientation des extrémitées
self.orientation_extr_b = None
self.game = game
self.game.thread.signal_init_grid.emit()
def position_demi_domino(self,pos_extr,extr_orientation,domino_orientation):
"""
Fonctionnement:
Calcul de la position des deux demi domino en fonction de la position et de l'orientation
de l'extremité et de l'orientation du domino à poser.
Détails:
On peut poser un domino dans 3 orientation possibles et le domino à l'extrémité de
la chaine possède aussi une orientation. Il est necessaire de distingué proprement les cas. Un schéma expiquant la manière dont les indices
ont été calculés sera disponible dans le rapport
:param pos_extr: position de l'extrémité de chaine considéré
:param extr_orientation: orientation de l'extrémité de chaine considéré
:param domino_orientation: orientation du domino choisit
:return: position des deux demi dominos
"""
(i,j) = pos_extr
#print("(i,j) = ({0},{1})".format(i,j))
(i,j) = (int(i),int(j))
#print("(int(i),int(j)) = ({0},{1})".format(i,j))
#print(extr_orientation,domino_orientation)
if extr_orientation == "W":
if domino_orientation == "W" or domino_orientation == "E" :
return((i,j-1),(i,j-2))
elif domino_orientation == "N":
return ((i,j-1),(i-1,j-1))
elif domino_orientation == "S":
return ((i,j-1),(i+1,j-1))
if extr_orientation == "E":
if domino_orientation == "E" or domino_orientation == "W" :
return((i,j+1),(i,j+2))
elif domino_orientation == "N":
return ((i,j+1),(i-1,j+1))
elif domino_orientation == "S":
return ((i,j+1),(i+1,j+1))
if extr_orientation == "N": #à faire
if domino_orientation == "N" or domino_orientation == "S":
return ((i-1, j), (i-2, j))
elif domino_orientation == "W":
return ((i -1 , j), (i-1, j-1))
elif domino_orientation == "E":
return ((i - 1, j), (i-1, j+1))
if extr_orientation == "S": # à faire
if domino_orientation == "S" or domino_orientation == "N":
return ((i+1, j), (i+2, j))
elif domino_orientation == "W":
return ((i+1, j), (i + 1, j - 1))
elif domino_orientation == "E":
return ((i+1, j), (i + 1, j + 1))
    def poser(self,domino,extr = None,orientation = None,couleur = "orange"):
        """Add `domino` to the board at the chosen extremity and orientation.

        Updates the value grid, the chain (self is a list of dominoes) and
        the extremity bookkeeping (value, position, orientation) of the end
        the domino is glued to, then notifies the GUI thread.

        :param domino: domino to place
        :param extr: chain extremity chosen for the placement, "a" or "b"
                     (optional, e.g. unused for the very first placement)
        :param orientation: chosen orientation "N"/"S"/"E"/"W" (optional too)
        :param couleur: display colour forwarded to the GUI
        :return: None
        """
        if self.game.premiere_pose : # This section handles the very first domino placement
            nb_domino = self.game.nb_domino  # NOTE(review): assigned but never used here
            domino.posa = (int(self.Nb_ligne//2),int(self.Nb_colonne//2)) # the first domino is laid horizontally at the centre of the board
            domino.posb = (int(self.Nb_ligne//2),int(self.Nb_colonne//2)+1)
            self.pos_extr_a = domino.posa
            self.pos_extr_b = domino.posb
            self.grid[domino.posa] = int(domino.vala) # store the half-domino value at its grid position
            self.grid[domino.posb] = int(domino.valb) # idem
            self.extr_a = domino.vala
            self.extr_b = domino.valb
            self.orientation_extr_a = "W"
            self.orientation_extr_b = "E"
            self.append(domino)
        # At this point the board extremity is fixed; glue the domino correctly,
        # flipping it if necessary and honouring the requested orientation.
        if extr == "a" :
            if domino.vala == self.extr_a :
                domino = domino.inverser()
            if orientation == self.game.opposite_orientation(self.orientation_extr_a) : # if the player asks for the impossible orientation, convert it
                orientation = self.orientation_extr_a # to its opposite (e.g. W-E is impossible, so use W-W)
            # half-domino b is the one glued here, so it comes first in the tuple
            domino.posb , domino.posa = self.position_demi_domino(self.pos_extr_a,self.orientation_extr_a,orientation)
            self.grid[domino.posa] = int(domino.vala) # store the half-domino value at its grid position
            self.grid[domino.posb] = int(domino.valb) # idem
            self.insert(0,domino)
            self.extr_a = domino.vala
            self.pos_extr_a = domino.posa
            self.orientation_extr_a = orientation
        elif extr == "b" :
            if domino.valb == self.extr_b :
                domino = domino.inverser()
            if orientation == self.game.opposite_orientation(self.orientation_extr_b) : # if the player asks for the impossible orientation, convert it
                orientation = self.orientation_extr_b # to its opposite (e.g. W-E is impossible, so use W-W)
            # half-domino a is the one glued here, so it comes first in the tuple
            domino.posa , domino.posb = self.position_demi_domino(self.pos_extr_b, self.orientation_extr_b,orientation)
            self.grid[domino.posa] = int(domino.vala) # store the half-domino value at its grid position
            self.grid[domino.posb] = int(domino.valb) # idem
            self.append(domino)
            self.extr_b = domino.valb
            self.pos_extr_b = domino.posb
            self.orientation_extr_b = orientation
        domino.couleur = couleur
        self.game.thread.signal_poser.emit(domino)
| {"/IHM.py": ["/interface.py", "/Main.py"], "/Main.py": ["/Dominos.py", "/Plateau.py"], "/Tests_unitaires.py": ["/Main.py"]} |
59,667 | Mesratya/Projet_Domino | refs/heads/master | /interface.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Yassi\PycharmProjects\ProjetDomino\interface.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5 import QtMultimedia as M
from PyQt5.QtMultimedia import QSound
class Ui_MainWindow(object):
    """pyuic5-generated layout for the main window.

    A central vertical layout holds the board grid on top and the current
    player's hand row underneath, plus a menu bar and a status bar.
    """
    def setupUi(self, MainWindow):
        """Create the widget hierarchy and attach it to `MainWindow`."""
        MainWindow.setObjectName("MainWindow")
        #MainWindow.resize(800, 587)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("images/logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        MainWindow.setWindowIcon(icon)
        # Central widget: vertical layout, board grid above the hand row.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.centrallayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.centrallayout.setObjectName("centrallayout")
        # Board grid (cells are filled later by the game).
        self.gridwidget = QtWidgets.QWidget(self.centralwidget)
        self.gridwidget.setObjectName("gridwidget")
        self.gridlayout = QtWidgets.QGridLayout(self.gridwidget)
        self.gridlayout.setObjectName("gridlayout")
        self.centrallayout.addWidget(self.gridwidget)
        # Horizontal row showing the current player's hand.
        self.handwidget = QtWidgets.QWidget(self.centralwidget)
        self.handwidget.setObjectName("handwidget")
        self.handlayout = QtWidgets.QHBoxLayout(self.handwidget)
        self.handlayout.setObjectName("handlayout")
        # self.dominowidget = QtWidgets.QWidget(self.handwidget)
        # self.dominowidget.setObjectName("dominowidget")
        # self.domino_layout = QtWidgets.QVBoxLayout(self.dominowidget)
        # self.domino_layout.setObjectName("domino_layout")
        # self.handlayout.addWidget(self.dominowidget)
        self.centrallayout.addWidget(self.handwidget)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        #self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        """Set all translatable texts (here only the window title)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Domino Adventure"))
if __name__ == "__main__":
    # Standalone preview: show the bare (empty) generated window.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| {"/IHM.py": ["/interface.py", "/Main.py"], "/Main.py": ["/Dominos.py", "/Plateau.py"], "/Tests_unitaires.py": ["/Main.py"]} |
59,668 | Mesratya/Projet_Domino | refs/heads/master | /IHM.py | # -*- coding: utf-8 -*-
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from interface import Ui_MainWindow
from Main import *
from PyQt5.QtMultimedia import QSound
class ClickableLabel(QtWidgets.QLabel):
    """Clickable label that emits `clicked(str)` carrying a preset message.

    Inspired by https://stackoverflow.com/questions/21354516/is-possible-to-put-an-image-instead-of-a-button-and-make-it-clickable
    """
    clicked = QtCore.pyqtSignal(str)
    def __init__(self,message):
        """Store the message to re-emit verbatim on every mouse press."""
        super(ClickableLabel, self).__init__()
        # pixmap = QtGui.QPixmap(width, height)
        # pixmap.fill(QtGui.QColor(color))
        # self.setPixmap(pixmap)
        # self.setObjectName(color)
        self.message = message
    def mousePressEvent(self, event):
        # Any mouse button triggers the signal; `event` itself is unused.
        self.clicked.emit(self.message)
class UI(QtWidgets.QMainWindow):
    """Main window of the domino game.

    Builds the Qt widgets (board grid on top, hand row below), plays the
    sounds, and services the requests of the game thread (`ThreadGame`).
    Every user choice made by clicking or through a dialog is reported
    back to the thread via `signal_choix_fait`.
    """
    signal_choix_fait = QtCore.pyqtSignal(str)
    def __init__(self):
        super().__init__()
        # Build the generated user interface.
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Random background texture among the 4 available.
        indice_background = str(np.random.randint(0,4))
        print(indice_background)
        self.setStyleSheet("QMainWindow{ background-image: url(images/textures/texture_"+ indice_background +"); }")
        self.put_sound = QSound("sounds/effect/put.wav")
        self.background_sound = QSound(None)
        # Thread running the game itself.
        self.thread = ThreadGame(UI = self)
        # Connect the signals coming from the thread.
        self.thread.signal_init_grid.connect(self.init_grid_void)
        self.thread.signal_poser.connect(self.poser)
        self.thread.signal_main.connect(self.afficher_main)
        self.thread.signal_choix_domino.connect(self.calque_choix_domino)
        self.thread.signal_choix_extremite.connect(self.calque_choix_extremite)
        self.thread.signal_refresh_plateau.connect(self.refresh_plateau)
        self.thread.signal_choix_orientation.connect(self.calque_choix_orientation)
        self.thread.signal_choix_mode.connect(self.calque_choix_mode)
        self.thread.signal_choix_pseudo.connect(self.calque_choix_pseudo)
        self.thread.signal_message_box.connect(self.afficher_message_box)
        self.thread.signal_choix_recommencer.connect(self.choix_recommencer)
        self.thread.signal_init_main.connect(self.init_main)
        self.thread.signal_terminus.connect(self.terminus)
        self.thread.signal_background_sound.connect(self.init_background_sound)
        self.thread.signal_sound_fx.connect(self.sound_fx)
        self.thread.signal_nb_joueur.connect(self.choix_nombre_joueur)
        self.thread.signal_go.connect(self.go)
        # Start the thread, and therefore the game.
        self.thread.start()
        # Layouts of the dominoes currently shown in the hand row.
        self.hand_layout_container = []
    def init_grid_void(self,Nb_ligne,Nb_colonne):
        """Rebuild the board layout with fully transparent cells."""
        print("Nettoyage")
        self.clearLayout(self.ui.gridlayout)
        self.ui.gridlayout.setSpacing(0)
        self.ui.gridlayout.setContentsMargins(0, 0, 0, 0)
        for i in range(Nb_ligne):
            for j in range(Nb_colonne):
                pixmap = QtGui.QPixmap(None)
                label = QtWidgets.QLabel()
                label.setPixmap(pixmap)
                label.setFixedSize(30, 30)
                self.ui.gridlayout.addWidget(label, i, j)
    def init_grid(self):
        """Rebuild the board layout with translucent grey cells."""
        Nb_ligne = self.thread.game.plateau.Nb_ligne
        Nb_colonne = self.thread.game.plateau.Nb_colonne
        self.clearLayout(self.ui.gridlayout)
        self.ui.gridlayout.setSpacing(0)
        self.ui.gridlayout.setContentsMargins(0, 0, 0, 0)
        for i in range(Nb_ligne):
            for j in range(Nb_colonne):
                pixmap = QtGui.QPixmap("images/calque_gris")
                label = QtWidgets.QLabel()
                label.setPixmap(pixmap)
                label.setFixedSize(49, 49)
                self.ui.gridlayout.addWidget(label, i, j)
    def refresh_plateau(self):
        """Redraw the board cleanly.

        Discards the clickable layouts and other temporary overlays by
        rebuilding the grid, then re-places every domino of the chain.
        """
        plateau = self.thread.game.plateau
        self.init_grid()
        for domino in plateau :
            self.poser(domino)
    def init_main(self):
        """Clear the hand row."""
        self.clearLayout(self.ui.handlayout)
    def terminus(self):
        """Close the window (ends the application)."""
        self.close()
    def clearLayout(self,layout):
        """Remove and delete every widget held by `layout`."""
        while layout.count():
            child = layout.takeAt(0)
            if child.widget():
                child.widget().deleteLater()
    def poser(self,domino):
        """Draw the two halves of `domino` on the board and play the put sound."""
        pixmap = QtGui.QPixmap("images/"+str(domino.vala)+"/"+domino.couleur+".png")
        label = QtWidgets.QLabel()
        label.setPixmap(pixmap)
        label.setFixedSize(49, 49)
        self.ui.gridlayout.addWidget(label,domino.posa[0]-1, domino.posa[1]-1)
        pixmap = QtGui.QPixmap("images/" + str(domino.valb) + "/"+domino.couleur+".png")
        label = QtWidgets.QLabel()
        label.setPixmap(pixmap)
        label.setFixedSize(49, 49)
        self.ui.gridlayout.addWidget(label, domino.posb[0]-1, domino.posb[1]-1)
        self.put_sound.play()
    def afficher_main(self,num,name,main,couleur):
        """Display player `num`'s hand: name banner plus one widget per domino."""
        self.clearLayout(self.ui.handlayout)
        self.hand_layout_container = []
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.ui.handlayout.addItem(spacerItem)
        label = QtWidgets.QLabel("Joueur {0} [{1}]".format(num,name))
        label.setStyleSheet("QLabel { background-color : rgb(71,55,55,129); color : black; font: bold 20px;}")
        self.ui.handlayout.addWidget(label)
        for domino in main :
            # One small vertical layout per domino (two stacked half-tiles).
            dominowidget = QtWidgets.QWidget(self.ui.handwidget)
            dominowidget.setObjectName("widget_"+str(domino))
            domino_layout = QtWidgets.QVBoxLayout(dominowidget)
            domino_layout.setObjectName(str(domino))
            domino_layout.setSpacing(0)
            domino_layout.setContentsMargins(0,0,0,0)
            self.ui.handlayout.addWidget(dominowidget)
            self.hand_layout_container.append(domino_layout)
            label = self.label_pixmap("images/" + str(domino.vala) + "/" + couleur + ".png")
            domino_layout.addWidget(label)
            label = self.label_pixmap("images/" + str(domino.valb) + "/" + couleur + ".png")
            domino_layout.addWidget(label)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.ui.handlayout.addItem(spacerItem)
    def calque_choix_domino(self,joueur):
        """Overlay a highlight on each playable domino of the hand.

        The overlays are clickable and send back the rank (as a string) of
        the chosen domino among the playable ones.
        """
        dominos_jouables = [str(domino) for domino in joueur.domino_jouable()]
        cnt_layout = 0 # rank among the playable layouts
        for domino_layout in self.hand_layout_container:
            if domino_layout.objectName() in dominos_jouables :
                object_name = domino_layout.objectName()
                # NOTE(review): assumes str(domino) carries the two values at
                # string indices 1 and 3 — confirm against Domino.__str__.
                vala, valb = object_name[1], object_name[3]
                self.clearLayout(domino_layout)
                label = self.label_pixmap_surligne("images/" + str(vala) + "/" + joueur.couleur + ".png",message= str(cnt_layout))
                domino_layout.addWidget(label)
                label = self.label_pixmap_surligne("images/" + str(valb) + "/" + joueur.couleur + ".png",message= str(cnt_layout))
                domino_layout.addWidget(label)
                cnt_layout += 1
    def calque_choix_extremite(self,plateau):
        """Make both chain extremities clickable ("0" for end a, "1" for end b)."""
        extr_a = plateau.extr_a
        extr_b = plateau.extr_b
        pos_extr_a = plateau.pos_extr_a
        pos_extr_b = plateau.pos_extr_b
        couleur_a = plateau[0].couleur
        couleur_b = plateau[-1].couleur
        label = self.label_pixmap_surligne("images/" + str(extr_a) + "/" + couleur_a + ".png",message= str(0))
        self.ui.gridlayout.addWidget(label, pos_extr_a[0] - 1, pos_extr_a[1] - 1)
        label = self.label_pixmap_surligne("images/" + str(extr_b) + "/" + couleur_b + ".png", message=str(1))
        self.ui.gridlayout.addWidget(label, pos_extr_b[0] - 1, pos_extr_b[1] - 1)
    def calque_choix_orientation(self,extr_choisit):
        """Display a clickable arrow for every legal orientation around the chosen extremity."""
        orientations_legales = self.thread.game.orientations_legales(extr_choisit)
        plateau = self.thread.game.plateau
        for orientation in orientations_legales :
            if extr_choisit == "a":
                pos_extr = plateau.pos_extr_a
                extr_orientation = plateau.orientation_extr_a
            if extr_choisit == "b":
                pos_extr = plateau.pos_extr_b
                extr_orientation = plateau.orientation_extr_b
            # The arrow goes on the cell of the would-be second half-domino.
            i,j = plateau.position_demi_domino(pos_extr, extr_orientation, domino_orientation=orientation)[1]
            label = self.label_pixmap_surligne("images/arrow/" + orientation + ".png" ,message = orientation)
            self.ui.gridlayout.addWidget(label,i-1,j-1)
    def calque_choix_mode(self,num):
        """Show the clickable human / computer icons to pick player `num`'s mode."""
        Nb_ligne = self.thread.game.plateau.Nb_ligne
        Nb_colonne = self.thread.game.plateau.Nb_colonne
        self.init_grid_void(Nb_ligne,Nb_colonne) # make sure the grid only holds fully transparent cells
        pixmap = QtGui.QPixmap("images/human.png")
        label = ClickableLabel(message = "human")
        label.clicked.connect(self.envoyer)
        label.setPixmap(pixmap)
        label.setFixedSize(99, 99)
        self.ui.gridlayout.addWidget(label, Nb_ligne//2, (Nb_colonne//2)-1)
        pixmap = QtGui.QPixmap("images/bot.png")
        label = ClickableLabel(message="IA_equilibre_global")
        label.clicked.connect(self.envoyer)
        label.setPixmap(pixmap)
        label.setFixedSize(99, 99)
        self.ui.gridlayout.addWidget(label, Nb_ligne //2, (Nb_colonne // 2) + 1)
    def calque_choix_pseudo(self):
        """Prompt for a nickname and forward it to the game thread."""
        pseudo = QtWidgets.QInputDialog.getText(self,"Choix du Pseudo","Entrer votre Pseudo :")[0]
        self.signal_choix_fait.emit(pseudo)
    def choix_nombre_joueur(self):
        """Prompt for the number of players (2-4) and forward it."""
        nb_joueur = QtWidgets.QInputDialog.getItem(self, "Choix du nombre de Joueurs", "Nombre de Joueurs :",("2","3","4"))[0]
        self.signal_choix_fait.emit(str(nb_joueur))
    def afficher_message_box(self,message):
        """Show an OK message box, then acknowledge to the game thread."""
        QtWidgets.QMessageBox.question(self,None,message,QtWidgets.QMessageBox.Ok)
        self.signal_choix_fait.emit("ok")
    def choix_recommencer(self):
        """Ask whether to restart the game and forward the answer."""
        message = QtWidgets.QInputDialog.getItem(self,"Voulez-vous recommencer la partie ?","Choix :",("Yes","No","Maybe","I don't know","can you repeat the question ?"))[0]
        self.signal_choix_fait.emit(message)
    def label_pixmap(self,image_adresse):
        """Return a 49x49 label displaying the given image."""
        pixmap = QtGui.QPixmap(image_adresse)
        label = QtWidgets.QLabel()
        label.setPixmap(pixmap)
        label.setFixedSize(49, 49)
        return(label)
    def label_pixmap_surligne(self,image_adresse,message):
        """Return a clickable 49x49 label: the image with a highlight overlay.

        Clicking it relays `message` through `signal_choix_fait`.
        """
        image = QtGui.QImage(image_adresse)
        overlay = QtGui.QImage("images/calque_selection.png")
        painter = QtGui.QPainter()
        painter.begin(image)
        painter.drawImage(0, 0, overlay)
        painter.end()
        label = ClickableLabel(message)
        label.clicked.connect(self.envoyer)
        label.setPixmap(QtGui.QPixmap.fromImage(image))
        label.setFixedSize(49, 49)
        return (label)
    def envoyer(self,message):
        """Relay a ClickableLabel click to the game thread."""
        self.signal_choix_fait.emit(message)
    def init_intro_sound(self):
        """Pick a random texture and intro jingle; stop the background music."""
        indice_background = str(np.random.randint(0, 4))
        print(indice_background)
        self.setStyleSheet("QMainWindow{ background-image: url(images/textures/texture_" + indice_background + "); }")
        indice_intro = str(np.random.randint(0, 4))
        self.intro_sound = QSound("sounds/intro/intro_" + indice_intro + ".wav")
        self.background_sound.stop()
    def init_background_sound(self):
        """Start a random looping background theme and stop the intro jingle."""
        # Random pick in the playlist (excerpts from Zelda Wind Waker).
        indice_theme = str(np.random.randint(0, 5))
        self.background_sound = QSound("sounds/main_theme/theme_" + indice_theme + ".wav")
        self.background_sound.setLoops(-1)
        # NOTE(review): assumes init_intro_sound() ran first — otherwise
        # self.intro_sound does not exist yet.
        self.intro_sound.stop()
        sleep(0.2)
        self.background_sound.play()
        print("back_sound")
    def sound_fx(self,adress_sound):
        """Pause the music and play a one-shot sound effect."""
        self.background_sound.stop()
        sleep(0.4)
        self.fx_sound = QSound(adress_sound)
        self.fx_sound.play()
    def resize(self):
        """Freeze the window at the size of its central widget.

        Bug fix: `Ui_MainWindow` is a plain object with neither a
        `setFixedSize` method nor a `centralWidget` attribute (the field is
        named `centralwidget`), so the original
        `self.ui.setFixedSize(self.ui.centralWidget.size())` raised
        AttributeError; the call must target the QMainWindow itself.
        """
        self.setFixedSize(self.ui.centralwidget.size())
    def go(self):
        """Show the clickable start icon and launch the intro jingle."""
        Nb_ligne = self.thread.game.plateau.Nb_ligne
        Nb_colonne = self.thread.game.plateau.Nb_colonne
        self.init_grid_void(Nb_ligne,Nb_colonne) # make sure the grid only holds fully transparent cells
        pixmap = QtGui.QPixmap("images/manette.png")
        label = ClickableLabel(message="human")
        label.clicked.connect(self.envoyer)
        label.setPixmap(pixmap)
        label.setFixedSize(99, 99)
        self.ui.gridlayout.addWidget(label, Nb_ligne // 2, (Nb_colonne // 2))
        self.init_intro_sound()
        self.intro_sound.play()
class ThreadGame(QtCore.QThread):
    """Worker thread running the game logic (Game) outside the GUI thread.

    Every `choix_*` helper emits a request signal to the UI, blocks on
    `wait_signal` until the UI answers through `signal_choix_fait`, and
    returns the answer cached in `self.choix_fait`.
    """
    # Custom signals consumed by the UI.
    signal_init_grid = QtCore.pyqtSignal()
    signal_poser = QtCore.pyqtSignal(Domino)
    signal_main = QtCore.pyqtSignal(int,str,list,str)
    signal_choix_domino = QtCore.pyqtSignal(Hand)
    signal_choix_extremite = QtCore.pyqtSignal(Plateau)
    signal_refresh_plateau = QtCore.pyqtSignal()
    signal_choix_orientation = QtCore.pyqtSignal(str)
    signal_choix_mode = QtCore.pyqtSignal(int)
    signal_choix_pseudo = QtCore.pyqtSignal()
    signal_message_box = QtCore.pyqtSignal(str)
    signal_choix_recommencer = QtCore.pyqtSignal()
    signal_init_main = QtCore.pyqtSignal()
    signal_terminus = QtCore.pyqtSignal()
    signal_background_sound = QtCore.pyqtSignal()
    signal_sound_fx = QtCore.pyqtSignal(str)
    signal_nb_joueur = QtCore.pyqtSignal()
    signal_go = QtCore.pyqtSignal()
    def __init__(self,UI,parent = None):
        """Keep a reference to the UI and listen for its answers."""
        super(ThreadGame,self).__init__(parent)
        self.choix_fait = None  # last answer received from the UI
        self.UI = UI
        self.UI.signal_choix_fait.connect(self.update_choix)
    def run(self):
        """Thread entry point: ask for the player count, then play a full game."""
        self.nb_joueur = self.choix_nombre_joueur()
        self.game = Game(thread = self,nb_joueur=self.nb_joueur,scoring = True)
        self.game.jouer_partie()
        self.terminus()
    def choix_domino(self,joueur):
        """Ask the UI which domino the player wants; return its rank as a string."""
        # Ask the GUI to highlight the playable dominoes of the hand:
        # they become clickable and send back their rank.
        self.signal_refresh_plateau.emit()
        self.signal_choix_domino.emit(joueur)
        self.wait_signal(self.UI.signal_choix_fait)
        self.signal_main.emit(joueur.num,joueur.name,joueur,joueur.couleur) # the choice (even an invalid one) is made: redraw the hand to clear the highlight
        print("choix_fait :" + self.choix_fait)
        return(self.choix_fait)
    def choix_extremite(self,plateau):
        """Ask the UI which chain extremity to play on; returns "0" or "1"."""
        # Ask the GUI to highlight both extremities of the board: the
        # half-dominoes become clickable and send back their name.
        self.signal_choix_extremite.emit(plateau)
        self.wait_signal(self.UI.signal_choix_fait)
        self.signal_refresh_plateau.emit()
        print("choix_fait :" + self.choix_fait)
        return(self.choix_fait)
    def choix_orientation(self,extr_choisit):
        """Ask the UI for the orientation of the new domino ("N"/"S"/"E"/"W")."""
        # Ask the GUI to display clickable arrows (transparent tiles with an
        # arrow) around the chosen extremity, showing only the orientations
        # listed by game.orientations_legales(extr_choisit).
        self.signal_choix_orientation.emit(extr_choisit)
        self.wait_signal(self.UI.signal_choix_fait)
        self.signal_refresh_plateau.emit()
        print("choix_fait :" + self.choix_fait)
        return (self.choix_fait)
    def choix_mode(self,num):
        """Ask the UI for player `num`'s play mode (human or AI icon)."""
        # Ask the GUI to display the human and computer icons.
        self.signal_refresh_plateau.emit()
        self.signal_choix_mode.emit(num)
        self.wait_signal(self.UI.signal_choix_fait)
        print("choix_fait :" + self.choix_fait)
        self.signal_init_grid.emit()
        return (self.choix_fait)
    def choix_pseudo(self):
        """Ask the UI for a human player's nickname."""
        self.signal_choix_pseudo.emit()
        self.wait_signal(self.UI.signal_choix_fait)
        self.signal_init_grid.emit()
        print("choix_fait :" + self.choix_fait)
        return (self.choix_fait)
    def message_box(self,message,mode_joueur = None):
        """Show `message` to a human player and wait for acknowledgement.

        Skipped entirely (returns None) when `mode_joueur` is an AI mode.
        """
        if (mode_joueur == None or mode_joueur == "human") :
            self.signal_message_box.emit(message)
            self.wait_signal(self.UI.signal_choix_fait)
            print("choix_fait :" + self.choix_fait)
            return (self.choix_fait)
    def demande_recommencer(self):
        """Ask whether the players want to start a new game."""
        self.signal_choix_recommencer.emit()
        self.wait_signal(self.UI.signal_choix_fait)
        self.signal_init_grid.emit()
        print("choix_fait :" + self.choix_fait)
        return (self.choix_fait)
    def choix_nombre_joueur(self):
        """Ask the UI how many players take part; return it as an int."""
        self.signal_nb_joueur.emit()
        self.wait_signal(self.UI.signal_choix_fait)
        print("choix_fait :" + self.choix_fait)
        return (int(self.choix_fait))
    def terminus(self):
        """Tell the UI to close the application."""
        self.signal_terminus.emit()
    def update_choix(self,message):
        """Slot: cache the answer sent back by the UI."""
        self.choix_fait = message
    def init_main(self):
        """Tell the UI to clear the hand display."""
        self.signal_init_main.emit()
    def go(self):
        """Show the start screen and wait for the player to click it."""
        self.signal_go.emit()
        self.wait_signal(self.UI.signal_choix_fait)
        print("choix_fait :" + self.choix_fait)
        self.signal_init_grid.emit()
    def wait_signal(self,signal): # blocks this thread until `signal` is emitted
        """Spin a local event loop that quits on `signal`."""
        loop = QtCore.QEventLoop()
        signal.connect(loop.quit)
        loop.exec_()
if __name__ == "__main__":
    # Launch the application; constructing UI also starts the game thread.
    app = QtWidgets.QApplication(sys.argv)
    window = UI()
    window.show()
    app.exec_()
| {"/IHM.py": ["/interface.py", "/Main.py"], "/Main.py": ["/Dominos.py", "/Plateau.py"], "/Tests_unitaires.py": ["/Main.py"]} |
59,669 | Mesratya/Projet_Domino | refs/heads/master | /Main.py | from Dominos import *
from Plateau import *
from PyQt5 import QtCore
from time import sleep
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
____ _ ____
| _ \ ___ _ __ ___ (_)_ __ ___ / ___| __ _ _ __ ___ ___
| | | |/ _ \| '_ ` _ \| | '_ \ / _ \ | | _ / _` | '_ ` _ \ / _ \
| |_| | (_) | | | | | | | | | | (_) | | |_| | (_| | | | | | | __/
|____/ \___/|_| |_| |_|_|_| |_|\___/ \____|\__,_|_| |_| |_|\___|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[Module principale]
Ce module est le coeur du Jeu de Domino. Pour jouer il suffit de lancer ce module. En effet le __main__ instanciera un objet game ce qui lancera automatiquement
le jeu avec les paramètres par défauts. Pour rejouer relancez le module ou instanciez un objet game.
Ce Module n'est pas indépendant !
La présence des modules Dominos et Plateau dans le même repertoire est necessaire ainsi que les fichiers txt score, game_opening et game_ending
Le jeu est en mode console. Il suffit de suivre les instructions de la console. L'état de la chaîne de dominos, la position spatiale des valeurs
et toute information utile seront donnés en console. Les erreurs de frappe sont gérées par le jeu.
Source Github:
Notre projet à jour est disponible sur le dépôt github : https://github.com/Mesratya/Projet_Domino
Warning :
La seul manière de faire planter le jeu est de modifier
les paramètres d'instanciation dans le main (ces derniers ne sont pas encore protégés par des setteurs).
C'est le seul endroit où l'encapsulation a une utilité puisque toute autre interaction avec l'utilisateur
se fait en contrôlant les données reçues.
Par défaut la partie se fait à deux, avec un jeu double-six (six points max sur un domino) et 7 domino par joueurs. Ces paramètres sont modifiable lors de l'instanciation
"""
class Game:
"""
Classe principale du Jeu. Elle gère le déroulement du jeu et utilise les autres classes.
"""
def __init__(self,pt_max = 6,nb_joueur = 2,nb_dominoparjoueur = 7,scoring = False,thread = None,Nb_ligne = 15,Nb_colonne = 15):
"""
On récupère les paramètres de la partie et on lance la partie avec self.jouer_partie
Ainsi instancier un objet game lance automatiquement la partie.
:param pt_max: pt_max est le nombre de points maximal sur un domino
:param nb_joueur:
:param nb_dominoparjoueur:
:param scoring:
A faire : utiliser un setteur pour assurer l'integrité des données entrée
"""
self.nb_joueur = nb_joueur
self.pt_max = pt_max
self.nb_domino = (pt_max+1)*(pt_max+2)/2
self.nb_dominoparjoueur=nb_dominoparjoueur
self.Nb_ligne = Nb_ligne
self.Nb_colonne = Nb_colonne
self.modes_disponibles = ["human","IA_max","IA_hasard","IA_equilibre_restreint","IA_equilibre_global"] # mode de jeu des joueurs
self.couleurs_disponibles = ["orange","vert","bleu","bordeau"]
self.scoring = scoring
self.thread = thread
#self.jouer_partie() # Pour l'instant(pré-IHM) on lance la partie dès l'instanciation de game.
    def initialiser(self):
        """Create (or re-create) every object needed for a game.

        Not done in __init__ because several games can be played within a
        single Game instance (restarts, ...).

        Builds the board, the stock and the players, deals the hands, and
        moves the player holding the strongest domino to the head of the
        player list (the rules make them play first).
        :return: None
        """
        self.plateau = Plateau(self) # the board starts empty
        self.talon = Talon(self.pt_max) # the stock fills itself automatically
        self.Joueurs = [] # list of the players (Hand objects) of the game
        self.thread.go()
        with open("game_opening") as f: # ASCII art (see self.fin_de_partie() for the references)
            game_opening = f.read()
        print(game_opening)
        self.thread.message_box("Choisissez le mode de jeu des participants (humain ou IA)")
        for i in range(self.nb_joueur): # create the hands
            # Get each player's play mode (formerly read from the console).
            #mode = input("Donnez le mode du joueur {0} (choisissez parmi {1}) ".format(i,self.modes_disponibles))
            mode = self.thread.choix_mode(num = i)
            while mode not in self.modes_disponibles :
                print("---Saisie Incorrecte Veuillez Recommencer ---")
                #mode = input("Donnez le mode du joueur {0} (choisissez parmi {1}) ".format(i,self.modes_disponibles))
                mode = self.thread.choix_mode(num = i)
            # When self.scoring is True, ask human players for a nickname;
            # otherwise the hand is simply named after its play mode.
            hand_name = None
            if self.scoring and mode == "human" :
                #hand_name = input("Donnez votre Pseudo")
                hand_name = self.thread.choix_pseudo()
            else:
                hand_name = mode
            couleur = self.couleurs_disponibles[i]
            self.Joueurs.append(Hand(i, self, mode,hand_name,couleur=couleur))
        print("\n")
        # Fill each hand by drawing from the stock.
        for joueur in self.Joueurs:
            for i in range (self.nb_dominoparjoueur):
                joueur.append(self.talon.tirer())
        # Move the player holding the strongest domino to the head of Joueurs.
        self.rang_premier_joueur = 0 # index in Joueurs of the player holding the strongest domino at game start
        for i in range(len(self.Joueurs)):
            if self.Joueurs[i].max_domino() > self.Joueurs[self.rang_premier_joueur].max_domino():
                self.rang_premier_joueur = i
        premier_joueur = self.Joueurs.pop(self.rang_premier_joueur)
        self.Joueurs.insert(0,premier_joueur)
def jouer_tour(self,joueur):
"""
Description générale :
Le joueur joue son tour en fonction de son mode de jeu (human,hasard...)
Il s'agit de choisir quel domino jouer parmit ce qui sont jouables(en prenant en compte les contraintes topologiques),
de choisir l'extremité de la chaine si il y'as ambiguité et choisir l'orientation du domino
Remarque:
On pourra probablement optimiser cette partie en employant pattern design de type stratégie
:param joueur: joueur qui joue le tour
:return: ne renvoie rien
"""
if joueur.cinq_meme_famille() :
"""
Si le joueur en question possède 5 domino de la même famille. On déclare la partie fini (self.Jeu_en_cours = False) et on déclare
le recommencement de la partie nécessaire (self.recommencer = True)
En effet un joueur n'as pas le droit d'être dans cette situation, il faut tout recommencer !
"""
self.Jeu_en_cours = False
self.recommencer = True
# on prévient tout le monde de la situation
#print("Le joueur {0} possède 5 dominos de la même famille, il faut recommencer la partie !".format(joueur.num))
self.thread.message_box("Le joueur {0} possède 5 dominos de la même famille, il faut recommencer la partie !".format(joueur.num))
else :
if self.premiere_pose:
"""
Cette section est executé lors du premier tour (premiere_pose == True) par le joueur en début liste (c'est celui possèdant le
plus grand domino). Il va ici poser son plus grand domino comme exigé par les règles du jeu.
"""
max_domino = joueur.max_domino()
joueur.remove(max_domino)
self.plateau.poser(max_domino,couleur = joueur.couleur)
self.premiere_pose = False # on ne repassera plus par cette section
# A partir d'ici (tour 2 et suivant) on distingue le mode de jeu du joueur pour jouer un tour
else:
domino_jouable = joueur.domino_jouable()
if domino_jouable == []: # Aucun domino n'est jouable
"""
On traite ici le cas du joueur bloqué et on pense à enrengistrer la situation du joueur
si il est définitivement bloqué (bloqué + talon vide)
"""
if len(self.talon) > 0: # il peut piocher donc il pioche et passe son tour
domino_pioche = self.talon.tirer()
joueur.append(domino_pioche)
#print("Joueur {0} ne peut pas jouer, il pioche {1} et passe son tour \n".format(joueur.num,
# domino_pioche))
self.thread.message_box("Joueur {0} [{1}] ne peut pas jouer, il pioche {2} et passe son tour \n".format(joueur.num,joueur.name,domino_pioche),joueur.mode)
else:
joueur.etat_bloque = True # il ne peut pas piocher, le talon est vide, il est définitvement bloqué
#print("Le talon est vide : Joueur {0} ne peut définitivement plus jouer \n".format(joueur.num))
self.thread.message_box("Le talon est vide : Joueur {0} ne peut définitivement plus jouer \n".format(joueur.num),joueur.mode)
else: # le joueur n'est pas bloqué, donc il joue
print("Plateau : {0}".format(self.plateau))
print(self.plateau.grid)
print("Joueur {0} [{1}] : {2}".format(joueur.num,joueur.name, joueur))
self.thread.signal_refresh_plateau.emit()
self.thread.signal_main.emit(joueur.num,joueur.name,joueur,joueur.couleur)
if joueur.mode == "human" or joueur.mode == "Human":
"""
Dans le cas d'un joueur human il choisit le domino, l'extremité de la chaine et l'orientation
en fonction de qui est disponible
"""
print("Dominos jouables : {0}\n".format(domino_jouable))
rang_domino_choisit = -1
while rang_domino_choisit < 0 or rang_domino_choisit > len(domino_jouable)-1:
rang_domino_choisit = self.thread.choix_domino(joueur)
#rang_domino_choisit = input("Quel domino souhaitez vous posez ? (rang de ce domino) ")
try :
rang_domino_choisit = int(rang_domino_choisit)
except ValueError :
print("---Vous n'avez pas saisi un nombre---")
rang_domino_choisit = -1
continue
if rang_domino_choisit < 0 :
print(("---Ce nombre est négatif---"))
if rang_domino_choisit > len(domino_jouable)-1:
print("---Ce nombre est plus grand que le rang maximal des dominos jouables---")
domino_choisit = domino_jouable[rang_domino_choisit]
if domino_choisit in joueur.domino_jouable_left_side():
extr_choisit = "a"
elif domino_choisit in joueur.domino_jouable_right_side():
extr_choisit = "b"
else:
rang_extr_choisit = -1
while rang_extr_choisit < 0 or rang_extr_choisit > 1:
#rang_extr_choisit = input("Choisissez l'extrémité du plateau 0: {0} ou 1: {1} en tapant 0 ou 1".format(self.plateau[0],self.plateau[-1]))
rang_extr_choisit = self.thread.choix_extremite(self.plateau)
try:
rang_extr_choisit = int(rang_extr_choisit)
except ValueError:
print("---Vous n'avez pas saisi un nombre---")
rang_extr_choisit = -1
continue
if rang_extr_choisit < 0:
print(("---Ce nombre est négatif---"))
if rang_extr_choisit > 1:
print("---Ce nombre est plus grand 1 ---")
if rang_extr_choisit == 0:
extr_choisit = "a"
elif rang_extr_choisit == 1:
extr_choisit = "b"
#orientation_choisit = input("Quel orientation pour votre domino ? (Orientations possibles : {0})".format(self.orientations_legales(extr_choisit)))
orientation_choisit = self.thread.choix_orientation(extr_choisit)
while orientation_choisit not in self.orientations_legales(extr_choisit) :
print("Saisie incorrecte tapez seulement une lettre parmi celle proposées")
#orientation_choisit = input("Orientations possibles : {0}".format(self.orientations_legales(extr_choisit)))
orientation_choisit = self.thread.choix_orientation(extr_choisit)
if joueur.mode != "human" :
sleep(0.5)
if joueur.mode == "IA_hasard":
"""
L'inteliigence artificielle IA_hasard choisit au hasard son domino parmit
ceux qui sont jouables
"""
domino_choisit = domino_jouable[random.randint(0, len(domino_jouable) - 1)]
if domino_choisit in joueur.domino_jouable_left_side():
extr_choisit = "a"
elif domino_choisit in joueur.domino_jouable_right_side():
extr_choisit = "b"
else:
# si ambiguité on choisit au hasard l'extremité
random_side = random.randint(0, 1)
if random_side == 0:
extr_choisit = "a"
elif random_side == 1:
extr_choisit = "b"
orientations_possibles = self.orientations_legales(extr_choisit)
orientation_choisit = random.choice(orientations_possibles)
if joueur.mode == "IA_max":
"""
L'inteliigence artificielle IA_max se débarasse du domino
ayant le nombre de points le plus important. En cas de blocage définitif du jeu
celui ayant le moins de points à gagné. Cette stratégie est "préventive" dans le sens
ou elle se focalise sur la victoire en cas de blocage.
"""
domino_choisit = max(domino_jouable)
if domino_choisit in joueur.domino_jouable_left_side():
extr_choisit = "a"
elif domino_choisit in joueur.domino_jouable_right_side():
extr_choisit = "b"
else:
random_side = random.randint(0, 1)
if random_side == 0:
extr_choisit = "a"
elif random_side == 1:
extr_choisit = "b"
orientations_possibles = self.orientations_legales(extr_choisit)
orientation_choisit = random.choice(orientations_possibles)
if joueur.mode == "IA_equilibre_restreint":
"""
Cette Inteligence artificielle est plus élaboré que les précédentes (et son nom est plus compliqué
à taper...)
Cette algo favorise la divesité dans la main afin d'être réactif au prochain tour
Elle consiste à choisir de poser le domino qui permet d'avoir une main contenant le plus de type de points
(le plus de familles de points). Dans la version restreinte on favorise la diversité seulement parmit les dominos
actuellement jouables (voir IA_equilibre_global pour une divesrité dans toute la main)
"""
domino_jouable = joueur.domino_jouable()
Nb_famille_dominos = [] # Cette liste contient pour chaque domino le nombre de famille de points restant dans la main jouable une fois le domino correspondant posé
for domino in domino_jouable: # Supposons que l'on pose le domino domino
dominos_restant = domino_jouable.copy()
dominos_restant.remove(domino) # il restera les autres dominos jouables (ici on utilise seulement les dominos jouables !)
# on va compter combien il y'as de membre de chaque famille dans les dominos restant
count_pt = [0] * (self.pt_max + 1) # chaque case correspond à une famille
for domino_restant in dominos_restant: # on parcourt chaque domino restant
if domino_restant.vala == domino_restant.valb:
count_pt[domino_restant.vala] += 1
else:
count_pt[domino_restant.vala] += 1
count_pt[domino_restant.valb] += 1
nb_famille = 0 # nombre de famille donne le nombre de famille de domino (1 pour chaque famille représenté)
for pt in count_pt : # on compte les familles présentes
if pt > 0 :
nb_famille += 1
Nb_famille_dominos.append(nb_famille)
nb_famille_max = max(Nb_famille_dominos) # on récupère le meuilleur score
dominos_equilibres = []
for rang_domino in range(len(domino_jouable)) : # on cherche le ou les dominos à l'origine de ce meuilleur score
if Nb_famille_dominos[rang_domino] == nb_famille_max :
dominos_equilibres.append(domino_jouable[rang_domino])
domino_choisit = max(dominos_equilibres) # si plusieurs dominos induisent de la diversité on prend celui qui à le plus de point (priorité à la prévention en cas de blocage)
if domino_choisit in joueur.domino_jouable_left_side():
extr_choisit = "a"
elif domino_choisit in joueur.domino_jouable_right_side():
extr_choisit = "b"
else:
random_side = random.randint(0, 1)
if random_side == 0:
extr_choisit = "a"
elif random_side == 1:
extr_choisit = "b"
orientations_possibles = self.orientations_legales(extr_choisit)
orientation_choisit = random.choice(orientations_possibles)
if joueur.mode == "IA_equilibre_global":
"""
Ce référer à la docstring de IA_equilibre_restreint. On procède simplement à une généralisation
C'était l'algo prévu initialement mais la version restreinte à été obtenue par erreur. Cependant
Il semble l'equilibre restreint soit plus performant (le hasard fait bien les choses)
"""
domino_jouable = joueur.domino_jouable()
Nb_famille_dominos = [] # Cette liste contient pour chaque domino le nombre de famille restant dans la main une fois le domino correspondant posé
for domino in domino_jouable: # Supposons que l'on pose le domino domino
dominos_restant = joueur.copy()
dominos_restant.remove(domino) # il restera les autres dominos
count_pt = [0] * (self.pt_max + 1) # on compte combien il y'as de membre de chaque famille dans les dominos restant
for domino_restant in dominos_restant:
if domino_restant.vala == domino_restant.valb:
count_pt[domino_restant.vala] += 1
else:
count_pt[domino_restant.vala] += 1
count_pt[domino_restant.valb] += 1
nb_famille = 0
for pt in count_pt :
if pt > 0 :
nb_famille += 1
Nb_famille_dominos.append(nb_famille)
nb_famille_max = max(Nb_famille_dominos)
dominos_equilibres = []
for rang_domino in range(len(domino_jouable)) :
if Nb_famille_dominos[rang_domino] == nb_famille_max :
dominos_equilibres.append(domino_jouable[rang_domino])
domino_choisit = max(dominos_equilibres) # si deux dominos induisent la diversité on prend celui qui à le plus de point (priorité à la defense)
if domino_choisit in joueur.domino_jouable_left_side():
extr_choisit = "a"
elif domino_choisit in joueur.domino_jouable_right_side():
extr_choisit = "b"
else:
random_side = random.randint(0, 1)
if random_side == 0:
extr_choisit = "a"
elif random_side == 1:
extr_choisit = "b"
orientations_possibles = self.orientations_legales(extr_choisit)
orientation_choisit = random.choice(orientations_possibles)
joueur.remove(domino_choisit)
self.plateau.poser(domino_choisit, extr_choisit,orientation_choisit,couleur=joueur.couleur)
print("\n")
if len(joueur) == 0:
self.Jeu_en_cours = False # le joueur à posé son dernier domino, arrêter le jeu
def opposite_orientation(self,orientation):
"""
:param orientation: orientation ("N","S","W","E")
:return: renvoie l'orientation opposée (à 180°)
"""
if orientation == "N" :
return("S")
elif orientation == "S" :
return("N")
elif orientation == "W" :
return("E")
elif orientation == "E" :
return("W")
def orientations_legales(self,extr_choisit):
"""
Pour l'extremité du plateau choisit ("a" ou "b") renvoie les orientations légales.
:param extr_choisit: "a" ou "b"
:return: listes des orientations légales pour cette extrémité
"""
# on récupère la position et l'orientation du demi-domino à l'extremité
if extr_choisit == "a" :
pos_extr = self.plateau.pos_extr_a
extr_orientation = self.plateau.orientation_extr_a
elif extr_choisit == "b":
pos_extr = self.plateau.pos_extr_b
extr_orientation = self.plateau.orientation_extr_b
orientation_possibles = ["N", "S", "E", "W"] # au départ...
orientations_legales = []
for orientation_a_tester in orientation_possibles : # Pour chaque orientation à tester on récupère les positions correspondantes et on verifie que la place est libre (" ")
(pos_a_checker_1,pos_a_checker_2) = self.plateau.position_demi_domino(pos_extr,extr_orientation,orientation_a_tester)
if self.plateau.grid[pos_a_checker_1] == " " and self.plateau.grid[pos_a_checker_2] == " " and orientation_a_tester != self.opposite_orientation(extr_orientation) :
orientations_legales.append(orientation_a_tester)
return(orientations_legales)
    def jouer_partie(self):
        """
        Run a whole game inside this method until it ends.

        This method is RECURSIVE: after the while loop it calls
        self.fin_de_partie(), which calls jouer_partie() again unless the
        base case holds (i.e. self.recommencer is False at the end of the
        game).
        :return:
        """
        self.initialiser() # first-time initialisation or reset between games
        self.Jeu_en_cours = True
        self.recommencer = False
        self.premiere_pose = True
        self.thread.signal_background_sound.emit()  # ask the GUI thread to start the background music
        while self.Jeu_en_cours :
            for joueur in self.Joueurs :
                if self.Jeu_en_cours :  # re-checked each turn: a player can end the game mid-round
                    self.jouer_tour(joueur)
            # At the end of the round, check whether every player is stuck
            if [joueur.etat_bloque for joueur in self.Joueurs] == [True]*self.nb_joueur :
                self.Jeu_en_cours = False # every player is blocked: stop the game
        self.fin_de_partie()
def fin_de_partie(self):
if self.recommencer == True : # le cas de base n'est pas vérifié donc on appelle self.jouer_partie
print("Nouvelle Partie")
self.thread.message_box("Nouvelle Partie")
self.thread.init_main()
self.jouer_partie()
else :
# le fameux cas de base : la partie n'as pas à être recommencé donc on ne rappel pas self.jouer_partie
gagnant = self.Joueurs[0]
for joueur in self.Joueurs:
if joueur.pt_restant() < gagnant.pt_restant():
gagnant = joueur
# le score du gagnant est le nombre de points des autres joueurs
score_gagnant = 0
for joueur in self.Joueurs:
if joueur != gagnant :
score_gagnant += joueur.pt_restant()
#print("Le gagnant est Joueur {0} [{1}] avec un score de {2} points !".format(gagnant.num,gagnant.name,score_gagnant))
self.thread.signal_sound_fx.emit("sounds/effect/win.wav")
self.thread.message_box("Le gagnant est Joueur {0} [{1}] avec un score de {2} points !".format(gagnant.num,gagnant.name,score_gagnant))
if self.scoring :
# on récupère le nom des heureux perdants...
Perdants = self.Joueurs.copy()
Perdants.remove(gagnant)
Perdants_name = []
for perdant in Perdants:
Perdants_name.append(perdant.name)
with open("score",mode='a') as f:
f.write("{0} gagne face a {1} Score ==> {2} points".format(gagnant.name,Perdants_name,score_gagnant))
f.write("\n")
self.thread.init_main()
self.recommencer = self.thread.demande_recommencer()
if self.recommencer in ["Yes","Maybe","can you repeat the question ?"]:
self.thread.message_box("Nouvelle Partie")
self.jouer_partie()
with open("game_ending") as f: # L'auteur des ascii arts dominos est David Riley et ils proviennent de http://ascii.co.uk/
game_ending = f.read()
print(game_ending)
# Consultation_score = input("Voulez-vous consulter la Table des Scores ? [répondre par oui ou par non]")
# while Consultation_score not in ["oui","non"]:
# print("-------Réponse incorrecte-------")
# Consultation_score = input("Voulez-vous consulter la Table des Scores ? [répondre par oui ou par non]")
# if Consultation_score == "oui":
# with open("score") as f:
# score = f.read()
# print(score)
if __name__ == "__main__":
"""
Cette section ne se lance quand lancant Main.py
On modifier cette section en instanciant plusieurs game avec des paramères différants de ceux par défaut
"""
Game = Game(scoring=False,nb_joueur=2)
| {"/IHM.py": ["/interface.py", "/Main.py"], "/Main.py": ["/Dominos.py", "/Plateau.py"], "/Tests_unitaires.py": ["/Main.py"]} |
59,670 | Mesratya/Projet_Domino | refs/heads/master | /Tests_unitaires.py | import unittest
from Main import *
"""Ce module contient quelques tests unitaires du Projet Domino
Ces tests sont pour l'instant écrit à des fins essentiellement pédagogique
puisque le dévloppement du code pré-IHM c'est fait en testant systématiquement le
code en faisant jouer des IA (aux stratégies diverses) plutot quand faisant tourner des test unitaires
"""
class Testdomino(unittest.TestCase):
"""Classe de test unitaire pour la classe Domino"""
def setUp(self):
"""Méthode d'initialisation appelér avant chaque test"""
pass
def testInit(self):
"""Test de la méthode d'initialisation des dominos"""
for i in range(10):
for j in range(10):
D = domino(i,j,(7,8),(7,9))
self.assertEqual(D.vala,i)
self.assertEqual(D.valb,j)
self.assertEqual(D.posa,(7,8))
self.assertEqual(D.posb, (7, 9))
def testComp(self):
"""Test de la comparaison entre des dominos"""
for i in range(10):
for j in range(10):
D1 = domino(i,j)
D2 = domino(i+1,j)
self.assertTrue(D1 < D2)
def testVal_totale(self):
for i in range(10):
for j in range(10):
val_totale = domino(i,j).val_totale()
self.assertEqual(val_totale,i+j)
def testInverser(self):
"""Test de l'inversion d'un domino"""
for i in range(10):
for j in range(10):
D = domino(i,j)
D_inv = D.inverser()
self.assertEqual(D.vala,D_inv.valb)
self.assertEqual(D.valb,D_inv.vala)
class Test_talon(unittest.TestCase):
    """Unit tests for the talon class."""

    def setUp(self):
        """Called before every test; nothing to prepare."""
        pass

    # The talon class needs no unit test here:
    # the generated talon is known to be valid.
if __name__ == '__main__':
    # Run the test suite when this module is launched directly.
    # (Also drops the dataset-extraction junk that was fused onto this
    # line and made the module a syntax error.)
    unittest.main()
59,671 | Mesratya/Projet_Domino | refs/heads/master | /interface2.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\Yassi\PycharmProjects\ProjetDomino\interface_2.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """Auto-generated (pyuic5) UI scaffold.

    Layout: a 2x2 grid of buttons on top, above a horizontally centred
    "hand" area holding two vertical pairs of buttons.

    NOTE: this class is generated from interface_2.ui — manual edits are
    lost on regeneration, so change the .ui file instead.
    """
    def setupUi(self, MainWindow):
        # --- main window and central widget -------------------------------
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(800, 587)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        # --- top area: 2x2 grid of push buttons ---------------------------
        self.gridwidget = QtWidgets.QWidget(self.centralwidget)
        self.gridwidget.setObjectName("gridwidget")
        self.gridlayout = QtWidgets.QGridLayout(self.gridwidget)
        self.gridlayout.setObjectName("gridlayout")
        self.pushButton_2 = QtWidgets.QPushButton(self.gridwidget)
        self.pushButton_2.setObjectName("pushButton_2")
        self.gridlayout.addWidget(self.pushButton_2, 0, 1, 1, 1)
        self.pushButton_4 = QtWidgets.QPushButton(self.gridwidget)
        self.pushButton_4.setObjectName("pushButton_4")
        self.gridlayout.addWidget(self.pushButton_4, 1, 1, 1, 1)
        self.pushButton = QtWidgets.QPushButton(self.gridwidget)
        self.pushButton.setObjectName("pushButton")
        self.gridlayout.addWidget(self.pushButton, 0, 0, 1, 1)
        self.pushButton_3 = QtWidgets.QPushButton(self.gridwidget)
        self.pushButton_3.setObjectName("pushButton_3")
        self.gridlayout.addWidget(self.pushButton_3, 1, 0, 1, 1)
        self.verticalLayout_3.addWidget(self.gridwidget)
        # --- bottom area: "hand" row, centred by two expanding spacers ----
        self.handwidget = QtWidgets.QWidget(self.centralwidget)
        self.handwidget.setObjectName("handwidget")
        self.handlayout = QtWidgets.QHBoxLayout(self.handwidget)
        self.handlayout.setObjectName("handlayout")
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.handlayout.addItem(spacerItem)
        # first vertical pair of buttons
        self.widget = QtWidgets.QWidget(self.handwidget)
        self.widget.setObjectName("widget")
        self.d = QtWidgets.QVBoxLayout(self.widget)
        self.d.setObjectName("d")
        self.pushButton_5 = QtWidgets.QPushButton(self.widget)
        self.pushButton_5.setObjectName("pushButton_5")
        self.d.addWidget(self.pushButton_5)
        self.pushButton_6 = QtWidgets.QPushButton(self.widget)
        self.pushButton_6.setObjectName("pushButton_6")
        self.d.addWidget(self.pushButton_6)
        self.handlayout.addWidget(self.widget)
        # second vertical pair of buttons
        self.widget_2 = QtWidgets.QWidget(self.handwidget)
        self.widget_2.setObjectName("widget_2")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.widget_2)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.pushButton_7 = QtWidgets.QPushButton(self.widget_2)
        self.pushButton_7.setObjectName("pushButton_7")
        self.verticalLayout_5.addWidget(self.pushButton_7)
        self.pushButton_8 = QtWidgets.QPushButton(self.widget_2)
        self.pushButton_8.setObjectName("pushButton_8")
        self.verticalLayout_5.addWidget(self.pushButton_8)
        self.handlayout.addWidget(self.widget_2)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.handlayout.addItem(spacerItem1)
        self.verticalLayout_3.addWidget(self.handwidget)
        MainWindow.setCentralWidget(self.centralwidget)
        # --- chrome: menu bar and status bar ------------------------------
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # Apply translatable display texts (all still pyuic5 placeholders).
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.pushButton_2.setText(_translate("MainWindow", "PushButton"))
        self.pushButton_4.setText(_translate("MainWindow", "PushButton"))
        self.pushButton.setText(_translate("MainWindow", "PushButton"))
        self.pushButton_3.setText(_translate("MainWindow", "PushButton"))
        self.pushButton_5.setText(_translate("MainWindow", "PushButton"))
        self.pushButton_6.setText(_translate("MainWindow", "PushButton"))
        self.pushButton_7.setText(_translate("MainWindow", "PushButton"))
        self.pushButton_8.setText(_translate("MainWindow", "PushButton"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| {"/IHM.py": ["/interface.py", "/Main.py"], "/Main.py": ["/Dominos.py", "/Plateau.py"], "/Tests_unitaires.py": ["/Main.py"]} |
59,672 | Mesratya/Projet_Domino | refs/heads/master | /Dominos.py | import random
"""
[Module secondaire]
Ce module est destiné à la définition des classes Domino, Hand et Talon car ces classes sont taille raisonable
"""
class Domino:
    """
    Model a domino by the values and board positions of its two halves.

    A domino has two halves, labelled ``a`` and ``b``; each half has a
    point value and a grid position. A domino that has not been placed on
    the board yet keeps its positions at (None, None).
    """
    def __init__(self, vala, valb, posa=(None, None), posb=(None, None), couleur=None):
        self.vala = vala
        self.valb = valb
        # Storing the half-domino positions here lets the GUI draw the
        # dominoes simply by iterating over the board object, without
        # having to scan the whole grid.
        self.posa = posa
        self.posb = posb
        self.couleur = couleur

    def __repr__(self):
        """Text form of a domino, used to play the game in a terminal."""
        return "[{0}|{1}]".format(self.vala, self.valb)

    def val_totale(self):
        """Return the total value of the domino (sum of both halves)."""
        return self.vala + self.valb

    def __gt__(self, other):
        """
        Compare dominoes by total value.

        This makes lists of dominoes sortable and lets ``max`` pick the
        heaviest domino of a hand (cf. the IA_max strategy).
        Equality is deliberately left as identity: the game relies on
        ``in`` / ``remove`` matching one specific domino object.
        """
        # Return the comparison directly instead of the original
        # if/else returning True/False.
        return self.val_totale() > other.val_totale()

    def inverser(self):
        """Return a new, unplaced domino with the two halves swapped."""
        return Domino(self.valb, self.vala)
class Talon(list):
    """
    Model the stock (talon) that holds every domino at the start of a game.
    """
    def __init__(self, pt_max):
        """
        Fill the talon by building every domino of the set.

        Knowing the maximum number of points on a half (6 for a
        double-six set) is enough to construct all the dominoes.
        :param pt_max: maximum number of points on a domino half
        """
        # Same construction order as the original nested appends.
        for haut in range(pt_max, -1, -1):
            self.extend(Domino(haut, bas) for bas in range(haut, -1, -1))

    def tirer(self):
        """Remove a randomly chosen domino from the talon and return it."""
        rang = random.randint(0, len(self) - 1)
        return self.pop(rang)
class Hand(list):
    """
    Model both a player and their hand of dominoes. Inherits from list:
    the instance itself contains all the dominoes currently in the
    player's hand.
    """
    def __init__(self,num,game,mode,name=None,couleur = None):
        """
        :param num: integer number assigned when the player is registered at game start
        :param game: reference to the Game instance that runs the game
        :param mode: kind of player ("human", "IA_hasard", "IA_max", ...);
                     must belong to game.modes_disponibles
        :param name: player name (string), optional; used when recording scores
        :param couleur: display colour for this player's dominoes, optional
        """
        self.num = num
        self.game = game
        self.etat_bloque = False # becomes True once the hand can definitively no longer play
        self.mode = mode # nature of the player: human or one of several AI kinds
        self.name = name # the player's name is used when scores are recorded
        self.couleur = couleur
    def domino_jouable(self):
        '''Return the dominoes of the hand that can be placed on the board.'''
        dom_jouab = []
        extr_a = self.game.plateau.extr_a
        extr_b = self.game.plateau.extr_b
        legal_a = self.game.orientations_legales("a")
        legal_b = self.game.orientations_legales("b")
        # A domino is playable if it matches an end's value AND that end
        # still has at least one legal orientation (free space).
        for domino in self :
            if ((domino.vala == extr_a or domino.valb == extr_a) and legal_a != []) or ((domino.vala == extr_b or domino.valb == extr_b) and legal_b != []) :
                dom_jouab.append(domino)
        return(dom_jouab)
    def domino_jouable_left_side(self):
        '''Return the dominoes playable ONLY on end "a" of the board, considering both values and available space.'''
        dom_jouab_left_side = []
        extr_a = self.game.plateau.extr_a
        extr_b = self.game.plateau.extr_b
        legal_a = self.game.orientations_legales("a")
        legal_b = self.game.orientations_legales("b")
        for domino in self:
            if (domino.vala == extr_a or domino.valb == extr_a) and legal_a != [] and ((domino.vala != extr_b and domino.valb != extr_b) or legal_b == []) :
                dom_jouab_left_side.append(domino) # the domino is playable on extr_a only
        return (dom_jouab_left_side)
    def domino_jouable_right_side(self):
        '''Return the dominoes playable ONLY on end "b" of the board, considering both values and available space.'''
        dom_jouab_right_side = []
        extr_a = self.game.plateau.extr_a
        extr_b = self.game.plateau.extr_b
        legal_a = self.game.orientations_legales("a")
        legal_b = self.game.orientations_legales("b")
        for domino in self:
            if (domino.vala == extr_b or domino.valb == extr_b) and legal_b != [] and ((domino.vala != extr_a and domino.valb != extr_a) or legal_a == []):
                dom_jouab_right_side.append(domino) # the domino is playable on extr_b only
        return (dom_jouab_right_side)
    def max_domino(self):
        """Return the highest-valued domino of the hand."""
        return(max(self))
    def pt_restant(self): # total points held by the hand's dominoes; players try to minimise this
        pt_total = 0
        for domino in self :
            pt_total += domino.val_totale()
        return (pt_total)
    def cinq_meme_famille(self): # check whether the hand holds too many dominoes of the same family (sharing a point value)
        count_control = [0] * (self.game.pt_max + 1)
        # A domino belongs to two families when vala != valb, to a single
        # family when both halves are equal (a double counts once).
        for domino in self :
            if domino.vala == domino.valb :
                count_control[domino.vala] += 1
            else :
                count_control[domino.vala] += 1
                count_control[domino.valb] += 1
        # NOTE(review): despite the name ("five of the same family") this
        # returns True only when a family has MORE than 5 members (6+).
        # Confirm whether the intended threshold is >= 5.
        for card_famille in count_control : # check the cardinality of each point family
            if card_famille > 5 :
                return(True)
        return(False)
| {"/IHM.py": ["/interface.py", "/Main.py"], "/Main.py": ["/Dominos.py", "/Plateau.py"], "/Tests_unitaires.py": ["/Main.py"]} |
59,686 | southwestmogrown/saltandpreppr | refs/heads/main | /app/forms/recipe_form.py | from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired, Length, ValidationError
class RecipeForm(FlaskForm):
    """Form for creating/updating a recipe: name, type and instructions.

    `type` is a short (max 10 chars) category string — its exact allowed
    values are not visible here; confirm against the routes that use it.
    """
    name = StringField('name', validators=[DataRequired(), Length(max=55)])
    type = StringField('type',validators=[DataRequired(), Length(max=10)])
    instructions = StringField('instructions', validators=[DataRequired()])
59,687 | southwestmogrown/saltandpreppr | refs/heads/main | /app/forms/add_mealplan_form.py | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField
from wtforms.validators import DataRequired, Length
class AddMealplanForm(FlaskForm):
    """Form for creating a meal plan (a name plus its owning user's id)."""
    name = StringField('name', validators=[DataRequired(), Length(max=55)])
    # No validator on userId — presumably supplied by the client/route
    # rather than typed by the user; confirm against the consuming route.
    userId = IntegerField('userId')
59,688 | southwestmogrown/saltandpreppr | refs/heads/main | /app/forms/instruction_form.py | from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
class InstructionForm(FlaskForm):
    """Form carrying only a recipe's instructions text."""
    instructions = StringField('instructions', validators=[DataRequired()])
59,689 | southwestmogrown/saltandpreppr | refs/heads/main | /app/seeds/ingredients.py | from app.models import db
from app.models.ingredient import Ingredient
# Adds a demo user, you can add other users here if you want
def seed_ingredients():
    """Insert a few demo ingredients, all attached to recipe 1."""
    demo_ingredients = [
        Ingredient(recipeId=1, name='Tortillas', type='Starch/Bread', amount='30'),
        Ingredient(recipeId=1, name='Meat', type='Protien', amount='2 lbs'),
        Ingredient(recipeId=1, name='Cheese', type='Dairy', amount='24 oz'),
    ]
    for ingredient in demo_ingredients:
        db.session.add(ingredient)
    db.session.commit()
# Uses a raw SQL query to TRUNCATE the users table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and RESET IDENTITY
# resets the auto incrementing primary key, CASCADE deletes any
# dependent entities
def undo_ingredients():
    # NOTE(review): this truncates the *users* table (copy-pasted from the
    # user seeder), not the ingredients table. CASCADE makes it wipe the
    # dependent rows too, but an "undo ingredients" helper should probably
    # truncate the ingredients table instead — confirm the actual table
    # name in app/models/ingredient.py and fix.
    db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')
    db.session.commit()
59,690 | southwestmogrown/saltandpreppr | refs/heads/main | /app/models/meal_plan.py | from .db import db
class Mealplan(db.Model):
    """A named meal plan owned by a user, linked to recipes through
    MealplanRecipe join rows."""
    __tablename__ = 'Mealplans'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(55), nullable=False)
    # Owning user (FK to Users.id).
    userId = db.Column(db.Integer, db.ForeignKey('Users.id'), nullable=False)

    user = db.relationship('User', back_populates='mealplans')
    # Join rows to recipes; removed automatically when the plan is deleted.
    mealplanrecipes = db.relationship('MealplanRecipe', back_populates='mealplan', cascade="all, delete-orphan")

    def to_dict(self):
        """Serialize the plan for JSON responses (relationships omitted)."""
        return {
            'id': self.id,
            'name': self.name,
            'userId': self.userId,
        }
59,691 | southwestmogrown/saltandpreppr | refs/heads/main | /app/models/__init__.py | from .db import db
from .user import User
from .recipe import Recipe
| {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,692 | southwestmogrown/saltandpreppr | refs/heads/main | /app/seeds/meal_plans.py | from app.models import db
from app.models.meal_plan import Mealplan
def seed_meal_plans():
    """Insert demo meal plans (two for user 1, one for user 2)."""
    demo_plans = [
        Mealplan(userId=1, name='mealplan 1'),
        Mealplan(userId=1, name='mealplan 2'),
        Mealplan(userId=2, name='mealplan 3'),
    ]
    for plan in demo_plans:
        db.session.add(plan)
    db.session.commit()
def undo_meal_plans():
    """Remove seeded data by truncating and resetting the backing table."""
    # NOTE(review): this truncates the *users* table and relies on CASCADE to
    # clear meal plans; it looks copy-pasted from the users seed. Confirm it
    # should not target the Mealplans table directly.
    db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')
db.session.commit() | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,693 | southwestmogrown/saltandpreppr | refs/heads/main | /app/forms/add_recipe_to_mealplan_form.py | from flask_wtf import FlaskForm
from wtforms import SelectField
from wtforms.validators import DataRequired
class AddRecipeToMealplanForm(FlaskForm):
    """Form used to attach an existing recipe to a meal plan."""
    # NOTE(review): DataRequired is imported in this module but not applied
    # to the field below -- confirm whether the field should be required.
recipe_id = SelectField('Recipe Id', coerce=int) | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,694 | southwestmogrown/saltandpreppr | refs/heads/main | /app/seeds/mealplan_recipe.py | from app.models import db
from app.models.mealplan_recipes import MealplanRecipe
def seed_mealplan_recipes():
    """Link recipes 1-3 to meal plan 1 via the MealplanRecipe join table."""
    for recipe_id in (1, 2, 3):
        db.session.add(MealplanRecipe(mealplanId=1, recipeId=recipe_id))
    db.session.commit()
def undo_mealplan_recipes():
    """Remove seeded data by truncating and resetting the backing table."""
    # NOTE(review): truncates *users* (CASCADE then clears the join rows);
    # likely copy-pasted -- confirm it should not target MealplanRecipes.
    db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')
db.session.commit() | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,695 | southwestmogrown/saltandpreppr | refs/heads/main | /app/forms/ingredient_form.py | from flask_wtf import FlaskForm
from wtforms import StringField, IntegerField
from wtforms.validators import DataRequired, Length
class IngredientForm(FlaskForm):
    """Validates the payload for creating a recipe ingredient."""
    # Optional numeric owner/parent references (no validators attached).
    userId = IntegerField('userId')
    recipeId = IntegerField('recipeId')
    # Required text fields; length caps match the Ingredient model columns
    # (String(100) / String(20)).
    name = StringField('name', validators=[DataRequired(), Length(max=100)])
    type = StringField('type', validators=[DataRequired(), Length(max=20)])
amount = StringField('amount', validators=[DataRequired(), Length(max=20)]) | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,696 | southwestmogrown/saltandpreppr | refs/heads/main | /app/models/ingredient.py | from .db import db
class Ingredient(db.Model):
    """An ingredient row belonging to exactly one recipe."""
    __tablename__ = 'Ingredients'
    id = db.Column(db.Integer, primary_key=True)
    # Owning recipe; deleting a Recipe removes its ingredients via the
    # delete-orphan cascade declared on Recipe.ingredients.
    recipeId = db.Column(db.Integer, db.ForeignKey('Recipes.id'), nullable=False)
    name = db.Column(db.String(100), nullable=False)
    type = db.Column(db.String(20), nullable=True)
    amount = db.Column(db.String(20), nullable=True)
    recipe = db.relationship('Recipe', back_populates='ingredients')

    def to_dict(self):
        """Serialize scalar columns to a JSON-friendly dict."""
        return {
            'id': self.id,
            'recipeId': self.recipeId,
            'name': self.name,
            'type': self.type,
            'amount': self.amount
} | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,697 | southwestmogrown/saltandpreppr | refs/heads/main | /app/forms/__init__.py | from .login_form import LoginForm
from .signup_form import SignUpForm
from .recipe_form import RecipeForm
from .instruction_form import InstructionForm
from .ingredient_form import IngredientForm
from .edit_ingredient_form import EditIngredientForm
from .add_mealplan_form import AddMealplanForm
from .add_recipe_to_mealplan_form import AddRecipeToMealplanForm | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,698 | southwestmogrown/saltandpreppr | refs/heads/main | /app/models/mealplan_recipes.py | from .db import db
class MealplanRecipe(db.Model):
    """Join row linking one meal plan to one recipe."""
    __tablename__ = 'MealplanRecipes'
    id = db.Column(db.Integer, primary_key=True)
    mealplanId = db.Column(db.Integer, db.ForeignKey('Mealplans.id'), nullable=False)
    recipeId = db.Column(db.Integer, db.ForeignKey('Recipes.id'), nullable=False)
    mealplan = db.relationship('Mealplan', back_populates='mealplanrecipes')
    recipes = db.relationship('Recipe', back_populates='mealplanrecipe')

    def to_dict(self):
        """Serialize the association row to a JSON-friendly dict."""
        return {
            'id': self.id,
            'mealplanId': self.mealplanId,
            'recipeId': self.recipeId
} | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,699 | southwestmogrown/saltandpreppr | refs/heads/main | /app/models/recipe.py | from .db import db
class Recipe(db.Model):
    """A user-owned recipe with free-text instructions."""
    __tablename__ = 'Recipes'
    id = db.Column(db.Integer, primary_key=True)
    userId = db.Column(db.Integer, db.ForeignKey('Users.id'), nullable=False)
    name = db.Column(db.String(255), nullable=False)
    type = db.Column(db.String(10))  # seeds use 'Breakfast' / 'Lunch' / 'Dinner'
    instructions = db.Column(db.Text, nullable=False)
    user = db.relationship('User', back_populates='recipes')
    # Children are removed together with the recipe (delete-orphan cascade).
    ingredients = db.relationship('Ingredient', back_populates='recipe', cascade="all, delete-orphan")
    mealplanrecipe = db.relationship('MealplanRecipe', back_populates='recipes', cascade="all, delete-orphan")

    def to_dict(self):
        """Serialize scalar columns to a JSON-friendly dict."""
        return {
            'id': self.id,
            'userId': self.userId,
            'name': self.name,
            'type': self.type,
            'instructions': self.instructions
} | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,700 | southwestmogrown/saltandpreppr | refs/heads/main | /app/api/user_routes.py | from app.forms.add_recipe_to_mealplan_form import AddRecipeToMealplanForm
from app.forms.edit_ingredient_form import EditIngredientForm
from app.models import ingredient
from app.models.ingredient import Ingredient
from flask import Blueprint, jsonify, request
from flask_login import login_required
from app.models import db, User, Recipe
from app.models.meal_plan import Mealplan
from app.forms.recipe_form import RecipeForm
from app.forms.instruction_form import InstructionForm
from app.models.mealplan_recipes import MealplanRecipe
from app.forms.ingredient_form import IngredientForm
from app.forms.add_mealplan_form import AddMealplanForm
def validation_errors_to_error_messages(validation_errors):
    """Flatten WTForms validation errors into a list of 'field : error' strings.

    :param validation_errors: mapping of field name -> list of error strings.
    :returns: one formatted string per (field, error) pair, in input order.
    """
    return [
        f'{field} : {error}'
        for field, errors in validation_errors.items()
        for error in errors
    ]
# Blueprint for the user-scoped routes defined below.
user_routes = Blueprint('users', __name__)
@user_routes.route('/')
@login_required
def users():
    """Return every user as {'users': [user dicts]}."""
    return {'users': [u.to_dict() for u in User.query.all()]}
@user_routes.route('/<int:id>')
@login_required
def user(id):
    """Return a single user's dict, or a 400 error when the id is unknown."""
    user = User.query.get(id)
    if user:
        return user.to_dict()
    # Guard added: .get() returns None for unknown ids, which previously
    # raised AttributeError (HTTP 500) on user.to_dict().
    return {'message': 'The user {} does not exist!'.format(id)}, 400
######### Recipe Routes ############
def get_all_recipes(userId):
    """Fetch every recipe owned by `userId`."""
    return Recipe.query.filter(Recipe.userId == userId).all()
@user_routes.route('/<int:id>/recipes')
@login_required
def recipes(id):
    """List user `id`'s recipes as {'recipes': [recipe dicts]}."""
    return {'recipes': [r.to_dict() for r in get_all_recipes(id)]}
@user_routes.route('/<int:userId>/recipes/<int:recipeId>')
@login_required
def get_one_recipe(userId, recipeId):
    """Return one recipe's dict, or a 400 error if it does not exist.

    `userId` is only part of the URL scheme; the lookup uses recipeId alone.
    """
    recipe = Recipe.query.where(Recipe.id == recipeId).first()
    if recipe:
        return recipe.to_dict()
    # Guard added: .first() returns None for unknown ids (was a 500).
    return {'message': 'The recipe {} does not exist!'.format(recipeId)}, 400
@user_routes.route('/<int:id>/recipes', methods=['POST'])
@login_required
def add_recipe(id):
    """Create a recipe from the JSON body and return the user's full list.

    Returns {'errors': [...]} with HTTP 401 when form validation fails.
    """
    data = request.get_json()
    form = RecipeForm()
    # CSRF token arrives in a cookie rather than the form body.
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        new_recipe = Recipe(userId=data['userId'], name=data['name'],
                            type=data['type'], instructions=data['instructions'])
        db.session.add(new_recipe)
        db.session.commit()
        recipes = get_all_recipes(id)
        return {'recipes': [recipe.to_dict() for recipe in recipes]}
    # (leftover debug print(data) removed)
    return {'errors': validation_errors_to_error_messages(form.errors)}, 401
@user_routes.route('/<int:id>/recipes/<int:recipeId>', methods=['PATCH'])
@login_required
def edit_recipe(id, recipeId):
    """Replace a recipe's instructions; 401 on validation error, 400 if absent."""
    data = request.get_json()
    form = InstructionForm()
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        recipe = Recipe.query.get(recipeId)
        if recipe is None:
            # Guard added: .get() yields None for unknown ids (was a 500).
            return {'message': 'The recipe {} does not exist!'.format(recipeId)}, 400
        recipe.instructions = data['instructions']
        db.session.commit()
        return recipe.to_dict()
    # (leftover debug print(data) removed)
    return {'errors': validation_errors_to_error_messages(form.errors)}, 401
@user_routes.route('/<int:userId>/recipes/<int:recipeId>', methods=['DELETE'])
@login_required
def delete_recipe(userId, recipeId):
    """Delete a recipe (children cascade) and return the remaining list."""
    recipe = Recipe.query.get(recipeId)
    if recipe is None:
        # Guard added: db.session.delete(None) raised an unhandled error.
        return {'message': 'The recipe {} does not exist!'.format(recipeId)}, 400
    db.session.delete(recipe)
    db.session.commit()
    recipes = get_all_recipes(userId)
    return {'recipes': [recipe.to_dict() for recipe in recipes]}
########### Ingredients routes ###########
@user_routes.route('/<int:userId>/recipes/<int:recipeId>/ingredients')
@login_required
def get_all_ingredients(userId, recipeId):
    """List every ingredient attached to `recipeId`."""
    rows = Ingredient.query.where(Ingredient.recipeId == recipeId).all()
    return {'ingredients': [row.to_dict() for row in rows]}
@user_routes.route('/<int:userId>/recipes/<int:recipeId>/ingredients/<int:ingredientId>')
@login_required
def get_one_ingredient(userId, recipeId, ingredientId):
    """Return one ingredient's dict, or a 400 error if absent."""
    ingredient = Ingredient.query.where(Ingredient.id == ingredientId).first()
    if ingredient:
        return ingredient.to_dict()
    # Guard added: .first() returns None for unknown ids (was a 500).
    return {'message': 'The ingredient {} does not exist!'.format(ingredientId)}, 400
@user_routes.route('/<int:userId>/recipes/<int:recipeId>/ingredients', methods=['POST'])
@login_required
def add_ingredient(userId, recipeId):
    """Validate and create an ingredient, then return the recipe's full list."""
    data = request.get_json()
    form = IngredientForm()
    # CSRF token arrives in a cookie rather than the form body.
    form['csrf_token'].data = request.cookies['csrf_token']
    if not form.validate_on_submit():
        return {'errors': validation_errors_to_error_messages(form.errors)}, 401
    db.session.add(Ingredient(recipeId=data['recipeId'], name=data['name'],
                              type=data['type'], amount=data['amount']))
    db.session.commit()
    return get_all_ingredients(userId, recipeId)
@user_routes.route('/<int:userId>/recipes/<int:recipeId>/ingredients/<int:ingredientId>', methods=['PATCH'])
@login_required
def edit_ingredient(userId, recipeId, ingredientId):
    """Update name/type/amount of an ingredient; 401 invalid, 400 absent."""
    data = request.get_json()
    form = EditIngredientForm()
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        ingredient = Ingredient.query.get(ingredientId)
        if ingredient is None:
            # Guard added: .get() yields None for unknown ids (was a 500).
            return {'message': 'The ingredient {} does not exist!'.format(ingredientId)}, 400
        ingredient.name = data['name']
        ingredient.type = data['type']
        ingredient.amount = data['amount']
        db.session.commit()
        return ingredient.to_dict()
    return {'errors': validation_errors_to_error_messages(form.errors)}, 401
@user_routes.route('/<int:userId>/recipes/<int:recipeId>/ingredients/<int:ingredientId>', methods=['DELETE'])
@login_required
def delete_ingredient(userId, recipeId, ingredientId):
    """Delete an ingredient and return the recipe's remaining ingredients."""
    ingredient = Ingredient.query.get(ingredientId)
    if ingredient is None:
        # Guard added: db.session.delete(None) raised an unhandled error.
        return {'message': 'The ingredient {} does not exist!'.format(ingredientId)}, 400
    db.session.delete(ingredient)
    db.session.commit()
    return get_all_ingredients(userId, recipeId)
################### Mealplan Routes #####################################
@user_routes.route('/<int:userId>/mealplans')
@login_required
def get_mealplans(userId):
    """List every meal plan owned by `userId`."""
    plans = Mealplan.query.where(Mealplan.userId == userId).all()
    return {'mealplans': [plan.to_dict() for plan in plans]}
@user_routes.route('/<int:userId>/mealplans/<int:mealplanId>')
@login_required
def get_single_mealplan(userId, mealplanId):
    """Return one meal plan's dict, or a 400 error if absent."""
    mealplan = Mealplan.query.where(Mealplan.id == mealplanId).first()
    if mealplan:
        return mealplan.to_dict()
    # Guard added: .first() returns None for unknown ids (was a 500).
    return {'message': 'The mealplan {} does not exist!'.format(mealplanId)}, 400
@user_routes.route('/<int:userId>/mealplans', methods=['POST'])
@login_required
def add_mealplan(userId):
    """Create a meal plan from the JSON body and return the user's full list."""
    data = request.get_json()
    form = AddMealplanForm()
    form['csrf_token'].data = request.cookies['csrf_token']
    if form.validate_on_submit():
        new_mealplan = Mealplan(name=data['name'], userId=data['userId'])
        db.session.add(new_mealplan)
        db.session.commit()
        # .all() added for consistency with get_mealplans (iterating a bare
        # Query also works, but materializing is the convention in this file).
        mealplans = Mealplan.query.where(Mealplan.userId == userId).all()
        return {'mealplans': [mealplan.to_dict() for mealplan in mealplans]}
    return {'errors': validation_errors_to_error_messages(form.errors)}, 401
@user_routes.route('/<int:userId>/mealplans/<int:mealplanId>', methods=['DELETE'])
@login_required
def delete_mealplan(userId, mealplanId):
    """Delete a meal plan and return the user's remaining plans."""
    mealplan = Mealplan.query.get(mealplanId)
    if mealplan is None:
        # Guard added: db.session.delete(None) raised an unhandled error.
        return {'message': 'The mealplan {} does not exist!'.format(mealplanId)}, 400
    db.session.delete(mealplan)
    db.session.commit()
    mealplans = Mealplan.query.where(Mealplan.userId == userId).all()
    return {'mealplans': [mealplan.to_dict() for mealplan in mealplans]}
############ Mealplan Recipe Routes ##################
@user_routes.route('/<int:userId>/mealplans/<int:mealplanId>/mealplan-recipes')
@login_required
def get_mealplan_recipes(userId, mealplanId):
    """Resolve a meal plan's join rows to full recipe dicts."""
    links = MealplanRecipe.query.where(MealplanRecipe.mealplanId == mealplanId).all()
    mealplan_recipes = []
    for link in links:
        recipe = Recipe.query.get(link.recipeId)
        # Skip dangling join rows instead of crashing on None.to_dict().
        if recipe is not None:
            mealplan_recipes.append(recipe)
    return {'mealplan_recipes': [recipe.to_dict() for recipe in mealplan_recipes]}
@user_routes.route('/<int:userId>/mealplans/<int:mealplanId>/mealplan-recipe-ids')
@login_required
def get_mealplan_recipe_ids(userId, mealplanId):
    """List the raw MealplanRecipe join rows for one meal plan."""
    links = MealplanRecipe.query.where(MealplanRecipe.mealplanId == mealplanId).all()
    return {'mealplan_recipe_ids': [link.to_dict() for link in links]}
@user_routes.route('/<int:userId>/mealplans/<int:mealplanId>/mealplan-recipes/<int:recipeId>')
@login_required
def get_single_mealplan_recipe(userId, mealplanId, recipeId):
    """Return the join row for `recipeId`, or a 400 error if absent.

    Note: the lookup filters by recipeId only; mealplanId is not used.
    """
    mealplan_recipe = MealplanRecipe.query.where(MealplanRecipe.recipeId == recipeId).first()
    if mealplan_recipe:
        return mealplan_recipe.to_dict()
    # Guard added: .first() returns None for unknown ids (was a 500).
    return {'message': 'The mealplan recipe {} does not exist!'.format(recipeId)}, 400
@user_routes.route('/<int:userId>/mealplans/<int:mealplanId>/mealplan-recipes', methods=['POST'])
@login_required
def add_mealplan_recipe(userId, mealplanId):
    """Attach a recipe to a meal plan, then return the plan's recipes."""
    payload = request.get_json()
    link = MealplanRecipe(mealplanId=payload['mealplanId'], recipeId=payload['recipeId'])
    db.session.add(link)
    db.session.commit()
    return get_mealplan_recipes(userId, mealplanId)
@user_routes.route('/<int:userId>/mealplans/<int:mealplanId>/mealplan-recipes/<int:mealplanRecipeId>', methods=['DELETE'])
@login_required
def delete_mealplan_recipe(userId, mealplanId, mealplanRecipeId):
    """Detach a recipe from a meal plan and return the remaining recipes."""
    mealplan_recipe = MealplanRecipe.query.get(mealplanRecipeId)
    if mealplan_recipe is None:
        # Guard added: db.session.delete(None) raised an unhandled error.
        return {'message': 'The mealplan recipe {} does not exist!'.format(mealplanRecipeId)}, 400
    db.session.delete(mealplan_recipe)
    db.session.commit()
    return get_mealplan_recipes(userId, mealplanId)
| {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,701 | southwestmogrown/saltandpreppr | refs/heads/main | /app/seeds/recipes.py | from app.models import db, Recipe
# Adds a demo user, you can add other users here if you want
def seed_recipes():
    """Seed three demo recipes (dinner/lunch/breakfast) owned by user 1."""
    demo_recipes = [
        Recipe(
            userId=1,
            name='Enchiladas',
            type='Dinner',
            instructions='Nulla porttitor accumsan tincidunt. Proin eget tortor risus. Donec sollicitudin molestie malesuada. Curabitur aliquet quam id dui posuere blandit. Donec sollicitudin molestie malesuada. Vivamus suscipit tortor eget felis porttitor volutpat. Vestibulum ac diam sit amet quam vehicula elementum sed sit amet dui. Pellentesque in ipsum id orci porta dapibus. Praesent sapien massa, convallis a pellentesque nec, egestas non nisi. Praesent sapien massa, convallis a pellentesque nec, egestas non nisi.',
        ),
        Recipe(
            userId=1,
            name='Grilled Cheese',
            type='Lunch',
            instructions='Vestibulum ac diam sit amet quam vehicula elementum sed sit amet dui. Curabitur non nulla sit amet nisl tempus convallis quis ac lectus.',
        ),
        Recipe(
            userId=1,
            name='Pancakes',
            type='Breakfast',
            instructions='Donec rutrum congue leo eget malesuada. Lorem ipsum dolor sit amet, consectetur adipiscing elit. Curabitur non nulla sit amet nisl tempus convallis quis ac lectus.',
        ),
    ]
    for recipe in demo_recipes:
        db.session.add(recipe)
    db.session.commit()
# Uses a raw SQL query to TRUNCATE the users table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and RESET IDENTITY
# resets the auto incrementing primary key, CASCADE deletes any
# dependent entities
def undo_recipes():
    """Remove seeded data by truncating and resetting the backing table."""
    # NOTE(review): truncates *users* and relies on CASCADE to clear Recipes
    # via the userId FK -- but that also wipes the users themselves. Confirm
    # this should not target the Recipes table directly.
    db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')
db.session.commit() | {"/app/seeds/ingredients.py": ["/app/models/__init__.py", "/app/models/ingredient.py"], "/app/models/__init__.py": ["/app/models/recipe.py"], "/app/seeds/meal_plans.py": ["/app/models/__init__.py", "/app/models/meal_plan.py"], "/app/seeds/mealplan_recipe.py": ["/app/models/__init__.py", "/app/models/mealplan_recipes.py"], "/app/forms/__init__.py": ["/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py", "/app/forms/add_recipe_to_mealplan_form.py"], "/app/api/user_routes.py": ["/app/forms/add_recipe_to_mealplan_form.py", "/app/models/__init__.py", "/app/models/ingredient.py", "/app/models/meal_plan.py", "/app/forms/recipe_form.py", "/app/forms/instruction_form.py", "/app/models/mealplan_recipes.py", "/app/forms/ingredient_form.py", "/app/forms/add_mealplan_form.py"], "/app/seeds/recipes.py": ["/app/models/__init__.py"]} |
59,707 | diegoplascencia/Warehouse_REST_API | refs/heads/master | /create_database.py | import sqlite3
def create_db():
    """Create warehouse_inventory.db (if missing) with the `inventory` table.

    Idempotent: the CREATE TABLE below uses IF NOT EXISTS. Prints progress
    banners to stdout as a side effect.
    """
    connection = sqlite3.connect('warehouse_inventory.db')
    cursor = connection.cursor()
    print("\n......................................")
    print("Creating DB...")
    print("......................................\n")
create_table = """CREATE TABLE IF NOT EXISTS inventory (id INTEGER PRIMARY KEY, item text,
price real, existance int)"""
cursor.execute(create_table)
print("\n......................................")
print("DB created...")
print("......................................\n")
connection.commit()
connection.close() | {"/app.py": ["/create_database.py"]} |
59,708 | diegoplascencia/Warehouse_REST_API | refs/heads/master | /app.py | from flask import Flask, render_template
from flask_restful import Resource, Api, reqparse
import sqlite3
from create_database import create_db
create_db()
app = Flask(__name__)
api = Api(app)
@app.route('/')
def index():
    """Render the landing page."""
    return render_template("base.html")
class ItemList(Resource):
    """REST resource listing every row in the inventory table."""

    def get(self):
        """Return all inventory rows (list of row tuples)."""
        connection = sqlite3.connect('warehouse_inventory.db')
        try:
            cursor = connection.cursor()
            # fetchall() replaces the manual append loop; no commit is
            # needed for a read-only SELECT.
            all_items = cursor.execute("SELECT * FROM inventory").fetchall()
        finally:
            # Close even when the query raises (old code leaked on error).
            connection.close()
        return all_items
class Item(Resource):
    """REST resource for a single inventory item, keyed by item name."""

    # Shared request parser: both fields are mandatory in POST/PUT bodies.
    parser = reqparse.RequestParser()
    parser.add_argument('price', type=float, required=True,
                        help="This field can not be left blank!")
    parser.add_argument('existance', type=int, required=True,
                        help="This field can not be left blank!")

    def get(self, item_name):
        """Return the row for `item_name`, or a 400 error if absent."""
        connection = sqlite3.connect('warehouse_inventory.db')
        try:
            cursor = connection.cursor()
            get_item = "SELECT * FROM inventory WHERE item=?"
            row = cursor.execute(get_item, (item_name,)).fetchone()
        finally:
            # Fix: the original never closed this connection.
            connection.close()
        if row:
            return row
        return {'message': 'The item {} does not exist!'.format(item_name)}, 400

    def post(self, item_name):
        """Create `item_name`; 400 if it already exists."""
        data = Item.parser.parse_args()
        connection = sqlite3.connect('warehouse_inventory.db')
        try:
            cursor = connection.cursor()
            duplicate = cursor.execute(
                "SELECT * FROM inventory WHERE item=?", (item_name,)).fetchone()
            if duplicate:
                # Fix: this early return used to leak the connection.
                return {'message': 'The item {} already exists!'.format(item_name)}, 400
            cursor.execute("INSERT INTO inventory VALUES (NULL, ?, ?, ?)",
                           (item_name, data['price'], data['existance']))
            connection.commit()
        finally:
            connection.close()
        return {"message": "Item: {} added to the db.".format(item_name)}

    def put(self, item_name):
        """Upsert: update price/existance if `item_name` exists, else insert."""
        data = Item.parser.parse_args()
        connection = sqlite3.connect('warehouse_inventory.db')
        try:
            cursor = connection.cursor()
            existing = cursor.execute(
                "SELECT id FROM inventory WHERE item=?", (item_name,)).fetchone()
            # (leftover debug print removed)
            if existing:
                cursor.execute(
                    "UPDATE inventory SET price=?, existance=? WHERE id=?",
                    (data['price'], data['existance'], existing[0]))
                connection.commit()
                return {"message": "Item: {} values updated.".format(item_name)}
            cursor.execute("INSERT INTO inventory VALUES (NULL, ?, ?, ?)",
                           (item_name, data['price'], data['existance']))
            connection.commit()
            return {"message": "Item: {} added to the db.".format(item_name)}
        finally:
            connection.close()

    def delete(self, item_name):
        """Delete `item_name`; 400 if it does not exist."""
        connection = sqlite3.connect('warehouse_inventory.db')
        try:
            cursor = connection.cursor()
            exists = cursor.execute(
                "SELECT * FROM inventory WHERE item=?", (item_name,)).fetchone()
            if not exists:
                # Fix: this early return used to leak the connection.
                return {'message': 'The item {} does not exist!'.format(item_name)}, 400
            cursor.execute("DELETE FROM inventory WHERE item=?", (item_name,))
            connection.commit()
            return {'message': 'Item {} deleted!!'.format(item_name)}
        finally:
            connection.close()
# Route map: /item/<name> -> single-item CRUD, /items_list -> full listing.
api.add_resource(Item, '/item/<string:item_name>')
api.add_resource(ItemList, '/items_list')
app.run(port = 5000, debug = True) | {"/app.py": ["/create_database.py"]} |
59,709 | jesuschm/tasca | refs/heads/master | /tests/unit_tests/domain/users/fixtures/user_model_fixtures.py | import pytest
import uuid
from src.domain.users.user import User
from dataclasses import asdict, dataclass
# Shared payload for the User fixture below. 'follows' is defined here but
# not passed to User() -- presumably reserved for follow-related tests.
dummy_data = {
    'id': uuid.uuid4(),
    'username': 'dummy_name',
    'follows': []
}
@pytest.fixture
def dummy_user_instance_fixture():
    """Build a User from dummy_data (uses only the id and username keys)."""
    dummy_user_instance_fixture = User(id = dummy_data.get('id'), username= dummy_data.get('username'))
return dummy_user_instance_fixture | {"/tests/unit_tests/domain/users/fixtures/user_model_fixtures.py": ["/src/domain/users/user.py"], "/tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py": ["/src/infra/databases/mongo.py"], "/tests/unit_tests/domain/messages/test_message_model.py": ["/tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py"], "/tests/unit_tests/application/test_users_service.py": ["/src/application/users_service.py"], "/src/application/commands_service.py": ["/src/domain/messages/message.py", "/src/domain/users/user.py"], "/tests/unit_tests/application/test_commands_service.py": ["/src/application/commands_service.py", "/tests/unit_tests/application/fixtures/commands_service_fixtures.py"], "/tests/unit_tests/application/fixtures/commands_service_fixtures.py": ["/src/domain/users/user.py", "/src/domain/messages/message.py"], "/src/application/users_service.py": ["/src/domain/users/user.py"], "/src/scripts/createuser.py": ["/src/application/users_service.py", "/src/infra/databases/mongo.py"], "/tests/unit_tests/domain/users/test_user_model.py": ["/tests/unit_tests/domain/users/fixtures/user_model_fixtures.py"], "/tests/integration_tests/infra/databases/test_mongo_repo.py": ["/tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py"], "/src/infra/databases/mongo.py": ["/src/config/settings.py", "/src/infra/databases/repo.py"], "/tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py": ["/src/domain/messages/message.py"]} |
59,710 | jesuschm/tasca | refs/heads/master | /src/domain/messages/message.py | from datetime import datetime
import uuid
import timeago
from dataclasses import dataclass, asdict, field
@dataclass
class Message():
    """A user-authored message with id and creation timestamp."""
    content: str
    user_id: uuid.UUID
    id: uuid.UUID = field(default_factory=uuid.uuid4)
    # NOTE(review): this default is evaluated ONCE, at class-definition time,
    # so every Message created without an explicit created_at shares the same
    # timestamp. It probably should be field(default_factory=datetime.now) --
    # confirm before changing, since callers may rely on the current value.
    created_at: datetime = datetime.now()

    @classmethod
    def from_dict(self, d):
        """Build a Message from a mapping of field names to values.

        NOTE(review): classmethod first parameter is conventionally `cls`.
        """
        return self(**d)

    def to_dict(self):
        """Return the message as a plain dict (via dataclasses.asdict)."""
        return asdict(self)

    @classmethod
    def print_messages(self, messages, usernames):
        """Print each message as '<username> - <content> (<time ago>)'.

        `messages` is an iterable of dicts; `usernames` maps user_id -> name,
        with 'Unknown' used for ids missing from the map.
        """
        for m in messages:
            message = Message.from_dict(m) # A way to validate the message
            ago = timeago.format(message.created_at, datetime.now())
            username = usernames.get(message.user_id, 'Unknown')
print(f"{username} - {message.content} ({ago})") | {"/tests/unit_tests/domain/users/fixtures/user_model_fixtures.py": ["/src/domain/users/user.py"], "/tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py": ["/src/infra/databases/mongo.py"], "/tests/unit_tests/domain/messages/test_message_model.py": ["/tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py"], "/tests/unit_tests/application/test_users_service.py": ["/src/application/users_service.py"], "/src/application/commands_service.py": ["/src/domain/messages/message.py", "/src/domain/users/user.py"], "/tests/unit_tests/application/test_commands_service.py": ["/src/application/commands_service.py", "/tests/unit_tests/application/fixtures/commands_service_fixtures.py"], "/tests/unit_tests/application/fixtures/commands_service_fixtures.py": ["/src/domain/users/user.py", "/src/domain/messages/message.py"], "/src/application/users_service.py": ["/src/domain/users/user.py"], "/src/scripts/createuser.py": ["/src/application/users_service.py", "/src/infra/databases/mongo.py"], "/tests/unit_tests/domain/users/test_user_model.py": ["/tests/unit_tests/domain/users/fixtures/user_model_fixtures.py"], "/tests/integration_tests/infra/databases/test_mongo_repo.py": ["/tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py"], "/src/infra/databases/mongo.py": ["/src/config/settings.py", "/src/infra/databases/repo.py"], "/tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py": ["/src/domain/messages/message.py"]} |
59,711 | jesuschm/tasca | refs/heads/master | /tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py | import pytest
from src.infra.databases.mongo import MongoRepository
# Payload whose key deliberately does not match the pk_field used by the
# integration test, driving the expected-failure (xfail) upsert case.
bad_data = {
    'good_id': 1
}
@pytest.fixture
def mongo_repo_instance_fixture():
    """Provide a MongoRepository wired to the configured database."""
    return MongoRepository()
| {"/tests/unit_tests/domain/users/fixtures/user_model_fixtures.py": ["/src/domain/users/user.py"], "/tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py": ["/src/infra/databases/mongo.py"], "/tests/unit_tests/domain/messages/test_message_model.py": ["/tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py"], "/tests/unit_tests/application/test_users_service.py": ["/src/application/users_service.py"], "/src/application/commands_service.py": ["/src/domain/messages/message.py", "/src/domain/users/user.py"], "/tests/unit_tests/application/test_commands_service.py": ["/src/application/commands_service.py", "/tests/unit_tests/application/fixtures/commands_service_fixtures.py"], "/tests/unit_tests/application/fixtures/commands_service_fixtures.py": ["/src/domain/users/user.py", "/src/domain/messages/message.py"], "/src/application/users_service.py": ["/src/domain/users/user.py"], "/src/scripts/createuser.py": ["/src/application/users_service.py", "/src/infra/databases/mongo.py"], "/tests/unit_tests/domain/users/test_user_model.py": ["/tests/unit_tests/domain/users/fixtures/user_model_fixtures.py"], "/tests/integration_tests/infra/databases/test_mongo_repo.py": ["/tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py"], "/src/infra/databases/mongo.py": ["/src/config/settings.py", "/src/infra/databases/repo.py"], "/tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py": ["/src/domain/messages/message.py"]} |
59,712 | jesuschm/tasca | refs/heads/master | /src/infra/databases/repo.py | from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
@dataclass
class Repo(metaclass=ABCMeta):
    """Abstract repository interface.

    Any class that exposes callable ``upsert`` and ``get`` attributes is
    recognized as a structural subclass via ``__subclasshook__``.
    """

    @classmethod
    def __subclasshook__(cls, subclass):
        # BUG FIX: the hook was misspelled ``__subclasshook_`` (one trailing
        # underscore) and lacked @classmethod, so ABCMeta never consulted it.
        # Returning NotImplemented on a miss preserves normal inheritance
        # checks (the stdlib-recommended pattern).
        if (hasattr(subclass, 'upsert') and
                callable(subclass.upsert) and
                hasattr(subclass, 'get') and
                callable(subclass.get)):
            return True
        return NotImplemented

    @abstractmethod
    def upsert(self):
        """Insert or update a record; implemented by concrete repositories."""
        raise NotImplementedError

    @abstractmethod
    def get(self):
        """Fetch one or more records; implemented by concrete repositories."""
        raise NotImplementedError
59,713 | jesuschm/tasca | refs/heads/master | /tests/unit_tests/domain/messages/test_message_model.py | import pytest
from .fixtures.message_model_fixtures import dummy_message_instance_fixture, dummy_data
class TestMessageModel(object):
    """Unit tests for the Message domain model."""

    def test_001_from_dict(self, dummy_message_instance_fixture):
        """from_dict/to_dict must round-trip the source dict unchanged."""
        msg = dummy_message_instance_fixture.from_dict(dummy_data)
        assert msg.to_dict() == dummy_data
59,714 | jesuschm/tasca | refs/heads/master | /tests/unit_tests/application/test_users_service.py | import pytest
import mock
from src.application.users_service import create_user, get_user
@pytest.fixture
def mocked_mongo_repo():
    """Mock repository: user creation succeeds and lookups find a user."""
    return mock.Mock(**{
        'create_user.return_value': True,
        'get_user.return_value': ['dummy_user'],
    })
class TestUsersService(object):
    """Unit tests for the users service, backed by a mocked repository."""

    def test_001_create_user(self, mocked_mongo_repo):
        """create_user returns the (truthy) status from the repository."""
        rc = create_user(repo = mocked_mongo_repo, username = 'dummy_user')
        assert rc

    def test_002_get_user(self, mocked_mongo_repo):
        """get_user returns whatever the repository lookup yields."""
        rc = get_user(repo = mocked_mongo_repo, username = 'dummy_user')
        assert rc
59,715 | jesuschm/tasca | refs/heads/master | /src/application/commands_service.py | import logging
from src.domain.messages.message import Message
from src.domain.users.user import User
from src.domain.messages.message import Message
def post(repo, username: str, message: str):
    """Persist a new message authored by ``username``.

    Raises:
        Exception: if the username is unknown to the repository.
    """
    logging.debug(f"[+] {username} user is posting: {message}.")
    user = User.from_dict(repo.get_user(username=username))
    if not user:
        raise Exception(f"{username} user not found")
    # Keep the original parameter intact; bind the model to a new name.
    new_message = Message(content=message, user_id=user.id)
    return repo.insert_message(message=new_message.to_dict())
def read(repo, username: str):
    """Print and return ``username``'s own timeline (their messages only).

    Raises:
        Exception: if the username is unknown to the repository.
    """
    logging.debug(f"[+] Reading {username}'s timeline")
    user = User.from_dict(repo.get_user(username=username))
    if not user:
        raise Exception(f"{username} user not found")
    timeline = repo.get_messages(user_ids=[user.id])
    # Single-author timeline: only this user's id needs a display name.
    Message.print_messages(messages=timeline,
                           usernames={user.id: user.username})
    return timeline
def follow(repo, username: str, follow_username: str):
    """Make ``username`` follow ``follow_username``.

    Returns:
        The repository's update status for the modified user document.

    Raises:
        Exception: if either username is unknown to the repository.
    """
    logging.debug(f"[+] User {username} wants to follow user {follow_username}.")
    # Both lookups happen up front, mirroring the original call order.
    user = User.from_dict(repo.get_user(username=username))
    follow_user = User.from_dict(repo.get_user(username=follow_username))
    if not user:
        raise Exception(f"{username} user not found")
    if not follow_user:
        raise Exception(f"{follow_username} user to follow not found")
    user.add_follow(follow_user.id)
    return repo.update_user(user=user.to_dict())
def wall(repo, username: str):
    """Print and return the wall: messages by ``username`` and everyone they follow.

    Args:
        repo: repository exposing get_user / get_user_by_id / get_messages.
        username: owner of the wall.

    Returns:
        The messages fetched from the repository, or None if never assigned.

    Raises:
        Exception: if the username is unknown to the repository.
    """
    res = None
    logging.debug(f"[+] Reading user {username} wall.")
    user = User.from_dict(repo.get_user(username = username))
    if user:
        ids = [user.id]
        # Username is a dict key-value (id-username)
        usernames = {
            user.id: user.username
        }
        if user.follows and isinstance(user.follows, list):
            ids.extend(user.follows)
            # Resolve each followed id to a display name for printing.
            for f in user.follows:
                usernames.update({
                    f: repo.get_user_by_id(f).get('username')
                })
        res = repo.get_messages(user_ids = ids)
        Message.print_messages(messages = res, usernames = usernames)
    else:
        raise Exception(f"{username} user not found")
    return res
59,716 | jesuschm/tasca | refs/heads/master | /src/tasca.py | import sys
import logging
from application.commands_service import post, read, follow, wall
from infra.databases.mongo import MongoRepository
# Module-level singletons: one repository connection for the whole shell.
_repo = MongoRepository()
# NOTE(review): _verbose is defined but never read in this chunk — confirm use.
_verbose = False
def main():
    """Run the interactive Tasca shell.

    Reads commands from stdin and dispatches them:
      ``user -> message``     post a message
      ``user follows other``  follow another user
      ``user wall``           print the aggregated wall
      ``user``                read the user's own timeline
    Exits on Control+C.
    """
    try:
        command = None
        if _repo.client:
            logging.info("[+] Hello friend! Welcome to the Tasca. Get fun! ")
            logging.info("[+] Control + C to exit.\n")
            # NOTE(review): input() raises KeyboardInterrupt on Ctrl+C, so the
            # "^C" comparison never matches; the loop runs until interrupted.
            while command != "^C":
                try:
                    command = str(input("> "))
                    # Posting command
                    if '->' in command:
                        data = command.split(" -> ")
                        if len(data) == 2:
                            post(_repo, username = data[0], message = data[1])
                        else:
                            logging.error("[-] Bad post command. Correct format: [username] -> [message].")
                    elif 'follows' in command:
                        data = command.split(" follows ")
                        if len(data) == 2:
                            user = data[0]
                            follow_user = data[1]
                            rc = follow(_repo, username = user, follow_username = follow_user)
                            if rc:
                                logging.debug(f"[+] {user} is now following {follow_user}.")
                            else:
                                logging.error(f"[-] Error trying to follow {follow_user}")
                        else:
                            logging.error("[-] Bad follow command. Correct format: [username] -> [username].")
                    elif 'wall' in command:
                        data = command.split(" wall")
                        if len(data) == 2 and data[1] == '':
                            wall(_repo, username = data[0])
                        else:
                            logging.error("[-] Bad wall command. Correct format: [username] wall.")
                    else:
                        data = command.split(" ")
                        if len(data) == 1:
                            read(_repo, username = command)
                        else:
                            logging.error("[-] Bad username to read. Usernames don't contain spaces.")
                except Exception as e:
                    logging.error(f"[-] Error: {e}.")
        else:
            # BUG FIX: ``raise("...")`` raises a plain str, which is itself a
            # TypeError in Python 3 (exceptions must derive from BaseException).
            raise Exception("Database not connected.")
    except KeyboardInterrupt:
        logging.info(f"\n[+] Quitting.. Bye!")
        sys.exit(0)
    except Exception as e:
        logging.error(f"[-] Error: {e}. Quitting.")
        sys.exit(1)
# Launch the interactive shell only when executed directly.
if __name__ == "__main__":
    """Entry point
    """
    main()
59,717 | jesuschm/tasca | refs/heads/master | /tests/unit_tests/application/test_commands_service.py | from src.application.commands_service import post, read, follow, wall
from .fixtures.commands_service_fixtures import mocked_mongo_repo
class TestCommandsService(object):
    """Unit tests for the command service, backed by a mocked repository."""

    def test_001_post(self, mocked_mongo_repo):
        """post returns the repository's (truthy) insertion status."""
        rc = post(repo = mocked_mongo_repo, username = 'dummy_user', message = 'dummy_message')
        assert rc

    def test_002_read(self, mocked_mongo_repo):
        """read returns the (non-empty) timeline fetched from the repo."""
        rc = read(repo = mocked_mongo_repo, username = 'dummy_user')
        assert rc

    def test_003_follow(self, mocked_mongo_repo):
        """follow returns the repository's (truthy) update status."""
        rc = follow(repo = mocked_mongo_repo, username = 'dummy_user', follow_username = 'dummy_user_2')
        assert rc

    def test_004_wall(self, mocked_mongo_repo):
        """wall returns the (non-empty) aggregated messages."""
        rc = wall(repo = mocked_mongo_repo, username = 'dummy_user')
        assert rc
59,718 | jesuschm/tasca | refs/heads/master | /tests/unit_tests/application/fixtures/commands_service_fixtures.py | import pytest
from uuid import uuid4
import mock
from src.domain.users.user import User
from src.domain.messages.message import Message
@pytest.fixture
def mocked_mongo_repo():
    """Mock repository for the command-service unit tests.

    insert_message always succeeds; get_messages yields one serialized dummy
    message; get_user yields a serialized dummy user.
    """
    mocked_repo = mock.Mock()
    mocked_repo.insert_message.return_value = True
    mocked_repo.get_messages.return_value = [Message(content = 'dummy_message', user_id = uuid4()).to_dict()]
    mocked_repo.get_user.return_value = User(username = 'dummy_user').to_dict()
    return mocked_repo
59,719 | jesuschm/tasca | refs/heads/master | /src/application/users_service.py | import logging
from src.domain.users.user import User
def create_user(repo, username):
    """Create a new User model and persist it through the repository.

    Returns:
        The repository's creation status, or False if no user was built.
    """
    logging.debug(f"[+] Creating the user {username}")
    user = User(username=username)
    if not user:
        return False
    return repo.create_user(user=user.to_dict())
def get_user(repo, username):
    """Look up a user record by username via the repository."""
    return repo.get_user(username=username)
59,720 | jesuschm/tasca | refs/heads/master | /src/config/settings.py | import sys
import os.path
import configparser
import logging
import logging.config
from pathlib import Path
# Absolute path to the mandatory config file at the project root.
CONFIG_FILE = os.path.join(Path(__file__).parent.parent.parent, 'tasca.conf')

config = configparser.ConfigParser()
if os.path.isfile(CONFIG_FILE):
    logging.debug('[+] Loading config file tasca.conf')
    config.read([CONFIG_FILE])
else:
    # Config is required: abort at import time when it is missing.
    logging.error('[-] Config file not found. Quitting')
    sys.exit(1)

# Set up a logger
LOG_CONFIG_FILE = os.path.join(Path(__file__).parent.parent.parent, 'logging.conf')
if os.path.isfile(LOG_CONFIG_FILE):
    logging.config.fileConfig(LOG_CONFIG_FILE)
    # NOTE(review): 'logger' is only bound when logging.conf exists; importers
    # referencing settings.logger would hit NameError otherwise — confirm.
    logger = logging.getLogger('tascalog')

# mongodb settings
MONGO_HOST = config.get('mongodb', 'mongo_host')
MONGO_PORT = config.getint('mongodb', 'mongo_port')
MONGO_USER = config.get('mongodb', 'user')
MONGO_PASSWORD = config.get('mongodb', 'passwd')
MONGO_DB = config.get('mongodb', 'db')
MESSAGES_COLLECTION = config.get('mongodb', 'messages_collection')
USERS_COLLECTION = config.get('mongodb', 'users_collection')
59,721 | jesuschm/tasca | refs/heads/master | /src/scripts/createuser.py | import logging
import argparse
from src.application.users_service import create_user
from src.infra.databases.mongo import MongoRepository
# Shared repository instance used by main().
_repo = MongoRepository()
def main():
    """Parse the CLI arguments and create the requested user."""
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument("user", help="Tasca username")
        args = parser.parse_args()
        create_user(repo=_repo, username=args.user)
    except Exception as err:
        logging.error("[-] Unexpected error: {}".format(err))
# Run the user-creation script only when executed directly.
if __name__ == "__main__":
    """Entry point
    """
    main()
59,722 | jesuschm/tasca | refs/heads/master | /src/domain/users/user.py | import uuid
from dataclasses import dataclass, asdict, field
@dataclass
class User():
    """Domain model for a Tasca user.

    Attributes:
        username: display name of the user.
        id: UUID primary key, generated on creation.
        follows: ids of the users this user follows.
    """
    username: str
    id: uuid.UUID = field(default_factory=uuid.uuid4)
    follows: list = field(default_factory=list)

    @classmethod
    def from_dict(cls, d):
        """Build a User from a plain dict, or None when ``d`` is not a dict."""
        # PEP 8: the first parameter of a classmethod is named ``cls``
        # (it was previously named ``self``, which is misleading).
        return cls(**d) if isinstance(d, dict) else None

    def to_dict(self):
        """Serialize to a plain dict (UUID values are kept as uuid.UUID)."""
        return asdict(self)

    def add_follow(self, follow_user_id):
        """Append ``follow_user_id`` to the follow list.

        NOTE(review): duplicates are not filtered — following twice appends
        the id twice; confirm whether that is intended.
        """
        self.follows.append(follow_user_id)
59,723 | jesuschm/tasca | refs/heads/master | /tests/unit_tests/domain/users/test_user_model.py | import pytest
from .fixtures.user_model_fixtures import dummy_user_instance_fixture, dummy_data
class TestUserModel(object):
    """Unit tests for the User domain model."""

    def test_001_from_dict(self, dummy_user_instance_fixture):
        """from_dict/to_dict must round-trip the source dict unchanged."""
        user = dummy_user_instance_fixture.from_dict(dummy_data)
        assert user.to_dict() == dummy_data
59,724 | jesuschm/tasca | refs/heads/master | /tests/integration_tests/infra/databases/test_mongo_repo.py | import pytest
from .fixtures.mongo_repo_fixtures import mongo_repo_instance_fixture, bad_data
class TestMongoRepository(object):
    """Integration tests that require a reachable MongoDB instance."""

    @pytest.mark.parametrize(
        'data',
        [pytest.param(bad_data, marks=pytest.mark.xfail)]
    )
    def test001_upsert(self, data, mongo_repo_instance_fixture):
        """Upserting data lacking the pk_field must fail (marked xfail)."""
        rc = mongo_repo_instance_fixture.upsert(collection= 'tests_database', data = data, pk_field = 'bad_id')
        assert rc
59,725 | jesuschm/tasca | refs/heads/master | /src/infra/databases/mongo.py | import logging
from pymongo import MongoClient
from src.config.settings import MONGO_HOST, MONGO_PORT, MONGO_USER, MONGO_PASSWORD, MONGO_DB, MESSAGES_COLLECTION, USERS_COLLECTION
from .repo import Repo
class MongoRepository(Repo):
    def __init__(self):
        # Eagerly open the Mongo client; self.client is None when the
        # connection attempt fails (see _get_client).
        self.client = self._get_client()
def _get_client(self, host=MONGO_HOST, port=MONGO_PORT, user=MONGO_USER, password=MONGO_PASSWORD, db=MONGO_DB):
try:
uri = f"mongodb://"
if user:
uri += f"{user}:{password}@"
uri += f"{host}:{port}/?authSource={db}"
logging.debug("[+] Connecting with the database...")
# uuidRepresentation = 'standard' helps mongo to manage uuid4 correctly.
client = MongoClient(uuidRepresentation='standard')
client[db].command('ping')
logging.debug("[+] Successfully connected to the database.")
return client
except Exception:
logging.error(f'[-] Cannot instance connection with db.')
return None
def upsert(self, collection, data, pk_field):
"""Upsert method.
Method to update or insert the data depending if the data already exists or not in the repository.
Args:
collection: collection name to update data.
data: data to upsert.
pk_field: field to be used to find the data to check if it already exists.
"""
logging.debug('[+] Inserting/Updating data into %s collection.', collection)
res = None
try:
res = self.client[MONGO_DB][collection].update({pk_field: data[pk_field]}, data, upsert=True)
nUpserted= res['n']
logging.debug(f'[+] Upserted {nUpserted} records in db.')
res = (nUpserted == 1)
except Exception as err:
logging.error(f'[-] db_upsert: Cannot upsert data in db: {collection}. Error {err}.')
return res
def get(self, collection, filter, many = False, order_by = None, mode = None, limit = 0, offset = 0):
"""Get data from the database. It removes by default the _id mongodb field.
Args:
collection: collection name.
filter: filter to apply to database collection.
many: to search more than one result
order_by: field to sort
mode: sorting mode (asc/desc)
limit: number of elements to return
offset: how many results are avoided in order to paginate the results
Returns:
response: post response in json format.
"""
res = None
try:
if many:
if order_by:
mode = 1 if mode == "asc" else -1 # By default, query is sorted desc.
res = self.client[MONGO_DB][collection].\
find(filter, {"_id": 0}).skip(offset).limit(limit).sort(order_by, mode)
else:
res = self.client[MONGO_DB][collection].\
find(filter, {"_id": 0}).skip(offset).limit(limit)
else:
res = self.client[MONGO_DB][collection].find_one(filter, {"_id": 0})
except Exception as err:
logging.error(f'[-] db_get: Cannot get data in db. Collection: {str(collection)} Filter: {str(filter)}')
return res
# Messages db actions
def insert_message(self, message):
return self.upsert(collection = MESSAGES_COLLECTION, data = message, pk_field = 'id')
def get_messages(self, user_ids):
filter = {
'user_id' : {
'$in' : user_ids
}
}
return self.get(collection= MESSAGES_COLLECTION, filter = filter, many = True, order_by = 'created_at', mode = 'desc')
# Users db actions
def create_user(self, user):
return self._upsert_user(user)
def update_user(self, user):
return self._upsert_user(user)
def _upsert_user(self, user):
return self.upsert(collection= USERS_COLLECTION, data = user, pk_field = 'id')
def get_user(self, username):
filter = {
'username': username
}
return self.get(collection= USERS_COLLECTION, filter = filter)
def get_user_by_id(self, user_id):
filter = {
'id': user_id
}
return self.get(collection= USERS_COLLECTION, filter = filter) | {"/tests/unit_tests/domain/users/fixtures/user_model_fixtures.py": ["/src/domain/users/user.py"], "/tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py": ["/src/infra/databases/mongo.py"], "/tests/unit_tests/domain/messages/test_message_model.py": ["/tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py"], "/tests/unit_tests/application/test_users_service.py": ["/src/application/users_service.py"], "/src/application/commands_service.py": ["/src/domain/messages/message.py", "/src/domain/users/user.py"], "/tests/unit_tests/application/test_commands_service.py": ["/src/application/commands_service.py", "/tests/unit_tests/application/fixtures/commands_service_fixtures.py"], "/tests/unit_tests/application/fixtures/commands_service_fixtures.py": ["/src/domain/users/user.py", "/src/domain/messages/message.py"], "/src/application/users_service.py": ["/src/domain/users/user.py"], "/src/scripts/createuser.py": ["/src/application/users_service.py", "/src/infra/databases/mongo.py"], "/tests/unit_tests/domain/users/test_user_model.py": ["/tests/unit_tests/domain/users/fixtures/user_model_fixtures.py"], "/tests/integration_tests/infra/databases/test_mongo_repo.py": ["/tests/integration_tests/infra/databases/fixtures/mongo_repo_fixtures.py"], "/src/infra/databases/mongo.py": ["/src/config/settings.py", "/src/infra/databases/repo.py"], "/tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py": ["/src/domain/messages/message.py"]} |
59,726 | jesuschm/tasca | refs/heads/master | /tests/unit_tests/domain/messages/fixtures/message_model_fixtures.py | import pytest
import uuid
from datetime import datetime
from src.domain.messages.message import Message
from dataclasses import asdict, dataclass
# Canonical message fields used by the fixtures below.
# BUGFIX: 'id' and 'user_id' previously stored the *function* uuid.uuid4
# (missing call parentheses) instead of an actual UUID value.
dummy_data = {
    'id': uuid.uuid4(),
    'user_id': uuid.uuid4(),
    'content': 'dummy_message',
    'created_at': datetime.now()
}
@pytest.fixture
def dummy_message_instance_fixture():
    """Pytest fixture: a Message built from the module-level ``dummy_data`` values."""
    dummy_message_instance_fixture = Message(id = dummy_data.get('id'), user_id= dummy_data.get('user_id'), content = dummy_data.get('content'), created_at = dummy_data.get('created_at'))
    return dummy_message_instance_fixture
59,727 | echudov/199-stocks | refs/heads/master | /main.py | import simulations
import time
import csvParser
import numpy as np
import graphing
class SingleIteration:
    """One simulate-and-plot run for a single market index symbol."""

    # class-level declarations; populated per instance in __init__
    index = None
    data_points = None
    p = None
    simulate_index = None
    real_streak_counts = None
    average_streaks = None
    largest_streaks = None

    def __init__(self, set_index):
        """Load empirical values for ``set_index`` and prepare 100 simulations."""
        self.index = set_index
        self.p = float(csvParser.important_values_dict[self.index]['p value'])
        self.largest_streaks = []
        self.data_points = int(csvParser.important_values_dict[self.index]['number of data points'])
        self.simulate_index = simulations.MultipleSimulations(100, self.data_points, self.p)
        self.real_streak_counts = count_streaks(csvParser.streak_data_dict[self.index])

    def iterate(self):
        """Run the simulations, then chart simulated vs. empirical streak data."""
        self.simulate_index.simulate()
        self.average_streaks = self.simulate_index.averagestreaks()
        self.largest_streaks = self.simulate_index.largeststreaklist
        self.largest_streaks.sort()
        graphing.dual_bar_chart(self.index, self.real_streak_counts, self.average_streaks)
        graphing.graph_largest_streak(self.index, self.simulate_index.largeststreaklist)
def count_streaks(streak_list):
    """Tally empirical streaks by length.

    ``streak_list`` holds running streak values (as strings); a drop in the
    running value marks the end of a streak. Returns a list where index k
    holds the number of streaks of length k + 1.
    """
    counted_streaks = [1]
    current_streak = 1
    for i in range(1, len(streak_list)):
        # a drop in the running value means the previous streak just ended
        if current_streak > int(streak_list[i]):
            if current_streak > len(counted_streaks):
                # grow the tally list up to the new maximum streak length
                for j in range(len(counted_streaks), current_streak):
                    if j == current_streak - 1:
                        counted_streaks.append(1)
                    else:
                        counted_streaks.append(0)
            else:
                counted_streaks[current_streak - 1] += 1
        # track the running value for the next comparison
        current_streak = int(streak_list[i])
    return counted_streaks
def iterate_indicies(symbols):
    """Run the full simulate-and-plot pipeline once for every index symbol."""
    for symbol in symbols:
        SingleIteration(symbol).iterate()
def main():
    """Process every index symbol and report the total wall-clock time."""
    start_time = time.time()
    iterate_indicies(csvParser.streak_data_indicies)
    end_time = time.time()
    print(end_time - start_time, "seconds")


# NOTE(review): runs at import time; an ``if __name__ == "__main__":`` guard
# would be safer if this module is ever imported.
main()
59,728 | echudov/199-stocks | refs/heads/master | /simulations.py | import random
class MultipleSimulations:
    """Runs k independent UpDown trials and aggregates their streak counts."""

    # class-level declarations; populated per instance in __init__
    k = None
    n = None
    p = None
    totalstreaklist = None
    largeststreaklist = None

    def __init__(self, setk, setn, setp):
        """Store trial count (setk), flips per trial (setn) and up-probability (setp)."""
        self.k = setk
        self.n = setn
        self.p = setp
        self.totalstreaklist = []
        self.largeststreaklist = []

    def simulate(self):
        """Run all k trials, collecting each one's streak histogram and longest run."""
        for _ in range(self.k):
            trial = UpDown(self.p)
            trial.onetrial(self.n)
            self.largeststreaklist.append(trial.largeststreak)
            self.addnewstreaks(trial.streakCount, self.totalstreaklist)

    def addnewstreaks(self, newstreak, streaklist):
        """Element-wise add ``newstreak`` into ``streaklist`` in place, growing it as needed."""
        shared = min(len(newstreak), len(streaklist))
        for idx in range(shared):
            streaklist[idx] += newstreak[idx]
        # any extra entries of the new histogram simply extend the total
        streaklist.extend(newstreak[shared:])

    def largeststreak(self):
        """Length of the aggregate histogram, i.e. the longest streak seen overall."""
        return len(self.totalstreaklist)

    def averagestreaks(self):
        """Per-length streak counts averaged over the k trials."""
        return [total / self.k for total in self.totalstreaklist]
class UpDown:
    """A single coin-flip (up/down) trial that tracks run ("streak") lengths."""

    # class-level declarations; populated per instance in __init__
    change = None
    largeststreak = None
    probability = None
    streakCount = None  # first index represents the count of 1

    def __init__(self, p):
        """``p`` is the probability of an 'up' move on each flip."""
        self.largeststreak = 0
        self.probability = p
        self.change = []
        self.streakCount = []

    # runs one trial of coin flips recording data in the lists of this individual class
    def onetrial(self, n):
        """Flip n times, appending +1/-1 moves and tallying finished streaks."""
        currentstreak = 0
        recentflip = None
        for i in range(n):
            # down logic
            if random.random() > self.probability:
                self.change.append(-1)  # tracks that it was down at the current index in the up array
                if i == 0:
                    currentstreak += 1
                elif recentflip == -1:
                    currentstreak += 1
                    # in case the streak doesn't get cut off before the end
                    if i == n - 1:
                        self.countstreak(currentstreak, self.streakCount)
                else:
                    # direction changed: close the previous streak, start a new one
                    self.countstreak(currentstreak, self.streakCount)
                    currentstreak = 1
                recentflip = -1
                if currentstreak > self.largeststreak:
                    self.largeststreak = currentstreak
            # up logic
            else:
                self.change.append(1)
                if i == 0:
                    currentstreak += 1
                elif recentflip == 1:
                    currentstreak += 1
                    # in case the streak doesn't get cut off before the end
                    if i == n - 1:
                        self.countstreak(currentstreak, self.streakCount)
                else:
                    self.countstreak(currentstreak, self.streakCount)
                    currentstreak = 1
                recentflip = 1
                if currentstreak > self.largeststreak:
                    self.largeststreak = currentstreak

    # records the end of the streak in the streakCount array
    # appends the list if it hasn't reached the streak length yet until the length of the largest streak
    def countstreak(self, newstreak, streaklist):
        """Increment the tally for a finished streak of length ``newstreak``."""
        if newstreak > len(streaklist):
            for x in range(len(streaklist), newstreak):
                if x == newstreak - 1:
                    streaklist.append(1)
                else:
                    streaklist.append(0)
        else:
            streaklist[newstreak - 1] += 1  # increases the counter by 1
59,729 | echudov/199-stocks | refs/heads/master | /graphing.py | import plotly.graph_objects as go
import numpy as np
def dual_bar_chart(index_name, r_streaks, sim_streaks):
    """Grouped bar chart comparing simulated vs. empirical streak counts.

    Args:
        index_name: chart title (the market index symbol).
        r_streaks: empirical per-length streak counts (index 0 = length-1 streaks).
        sim_streaks: simulated per-length streak counts, same layout.
    """
    labels = []
    # drop length-1 streaks (index 0); the chart starts at streak length 2
    simulated_streaks = sim_streaks[1:].copy()
    real_streaks = r_streaks[1:].copy()
    for i in range(len(real_streaks)):
        real_streaks[i] = int(real_streaks[i])
    # pad the shorter series with zeros so both share the same x axis
    while len(simulated_streaks) > len(real_streaks):
        real_streaks.append(0)
    # BUGFIX: this condition was previously len(real_streaks) < len(simulated_streaks),
    # which is never true after the loop above, so the simulated series was
    # never padded when the empirical one was longer.
    while len(simulated_streaks) < len(real_streaks):
        simulated_streaks.append(0)
    # BUGFIX: the upper bound was len(simulated_streaks) + 1, producing one
    # label fewer than there are bars (index 0 corresponds to length 2).
    for i in range(2, len(simulated_streaks) + 2):
        labels.append(i)
    fig = go.Figure(data=[
        go.Bar(name='Simulation', x=labels, y=simulated_streaks),
        go.Bar(name='Empirical', x=labels, y=real_streaks)
    ])
    fig.update_layout(
        barmode='group',
        title=go.layout.Title(
            text=index_name,
            font=dict(
                family="Courier New, monospace",
                size=24,
                color="#7f7f7f"
            )
        ),
        xaxis=go.layout.XAxis(
            title=go.layout.xaxis.Title(
                text="Length of Streak",
                font=dict(
                    family="Courier New, monospace",
                    size=18,
                    color="#7f7f7f"
                )
            )
        ),
        yaxis=go.layout.YAxis(
            title=go.layout.yaxis.Title(
                text="Quantity of Streaks",
                font=dict(
                    family="Courier New, monospace",
                    size=18,
                    color="#7f7f7f"
                )
            )
        )
    )
    fig.show()
def graph_largest_streak(index_name, largest_streaks_uncounted):
    """Bar chart of how often each per-trial 'largest streak' value occurred."""
    lo = min(largest_streaks_uncounted)
    hi = max(largest_streaks_uncounted)
    labels = list(range(lo, hi + 1))
    # occurrence count of every possible largest-streak value
    largest_streaks = [largest_streaks_uncounted.count(val) for val in labels]
    title_font = dict(family="Courier New, monospace", size=24, color="#7f7f7f")
    axis_font = dict(family="Courier New, monospace", size=18, color="#7f7f7f")
    fig = go.Figure(data=[
        go.Bar(name='Simulated Largest Streak', x=labels, y=largest_streaks),
    ])
    fig.update_layout(
        title=go.layout.Title(
            text="Largest Streaks within " + index_name,
            font=title_font
        ),
        xaxis=go.layout.XAxis(
            title=go.layout.xaxis.Title(
                text="Length of Largest Streak",
                font=axis_font
            )
        ),
        yaxis=go.layout.YAxis(
            title=go.layout.yaxis.Title(
                text="Quantity of Streaks",
                font=axis_font
            )
        )
    )
    fig.show()
| {"/main.py": ["/simulations.py", "/csvParser.py", "/graphing.py"]} |
59,730 | echudov/199-stocks | refs/heads/master | /csvParser.py | import csv
# Input CSV file names and the module-level containers they are parsed into.
important_values = "importantValues.csv"
streak_data = "StreakData.csv"
important_values_fields = []   # header row of importantValues.csv
important_values_rows = []     # data rows of importantValues.csv
streak_data_indicies = []      # header row (index symbols) of StreakData.csv
streak_data_rows = []          # data rows of StreakData.csv
# Parse importantValues.csv: first row is the header, the rest are data rows.
with open(important_values, 'r') as csvfile:
    csvreader = csv.reader(csvfile)
    important_values_fields = next(csvreader)
    for row in csvreader:
        important_values_rows.append(row)

# creates dictionary of values to iterate through simulations
# (keyed by the first column, the index symbol)
important_values_dict = dict();
for i in range(len(important_values_rows)):
    important_values_dict[important_values_rows[i][0]] = {important_values_fields[j]: important_values_rows[i][j]
                                                          for j in range(1, len(important_values_fields))}

# Parse StreakData.csv: the header row holds the index symbols.
with open(streak_data, 'r') as csvfile:
    csvreader = csv.reader(csvfile)
    streak_data_indicies = next(csvreader)
    for row in csvreader:
        streak_data_rows.append(row)
def column(col, rows):
    """Return column ``col`` from ``rows``, skipping empty cells."""
    return [row[col] for row in rows if row[col] != '']
# One list per index symbol, each holding that index's streak column.
streak_data_columns = []
for i in range(len(streak_data_indicies)):
    streak_data_columns.append(column(i, streak_data_rows))

# creates dictionary of lists (1 for each index to keep track of streaks)
streak_data_dict = dict()
for i in range(len(streak_data_indicies)):
    streak_data_dict[streak_data_indicies[i]] = streak_data_columns[i]
59,731 | jsosulski/tdlda | refs/heads/main | /tdlda/classification/__init__.py | from .tdlda import TimeDecoupledLda
from .feature_preprocessing import Vectorizer
| {"/tdlda/classification/__init__.py": ["/tdlda/classification/tdlda.py"], "/tdlda/__init__.py": ["/tdlda/classification/__init__.py"]} |
59,732 | jsosulski/tdlda | refs/heads/main | /tdlda/__init__.py | from .classification import TimeDecoupledLda
from .classification import Vectorizer
| {"/tdlda/classification/__init__.py": ["/tdlda/classification/tdlda.py"], "/tdlda/__init__.py": ["/tdlda/classification/__init__.py"]} |
59,733 | jsosulski/tdlda | refs/heads/main | /tdlda/classification/tdlda.py | from typing import Tuple
import numpy as np
import sklearn
import sklearn.utils.multiclass
import sklearn.linear_model
from sklearn.preprocessing import StandardScaler
def diag_indices_with_offset(p, offset):
    """Diagonal indices of a p x p block shifted by ``offset`` along both axes."""
    rows, cols = np.diag_indices(p)
    return (rows + offset, cols + offset)
def _shrinkage(X: np.ndarray, gamma=None, T=None, S=None, block=False,
               N_channels=31, N_times=5, standardize=True) -> Tuple[np.ndarray, float]:
    """Ledoit-Wolf style shrinkage estimate of the feature covariance.

    Args:
        X: (n_features, n_samples) data matrix.
        gamma: fixed shrinkage intensity in [0, 1]; estimated from data when None.
        T: shrinkage target matrix; a (block-)scaled identity is built when None.
        S: precomputed scatter matrix of centered X; computed here when None.
        block: when True, build the target from per-time-block diagonal means.
        N_channels, N_times: block layout used when ``block`` is True.
        standardize: z-score features first and undo the scaling afterwards.

    Returns:
        Tuple (shrunk covariance estimate, shrinkage intensity gamma).
    """
    p, n = X.shape
    if standardize:
        sc = StandardScaler()  # standardize features
        X = sc.fit_transform(X.T).T
    # center each feature across samples
    Xn = X - np.repeat(np.mean(X, axis=1, keepdims=True), n, axis=1)
    if S is None:
        S = np.matmul(Xn, Xn.T)
    Xn2 = np.square(Xn)
    idxdiag = np.diag_indices(p)
    # Target = B
    nu = np.mean(S[idxdiag])
    if T is None:
        if block:
            # one mean per time block, repeated over its channels
            nu = list()
            for i in range(N_times):
                idxblock = diag_indices_with_offset(N_channels, i*N_channels)
                nu.append([np.mean(S[idxblock])] * N_channels)
            nu = [sl for l in nu for sl in l]
            T = np.diag(np.array(nu))
        else:
            T = nu * np.eye(p, p)
    # Ledoit Wolf
    V = 1. / (n - 1) * (np.matmul(Xn2, Xn2.T) - np.square(S) / n)
    if gamma is None:
        gamma = n * np.sum(V) / np.sum(np.square(S - T))
    # clamp gamma into [0, 1]
    if gamma > 1:
        print("logger.warning('forcing gamma to 1')")
        gamma = 1
    elif gamma < 0:
        print("logger.warning('forcing gamma to 0')")
        gamma = 0
    Cstar = (gamma * T + (1 - gamma) * S) / (n - 1)
    if standardize:  # scale back
        Cstar = sc.scale_[np.newaxis, :] * Cstar * sc.scale_[:, np.newaxis]
    return Cstar, gamma
class ShrinkageLinearDiscriminantAnalysis(
        sklearn.base.BaseEstimator,
        sklearn.linear_model._base.LinearClassifierMixin):
    """Binary LDA with (optionally block-targeted) shrinkage covariance estimation."""

    def __init__(self, priors=None, only_block=False, N_times=5, N_channels=31, pool_cov=True, standardize_shrink=True):
        self.only_block = only_block
        self.priors = priors
        self.N_times = N_times
        self.N_channels = N_channels
        self.pool_cov = pool_cov
        self.standardize_shrink = standardize_shrink

    def fit(self, X_train, y):
        """Fit LDA weights on (n_samples, n_features) data with binary labels {0, 1}.

        Returns
        -------
        self : fitted estimator, per the scikit-learn convention.
        """
        self.classes_ = sklearn.utils.multiclass.unique_labels(y)
        if set(self.classes_) != {0, 1}:
            raise ValueError('currently only binary class supported')
        assert len(X_train) == len(y)
        xTr = X_train.T
        n_classes = 2
        if self.priors is None:
            # here we deviate from the bbci implementation and
            # use the sample priors by default
            _, y_t = np.unique(y, return_inverse=True)  # non-negative ints
            priors = np.bincount(y_t) / float(len(y))
        else:
            priors = self.priors
        X, cl_mean = subtract_classwise_means(xTr, y)
        if self.pool_cov:
            C_cov, C_gamma = _shrinkage(X, N_channels=self.N_channels, N_times=self.N_times,
                                        standardize=self.standardize_shrink)
        else:
            n_classes = 2
            C_cov = np.zeros((xTr.shape[0], xTr.shape[0]))
            for cur_class in range(n_classes):
                # NOTE(review): subtract_classwise_means reorders X's columns by
                # class, so masking with ``y == cur_class`` only selects the
                # correct columns when samples are pre-sorted by class — verify.
                class_idxs = y == cur_class
                x_slice = X[:, class_idxs]
                C_cov += priors[cur_class] * _shrinkage(x_slice)[0]
        if self.only_block:
            # zero everything except the per-time diagonal blocks
            C_cov_new = np.zeros_like(C_cov)
            for i in range(self.N_times):
                idx_start = i * self.N_channels
                idx_end = idx_start + self.N_channels
                C_cov_new[idx_start:idx_end, idx_start:idx_end] = C_cov[idx_start:idx_end, idx_start:idx_end]
            C_cov = C_cov_new
        # (an unused C_invcov = np.linalg.pinv(C_cov) was removed: the weights
        # are obtained from the least-squares solve below instead)
        w = np.linalg.lstsq(C_cov, cl_mean)[0]
        b = -0.5 * np.sum(cl_mean * w, axis=0).T + np.log(priors)
        if n_classes == 2:
            # collapse the two-column solution into a single binary hyperplane
            w = w[:, 1] - w[:, 0]
            b = b[1] - b[0]
        self.coef_ = w.reshape((1, -1))
        self.intercept_ = b
        # BUGFIX: fit() previously returned None, breaking method chaining and
        # the scikit-learn contract that fit returns the estimator.
        return self

    def predict_proba(self, X):
        """Estimate probability.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated probabilities.
        """
        prob = self.decision_function(X)
        # logistic transform of the decision values
        prob *= -1
        np.exp(prob, prob)
        prob += 1
        np.reciprocal(prob, prob)
        return np.column_stack([1 - prob, prob])

    def predict_log_proba(self, X):
        """Estimate log probability.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        C : array, shape (n_samples, n_classes)
            Estimated log probabilities.
        """
        return np.log(self.predict_proba(X))
class TimeDecoupledLda(
        ShrinkageLinearDiscriminantAnalysis):
    """shrinkage LdaClasswiseCovs with enhancement technique for ERP classification

    Parameters
    ----------
    inverted : bool (default: False)
        If you want to estimate and change the diagonal blocks before
        inverting the covariance matrix.
    plot: bool (default: False)
        If you want to plot the original covariance matrix,
        the new diagonal box and the new matrix.
    """

    def __init__(self, priors=None, N_times=5, N_channels=31, standardize_featurestd=False, preproc=None,
                 standardize_shrink=True, channel_gamma=None):
        self.priors = priors
        self.N_times = N_times
        self.N_channels = N_channels
        self.standardize_featurestd = standardize_featurestd
        self.standardize_shrink = standardize_shrink
        self.channel_gamma = channel_gamma
        self.preproc = preproc  # This is needed to obtain time interval standardization factors from vectorizer

    def fit(self, X_train, y):
        """Fit LDA weights using the time-decoupled block-covariance estimate.

        Returns
        -------
        self : fitted estimator, per the scikit-learn convention.
        """
        self.classes_ = sklearn.utils.multiclass.unique_labels(y)
        if set(self.classes_) != {0, 1}:
            raise ValueError('currently only binary class supported')
        assert len(X_train) == len(y)
        xTr = X_train.T
        if self.priors is None:
            # here we deviate from the bbci implementation and
            # use the sample priors by default
            _, y_t = np.unique(y, return_inverse=True)  # non-negative ints
            priors = np.bincount(y_t) / float(len(y))
        else:
            priors = self.priors
        X, cl_mean = subtract_classwise_means(xTr, y)  # outsourced to method
        C_cov, C_gamma = _shrinkage(X, N_channels=self.N_channels, N_times=self.N_times,
                                    standardize=self.standardize_shrink)
        # replace the diagonal time blocks with the scaled channel covariance
        C_cov = change_diagonal_entries(C_cov, xTr, y, inverted=False, N_times=self.N_times,
                                        N_channels=self.N_channels, standardize=self.standardize_featurestd,
                                        jumping_means_ivals=self.preproc.jumping_mean_ivals,
                                        channel_gamma=self.channel_gamma)
        w = np.linalg.lstsq(C_cov, cl_mean)[0]
        b = -0.5 * np.sum(cl_mean * w, axis=0).T + np.log(priors)
        # collapse the two-column solution into a single binary hyperplane
        w = w[:, 1] - w[:, 0]
        b = b[1] - b[0]
        self.coef_ = w.reshape((1, -1))
        self.intercept_ = b
        # BUGFIX: fit() previously returned None, breaking method chaining and
        # the scikit-learn contract that fit returns the estimator.
        return self
def subtract_classwise_means(xTr, y):
    """Center each sample by its class mean.

    Args:
        xTr: (n_features, n_samples) data matrix.
        y: binary labels (0/1) per sample.

    Returns:
        Tuple of (X, cl_mean): X holds class-0 then class-1 centered columns
        concatenated; cl_mean is (n_features, 2) with one class mean per column.
    """
    n_classes = 2
    n_features = xTr.shape[0]
    cl_mean = np.zeros((n_features, n_classes))
    centered_parts = []
    for ci in range(n_classes):
        mask = y == ci
        cls_cols = xTr[:, mask]
        cl_mean[:, ci] = cls_cols.mean(axis=1)
        centered_parts.append(cls_cols - cl_mean[:, ci][:, np.newaxis])
    X = np.concatenate(centered_parts, axis=1)
    return X, cl_mean
def change_diagonal_entries(S, xTr, y, inverted=False, N_times=5, N_channels=31, standardize=False,
                            jumping_means_ivals=None, channel_gamma=None):
    """Replace the per-time diagonal blocks of S with a scaled channel covariance.

    Args:
        S: full (N_times*N_channels) square covariance estimate to modify.
        xTr: (n_features, n_samples) training data used to estimate sigma_c.
        y: binary labels per sample.
        inverted: use the pseudo-inverse of the channel covariance instead.
        N_times, N_channels: block layout of the feature vector.
        standardize: rescale time blocks by interval length before estimating.
        jumping_means_ivals: time intervals used to derive those scale factors.
        channel_gamma: fixed shrinkage intensity forwarded to _shrinkage.

    Returns:
        A copy of S with each diagonal block replaced.

    Raises:
        OverflowError: if the resulting matrix contains NaN/Inf entries.
    """
    # compute sigma_c
    # information about time not relevant
    if standardize:
        if jumping_means_ivals is not None:
            # interval lengths in samples (10 ms steps assumed here — verify)
            num_samples = ((np.diff(np.array(jumping_means_ivals))+0.001)/0.01).squeeze()
            factors = np.sqrt(num_samples / np.min(num_samples))
            # NOTE(review): scales xTr in place; callers see the mutation.
            for ti in range(N_times):
                start_i = N_channels * ti
                end_i = start_i + N_channels
                xTr[start_i:end_i, :] *= factors[ti]
    xTr_meanfree, class_means = subtract_classwise_means(xTr, y)
    # stack all time points as extra samples of a channels-only matrix
    X_long_slim = xTr_meanfree.reshape((N_channels, -1), order='F')
    sigma_c_ref, gamma_c = _shrinkage(X_long_slim, N_channels=N_channels, N_times=N_times, standardize=True,
                                      gamma=channel_gamma)
    sigma_c = np.linalg.pinv(sigma_c_ref) if inverted else sigma_c_ref
    sign_sigma_c, slogdet_sigma_c = np.linalg.slogdet(sigma_c)
    logdet_sigma_c = slogdet_sigma_c * sign_sigma_c
    # compute scalar to scale sigma_c and change diagonal boxes of cov.matrix
    S_new = np.copy(S)
    for i in range(N_times):
        idx_start = i*N_channels
        idx_end = idx_start + N_channels
        sigma_block = S[idx_start:idx_end, idx_start:idx_end]
        sign_sigma_block, slogdet_sigma_block = np.linalg.slogdet(sigma_block)
        logdet_sigma_block = slogdet_sigma_block * sign_sigma_block
        # match the block's determinant magnitude via a per-element scalar
        scalar_via_determinant = np.exp(logdet_sigma_block - logdet_sigma_c)**(1.0/S[idx_start:idx_end, idx_start:idx_end].shape[1])
        if scalar_via_determinant < 1:
            pass
        S_new[idx_start:idx_end, idx_start:idx_end] = sigma_c * scalar_via_determinant  # * scaling_factor
    S = S_new
    if np.any(np.isnan(S)) or np.any(np.isinf(S)):
        raise OverflowError('Diagonal-block covariance matrix is not numeric.')
    return S
| {"/tdlda/classification/__init__.py": ["/tdlda/classification/tdlda.py"], "/tdlda/__init__.py": ["/tdlda/classification/__init__.py"]} |
59,738 | SimonaytesYan/profile | refs/heads/master | /MyApp/views.py | from django.shortcuts import render
from django.http import HttpResponse
def start(request):
    """Render the landing page."""
    return render(request, 'Start.html')
def hobby(request):
    """Render the hobbies page."""
    return render(request, 'hobby.html')

# Create your views here.
def project(request):
    """Render the projects page."""
    return render(request,'Projects.html')
def progress(request):
    """Render the progress page."""
    return render(request, 'progress.html')
| {"/first/urls.py": ["/MyApp/views.py"]} |
59,739 | SimonaytesYan/profile | refs/heads/master | /first/urls.py | """first URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from MyApp.views import start
from MyApp.views import hobby
from MyApp.views import project
from MyApp.views import progress
# Route table: one path per site page plus the Django admin.
urlpatterns =[
    path('progress/', progress),
    path('project/', project),
    path('hobby/',hobby),
    path('main/',start),
    path('admin/', admin.site.urls),
]
| {"/first/urls.py": ["/MyApp/views.py"]} |
59,782 | JhonTXdons/RecyclerMan | refs/heads/master | /data/trash_tube.py | import pygame
import random
class Tube:
    """The trash-spawning tube sprite: fixed geometry plus a random spawn x."""

    def __init__(self):
        self._dimX = 300
        self._dimY = 0
        self._weight = 300
        self._height = 100
        self.tube = ['tubo1', 'tubo2', 'tubo3']
        # tube image, scaled to the on-screen size
        self.trash_tube = pygame.image.load('data/assets/images/tubo-1.png')
        self.trash_tube_resized = pygame.transform.scale(self.trash_tube, (300, 100))

    def get_dimX(self):
        """Tube x position."""
        return self._dimX

    def get_dimY(self):
        """Tube y position."""
        return self._dimY

    def get_weight(self):
        """Tube width in pixels."""
        return self._weight

    def get_height(self):
        """Tube height in pixels."""
        return self._height

    def randomTube(self):
        """Pick one of the three tubes at random and return a spawn x inside it."""
        random.shuffle(self.tube)
        chosen = random.choice(self.tube)
        spawn_ranges = {'tubo1': (30, 210), 'tubo2': (330, 510), 'tubo3': (630, 810)}
        low, high = spawn_ranges[chosen]
        return random.randint(low, high)
59,783 | JhonTXdons/RecyclerMan | refs/heads/master | /data/trash.py | from data.trash_tube import *
class Trash:
    def __init__(self):
        """Load every trash sprite and seed the falling-trash state list."""
        self._dimX = 200
        self._dimY = 20
        self._weight = 50
        self._height = 50
        self._velocity = 2
        self._color = 255, 0, 0
        self._t_tube = Tube()
        self.TRASH = 5  # number of extra falling trash items

        # GLASS (vetro) sprites ---------------------------------------------------
        self.trash_green1 = pygame.image.load('data/assets/images/vetro 1.png')
        self.trash_green_resized1 = pygame.transform.scale(self.trash_green1, (30, 65))
        self.trash_green2 = pygame.image.load('data/assets/images/vetro 2.png')
        self.trash_green_resized2 = pygame.transform.scale(self.trash_green2, (50, 50))
        self.trash_green3 = pygame.image.load('data/assets/images/vetro 3.png')
        self.trash_green_resized3 = pygame.transform.scale(self.trash_green3, (50, 60))
        self.trash_green4 = pygame.image.load('data/assets/images/vetro 4.png')
        self.trash_green_resized4 = pygame.transform.scale(self.trash_green4, (30, 65))
        # PAPER (carta) sprites ---------------------------------------------------
        self.trash_yellow1 = pygame.image.load('data/assets/images/cartone 1.png')
        self.trash_yellow_resized1 = pygame.transform.scale(self.trash_yellow1, (40, 65))
        self.trash_yellow2 = pygame.image.load('data/assets/images/cartone 2.png')
        self.trash_yellow_resized2 = pygame.transform.scale(self.trash_yellow2, (50, 60))
        self.trash_yellow3 = pygame.image.load('data/assets/images/cartone 3.png')
        self.trash_yellow_resized3 = pygame.transform.scale(self.trash_yellow3, (60, 50))
        self.trash_yellow4 = pygame.image.load('data/assets/images/cartone 4.png')
        self.trash_yellow_resized4 = pygame.transform.scale(self.trash_yellow4, (40, 65))
        # PLASTIC (plastica) sprites ----------------------------------------------
        self.trash_blue1 = pygame.image.load('data/assets/images/plastica 1.png')
        self.trash_blue_resized1 = pygame.transform.scale(self.trash_blue1, (30, 65))
        self.trash_blue2 = pygame.image.load('data/assets/images/plastica 2.png')
        self.trash_blue_resized2 = pygame.transform.scale(self.trash_blue2, (55, 50))
        self.trash_blue3 = pygame.image.load('data/assets/images/plastica 3.png')
        self.trash_blue_resized3 = pygame.transform.scale(self.trash_blue3, (50, 60))
        self.trash_blue4 = pygame.image.load('data/assets/images/plastica 4.png')
        self.trash_blue_resized4 = pygame.transform.scale(self.trash_blue4, (50, 55))
        # ORGANIC (umido/cibo) sprites --------------------------------------------
        self.trash_gray1 = pygame.image.load('data/assets/images/cibo 1.png')
        self.trash_gray_resized1 = pygame.transform.scale(self.trash_gray1, (60, 55))
        self.trash_gray2 = pygame.image.load('data/assets/images/cibo 2.png')
        self.trash_gray_resized2 = pygame.transform.scale(self.trash_gray2, (60, 65))
        self.trash_gray3 = pygame.image.load('data/assets/images/cibo 3.png')
        self.trash_gray_resized3 = pygame.transform.scale(self.trash_gray3, (60, 55))
        self.trash_gray4 = pygame.image.load('data/assets/images/cibo 4.png')
        self.trash_gray_resized4 = pygame.transform.scale(self.trash_gray4, (60, 65))

        # Pools that random.choice draws from
        self.color = ['blu', 'grigio', 'verde', 'giallo']
        self.VETRO = [self.trash_green_resized1, self.trash_green_resized2, self.trash_green_resized3,
                      self.trash_green_resized4]
        self.CARTA = [self.trash_yellow_resized1, self.trash_yellow_resized2, self.trash_yellow_resized3,
                      self.trash_yellow_resized4]
        self.PLASTICA = [self.trash_blue_resized1, self.trash_blue_resized2, self.trash_blue_resized3,
                         self.trash_blue_resized4]
        self.UMIDO = [self.trash_gray_resized1, self.trash_gray_resized2, self.trash_gray_resized3,
                      self.trash_gray_resized4]

        # Initial trash state (spawned well above the screen)
        self.state = {
            'x': self._t_tube.randomTube(),
            'y': -1000 - random.randint(0, 1000),
            'image': self.trash_gray_resized4,
            'color': 'grigio',
            'velocity': self.randomVelocity(),
        }
        self.trash_list = [self.state]
        # Fill the list with TRASH additional random items
        self.initTrashList()
    def initTrashList(self):
        """Append TRASH random trash states, then push overlapping ones further up.

        NOTE(review): the overlap pass compares x-distances, but when i == y the
        distance is 0 < 60, so every checked item always gets re-rolled; it also
        only inspects the first TRASH of the TRASH + 1 list entries — confirm
        whether both are intended.
        """
        for i in range(self.TRASH):
            state = {
                'x': self._t_tube.randomTube(),
                'y': -500 - random.randint(0, 1000),
                'color': self.randomColor(),
                'image': self.randomImage(),
                'velocity': self.randomVelocity(),
            }
            self.trash_list.append(state)
        # spread vertically any two items spawning in (nearly) the same column
        for i in range(self.TRASH):
            for y in range(self.TRASH):
                if self.trash_list[y].get('x') - self.trash_list[i].get('x') < 60:
                    self.trash_list[i].update({'y': -1000 - random.randint(0, 1000)})
def set_color(self, color):
self._color = color
return self._color
    # Simple read accessors for the trash geometry/state attributes.
    def get_dimX(self):
        return self._dimX

    def get_dimY(self):
        return self._dimY

    def get_weight(self):
        return self._weight

    def get_height(self):
        return self._height

    def get_color(self):
        return self._color

    def get_velocity(self):
        return self._velocity
    # RANDOM SELECTION OF COLOUR, COLOUR-MATCHED IMAGE AND VELOCITY -----------------
    def randomColor(self):
        """Pick a random bin colour name ('blu', 'grigio', 'verde' or 'giallo')."""
        random.shuffle(self.color)
        x = random.choice(self.color)
        return x
    def randomImage(self):
        """Re-roll the sprite of every listed trash item to match its colour.

        NOTE(review): returns None, yet initTrashList assigns its result as an
        'image' value — confirm whether a single sprite was meant to be returned.
        """
        for i in range(len(self.trash_list)):
            if self.trash_list[i].get('color') == 'blu':
                self.trash_list[i].update({'image': self.randomImageBlue()})
            elif self.trash_list[i].get('color') == 'grigio':
                self.trash_list[i].update({'image': self.randomImageGray()})
            elif self.trash_list[i].get('color') == 'verde':
                self.trash_list[i].update({'image': self.randomImageGreen()})
            elif self.trash_list[i].get('color') == 'giallo':
                self.trash_list[i].update({'image': self.randomImageYellow()})
    # Each picker shuffles its sprite pool and returns one sprite at random.
    def randomImageGreen(self):
        random.shuffle(self.VETRO)
        x = random.choice(self.VETRO)
        return x

    def randomImageYellow(self):
        random.shuffle(self.CARTA)
        x = random.choice(self.CARTA)
        return x

    def randomImageBlue(self):
        random.shuffle(self.PLASTICA)
        x = random.choice(self.PLASTICA)
        return x

    def randomImageGray(self):
        random.shuffle(self.UMIDO)
        x = random.choice(self.UMIDO)
        return x
    def randomVelocity(self):
        """Return the base velocity plus a random increment.

        randrange(25, 75, 5) yields 25..70 in steps of 5, so the increment
        is 0.25-0.70 (assuming Python 3 true division for the / 100).
        """
        vel = self.get_velocity() + random.randrange(25, 75, 5) / 100
        return vel
def chekBorder(self):
for i in range(self.TRASH):
if self.trash_list[i].get('y') >= 1200:
self.trash_list[i].update({'y': -1500 + random.randint(0, 1000)})
self.trash_list[i].update({'x': self._t_tube.randomTube()})
if self.trash_list[i].get('y') < 0:
if (self.trash_list[i].get('color') == 'blu'):
self.trash_list[i].update({'image': self.randomImageBlue()})
elif (self.trash_list[i].get('color') == 'grigio'):
self.trash_list[i].update({'image': self.randomImageGray()})
elif (self.trash_list[i].get('color') == 'verde'):
self.trash_list[i].update({'image': self.randomImageGreen()})
elif (self.trash_list[i].get('color') == 'giallo'):
self.trash_list[i].update({'image': self.randomImageYellow()})
def updatePosition(self):
for i in range(self.TRASH):
vel = self.trash_list[i].get('velocity')
pos = self.trash_list[i].get('y')
self.trash_list[i].update({'y': pos + vel})
def speedUp(self):
for i in range(self.TRASH):
currentVelocity = self.trash_list[i].get('velocity')
self.trash_list[i].update({'velocity': currentVelocity + 0.2})
# RESET DELLA Y DEGLI OGGETTI DOPO LA PERDITA DI UNA VITA ---------------------------------------------------------
def resetAfterLifeLoss(self):
for i in range(self.TRASH):
self.trash_list[i].update({'y': -1000 + random.randint(0, 1000)})
self.trash_list[i].update({'x': self._t_tube.randomTube()})
if self.trash_list[i].get('color') == 'blu':
self.trash_list[i].update({'image': self.randomImageBlue()})
elif self.trash_list[i].get('color') == 'grigio':
self.trash_list[i].update({'image': self.randomImageGray()})
elif self.trash_list[i].get('color') == 'verde':
self.trash_list[i].update({'image': self.randomImageGreen()})
elif self.trash_list[i].get('color') == 'giallo':
self.trash_list[i].update({'image': self.randomImageYellow()})
# ----------------------------------------------------------------------------------------------------------------- | {"/data/trash.py": ["/data/trash_tube.py"], "/main.py": ["/data/trash.py", "/data/player.py", "/data/ui.py"], "/data/player.py": ["/data/bin_collector.py"]} |
59,784 | JhonTXdons/RecyclerMan | refs/heads/master | /main.py | import pygame.rect
from data.trash import *
from data.player import *
from data.ui import *
class App:
    """Top-level pygame application for Recycler Man.

    Owns the 1200x1200 window, the game-state flags (menu / play / pause /
    lost / win), the score, pollution and life counters, and the nested
    per-state event/render loops.  Original Italian comments translated.
    """
    def __init__(self):
        self._running = True
        # GAME STATES -------------------------------------------------------------------------------------------------
        self.menu = True
        self.PLAY_STATE = False
        self.PAUSE_STATE = False
        self.LOST_STATE = False
        self.WIN_STATE = False
        # -------------------------------------------------------------------------------------------------------------
        # MAIN SCREEN DEFINITION --------------------------------------------------------------------------------------
        self._display_surf = None
        self.clock = pygame.time.Clock()
        self._title_ = None
        self._p = Player()
        self._bin = Bin()
        self._t_tube = Tube()
        self._t = Trash()
        self._ui = Ui()
        pygame.font.init()
        self.font = pygame.font.Font('data/assets/fonts/Coiny-Regular.ttf', 20)
        # Number of trash items iterated each frame
        self.TRASH = self._t.TRASH
        self.trash_list = self._t.trash_list
        # Screen properties
        self.size = self.weight, self.height = 1200, 1200
        self._display_surf = pygame.display.set_mode(self.size)
        self._title_ = pygame.display.set_caption("Recycler Man")
        # SCORE
        self.POINT = 0
        self.BEST_POINT = 0
        self.point_check = 0
        self.point_text1 = self.font.render("PUNTEGGIO", True, (0, 0, 0))
        self.point_text2 = self.font.render(str(self.POINT), True, (0, 0, 0))
        # POLLUTION
        self.POLLUTION = 0
        self.gm_pollution = False
        self.pollution_text1 = self.font.render("INQUINAMENTO", True, (255, 0, 0))
        self.pollution_text2 = self.font.render(str(self.POLLUTION) + " %", True, (255, 0, 0))
        # LIVES
        self.VITA = 3
        self.life_text1 = self.font.render("VITE", True, (255, 0, 0))
        self.life_text2 = self.font.render(str(self.VITA), True, (255, 0, 0))
        # Menu image
        self.mn = pygame.image.load('data/assets/images/home.png')
        self.mn_resized = pygame.transform.scale(self.mn, (1200, 1200))
        self.mn_resized_HD = pygame.transform.scale(self.mn, (1000, 1000))
        # Background image
        self.bg = pygame.image.load('data/assets/images/sfondo.png').convert()
        self.bg_resized = pygame.transform.scale(self.bg, (900, 1200))
        # Wall background image
        self.bg_w = pygame.image.load('data/assets/images/muro.png').convert()
        self.bg_w_resized = pygame.transform.scale(self.bg_w, (300, 1200))
        # WASD-key hint image for the wall background
        self.bg_w_keys = pygame.image.load('data/assets/images/pulsanti-wasd.png')
        # Player image
        self.player = pygame.image.load('data/assets/images/omino.png')
        self.L_payer_resized = pygame.transform.scale(self.player, (150, 300))
    def on_init(self):
        """Initialise pygame and switch into the play state.

        NOTE(review): returns None, so main_loop's `== False` check can
        never be true.
        """
        pygame.init()
        self.PLAY_STATE = True
    # PLAYER, BIN AND TUBE PROPERTY ACCESSORS -------------------------------------------------------------------------
    def getPlayerStat(self):
        """Return the player's (x, y, width, height) tuple for blitting."""
        PlayerX = self._p.get_dimX()
        PlayerY = self._p.get_dimY()
        PlayerWeight = self._p.get_weight()
        PlayerHeight = self._p.get_height()
        PlayerPosition = PlayerX, PlayerY, PlayerWeight, PlayerHeight
        return PlayerPosition
    def getScoreLifePollution(self, score, life, pollution):
        """Render the score text, optional pollution text and life hearts.

        Hearts are drawn at y=400 in pollution mode, y=360 otherwise;
        at zero lives the state machine switches to LOST_STATE.
        """
        self.point_text2 = self.font.render(str(score), True, (0, 0, 0))
        if self.gm_pollution:
            self.pollution_text2 = self.font.render(str(pollution) + " %", True, (255, 0, 0))
            if life == 3:
                self._display_surf.blit(self._ui.i_heart_resized, [970, 400])
                self._display_surf.blit(self._ui.i_heart_resized, [1020, 400])
                self._display_surf.blit(self._ui.i_heart_resized, [1070, 400])
            elif life == 2:
                self._display_surf.blit(self._ui.i_heart_e_resized, [970, 400])
                self._display_surf.blit(self._ui.i_heart_resized, [1020, 400])
                self._display_surf.blit(self._ui.i_heart_resized, [1070, 400])
            elif life == 1:
                self._display_surf.blit(self._ui.i_heart_e_resized, [970, 400])
                self._display_surf.blit(self._ui.i_heart_e_resized, [1020, 400])
                self._display_surf.blit(self._ui.i_heart_resized, [1070, 400])
            elif life == 0:
                self._display_surf.blit(self._ui.i_heart_e_resized, [970, 400])
                self._display_surf.blit(self._ui.i_heart_e_resized, [1020, 400])
                self._display_surf.blit(self._ui.i_heart_e_resized, [1070, 400])
                self.LOST_STATE = True
                self.PLAY_STATE = False
        else:
            if life == 3:
                self._display_surf.blit(self._ui.i_heart_resized, [970, 360])
                self._display_surf.blit(self._ui.i_heart_resized, [1020, 360])
                self._display_surf.blit(self._ui.i_heart_resized, [1070, 360])
            elif life == 2:
                self._display_surf.blit(self._ui.i_heart_e_resized, [970, 360])
                self._display_surf.blit(self._ui.i_heart_resized, [1020, 360])
                self._display_surf.blit(self._ui.i_heart_resized, [1070, 360])
            elif life == 1:
                self._display_surf.blit(self._ui.i_heart_e_resized, [970, 360])
                self._display_surf.blit(self._ui.i_heart_e_resized, [1020, 360])
                self._display_surf.blit(self._ui.i_heart_resized, [1070, 360])
            elif life == 0:
                self._display_surf.blit(self._ui.i_heart_e_resized, [970, 360])
                self._display_surf.blit(self._ui.i_heart_e_resized, [1020, 360])
                self._display_surf.blit(self._ui.i_heart_e_resized, [1070, 360])
                self.LOST_STATE = True
                self.PLAY_STATE = False
    def getBinStat(self):
        """Return the bin's (x, y, width, height) tuple for blitting."""
        BinX = self._bin.get_dimX()
        BinY = self._bin.get_dimY()
        BinWeight = self._bin.get_weight()
        BinHeight = self._bin.get_height()
        BinPosition = BinX, BinY, BinWeight, BinHeight
        return BinPosition
    def getTubeStat(self):
        """Return the trash tube's (x, y, width, height) tuple for blitting."""
        TubeX = self._t_tube.get_dimX()
        TubeY = self._t_tube.get_dimY()
        TubeWeight = self._t_tube.get_weight()
        TubeHeight = self._t_tube.get_height()
        TubePosition = TubeX, TubeY, TubeWeight, TubeHeight
        return TubePosition
    # -----------------------------------------------------------------------------------------------------------------
    def playerBinLoop(self):
        """Per-frame update: move and draw the bin and the player sprite."""
        keys = pygame.key.get_pressed()
        self._bin.bMov(keys)
        self._display_surf.blit(self._bin.get_type(), self.getBinStat())
        self._bin.binChangeColor(keys)
        self._p.pMov(keys)
        self._display_surf.blit(pygame.transform.flip(self.player, True, False), self.getPlayerStat())
    def trashLoop(self):
        """Per-frame update: draw trash and tubes, advance positions,
        resolve collisions and raise speed at score thresholds."""
        for i in range(self.TRASH):
            self._display_surf.blit(self.trash_list[i].get('image'),
                                    (self.trash_list[i].get('x'), self.trash_list[i].get('y')))
        self._display_surf.blit(self._t_tube.trash_tube_resized, self.getTubeStat())
        self._display_surf.blit(self._t_tube.trash_tube_resized, (0, 0, 300, 100))
        self._display_surf.blit(self._t_tube.trash_tube_resized, (600, 0, 300, 100))
        self._t.updatePosition()
        self.checkCollisionBin()
        self.mode_pollution(self.gm_pollution)
        self._t.chekBorder()
        # Speed increase based on the score reached ------------------------------------------------------------------
        if 500 <= self.POINT < 2500 and self.point_check == 0:
            self._t.speedUp()
            self.point_check = 1  # prevents speedUp from being called repeatedly
        if 2500 <= self.POINT < 5000 and self.point_check == 1:
            self._t.speedUp()
            self.point_check = 2
        if 5000 <= self.POINT < 7500 and self.point_check == 2:
            self._t.speedUp()
            self.point_check = 3
    # -----------------------------------------------------------------------------------------------------------------
    # BIN COLLISION CHECK ---------------------------------------------------------------------------------------------
    def checkCollisionBin(self):
        """Check each trash item against the bin: matching colours score
        100 points and respawn the item; mismatches cost one life."""
        for i in range(self.TRASH):
            rec1 = self._bin.bin_blue_resized.get_rect()
            rec1.x = self._bin.get_dimX()
            rec1.y = self._bin.get_dimY()
            rec2 = self.trash_list[i].get('image').get_rect()
            rec2.x = self.trash_list[i].get('x')
            rec2.y = self.trash_list[i].get('y')
            if rec2.colliderect(rec1) and self.VITA != 0:
                if self._bin.get_color() == self.trash_list[i].get('color'):
                    self.POINT += 100
                    self.trash_list[i].update({'y': -1500 + random.randint(0, 1000)})
                    self.trash_list[i].update({'x': self._t_tube.randomTube()})
                    if self.trash_list[i].get('color') == 'blu':
                        self.trash_list[i].update({'image': self._t.randomImageBlue()})
                    elif self.trash_list[i].get('color') == 'grigio':
                        self.trash_list[i].update({'image': self._t.randomImageGray()})
                    elif self.trash_list[i].get('color') == 'verde':
                        self.trash_list[i].update({'image': self._t.randomImageGreen()})
                    elif self.trash_list[i].get('color') == 'giallo':
                        self.trash_list[i].update({'image': self._t.randomImageYellow()})
                    print(self.POINT)
                else:
                    self.VITA -= 1
                    self._t.resetAfterLifeLoss()
            else:
                pass
    # -----------------------------------------------------------------------------------------------------------------
    def main_loop(self):
        """Outer state machine: dispatch to the active state's loop
        until the application quits."""
        if self.on_init() == False:
            self._running = False
        while self._running:
            if self.menu:
                self.main_menu()
            elif self.PLAY_STATE:
                self.game_play()
            elif self.LOST_STATE and self.VITA == 0:
                self.game_over()
            elif self.WIN_STATE:
                self.game_win()
        self.on_cleanup()
    def on_cleanup(self):
        """Shut pygame down."""
        pygame.quit()
    # GAME ----------------------------------------------------------------------------------------------------------
    def main_menu(self):
        """Blocking main-menu loop: waits for a click on the play button."""
        self._display_surf.fill((255, 255, 255))
        self._display_surf.blit(self.mn_resized, [0, 0])
        title_text = pygame.image.load('data/assets/images/titolo.png')
        title_resized = pygame.transform.scale(title_text, (500, 200))
        font = pygame.font.Font('data/assets/fonts/ARCADECLASSIC.TTF', 30)
        play_text = font.render("GIOCA", True, (255, 255, 255))
        self._display_surf.blit(title_resized, [380, 530])
        self._display_surf.blit(play_text, [590, 800])
        # Clickable hit box for the play button
        rect = pygame.Rect(470, 775, 330, 90)
        while self.menu:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.menu = False
                    self._running = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    if pygame.mouse.get_pressed()[0] and rect.collidepoint(pos):
                        self.game_mode_switch()
            pygame.display.flip()
            self.clock.tick(60)
    def game_mode_switch(self):
        """Blocking screen asking the player to pick a game mode
        (endless vs pollution)."""
        reading = True
        self._display_surf.fill((0, 0, 0))
        font = pygame.font.Font('data/assets/fonts/Coiny-Regular.ttf', 35)
        text1 = font.render("SCEGLI LA MODALITA' GIOCO", True, (255, 255, 255))
        text2 = font.render("INFINITA", True, (255, 255, 255))
        text3 = font.render("INQUINAMENTO", True, (255, 255, 255))
        self._display_surf.blit(text1, [300, 150])
        self._display_surf.blit(text2, [450, 300])
        self._display_surf.blit(text3, [450, 400])
        R_rect = text2.get_rect()
        R_rect.x = 450
        R_rect.y = 300
        L_rect = text3.get_rect()
        L_rect.x = 450
        L_rect.y = 400
        while reading:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    reading = False
                    self.menu = False
                    self._running = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    if pygame.mouse.get_pressed()[0] and R_rect.collidepoint(pos):
                        reading = False
                        self.tutorial_infinite()
                    if pygame.mouse.get_pressed()[0] and L_rect.collidepoint(pos):
                        reading = False
                        self.tutorial_pollution()
            pygame.display.flip()
            self.clock.tick(60)
    def tutorial_infinite(self):
        """Tutorial screen for endless mode; right arrow starts the game,
        left arrow returns to mode selection."""
        reading = True
        self._display_surf.fill((255, 255, 255))
        tut_image = pygame.image.load('data/assets/images/tutorial3.png')
        tut_image_resized = pygame.transform.scale(tut_image, (1200, 1200))
        self._display_surf.blit(tut_image_resized, [0, 0])
        left_arrow = pygame.image.load('data/assets/images/left_arrow.png')
        left_arrow_resized = pygame.transform.scale(left_arrow, (200, 100))
        right_arrow_resized = pygame.transform.flip(left_arrow_resized, True, False)
        self._display_surf.blit(left_arrow_resized, [150, 1050])
        self._display_surf.blit(right_arrow_resized, [900, 1050])
        R_rect = right_arrow_resized.get_rect()
        R_rect.x = 900
        R_rect.y = 1050
        L_rect = left_arrow_resized.get_rect()
        L_rect.x = 150
        L_rect.y = 1050
        while reading:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    reading = False
                    self.menu = False
                    self._running = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    if pygame.mouse.get_pressed()[0] and R_rect.collidepoint(pos):
                        self.menu = False
                        reading = False
                        self.gm_pollution = False
                        self.PLAY_STATE = True
                    if pygame.mouse.get_pressed()[0] and L_rect.collidepoint(pos):
                        reading = False
                        self.game_mode_switch()
            pygame.display.flip()
            self.clock.tick(60)
    def tutorial_pollution(self):
        """Tutorial screen for pollution mode; right arrow starts the game,
        left arrow returns to mode selection."""
        reading = True
        self._display_surf.fill((255, 255, 255))
        tut_image = pygame.image.load('data/assets/images/tutorial2.png')
        tut_image_resized = pygame.transform.scale(tut_image, (1200, 1200))
        self._display_surf.blit(tut_image_resized, [0, 0])
        left_arrow = pygame.image.load('data/assets/images/left_arrow.png')
        left_arrow_resized = pygame.transform.scale(left_arrow, (200, 100))
        right_arrow_resized = pygame.transform.flip(left_arrow_resized, True, False)
        self._display_surf.blit(left_arrow_resized, [150, 1050])
        self._display_surf.blit(right_arrow_resized, [900, 1050])
        R_rect = right_arrow_resized.get_rect()
        R_rect.x = 900
        R_rect.y = 1050
        L_rect = left_arrow_resized.get_rect()
        L_rect.x = 150
        L_rect.y = 1050
        while reading:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    reading = False
                    self.menu = False
                    self._running = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    if pygame.mouse.get_pressed()[0] and R_rect.collidepoint(pos):
                        reading = False
                        self.menu = False
                        self.PLAY_STATE = True
                        self.gm_pollution = True
                    if pygame.mouse.get_pressed()[0] and L_rect.collidepoint(pos):
                        reading = False
                        self.game_mode_switch()
            pygame.display.flip()
            self.clock.tick(60)
    # GAME STATES -----------------------------------------------------------------------------------------------------
    def game_play(self):
        """Main in-game loop: handle input, draw the scene, update trash
        and HUD, and honour the pause/replay/quit buttons."""
        while self.PLAY_STATE:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.PLAY_STATE = False
                    self._running = False
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        self.PAUSE_STATE = not self.PAUSE_STATE
                    if event.key == pygame.K_r:
                        self.game_replay()
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    if pygame.mouse.get_pressed()[0] and self._ui.pause_rect.collidepoint(pos):
                        self.PAUSE_STATE = not self.PAUSE_STATE
                    if pygame.mouse.get_pressed()[0] and self._ui.replay_rect.collidepoint(pos):
                        self.game_replay()
                    if pygame.mouse.get_pressed()[0] and self._ui.esc_rect.collidepoint(pos):
                        self.menu = True
                        self.VITA = 3
                        self.POINT = 0
                        self._t.resetAfterLifeLoss()
                        self.LOST_STATE = False
                        self.PLAY_STATE = False
            self._display_surf.fill((0, 0, 0))
            self._display_surf.blit(self.bg_resized, [0, 0])
            self._display_surf.blit(self.bg_w_resized, [900, 0])
            if self.gm_pollution:
                self._display_surf.blit(self.point_text1, [970, 250])
                self._display_surf.blit(self.point_text2, [970, 280])
                self._display_surf.blit(self.pollution_text1, [970, 310])
                self._display_surf.blit(self.pollution_text2, [970, 340])
                self._display_surf.blit(self.life_text1, [970, 370])
            else:
                self._display_surf.blit(self.point_text1, [970, 250])
                self._display_surf.blit(self.point_text2, [970, 280])
                self._display_surf.blit(self.life_text1, [970, 320])
            self.playerBinLoop()
            self.trashLoop()
            self.getScoreLifePollution(self.POINT, self.VITA, self.POLLUTION)
            # UI -------------------------------------------------------------
            self._display_surf.blit(self.bg_w_keys, [950, 600])
            if self.PAUSE_STATE:
                self.game_pause()
            else:
                self._display_surf.blit(self._ui.b_pause_resized, [950, 900])
            self._display_surf.blit(self._ui.b_replay_resized, [1030, 900])
            self._display_surf.blit(self._ui.b_esc_resized, [1110, 900])
            pygame.display.flip()
            self.clock.tick(60)
    def game_replay(self):
        """Reset lives/score and rebuild the trash list for a new round.

        NOTE(review): initTrashList appends to the existing list, so the
        underlying trash_list grows on every replay (only the first TRASH
        entries are ever used).
        """
        self.VITA = 3
        self.BEST_POINT = self.POINT
        self.POINT = 0
        self.LOST_STATE = False
        self.WIN_STATE = False
        self.PLAY_STATE = True
        self._t.initTrashList()
    def game_pause(self):
        """Blocking pause loop: resume, replay or exit to menu."""
        self._display_surf.blit(self._ui.b_play_resized, [950, 900])
        self._display_surf.blit(self._ui.b_replay_resized, [1030, 900])
        self._display_surf.blit(self._ui.b_esc_resized, [1110, 900])
        while self.PAUSE_STATE:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.PAUSE_STATE = not self.PAUSE_STATE
                    self.PLAY_STATE = False
                    self.menu = False
                    self._running = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    if pygame.mouse.get_pressed()[0] and self._ui.play_rect.collidepoint(pos):
                        self.PAUSE_STATE = not self.PAUSE_STATE
                    if pygame.mouse.get_pressed()[0] and self._ui.replay_rect.collidepoint(pos):
                        self.PAUSE_STATE = not self.PAUSE_STATE
                        self.game_replay()
                    if pygame.mouse.get_pressed()[0] and self._ui.esc_rect.collidepoint(pos):
                        self.PAUSE_STATE = not self.PAUSE_STATE
                        self.PLAY_STATE = False
                        self.menu = True
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        self.PAUSE_STATE = not self.PAUSE_STATE
                    if event.key == pygame.K_p:
                        self.PAUSE_STATE = not self.PAUSE_STATE
                    if event.key == pygame.K_r:
                        self.PAUSE_STATE = not self.PAUSE_STATE
                        self.game_replay()
            pygame.display.update()
            self.clock.tick(60)
    def game_win(self):
        """Victory screen loop with final score and replay/exit buttons."""
        # Background image
        bg_loss = pygame.image.load('data/assets/images/sfondo_win.png')
        bg_loss_resized = pygame.transform.scale(bg_loss, (1200, 1200))
        self._display_surf.blit(bg_loss_resized, [0, 0])
        font = pygame.font.Font('data/assets/fonts/ARCADECLASSIC.TTF', 60)
        font2 = pygame.font.Font('data/assets/fonts/ARCADECLASSIC.TTF', 35)
        game_over_text = font.render("VITTORIA", True, (255, 0, 0))
        point_text3 = font2.render("PUNTEGGIO", True, (0, 0, 0))
        point_text4 = font2.render(str(self.POINT), True, (0, 0, 0))
        self._display_surf.blit(game_over_text, [450, 520])
        self._display_surf.blit(point_text3, [500, 620])
        self._display_surf.blit(point_text4, [550, 650])
        # UI --------------------------------------------------------------------------------------------------------
        self._display_surf.blit(self._ui.b_play_resized, [500, 940])
        self._display_surf.blit(self._ui.b_esc_resized, [580, 940])
        # -----------------------------------------------------------------------------------------------------------
        while self.WIN_STATE:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.WIN_STATE = False
                    self._running = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    if pygame.mouse.get_pressed()[0] and self._ui.play_rect_go.collidepoint(pos):
                        self.game_replay()
                    if pygame.mouse.get_pressed()[0] and self._ui.esc_rect_go.collidepoint(pos):
                        self.WIN_STATE = not self.WIN_STATE
                        self.menu = not self.menu
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        self.WIN_STATE = False
                        self.menu = True
                    if event.key == pygame.K_p:
                        self.WIN_STATE = False
                        self.PLAY_STATE = True
            pygame.display.update()
            self.clock.tick(60)
    def game_over(self):
        """Defeat screen loop with final score and replay/exit buttons."""
        # Background image
        bg_loss = pygame.image.load('data/assets/images/sfondo_lost.png')
        bg_loss_resized = pygame.transform.scale(bg_loss, (1200, 1200))
        self._display_surf.blit(bg_loss_resized, [0, 0])
        font = pygame.font.Font('data/assets/fonts/ARCADECLASSIC.TTF', 60)
        font2 = pygame.font.Font('data/assets/fonts/ARCADECLASSIC.TTF', 35)
        game_over_text = font.render("GAME OVER", True, (255, 0, 0))
        point_text3 = font2.render("PUNTEGGIO", True, (0, 0, 0))
        point_text4 = font2.render(str(self.POINT), True, (0, 0, 0))
        self._display_surf.blit(game_over_text, [450, 520])
        self._display_surf.blit(point_text3, [500, 620])
        self._display_surf.blit(point_text4, [550, 650])
        # UI --------------------------------------------------------------------------------------------------------
        self._display_surf.blit(self._ui.b_play_resized, [500, 940])
        self._display_surf.blit(self._ui.b_esc_resized, [580, 940])
        # -----------------------------------------------------------------------------------------------------------
        while self.LOST_STATE:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.LOST_STATE = False
                    self._running = False
                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    if pygame.mouse.get_pressed()[0] and self._ui.play_rect_go.collidepoint(pos):
                        self.game_replay()
                    if pygame.mouse.get_pressed()[0] and self._ui.esc_rect_go.collidepoint(pos):
                        self.LOST_STATE = not self.LOST_STATE
                        self.menu = not self.menu
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        self.LOST_STATE = False
                        self.menu = True
                    if event.key == pygame.K_p:
                        self.LOST_STATE = False
                        self.PLAY_STATE = True
            pygame.display.update()
            self.clock.tick(60)
    # GAME MODE POLLUTION ---------------------------------------------------------------------------------------------
    def mode_pollution(self, gm):
        """Pollution-mode rules: missed trash raises POLLUTION by 5;
        10000 points wins, 100% pollution loses.  No-op when gm is False."""
        if gm == True:
            for i in range(self.TRASH):
                if self.trash_list[i].get('y') >= 1200:
                    self.POLLUTION += 5
                    print(str(self.POLLUTION))
            if self.POINT == 10000 and self.POLLUTION != 100:
                self.WIN_STATE = True
                self.PLAY_STATE = False
            if self.POLLUTION == 100:
                self.LOST_STATE = True
                self.PLAY_STATE = False
# Script entry point: build the application and hand control to its state machine.
if __name__ == "__main__":
    theApp = App()
    theApp.main_loop()
| {"/data/trash.py": ["/data/trash_tube.py"], "/main.py": ["/data/trash.py", "/data/player.py", "/data/ui.py"], "/data/player.py": ["/data/bin_collector.py"]} |
59,785 | JhonTXdons/RecyclerMan | refs/heads/master | /data/ui.py | import pygame
class Ui:
    """Loads and positions the shared HUD artwork: play/replay/pause/exit
    buttons (all 60x60) plus the full and empty heart icons (40x40)."""

    def __init__(self):
        # Play button: used both on the in-game HUD and on the end screens.
        self.b_play = pygame.image.load('data/assets/images/play_button.png')
        self.b_play_resized = pygame.transform.scale(self.b_play, (60, 60))
        self.play_rect = self._place(self.b_play_resized, 970, 900)
        self.b_play_position = self.play_rect.x, self.play_rect.y
        self.play_rect_go = self._place(self.b_play_resized, 500, 940)
        self.b_play_position_go = self.play_rect_go.x, self.play_rect_go.y
        # Replay button.  As in the original code, its hit box is sized
        # from the play image (all buttons share the same 60x60 size).
        self.b_replay = pygame.image.load('data/assets/images/replay_button.png')
        self.b_replay_resized = pygame.transform.scale(self.b_replay, (60, 60))
        self.replay_rect = self._place(self.b_play_resized, 1060, 900)
        self.b_replay_position = self.replay_rect.x, self.replay_rect.y
        # Pause button.
        self.b_pause = pygame.image.load('data/assets/images/pause_button.png')
        self.b_pause_resized = pygame.transform.scale(self.b_pause, (60, 60))
        self.pause_rect = self._place(self.b_pause_resized, 970, 900)
        self.b_pause_position = self.pause_rect.x, self.pause_rect.y
        # Exit button; hit box sized from the pause image (same 60x60).
        self.b_esc = pygame.image.load('data/assets/images/esc_button.png')
        self.b_esc_resized = pygame.transform.scale(self.b_esc, (60, 60))
        self.esc_rect = self._place(self.b_pause_resized, 1130, 900)
        self.b_esc_position = self.esc_rect.x, self.esc_rect.y
        self.esc_rect_go = self._place(self.b_play_resized, 580, 940)
        self.b_esc_position_go = self.esc_rect_go.x, self.esc_rect_go.y
        # Full and empty heart icons for the life counter.
        self.i_heart = pygame.image.load('data/assets/images/cuore.png')
        self.i_heart_resized = pygame.transform.scale(self.i_heart, (40, 40))
        self.i_heart_e = pygame.image.load('data/assets/images/cuore vuoto.png')
        self.i_heart_e_resized = pygame.transform.scale(self.i_heart_e, (40, 40))

    def _place(self, surface, x, y):
        """Return the surface's bounding rect moved to (x, y)."""
        rect = surface.get_rect()
        rect.x = x
        rect.y = y
        return rect
| {"/data/trash.py": ["/data/trash_tube.py"], "/main.py": ["/data/trash.py", "/data/player.py", "/data/ui.py"], "/data/player.py": ["/data/bin_collector.py"]} |
59,786 | JhonTXdons/RecyclerMan | refs/heads/master | /data/player.py | from data.bin_collector import *
import pygame
class Player:
    """The recycler-man sprite: position, size and horizontal movement.

    Coordinates are in screen pixels; the playable area is 900 px wide.
    """

    def __init__(self):
        self._dimX = 300
        self._dimY = 900
        self._weight = 150
        self._height = 300
        self._velocity = 10
        self._orientation = ''
        self._color = 244, 164, 96
        self.playable_screen = 900
        self._bin = Bin()

    def get_dimX(self):
        """Return the current horizontal position."""
        return self._dimX

    def get_dimY(self):
        """Return the current vertical position."""
        return self._dimY

    def get_weight(self):
        """Return the sprite width in pixels."""
        return self._weight

    def get_height(self):
        """Return the sprite height in pixels."""
        return self._height

    def get_color(self):
        """Return the player's RGB colour tuple."""
        return self._color

    def pMov(self, KEY):
        """Move left/right from the pressed-key state; returns 'L' or 'R'
        when a move happened, otherwise None."""
        if KEY[pygame.K_LEFT] and self._dimX > 0:
            self._dimX -= self._velocity
            self._orientation = 'L'
            return self._orientation
        right_limit = self.playable_screen - (50 + self._weight)
        if KEY[pygame.K_RIGHT] and self._dimX < right_limit:
            self._dimX += self._velocity
            self._orientation = 'R'
            return self._orientation
| {"/data/trash.py": ["/data/trash_tube.py"], "/main.py": ["/data/trash.py", "/data/player.py", "/data/ui.py"], "/data/player.py": ["/data/bin_collector.py"]} |
59,787 | JhonTXdons/RecyclerMan | refs/heads/master | /data/bin_collector.py | import pygame
class Bin():
    """The player's recycling bin: position, movement and the four
    colour/sprite combinations selectable with the W/A/S/D keys."""

    def __init__(self):
        self._dimX = 400
        self._dimY = 950
        self._weight = 100
        self._height = 100
        self._velocity = 10
        self._orientation = True
        self._color = 'blu'
        # BIN SPRITES ------------------------------------------------------------------
        # Blue bin (plastic).
        self.bin_blue = pygame.image.load('data/assets/images/bidone blu.png')
        self.bin_blue_resized = pygame.transform.scale(self.bin_blue, (100, 100))
        # Green bin (glass).
        self.bin_green = pygame.image.load('data/assets/images/bidone verde.png')
        self.bin_green_resized = pygame.transform.scale(self.bin_green, (100, 100))
        # Gray bin (organic waste).
        self.bin_gray = pygame.image.load('data/assets/images/bidone grigio.png')
        self.bin_gray_resized = pygame.transform.scale(self.bin_gray, (100, 100))
        # Yellow bin (paper).
        self.bin_yellow = pygame.image.load('data/assets/images/bidone giallo.png')
        self.bin_yellow_resized = pygame.transform.scale(self.bin_yellow, (100, 100))
        self._type = self.bin_blue_resized

    def set_dimX(self, dimX):
        """Set and return the horizontal position."""
        self._dimX = dimX
        return self._dimX

    def set_color(self, color):
        """Set and return the current colour tag."""
        self._color = color
        return self._color

    def set_type(self, type):
        """Set and return the current bin sprite."""
        self._type = type
        return self._type

    def get_dimX(self):
        """Return the horizontal position."""
        return self._dimX

    def get_dimY(self):
        """Return the vertical position."""
        return self._dimY

    def get_weight(self):
        """Return the sprite width in pixels."""
        return self._weight

    def get_height(self):
        """Return the sprite height in pixels."""
        return self._height

    def get_color(self):
        """Return the current colour tag."""
        return self._color

    def get_type(self):
        """Return the current bin sprite."""
        return self._type

    def bMov(self, KEY):
        """Move left/right from the pressed-key state, clamped to the
        playable area; returns the new orientation flag on a move."""
        if KEY[pygame.K_LEFT] and self._dimX > self._weight:
            self._dimX -= self._velocity
            self._orientation = True
            return self._orientation
        if KEY[pygame.K_RIGHT] and self._dimX < 900 - self._weight:
            self._dimX += self._velocity
            self._orientation = False
            return self._orientation

    def binChangeColor(self, KEY):
        """Switch bin colour/sprite when W/A/S/D is held.

        The key table is built at call time so pygame constants are only
        touched when the method actually runs.
        """
        bindings = (
            (pygame.K_w, 'blu', self.bin_blue_resized),
            (pygame.K_a, 'verde', self.bin_green_resized),
            (pygame.K_s, 'grigio', self.bin_gray_resized),
            (pygame.K_d, 'giallo', self.bin_yellow_resized),
        )
        for key, colour, surface in bindings:
            if KEY[key]:
                self.BinColor = colour
                self.set_color(self.BinColor)
                self.set_type(surface)
                print(self._color)
| {"/data/trash.py": ["/data/trash_tube.py"], "/main.py": ["/data/trash.py", "/data/player.py", "/data/ui.py"], "/data/player.py": ["/data/bin_collector.py"]} |
59,792 | nehasaini-git/Calculator | refs/heads/master | /steps/steps.py | from behave import given, when, then
from calculator import Calculator
@given("I have entered {number1:d} into the calculator")
def step_impl(context, number1):
    # Store the first operand on the shared behave context.
    context.number1 = number1
@given("I have also entered {number2:d} into the calculator")
def step_impl(context, number2):
    # Store the second operand on the shared behave context.
    context.number2 = number2
@when("I press add")
def step_impl(context):
    # Perform the addition and keep the result for the @then step.
    context.calculator = Calculator()
    context.result = context.calculator.add(context.number1, context.number2)
@then("the sum should be {result:d}")
def step_impl(context, result):
    # Verify the computed sum against the expected value from the scenario.
    assert context.result == result
59,793 | nehasaini-git/Calculator | refs/heads/master | /calculator.py | class Calculator:
    def add(self, num1, num2):
        """Return num1 + num2.

        NOTE(review): the operands are also stored on the instance as
        self.num1 / self.num2; no caller visible here reads them back.
        """
        self.num1 = num1
        self.num2 = num2
        return num1 + num2
59,836 | praekelt/mtvc-api-client | refs/heads/master | /mtvc_client/libs/django/decorators.py | from functools import wraps
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from mtvc_client.client import APIClientException
def view_exception_handler(view_func):
"""
A view decorator that undestands MTVC error codes and how to deal
with the state changes that they hint at.
Eg when a Subscriber's Profile information is incomplete or outdated
then the API returns HTTP 401 (Unauthorized) with the following
error details in the response:
{
"error_code": "NO_SUBSCRIBER_PROFILE"
}
The API client would catch these, raise APIClientException and add
the error details to the exception context.
"""
def _decorator(request, *args, **kwargs):
try:
return view_func(request, *args, **kwargs)
except APIClientException, e:
if e.error_code == 'HANDSET_NOT_SUPPORTED':
return HttpResponseRedirect(reverse('handset-not-supported'))
if e.error_code == 'NO_SUBSCRIBER_PROFILE':
return HttpResponseRedirect(reverse('profile'))
if e.error_code == 'NO_SUBSCRIPTION':
return HttpResponseRedirect(reverse('product'))
if e.error_code == 'TRANSACTION_FAILED':
return HttpResponseRedirect(reverse('transaction-failed'))
raise
return wraps(view_func)(_decorator)
| {"/mtvc_client/libs/django/utils.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/middleware.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/views.py": ["/mtvc_client/client.py", "/mtvc_client/libs/django/forms.py", "/mtvc_client/libs/django/decorators.py"]} |
59,837 | praekelt/mtvc-api-client | refs/heads/master | /mtvc_client/libs/django/urls.py | from django.conf.urls import patterns, url
from mtvc_client.libs.django import views
# URL routes for the MTVC demo front-end.  A second, byte-identical 'epg'
# entry used to follow the 'clip-detail' route; Django resolves the first
# matching pattern, so the duplicate was unreachable and has been removed.
urlpatterns = patterns(
    '',
    url(r'^$', views.ChannelsView.as_view(), name='home'),
    url(r'^channels/$', views.ChannelsView.as_view(), name='channels'),
    url(r'^channel/(?P<slug>[\w-]+)/$', views.EPGView.as_view(), name='epg'),
    url(r'^shows/$', views.ShowsView.as_view(), name='shows'),
    url(r'^clips/$', views.ClipsView.as_view(), name='clips'),
    url(r'^clips/popular/$', views.ClipsPopularView.as_view(),
        name='clips-popular'),
    url(r'^clips/featured/$', views.ClipsFeaturedView.as_view(),
        name='clips-featured'),
    url(r'^clips/channel/(?P<slug>[\w-]+)/$',
        views.ClipsByChannelView.as_view(),
        name='clips-by-channel'),
    url(r'^clip/(?P<slug>[\w-]+)/$',
        views.ClipDetailView.as_view(), name='clip-detail'),
    url(r'^(?P<content_type>channel|clip)/(?P<slug>[\w-]+)/watch/$',
        views.WatchView.as_view(), name='watch'),
    url(r'^help/$', views.HelpView.as_view(), name='help'),
    url(r'^account/$', views.AccountView.as_view(), name='account'),
    url(r'^profile/$', views.ProfileView.as_view(), name='profile'),
    url(r'^product/$', views.ProductView.as_view(), name='product'),
    url(r'^handset-not-supported/$', views.HandsetNotSupportedView.as_view(),
        name='handset-not-supported'),
)
| {"/mtvc_client/libs/django/utils.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/middleware.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/views.py": ["/mtvc_client/client.py", "/mtvc_client/libs/django/forms.py", "/mtvc_client/libs/django/decorators.py"]} |
59,838 | praekelt/mtvc-api-client | refs/heads/master | /mtvc_client/libs/django/utils.py | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.cache import cache
from mtvc_client.client import APIClient
def get_request_msisdn(request):
    """
    Return the subscriber MSISDN taken from the request header named by
    ``settings.MSISDN_HEADER``; defaults to '' when the header is absent.
    """
    try:
        return request.META.get(settings.MSISDN_HEADER, '')
    except AttributeError:
        # settings object has no MSISDN_HEADER attribute at all
        raise ImproperlyConfigured(
            'Missing setting MSISDN_HEADER in the settings file')
def get_request_ip(request):
    """
    Return the client IP taken from the request header named by
    ``settings.CLIENT_IP_HEADER``; defaults to '' when the header is absent.
    """
    try:
        return request.META.get(settings.CLIENT_IP_HEADER, '')
    except AttributeError:
        # settings object has no CLIENT_IP_HEADER attribute at all
        raise ImproperlyConfigured(
            'Missing setting CLIENT_IP_HEADER in the settings file')
def get_request_user_agent(request):
    """Return the request's User-Agent header, defaults to ''."""
    return request.META.get('HTTP_USER_AGENT', '')
def get_request_referer(request):
    """
    Returns the HTTP referer header of the request, defaults to ''
    """
    return request.META.get('HTTP_REFERER', '')
def get_request_page_number(request):
    """
    Returns the page number in the request.
    Defaults to 1.
    """
    # NOTE(review): a non-numeric ?page= value raises ValueError here --
    # presumably callers only ever emit numeric page links; confirm.
    return int(request.GET.get('page', 1))
def get_response_paginator(request, meta):
    """
    Build a page-link structure from a Tastypie-style response ``meta``
    dict (keys ``total_count``, ``limit``, ``offset``).

    :param request: the current request (only ``path_info`` is read)
    :param meta: response meta-data dict from the API
    :returns: {'pages': [{'current': bool, 'index': int, 'url': str}, ...]}
    """
    # Use floor division so the arithmetic is identical on Python 2 and
    # Python 3 (plain ``/`` yields a float on Python 3 and would break
    # the ``range`` call below).
    pages = meta['total_count'] // meta['limit']
    # add a page for the remainder
    if meta['total_count'] % meta['limit']:
        pages += 1
    current_page = (meta['offset'] // meta['limit']) + 1
    return {
        'pages': [{
            'current': page == current_page,
            'index': page,
            'url': '%s?page=%s' % (request.path_info, page)
        } for page in range(1, pages + 1)]
    }
def get_cached_api_response(key, timeout, fn, **fn_kwargs):
    """
    Return the API response cached under ``key``; on a cache miss, call
    ``fn(**fn_kwargs)``, store the result for ``timeout`` seconds and
    return it.
    """
    results = cache.get(key)
    # Compare against None (the cache-miss sentinel) rather than
    # truthiness: the old ``if not results`` re-fetched legitimately
    # empty responses ({} / []) on every single call.
    if results is None:
        results = fn(**fn_kwargs)
        cache.set(key, results, timeout)
    return results
def get_profile_schema(timeout=60 * 60 * 24):
    """
    Returns the schema for a subscriber profile form, cached for 24
    hours by default
    """
    return get_cached_api_response(
        'PROFILE-SCHEMA', timeout,
        APIClient(**settings.API_CLIENT).get_profile_schema)
def get_transaction_schema(timeout=60 * 60 * 24):
    """
    Returns the schema for a subscriber transaction form, cached for 24
    hours by default
    """
    return get_cached_api_response(
        'TRANSACTION-SCHEMA', timeout,
        APIClient(**settings.API_CLIENT).get_transaction_schema)
def get_channel_list(page=1, results_per_page=30, timeout=60):
    """
    Returns a channel list, by default cached for 60 seconds.
    """
    # Cache key is per page; offset is derived from the 1-based page.
    return get_cached_api_response(
        'CHANNELS:::%d' % page, timeout,
        APIClient(**settings.API_CLIENT).get_channels,
        limit=results_per_page, offset=(page - 1) * results_per_page)
def get_clips_list(page=1, results_per_page=5, timeout=60 * 5):
    """
    Returns a clips list, by default cached for 5 minutes.
    """
    return get_cached_api_response(
        'CLIPS:::%d' % page, timeout,
        APIClient(**settings.API_CLIENT).get_clips,
        limit=results_per_page, offset=(page - 1) * results_per_page)
def get_featured_clips(page=1, results_per_page=5, timeout=60 * 5):
    """
    Returns a list of featured clips for the page number specified.
    Results for the page are cached for 5 minutes by default.
    """
    return get_cached_api_response(
        'CLIPS:::FEATURED:::%d' % page, timeout,
        APIClient(**settings.API_CLIENT).get_clips, featured=True,
        limit=results_per_page, offset=(page - 1) * results_per_page)
def get_popular_clips(page=1, results_per_page=5, timeout=60 * 5):
    """
    Returns a list of the popular clips for the page number specified.
    Results for the page are cached for 5 minutes by default.
    """
    # Popularity == descending number of stream requests.
    return get_cached_api_response(
        'CLIPS:::POPULAR:::%d' % page, timeout,
        APIClient(**settings.API_CLIENT).get_clips,
        order_by='-stream_requests', limit=results_per_page,
        offset=(page - 1) * results_per_page)
def get_clips_by_channel(slug, page=1, results_per_page=5, timeout=60 * 5):
    """
    Returns a list of the clips, filtered by channel slug, for the page
    number specified.
    Results for the page are cached for 5 minutes by default.
    """
    return get_cached_api_response(
        'CLIPS:::CHANNEL:::%s:::%d' % (slug, page), timeout,
        APIClient(**settings.API_CLIENT).get_clips,
        show__show_channel__slug__exact=slug, limit=results_per_page,
        offset=(page - 1) * results_per_page)
def get_clips_by_show(
        slug, channel_slug=None, page=1, results_per_page=5, timeout=60 * 5):
    """
    Return the clips for the show identified by ``slug``, optionally
    restricted to ``channel_slug``, for the requested page.

    Results for the page are cached for 5 minutes by default.
    """
    cache_key = 'CLIPS:::SHOW:::%s:::%s:::%d' % (slug, channel_slug, page)
    filters = dict(
        show__slug__exact=slug,
        limit=results_per_page,
        offset=(page - 1) * results_per_page,
    )
    if channel_slug is not None:
        filters['show__show_channel__slug__exact'] = channel_slug
    client = APIClient(**settings.API_CLIENT)
    return get_cached_api_response(cache_key, timeout, client.get_clips,
                                   **filters)
def get_clip_detail(slug, timeout=60 * 5):
    """
    Look up a single clip by slug and return it, or None when no clip
    matches.

    The API lookup (including a miss) is cached for 5 minutes by default.
    """
    response = get_cached_api_response(
        'CLIP:::%s' % slug, timeout,
        APIClient(**settings.API_CLIENT).get_clips,
        slug__exact=slug)
    if not response:
        return None
    objects = response.get('objects')
    return objects[0] if objects else None
def get_shows(channel_slug=None, timeout=60 * 5):
    """
    Returns a clip show list, optionally filtered by channel slug.
    Results are cached for 5 minutes by default
    """
    if channel_slug:
        return get_cached_api_response(
            'SHOWS:::%s' % channel_slug, timeout,
            APIClient(**settings.API_CLIENT).get_shows,
            show_channel__slug__exact=channel_slug,
            order_by='show__order')
    else:
        return get_cached_api_response(
            'SHOWS', timeout, APIClient(**settings.API_CLIENT).get_shows,
            order_by='show__order')
def get_show(slug, timeout=60 * 5):
    """
    Looks up and returns a clip show by slug, cached for 5 minutes by
    default.  Returns None when no show matches.
    """
    result = get_cached_api_response(
        'SHOW:::%s' % slug, timeout,
        APIClient(**settings.API_CLIENT).get_shows, slug__exact=slug)
    if result and 'objects' in result and result['objects']:
        # Tastypie-style list response; slug filtering yields at most one.
        return result['objects'][0]
    else:
        return None
def get_showchannels(timeout=60 * 5):
    """
    Returns a list of clip show channels, cached for 5 minutes by default
    """
    return get_cached_api_response(
        'SHOWCHANNELS', timeout,
        APIClient(**settings.API_CLIENT).get_showchannels)
def get_showchannel(slug, timeout=60 * 5):
    """
    Looks up and returns a clip show channel by slug.
    Returns None if no clip show channel was found.
    The result (possibly None) is cached for 5 minutes by default.
    """
    result = get_cached_api_response(
        'SHOWCHANNEL:::%s' % slug, timeout,
        APIClient(**settings.API_CLIENT).get_showchannels, slug__exact=slug)
    if result and 'objects' in result and result['objects']:
        return result['objects'][0]
    else:
        return None
def get_channel_epgs(slug, timeout=60 * 5):
    """
    Returns a list of EPGs for the channel identified by slug.
    The result is cached for 5 minutes by default
    """
    return get_cached_api_response(
        'CHANNEL-EPG:::%s' % slug, timeout,
        APIClient(**settings.API_CLIENT).get_epg, channel_id=slug)
def get_content_type_banners(model=None, slug=None, slot=None, timeout=60 * 5):
    """
    Returns a banner list, optionally filtered by content type model,
    content object slug and banner slot slug.
    Results are cached for 5 minutes by default.
    """
    filters = {}
    key = {
        'model': '__ANY__',
        'slug': '__ANY__',
        'slot': '__ANY__',
    }
    if model:
        key['model'] = model
        filters['content_type__model'] = model
    if slug:
        key['slug'] = slug
        filters['content_object__slug'] = slug
    if slot:
        key['slot'] = slot
        filters['slot__slug'] = slot
    # Namespace the cache key with a 'BANNERS' prefix: the previous
    # un-prefixed '<model>:::<slug>:::<slot>' format shared the key
    # space with this module's other ':::'-joined keys (e.g.
    # 'CLIPS:::FEATURED:::1') and could collide with them.
    return get_cached_api_response(
        'BANNERS:::%(model)s:::%(slug)s:::%(slot)s' % key, timeout,
        APIClient(**settings.API_CLIENT).get_banners, **filters)
def get_gender_choices():
    """Gender choices from the profile schema, with a blank first option."""
    schema = get_profile_schema()
    return [['', '---------']] + schema['fields']['gender']['choices']
def get_region_choices():
    """Region choices from the profile schema, with a blank first option."""
    schema = get_profile_schema()
    return [['', '---------']] + schema['fields']['region']['choices']
def get_product_choices():
    """Product choices from the transaction schema (no blank option)."""
    schema = get_transaction_schema()
    return schema['fields']['product_name']['choices']
| {"/mtvc_client/libs/django/utils.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/middleware.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/views.py": ["/mtvc_client/client.py", "/mtvc_client/libs/django/forms.py", "/mtvc_client/libs/django/decorators.py"]} |
59,839 | praekelt/mtvc-api-client | refs/heads/master | /mtvc_client/libs/django/middleware.py | import base64
from django.http import HttpResponse
from django.template import RequestContext
from django.shortcuts import render
from django.conf import settings
from mtvc_client.client import APIClientException
class APIClientExceptionMiddleware(object):
    """
    A middleware class that adds additional context information when
    APIClientExceptions are raised by the application
    """
    def process_exception(self, request, exception):
        # Render a branded 500 page carrying the MTVC error details.
        # Any other exception type falls through (return None) so Django
        # handles it normally.
        if isinstance(exception, APIClientException):
            return render(
                request, 'smart/500.html', {
                    'error_code': exception.error_code,
                    'error_message': exception.error_message,
                }, context_instance=RequestContext(request), status=500)
        return None
class BasicAuthMiddleware(object):
    """
    Use this Middleware to password-protect a portal using HTTP Basic
    Authentication with creds stored in a settings file.
    This is useful for eg. locking down a QA site pre-launch.
    Requires BASIC_AUTH_CREDS in Django settings which should be a
    dict with passwords keyed by username.
    """
    def get_rfa_response(self):
        """Build the 401 challenge response asking for Basic credentials."""
        response = HttpResponse(
            '<html><title>Authentication required</title><body>'
            '<h1>Authentication Required</h1></body></html>', status=401)
        response['WWW-Authenticate'] = 'Basic realm="Restricted"'
        return response
    def process_request(self, request):
        """Return a 401 challenge unless valid credentials are supplied."""
        # fail if we don't have a well-formed Authorization header
        try:
            auth_type, data = request.META['HTTP_AUTHORIZATION'].split()
        except (KeyError, ValueError):
            # KeyError: no Authorization header at all.
            # ValueError: header did not split into exactly two tokens --
            # previously this propagated and produced a 500 instead of
            # re-challenging the client.
            return self.get_rfa_response()
        # this is basic auth only
        if auth_type.lower() != 'basic':
            return self.get_rfa_response()
        # decode the BA data
        try:
            username, password = base64.b64decode(data).decode('utf-8').split(
                ':', 1)
        except (TypeError, ValueError):
            return self.get_rfa_response()
        if not hasattr(settings, 'BASIC_AUTH_CREDS') or \
                username not in settings.BASIC_AUTH_CREDS or \
                settings.BASIC_AUTH_CREDS[username] != password:
            return self.get_rfa_response()
        return None
| {"/mtvc_client/libs/django/utils.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/middleware.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/views.py": ["/mtvc_client/client.py", "/mtvc_client/libs/django/forms.py", "/mtvc_client/libs/django/decorators.py"]} |
59,840 | praekelt/mtvc-api-client | refs/heads/master | /mtvc_client/libs/django/forms.py | import datetime
import json
from django import forms
from django.utils.functional import lazy
from django.core.exceptions import ValidationError
from django.forms.widgets import RadioFieldRenderer, mark_safe, \
RadioChoiceInput, conditional_escape
from django.utils.encoding import force_text
from mtvc_client.libs.django import utils
def validate_year_of_birth(value):
    """Reject years in the future or more than 100 years in the past."""
    current_year = datetime.date.today().year
    if not (current_year - 100) <= value <= current_year:
        raise ValidationError('Not a valid year: %s' % value)

def validate_accepted_tc(value):
    """Require the terms-and-conditions box to be ticked."""
    if not value:
        raise ValidationError('Terms and Conditions must be accepted')
class RadioInputNoLabel(RadioChoiceInput):
    """
    An object used by RadioFieldBreakRenderer that represents a single
    <input type='radio'>, but without the label that Django
    automatically prefixes during rendering
    """
    def __unicode__(self):
        # Python 2 rendering hook: emit the bare <input> tag followed by
        # the escaped choice text, with no wrapping <label> element.
        choice_label = conditional_escape(force_text(self.choice_label))
        return mark_safe(u'%s %s' % (self.tag(), choice_label))
class RadioFieldParagraphRenderer(RadioFieldRenderer):
    """
    Overrides rendering of the object used by RadioSelect to wrap
    choices in html paragraph-tags (<p></p>) instead of the lu-li option
    that Django offers by default
    """
    def __iter__(self):
        # Yield each choice as a label-free radio input.
        for i, choice in enumerate(self.choices):
            yield RadioInputNoLabel(
                self.name, self.value, self.attrs.copy(), choice, i)
    def __getitem__(self, idx):
        choice = self.choices[idx]  # let IndexErrors propagate
        return RadioInputNoLabel(
            self.name, self.value, self.attrs.copy(), choice, idx)
    def render(self):
        """
        Outputs radio fields wrapped in p-tags.
        """
        return mark_safe(u'\n'.join([
            u'<p>%s</p>' % force_text(w) for w in self]))
class JSONDataForm(forms.Form):
    """Base form whose validated data can be serialised to a JSON string."""
    def get_json_data(self):
        # Serialise the cleaned (validated) form data for POSTing to the API.
        return json.dumps(self.cleaned_data)
class ProfileForm(JSONDataForm):
    """
    Subscriber profile form.  Choice lists are built lazily from the
    remote profile/transaction schemas so the API is not hit at import
    time.
    """
    # Optional package selection, rendered as <p>-wrapped radio buttons.
    product = forms.ChoiceField(
        label='Package',
        choices=lazy(utils.get_product_choices, list)(),
        initial='',
        required=False,
        widget=forms.RadioSelect(renderer=RadioFieldParagraphRenderer))
    # Hidden flag carried through the form round-trip.
    is_trial = forms.BooleanField(widget=forms.HiddenInput(), required=False)
    gender = forms.ChoiceField(
        label='Gender',
        choices=lazy(utils.get_gender_choices, list)(),
        initial='')
    # NOTE(review): the choices stop at last year (range excludes the
    # current year) although validate_year_of_birth accepts the current
    # year -- confirm whether that is intentional.
    year_of_birth = forms.TypedChoiceField(
        label='Year of birth',
        coerce=int,
        validators=[validate_year_of_birth],
        choices=[('', '---------')] + [(i, i) for i in reversed(range(
            datetime.date.today().year - 100, datetime.date.today().year))])
    region = forms.ChoiceField(
        label='Region',
        choices=lazy(utils.get_region_choices, list)(),
        initial='')
    dstv_at_home = forms.ChoiceField(
        help_text='Do you have DSTV at home?',
        label='Do you have DSTV at home?',
        choices=(('', '---------'), (True, 'Yes'), (False, 'No')),
        initial='')
    accepted_tc = forms.BooleanField(
        label='Accept terms & conditions',
        validators=[validate_accepted_tc])
class ProductForm(JSONDataForm):
    """Stand-alone package-selection form (required, unlike ProfileForm's)."""
    product = forms.ChoiceField(
        label='Package',
        choices=lazy(utils.get_product_choices, list)(),
        initial='',
        widget=forms.RadioSelect(renderer=RadioFieldParagraphRenderer))
| {"/mtvc_client/libs/django/utils.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/middleware.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/views.py": ["/mtvc_client/client.py", "/mtvc_client/libs/django/forms.py", "/mtvc_client/libs/django/decorators.py"]} |
59,841 | praekelt/mtvc-api-client | refs/heads/master | /mtvc_client/libs/django/views.py | from django.conf import settings
from django.http import Http404
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from django.utils.decorators import method_decorator
from mtvc_client.client import APIClient
from mtvc_client.libs.django import utils
from mtvc_client.libs.django.forms import ProfileForm, ProductForm
from mtvc_client.libs.django.decorators import view_exception_handler
class TemplateViewBase(TemplateView):
    """Base template view; every request runs through view_exception_handler."""
    @method_decorator(view_exception_handler)
    def dispatch(self, *args, **kwargs):
        return super(TemplateViewBase, self).dispatch(*args, **kwargs)
class ChannelsView(TemplateViewBase):
    """Paginated listing of TV channels."""
    template_name = 'channels.html'

    def get_context_data(self, **kwargs):
        context = super(ChannelsView, self).get_context_data(**kwargs)
        page_number = utils.get_request_page_number(self.request)
        response = utils.get_channel_list(page_number)
        meta = response['meta']
        context['meta'] = meta
        context['paginator'] = utils.get_response_paginator(
            self.request, meta)
        context['object_list'] = response['objects']
        return context
class ShowsView(TemplateViewBase):
    """Listing of clip shows."""
    template_name = 'shows.html'

    def get_context_data(self, **kwargs):
        results = super(ShowsView, self).get_context_data(**kwargs)
        # Bug fix: the utils module has no ``get_show_list`` helper, so
        # the previous ``utils.get_show_list(page)`` call raised
        # AttributeError on every request.  Use the existing
        # ``get_shows`` helper instead (it takes no page argument; the
        # API's default paging applies and the paginator is driven by
        # the response meta-data).
        shows = utils.get_shows()
        results['meta'] = shows['meta']
        results['paginator'] = utils.get_response_paginator(
            self.request, shows['meta'])
        results['object_list'] = shows['objects']
        return results
class ClipsView(TemplateViewBase):
    """Paginated listing of all clips."""
    template_name = 'clips.html'
    def get_context_data(self, **kwargs):
        results = super(ClipsView, self).get_context_data(**kwargs)
        page = utils.get_request_page_number(self.request)
        clips = utils.get_clips_list(page)
        results['meta'] = clips['meta']
        results['paginator'] = utils.get_response_paginator(
            self.request, clips['meta'])
        results['object_list'] = clips['objects']
        return results
class ClipsFeaturedView(TemplateViewBase):
    """Paginated listing of featured clips (shares the clips template)."""
    template_name = 'clips.html'
    def get_context_data(self, **kwargs):
        results = super(ClipsFeaturedView, self).get_context_data(**kwargs)
        page = utils.get_request_page_number(self.request)
        clips = utils.get_featured_clips(page)
        results['meta'] = clips['meta']
        results['paginator'] = utils.get_response_paginator(
            self.request, clips['meta'])
        results['object_list'] = clips['objects']
        return results
class ClipsPopularView(TemplateViewBase):
    """Paginated listing of the most-streamed clips."""
    template_name = 'clips.html'
    def get_context_data(self, **kwargs):
        results = super(ClipsPopularView, self).get_context_data(**kwargs)
        page = utils.get_request_page_number(self.request)
        clips = utils.get_popular_clips(page)
        results['meta'] = clips['meta']
        results['paginator'] = utils.get_response_paginator(
            self.request, clips['meta'])
        results['object_list'] = clips['objects']
        return results
class ClipsByChannelView(TemplateViewBase):
    """Paginated clip listing for one show channel; 404s on unknown slug."""
    template_name = 'clips_list.xml'
    def get_context_data(self, **kwargs):
        show_channel = utils.get_showchannel(kwargs['slug'])
        if not show_channel:
            # unknown channel slug -> 404 before any clip lookup
            raise Http404
        results = super(ClipsByChannelView, self).get_context_data(**kwargs)
        page = utils.get_request_page_number(self.request)
        clips = utils.get_clips_by_channel(kwargs['slug'], page)
        results['meta'] = clips['meta']
        results['paginator'] = utils.get_response_paginator(
            self.request, clips['meta'])
        results['object_list'] = clips['objects']
        results['show_channel'] = show_channel
        return results
class ClipDetailView(TemplateViewBase):
    """Detail page for a single clip; 404s when the slug matches nothing."""
    template_name = 'clip_detail.xml'
    def get_context_data(self, **kwargs):
        clip = utils.get_clip_detail(slug=kwargs['slug'])
        if not clip:
            raise Http404
        results = super(ClipDetailView, self).get_context_data(**kwargs)
        results['object'] = clip
        return results
class EPGView(TemplateViewBase):
    """Electronic programme guide for a single channel."""
    template_name = 'epg.html'
    def get_context_data(self, **kwargs):
        results = super(EPGView, self).get_context_data(**kwargs)
        results['object'] = utils.get_channel_epgs(self.kwargs['slug'])
        return results
class WatchView(TemplateViewBase):
    """Resolve a stream URL for a channel or clip and render the player."""
    template_name = 'watch.html'
    def get_context_data(self, **kwargs):
        kwargs = super(WatchView, self).get_context_data(**kwargs)
        # Subscriber identity (MSISDN/IP/UA) is forwarded so the API can
        # authorise the stream request.
        kwargs['object'] = APIClient(**settings.API_CLIENT).get_stream_url(
            self.kwargs['content_type'], self.kwargs['slug'],
            user_agent=utils.get_request_user_agent(self.request),
            msisdn=utils.get_request_msisdn(self.request),
            client_ip=utils.get_request_ip(self.request))
        return kwargs
class HelpView(TemplateViewBase):
    """Static help page."""
    template_name = 'help.html'
class HandsetNotSupportedView(TemplateViewBase):
    """Static page returned with HTTP 412 for unsupported handsets."""
    template_name = 'handset_not_supported.html'
    def render_to_response(self, context, **response_kwargs):
        # 412 Precondition Failed signals the handset check rejected
        # this device.
        response_kwargs['status'] = 412
        return super(HandsetNotSupportedView, self).render_to_response(
            context, **response_kwargs)
class ProfileView(FormView):
    """Collect and POST a subscriber profile to the API."""
    # NOTE(review): unlike the TemplateViewBase views, the FormView-based
    # views do not wrap dispatch in view_exception_handler -- an
    # APIClientException raised here is not converted; confirm intended.
    template_name = 'profile_form.html'
    form_class = ProfileForm
    success_url = '/'
    def form_valid(self, form):
        APIClient(**settings.API_CLIENT).post_profile(
            msisdn=utils.get_request_msisdn(self.request),
            client_ip=utils.get_request_ip(self.request),
            data=form.get_json_data())
        return super(ProfileView, self).form_valid(form)
class ProductView(FormView):
    """Collect a package choice and POST a subscriber transaction."""
    template_name = 'product_form.html'
    form_class = ProductForm
    success_url = '/'
    def form_valid(self, form):
        APIClient(**settings.API_CLIENT).post_transaction(
            user_agent=utils.get_request_user_agent(self.request),
            msisdn=utils.get_request_msisdn(self.request),
            client_ip=utils.get_request_ip(self.request),
            data=form.get_json_data())
        return super(ProductView, self).form_valid(form)
class AccountView(TemplateViewBase):
    """Display the subscriber's account information."""
    template_name = 'account.html'
    def get_context_data(self, **kwargs):
        kwargs = super(AccountView, self).get_context_data(**kwargs)
        kwargs['object'] = APIClient(**settings.API_CLIENT).get_account_info(
            msisdn=utils.get_request_msisdn(self.request),
            client_ip=utils.get_request_ip(self.request))
        return kwargs
| {"/mtvc_client/libs/django/utils.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/middleware.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/views.py": ["/mtvc_client/client.py", "/mtvc_client/libs/django/forms.py", "/mtvc_client/libs/django/decorators.py"]} |
59,842 | praekelt/mtvc-api-client | refs/heads/master | /mtvc_client/tests/test_client.py | import unittest
import threading
import SocketServer
import BaseHTTPServer
from nose.tools import assert_raises
from mtvc_client.client import APIClient, APIClientException
# Address the stub API server listens on for the duration of the tests.
HOST_NAME = 'localhost'
PORT_NUMBER = 9999
# Canned MTVC error body served by the stub: mirrors the validation
# payload the real API returns when a subscriber profile already exists.
RESP_SUBS_VALIDATION_ERROR = """{
    "subscriberprofile": {
        "subscriber": [
            "Subscriber profile with this Subscriber already exists."
        ]
    },
    "error_message": "Validation error",
    "error_code": "ERROR_VALIDATION"
}"""
class TestHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
    # Threaded server so serve_forever() can run on a daemon thread
    # while the test thread issues client requests against it.
    pass
class TestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Request handler serving canned API error responses for the tests."""
    def do_GET(self):
        # Idiom fix: the receiver was named ``s``; renamed to the
        # conventional ``self`` (purely internal, no behaviour change).
        # Every request gets a 400; the play endpoint also gets the
        # canned validation-error JSON body.
        self.send_response(400)
        self.send_header("Content-type", "text/json")
        self.end_headers()
        if self.path.lower().startswith('/api/v1/channel/1/play/'):
            self.wfile.write(RESP_SUBS_VALIDATION_ERROR)
    def log_message(self, *args, **kwargs):
        # silencio! -- suppress per-request logging noise during tests
        pass
class TestClient(unittest.TestCase):
    """Exercises APIClient error handling against the local stub server."""
    # NOTE: this module is Python 2 (``except X, e`` syntax below).
    @classmethod
    def setUpClass(cls):
        # Start the stub server once for the whole TestCase on a daemon
        # thread so it never blocks interpreter shutdown.
        cls.server = TestHTTPServer((HOST_NAME, PORT_NUMBER), TestHandler)
        server_thread = threading.Thread(target=cls.server.serve_forever)
        server_thread.daemon = True
        server_thread.start()
        cls.client = APIClient(
            offering_id='test',
            host='localhost',
            username='test',
            key='test',
            port=9999)
    @classmethod
    def tearDownClass(cls):
        cls.server.shutdown()
    def test_01_subscriber_exists_error(self):
        # The stub answers 400 with a validation payload; the client must
        # surface it as an APIClientException whose extra response fields
        # (subscriberprofile) become attributes on the exception.
        assert_raises(
            APIClientException,
            self.client.get_stream_url,
            content_type='channel',
            content_id=1,
            user_agent='test',
            msisdn='test',
            client_ip='127.0.0.1',
        )
        try:
            self.client.get_stream_url(
                content_type='channel',
                content_id=1,
                user_agent='test',
                msisdn='test',
                client_ip='127.0.0.1',
            )
        except APIClientException, e:
            assert hasattr(e, 'subscriberprofile')
            assert 'subscriber' in e.subscriberprofile
            assert len(e.subscriberprofile['subscriber']) == 1
            assert e.subscriberprofile['subscriber'][0] == \
                'Subscriber profile with this Subscriber already exists.'
| {"/mtvc_client/libs/django/utils.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/middleware.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/views.py": ["/mtvc_client/client.py", "/mtvc_client/libs/django/forms.py", "/mtvc_client/libs/django/decorators.py"]} |
59,843 | praekelt/mtvc-api-client | refs/heads/master | /mtvc_client/client.py | import logging
import hammock
from requests.auth import AuthBase
logger = logging.getLogger(__name__)
class APIClientException(Exception):
    """
    Carries the MTVC error code and message -- plus any extra payload
    fields from the server's error body -- back to the caller.
    """
    def __init__(self, error_code=None, error_message=None, **kwargs):
        self.error_code = error_code
        self.error_message = error_message
        # Expose any additional response fields (e.g. per-field
        # validation errors) as attributes on the exception instance.
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __str__(self):
        return '[%(error_code)s] %(error_message)s' % self.__dict__
class APIClientAuthentication(AuthBase):
    """
    Attaches Tastypie-style HTTP ApiKey Authentication to the given
    Request object.
    """
    def __init__(self, username, key):
        self.username = username
        self.key = key
    def __call__(self, r):
        # requests auth hook: add the ApiKey header to the outgoing request.
        r.headers['Authorization'] = 'ApiKey %s:%s' % (self.username, self.key)
        return r
class APIClient(object):
    """
    Thin HTTP client for the MTVC REST API.

    All ``get_*``/``post_*`` helpers return the decoded JSON body as a
    dict and raise :class:`APIClientException` on any non-2xx response.
    List endpoints are implicitly filtered to this client's offering.
    """
    def __init__(self, offering_id, host, username, key, port=80,
                 version='v1'):
        self.api = hammock.Hammock(
            'http://%s:%s/api/%s' % (host, port, version),
            auth=APIClientAuthentication(username, key),
            append_slash=True)
        self.offering_id = offering_id
    def from_json_response(self, response):
        """
        Decode *response* as JSON.

        Non-2xx responses are logged and raised as APIClientException,
        carrying the server's structured error payload when one was
        supplied.  A 2xx response without a JSON body yields {}.
        """
        if response.status_code < 200 or response.status_code >= 300:
            error_context = {
                'status_code': response.status_code,
                'status_reason': response.reason,
                'error_code': response.status_code,
                'error_message': response.reason,
                'content': response.content,
            }
            try:
                # prefer the server's structured error fields when present
                error_context.update(response.json())
            except ValueError:
                pass
            logger.error('MTVC Server error %s: %s' % (
                response.status_code, error_context))
            raise APIClientException(**error_context)
        try:
            return response.json()
        except ValueError:
            # the server did not return JSON, so just return {}
            return {}
    def get_countries(self):
        """Return the country list."""
        return self.from_json_response(self.api.country.GET())
    def get_channels(self, **kwargs):
        """Return this offering's channels; kwargs add extra filters."""
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.channel.GET(params=params))
    def get_shows(self, **kwargs):
        """Return this offering's clip shows; kwargs add extra filters."""
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.show.GET(params=params))
    def get_showchannels(self, **kwargs):
        """Return this offering's show channels; kwargs add extra filters."""
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.showchannel.GET(params=params))
    def get_clips(self, **kwargs):
        """Return this offering's clips; kwargs add extra filters."""
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.clip.GET(params=params))
    def get_clip(self, clip_id, **kwargs):
        """Return a single clip by id."""
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.clip(clip_id).GET(params=params))
    def get_epg(self, channel_id, **kwargs):
        """Return the EPG for one channel (defaults to one day)."""
        params = {'days': 1}
        params.update(kwargs)
        return self.from_json_response(
            self.api.channel(channel_id).GET(params=params))
    def get_banners(self, **kwargs):
        """Return this offering's banners; kwargs add extra filters."""
        params = {'offering__slug': self.offering_id}
        params.update(kwargs)
        return self.from_json_response(
            self.api.banner.GET(params=params))
    def get_stream_url(
            self, content_type, content_id, user_agent, msisdn, client_ip):
        """Resolve the playback URL for a channel or clip."""
        return self.from_json_response(
            self.api(content_type)(content_id).play.GET(
                params={'offering__slug': self.offering_id},
                headers={
                    'User-Agent': user_agent,
                    'X-MSISDN': msisdn,
                    'X-FORWARDED-FOR': client_ip,
                }))
    def get_account_info(self, msisdn, client_ip):
        """Fetch the subscriber's account details."""
        # Fix: forward the subscriber headers like every other
        # subscriber-scoped call does -- previously ``client_ip`` was
        # accepted but silently ignored.
        return self.from_json_response(self.api.subscriber(msisdn).GET(
            headers={
                'X-MSISDN': msisdn,
                'X-FORWARDED-FOR': client_ip,
            }))
    def get_profile_schema(self):
        """Return the subscriber-profile form schema."""
        return self.from_json_response(self.api.subscriberprofile.schema.GET(
            params={'offering__slug': self.offering_id}))
    def post_profile(self, msisdn, client_ip, data):
        """POST a JSON-encoded subscriber profile."""
        return self.from_json_response(self.api.subscriberprofile.POST(
            headers={
                'X-MSISDN': msisdn,
                'X-FORWARDED-FOR': client_ip,
                'Content-Type': 'application/json'},
            params={'offering__slug': self.offering_id},
            data=data))
    def get_transaction_schema(self):
        """Return the subscriber-transaction form schema."""
        return self.from_json_response(
            self.api.subscribertransaction.schema.GET(
                params={'offering__slug': self.offering_id}))
    def post_transaction(self, user_agent, msisdn, client_ip, data):
        """POST a JSON-encoded subscriber transaction."""
        return self.from_json_response(self.api.subscribertransaction.POST(
            headers={
                'User-Agent': user_agent,
                'X-MSISDN': msisdn,
                'X-FORWARDED-FOR': client_ip,
                'Content-Type': 'application/json'},
            params={'offering__slug': self.offering_id},
            data=data))
| {"/mtvc_client/libs/django/utils.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/middleware.py": ["/mtvc_client/client.py"], "/mtvc_client/libs/django/views.py": ["/mtvc_client/client.py", "/mtvc_client/libs/django/forms.py", "/mtvc_client/libs/django/decorators.py"]} |
59,860 | ZihengZZH/christmas-hat | refs/heads/master | /camera.py | # camera.py
import math
import numpy as np
import cv2
import dlib
w_h_ratio = 0.6
hat_file = "./hats/02.png"
landmarks_file = "./database/shape_predictor_68_face_landmarks.dat"
class VideoCamera(object):
    """
    Webcam/photo pipeline: detects faces with dlib, estimates head pose
    from the 68 facial landmarks, and composites a transparent
    christmas-hat PNG above each detected face.
    """
    # Constructor
    def __init__(self, visual=False):
        # NOTE(review): index -1 ("any camera") here vs. index 0 in
        # open_camera() below -- confirm both refer to the same device.
        self.video = cv2.VideoCapture(-1)
        # loading dlib's Hog Based face detector
        self.face_detector = dlib.get_frontal_face_detector()
        # loading dlib's 68 points-shape-predictor
        self.landmark_predictor = dlib.shape_predictor(landmarks_file)
        # hat image loaded with its alpha channel (IMREAD_UNCHANGED)
        self.hat = cv2.imread(hat_file, cv2.IMREAD_UNCHANGED)
        # when True, draw debug landmarks/lines onto the output frames
        self.visual = visual
        # pose state, refreshed per face by get_pose()
        self.angle_radian = 0
        self.length = 0
        self.position = None
    def open_camera(self):
        self.video.open(0)
    # Using OpenCV to open web-camera
    # Notice the index of camera
    def close_camera(self):
        self.video.release()
    # Function for creating landmark coordinate list
    def land2coords(self, landmarks, dtype="int"):
        # Convert dlib's 68-point landmark object into a (68, 2) array
        # of (x, y) pixel coordinates.
        coords = np.zeros((68,2), dtype=dtype)
        for i in range(0, 68):
            coords[i] = (landmarks.part(i).x, landmarks.part(i).y)
        return coords
    # Function for overlaying the hat
    def transparent_overlay(self, src, overlay, pos=(0, 0), scale=1):
        """
        :param src: input color background image
        :param overlay: transparent image (BGRA)
        :param pos: position where the image to be built
        :param scale: scale factor of transparent image
        :return: resultant image
        """
        # NOTE(review): this alpha-blends pixel-by-pixel in a Python
        # loop (O(h*w) per call) -- a known per-frame hot spot.
        height, width = src.shape[:2]
        overlay = cv2.resize(overlay, (width, height), interpolation=cv2.INTER_CUBIC)
        h, w, _ = overlay.shape  # size of foreground
        rows, cols, _ = src.shape  # size of background
        y, x = pos[0], pos[1]  # position of overlay image
        # loop over all pixels and apply the blending equation
        for i in range(h):
            for j in range(w):
                if x+i >= rows or y+j >= cols:
                    continue
                alpha = float(overlay[i][j][3]/255.0)  # read the alpha channel
                src[x+i][y+j] = alpha*overlay[i][j][:3]+(1-alpha)*src[x+i][y+j]
        return src
    # Function for rotating the image
    def rotate_about_center(self, src, rangle, scale=1.):
        """
        :param src: input color image
        :param rangle: input angle in radian
        :param scale: scale factor for rotation image
        :return: (rotated image, new width, new height)
        """
        w = src.shape[1]
        h = src.shape[0]
        angle = np.rad2deg(rangle)
        # calculate new image width and height
        nw = (abs(np.sin(rangle)*h) + abs(np.cos(rangle)*w)) * scale
        nh = (abs(np.cos(rangle)*h) + abs(np.sin(rangle)*w)) * scale
        # get the rotation matrix
        rot_mat = cv2.getRotationMatrix2D((nw*0.5, nh*0.5), angle, scale)
        # calculate the move from old center to new center
        rot_move = np.dot(rot_mat, np.array([(nw-w)*0.5, (nh-h)*0.5, 0]))
        rot_mat[0, 2] += rot_move[0]
        rot_mat[1, 2] += rot_move[1]
        return cv2.warpAffine(src, rot_mat, (int(math.ceil(nw)), int(math.ceil(nh)))), int(nw), int(nh)
    # Main function for video stream
    # Function for getting frame and returning to flask
    def get_frame(self):
        """Grab one frame, hat every detected face, return JPEG bytes."""
        success, image = self.video.read()
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # reload the hat each frame because get_pose() resizes self.hat
        # in place
        self.hat = cv2.imread(hat_file, cv2.IMREAD_UNCHANGED)
        # detect faces
        face_boundaries = self.face_detector(image_gray, 0)
        for (enum, face) in enumerate(face_boundaries):
            x = face.left()
            y = face.top()
            w = face.right() - x
            h = face.bottom() - y
            # predict and draw landmarks
            landmarks = self.landmark_predictor(image_gray, face)
            # convert co-ordinates to NumPy array
            landmarks = self.land2coords(landmarks)
            if self.visual:
                for (a, b) in landmarks:
                    cv2.circle(image, (a, b), 2, (0, 255, 0), -1)
            self.get_pose(image, landmarks)
            hat_rotate, nw, nh = self.rotate_about_center(self.hat, self.angle_radian)
            image = self.add_hat(image, hat_rotate, nw, nh)
        ret, jpg = cv2.imencode('.jpg', image)
        return jpg.tobytes()
    # Main function for single picture
    # Function for saving modified picture
    def get_pic(self, filename):
        """Hat every face in ./uploads/<filename>, save as hat_<filename>."""
        img = cv2.imread('./uploads/'+filename)
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        self.hat = cv2.imread(hat_file, cv2.IMREAD_UNCHANGED)
        # detect faces
        face_boundaries = self.face_detector(img_gray, 0)
        for (enum, face) in enumerate(face_boundaries):
            x = face.left()
            y = face.top()
            w = face.right() - x
            h = face.bottom() - y
            # predict and draw landmarks
            landmarks = self.landmark_predictor(img_gray, face)
            # convert co-ordinates to NumPy array
            landmarks = self.land2coords(landmarks)
            if self.visual:
                for (a, b) in landmarks:
                    cv2.circle(img, (a, b), 2, (0, 255, 0), -1)
            self.get_pose(img, landmarks)
            hat_rotate, nw, nh = self.rotate_about_center(self.hat, self.angle_radian)
            img = self.add_hat(img, hat_rotate, nw, nh)
        cv2.imwrite('./uploads/hat_' + filename, img)
        return
    def get_singal(self, filename):
        # NOTE(review): name looks like a typo ("get_signal"/"get_single");
        # kept as-is because callers may depend on it.  Re-encodes the
        # image at *filename* as JPEG bytes.
        image = cv2.imread(filename)
        ret, jpg = cv2.imencode('.jpg', image)
        return jpg.tobytes()
    # Function for getting head pose / position for hat, and resizing the hat with length
    def get_pose(self, frame, landmarks):
        """Update angle_radian/length/position from landmarks; resize hat."""
        # 18th and 25th points
        (a1, b1) = landmarks[18]
        (a2, b2) = landmarks[25]
        (a3, b3) = landmarks[38]
        (a4, b4) = landmarks[43]
        # 27th, 28th, 29th, 30th points
        (x1, y1) = landmarks[27]
        (x2, y2) = landmarks[28]
        (x3, y3) = landmarks[29]
        (x4, y4) = landmarks[30]
        h, w = self.hat.shape[:2]
        # due to blanks in hat image, hat rectangle may change a little bit
        if self.visual:
            cv2.line(frame, (a1, b1), (a2, b2), (255, 0, 0), 2)
            # the blue line is where a hat will be placed
            cv2.line(frame, (x1, y1), (x2, y2), (255, 0, 0), 2)
            cv2.line(frame, (x2, y2), (x3, y3), (255, 0, 0), 2)
            cv2.line(frame, (x3, y3), (x4, y4), (255, 0, 0), 2)
            cv2.circle(frame, (a1, b1), 4, (0, 0, 255))
        # NOTE(review): when y4 == y1 this divides by zero -- confirm the
        # nose-bridge landmarks can never be horizontal in practice.
        self.angle_radian = np.arctan((x4-x1)/(y4-y1))  # NOTE THE ORIENTATION
        self.length = math.sqrt(pow(a2-a1, 2)+pow(b2-b1, 2))
        self.position = (a1, b1)
        self.hat = cv2.resize(self.hat, (int(self.length), int(self.length*h/w)))
        return
    # Function for adding christmas hat
    def add_hat(self, frame, hat, nw, nh):
        """Composite the (rotated) hat above self.position on the frame."""
        h, w = frame.shape[:2]
        (x, y) = self.position
        x_left = int(x-nw*0.5*w_h_ratio)
        x_right = int(x+nw*(1+1.5*w_h_ratio))
        y_bottom = int(y-nh*w_h_ratio)
        y_top = int(y-nh*(1+3*w_h_ratio))
        if self.visual:
            cv2.rectangle(frame, (x_left, y_top), (x_right, y_bottom), (255, 0, 0), 3)
        # skip compositing when the hat rectangle falls outside the frame
        if y_top > 0 and x_left > 0 and y_bottom < h and x_right < w:
            result = self.transparent_overlay(frame[y_top:y_bottom, x_left:x_right], hat, (0, 0), 1)
            frame[y_top:y_bottom, x_left:x_right] = result
        return frame
| {"/main.py": ["/camera.py"]} |
59,861 | ZihengZZH/christmas-hat | refs/heads/master | /main.py | # main.py
import os
from flask import Flask, render_template, Response, request, url_for, send_from_directory
from werkzeug.utils import secure_filename
from camera import VideoCamera
# Static HTML shell served around upload results (read once at import time)
htmlfile = open('./templates/index.html', 'r')
html = htmlfile.read()
# NOTE(review): htmlfile is never closed; a `with` block would be safer
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = './uploads'
# single shared camera instance used by the streaming endpoints below
video_camera = VideoCamera()
def allowed_file(filename):
    """Return True if *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive, so 'photo.JPG' is accepted just
    like 'photo.jpg' (the original compared case-sensitively and rejected
    upper-case extensions). A name without a dot is rejected.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/uploads/<filename>')
def uploaded_file(filename):
    """Serve a previously uploaded file from the upload folder."""
    upload_dir = app.config['UPLOAD_FOLDER']
    return send_from_directory(upload_dir, filename)
@app.route('/upload_file', methods=['GET', 'POST'])
def upload_file():
    """Handle an image upload: save it, render the hat onto it, and
    return the index page with the processed image appended.

    A GET request, a missing file field, or a disallowed extension all
    fall through to returning the plain index page.
    """
    if request.method == 'POST':
        # request.files['file'] raises KeyError (HTTP 400) when the field
        # is absent; .get() degrades gracefully to the plain page instead
        upload = request.files.get('file')
        if upload and allowed_file(upload.filename):
            filename = secure_filename(upload.filename)
            upload.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            # run face detection / hat overlay on the saved image
            pic = VideoCamera()
            pic.get_pic(filename)
            file_url = url_for('uploaded_file', filename='hat_' + filename)
            # quote the src attribute so URLs containing special
            # characters render correctly (this was the marked bug)
            return html + '<br><img src="' + file_url + '">'
    return html
@app.route('/')
def index():
    """Render the landing page."""
    page = render_template('index.html')
    return page
def gen(camera):
    """Yield an endless stream of multipart MJPEG chunks from *camera*.

    Each chunk is the boundary marker, a Content-type header, the raw
    JPEG bytes from camera.get_frame(), and a trailing blank line.
    """
    chunk_header = b'--frame\r\n' b'Content-type: image/jpg\r\n\r\n'
    chunk_trailer = b'\r\n\r\n'
    while True:
        yield chunk_header + camera.get_frame() + chunk_trailer
@app.route('/video_feed')
def video_feed():
    """Open the shared camera and stream live frames as multipart MJPEG."""
    video_camera.open_camera()
    return Response(gen(video_camera),
        mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/video_close')
def video_close():
    """Close the shared camera and return a static placeholder frame."""
    video_camera.close_camera()
    # JPEG-encoded bytes of the placeholder image
    frame = video_camera.get_singal('./templates/signal.png')
    return Response(frame, mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    # app.run(host='0,0,0,0', debug=True)
    # NOTE(review): the commented-out host above should be '0.0.0.0'
    # (dots, not commas) if external access is ever re-enabled
    app.run()
| {"/main.py": ["/camera.py"]} |
59,864 | cxrodgers/STRF | refs/heads/master | /__init__.py | import io
from base import * | {"/__init__.py": ["/io.py", "/base.py"]} |
59,865 | cxrodgers/STRF | refs/heads/master | /io.py | """STRFlab-type format spec
Each stimulus is labeled with a string suffix, such as '0001'.
Each stimulus has 3 corresponding files:
Stimulus files, eg stim0001.wav
* A wav file
Spike files, eg spike0001
* Plain-text
* Each line of the file is the spike times, in seconds, from a single
trial. Each spike time is separated with a space.
* Each trial can start at time 0.0, or not. The presence of an
interval file indicates which of these is the case.
* The number of newline characters is exactly equal to the number of
trials.
Interval files, eg interval0001
* Plain text
* Each line of the file is the start time of the trial, then a space,
then the stop time of the trial, in seconds.
* Each trial can start at time 0.0, or not.
* The number of newline characters is exactly equal to the number of
trials.
"""
import os, re
import numpy as np
import kkpandas # could consider making this optional
import scipy.io
def read_timefreq_from_matfile(filename):
    """Returns (Pxx, freqs, t) for a given matfile

    If Pxx cannot be loaded, raises error.
    If freqs or t cannot be loaded, returns None for those.
    """
    try:
        contents = scipy.io.loadmat(filename)
    except IOError:
        raise IOError("Cannot load matfile %s" % filename)
    # Pxx is mandatory; freqs and t are optional extras
    if 'Pxx' not in contents:
        raise IOError("Pxx not in matfile %s" % filename)
    Pxx = contents['Pxx']
    t = contents['t'].flatten() if 't' in contents else None
    freqs = contents['freqs'].flatten() if 'freqs' in contents else None
    return Pxx, freqs, t
def load_waveform_from_matfile(filename, fs=None):
    """Load 'waveform' from matlab file. If possible, loads sampling rate.

    If fs is None, look for a variable called 'fs' in the matfile.
    If 'fs' is not in the matfile, returns whatever you pass as fs.
    """
    contents = scipy.io.loadmat(filename)
    waveform = contents['waveform'].flatten()
    # only consult the matfile when the caller did not supply a rate
    if fs is None and 'fs' in contents:
        fs = contents['fs']
    return waveform, fs
class STRFlabFileSchema:
    """Object encapsulating format spec for STRFlab-type files"""
    # Filename prefixes for each file type
    stim_file_prefix = 'stim'
    interval_file_prefix = 'interval'
    spike_file_prefix = 'spike'
    timefreq_file_prefix = 'timefreq'
    # Regexes whose first group captures the stimulus label, eg '0001'
    stim_file_regex = r'stim(\d+)\.wav'
    spike_file_regex = r'^spike(\d+)$'
    interval_file_regex = r'^interval(\d+)$'
    timefreq_file_regex = r'^timefreq(\d+)\.mat$'
    def __init__(self, directory):
        """Scan `directory` for spike/interval files.

        timefreq files may live elsewhere: set self.timefreq_path and
        call populate() again to pick them up.
        """
        self.directory = os.path.abspath(directory)
        self.name = os.path.split(self.directory)[1]
        self.timefreq_path = None
        self.spike_path = self.directory
        self.populate()
    def populate(self):
        """(Re)scan the directories and cache matching labels/filenames."""
        all_files = os.listdir(self.directory)
        # Find files matching regexes
        self.spike_file_labels = apply_and_filter_by_regex(
            self.spike_file_regex, all_files, sort=True)
        self.interval_file_labels = apply_and_filter_by_regex(
            self.interval_file_regex, all_files, sort=True)
        # Handle timefreq slightly differently
        # Make this the generic loading syntax for all files
        try:
            timefreq_files = os.listdir(self.timefreq_path)
            do_load = True
        except (IOError, TypeError):
            # TypeError covers timefreq_path=None; IOError a missing dir
            do_load = False
        if do_load:
            self.timefreq_file_labels = apply_and_filter_by_regex(
                self.timefreq_file_regex, timefreq_files, sort=True)
            # NOTE(review): joined name has no '.mat' extension; this
            # relies on scipy.io.loadmat appending it -- confirm
            self.timefreq_filenames = [os.path.join(self.timefreq_path,
                self.timefreq_file_prefix + label)
                for label in self.timefreq_file_labels]
        # Reconstruct the filenames that match
        # NOTE(review): these are bare basenames, not full paths
        self.spike_filenames = [self.spike_file_prefix + label
            for label in self.spike_file_labels]
        self.interval_filenames = [self.interval_file_prefix + label
            for label in self.interval_file_labels]
        self._force_reload = False
    @property
    def timefreq_filename(self):
        # Map from stimulus label to timefreq path; only valid after a
        # populate() during which timefreq_path was readable
        return dict([(label, filename) for label, filename in
            zip(self.timefreq_file_labels, self.timefreq_filenames)])
def apply_and_filter_by_regex(pattern, list_of_strings, sort=True):
    """Apply regex pattern to each string, return first hit from each match"""
    matches = (re.match(pattern, candidate) for candidate in list_of_strings)
    hits = [m.groups()[0] for m in matches if m is not None]
    return sorted(hits) if sort else hits
def parse_space_sep(s, dtype=int):
    """Returns a list of `dtype` values from a space-separated string.

    s : string of whitespace-separated tokens; surrounding whitespace is
        ignored, and an empty or all-blank string yields [].
    dtype : callable applied to each token (default: int).
        The previous default, np.int, was merely an alias for the builtin
        int and has been removed from numpy >= 1.24, which made importing
        this module crash; builtin int is behavior-identical.
    """
    stripped = s.strip()
    if stripped == '':
        return []
    return [dtype(token) for token in stripped.split()]
def read_directory(directory, **folding_kwargs):
    """Return dict {suffix : folded} for all stimuli in the directory"""
    schema = STRFlabFileSchema(directory)
    # every spike file must have a matching interval file
    assert np.all(
        np.asarray(schema.spike_file_labels) ==
        np.asarray(schema.interval_file_labels))
    return dict(
        (suffix, read_single_stimulus(directory, suffix, **folding_kwargs))
        for suffix in schema.spike_file_labels)
def read_single_stimulus(directory, suffix, **folding_kwargs):
    """Read STRFlab-type text files for a single stimulus

    Loads spike<suffix> and interval<suffix> from `directory`, parses
    the space-separated times, and folds the spikes on trial intervals.

    directory : path containing the spike/interval text files
    suffix : stimulus label, eg '0001'
    folding_kwargs : passed through to kkpandas.Folded

    Returns : kkpandas.Folded over all trials of this stimulus

    TODO:
    split this into smaller functions, to read just spikes or just intervals
    eventually these should become properties in some Reader object
    """
    # Load the spikes: one line per trial, space-separated times (s)
    # NOTE(review): `file()` is Python 2 only; open() under Python 3
    spikes_filename = os.path.join(directory, 'spike' + suffix)
    with file(spikes_filename) as fi:
        lines = fi.readlines()
    spike_times = [parse_space_sep(s, dtype=np.float)
        for s in lines]
    # Load the intervals: one "start stop" pair per trial
    intervals_filenames = os.path.join(directory, 'interval' + suffix)
    with file(intervals_filenames) as fi:
        lines = fi.readlines()
    intervals = np.asarray([parse_space_sep(s, dtype=np.float)
        for s in lines])
    starts, stops = intervals.T
    # Create the folded representation of the trials
    folded = kkpandas.Folded(spike_times, starts=starts, stops=stops,
        **folding_kwargs)
    return folded
def write_single_stimulus(suffix, spikes, starts=None, stops=None,
    output_directory='.', time_formatter=None, write_intervals=True,
    flush_directory=True):
    """Write out STRFlab-type text files for a single stimulus.

    suffix : string labelling this stimulus
        This will be appended to the filenames.
        ie '0001' for spike0001
    spikes : list of arrays of spike times, or kkpandas.Folded
        These can be starting from zero for each trial, or not.
    starts : array-like, start times of each trial
        If None, use spikes.starts
        TODO:
        If None, use 0.0 for all trials. In this case you should probably
        reference your spike times to the beginning of their trial.
    stops : array-like, stop times of each trial
        If None, use spikes.stops.
        TODO:
        If None, use 0.0 for all trials. Probably not too useful.
    output_directory : where to write the files
    time_formatter : function to apply to each time to turn it into a string
        The default is a floating point formatter with 5 digits after the
        decimal point.
    write_intervals : if False, just write the spike files
    flush_directory : erase everything in the directory before writing
    The following error checking is done:
    1) The lengths of `spikes`, `starts`, and `stops` should be the same
    2) The spike times on each trial must fall in between the start time
    and stop time of that trial.
    TODO:
    currently this is a strict < and > test. One of these should allow
    equality, probably the start.
    """
    # Set defaults
    if starts is None and write_intervals:
        starts = spikes.starts
    if stops is None and write_intervals:
        stops = spikes.stops
    if time_formatter is None:
        time_formatter = lambda v: '%.5f' % v
    # Set filenames
    spike_filename = os.path.join(output_directory, 'spike' + suffix)
    interval_filename = os.path.join(output_directory, 'interval' + suffix)
    # error check lengths
    if write_intervals:
        assert len(spikes) == len(starts), \
            "Length of spikes must equal length of starts"
        assert len(spikes) == len(stops), \
            "Length of spikes must equal length of stops"
        # error check ordering
        # NOTE(review): this check can only run when intervals are in
        # play, since starts/stops may be None otherwise -- confirm
        for trial_start, trial_stop, trial_spikes in zip(
            starts, stops, spikes):
            assert np.all(np.asarray(trial_spikes) > trial_start), \
                "Some spikes fall before trial start"
            assert np.all(np.asarray(trial_spikes) < trial_stop), \
                "Some spikes fall after trial stop"
    # Set up output directory
    # NOTE(review): `flush_directory` is accepted but never acted upon
    if not os.path.exists(output_directory):
        os.mkdir(output_directory)
    # Write the spikes for each repetition, one line per trial
    # NOTE(review): `file()` is Python 2 only; open() under Python 3
    to_write = []
    for trial_spikes in spikes:
        to_write.append(' '.join(map(time_formatter, trial_spikes)))
    with file(spike_filename, 'w') as fi:
        fi.write("\n".join(to_write))
    # Write the start and stop time of each repetition
    if write_intervals:
        to_write = []
        for trial_start, trial_stop in zip(starts, stops):
            to_write.append(time_formatter(trial_start) + ' ' +
                time_formatter(trial_stop))
        with file(interval_filename, 'w') as fi:
            fi.write("\n".join(to_write))
| {"/__init__.py": ["/io.py", "/base.py"]} |
59,866 | cxrodgers/STRF | refs/heads/master | /base.py | import numpy as np
import io
from io import STRFlabFileSchema
import kkpandas, pandas
def metric(b_real, b_pred):
    """Column-wise sum of squared errors between b_real and b_pred."""
    residual = b_real - b_pred
    return (residual ** 2).sum(axis=0)
def iterboost_fast(A_train, b_train, A_test=None, b_test=None, niters=100,
    step_size=None, return_metrics=False):
    """Iteratively find regularized solution to Ax = b using boosting.

    Currently there is no stopping condition implemented. You will have to
    determine the best stopping iteration offline.

    A_train, b_train : training set
    A_test, b_test : test set
    niters : number of iterations to do
    step_size : amount to update the coefficient by at each iteration
    return_metrics : if True, return the calculated predictions on the
        training and testing set at each iteration

    Returns: h_all (list of the coefficient vector after each iteration),
    and additionally (metric_test_l, metric_train_l) if return_metrics.
    """
    # Set step size
    if step_size is None:
        step_size = .01 * np.sqrt(b_train.var() / A_train.var()) # about 1e-4
    # If a test matrix was provided, check the fit quality at each iteration
    if A_test is None:
        do_test = False
    else:
        do_test = True
    # Start with a zero STRF
    h_current = np.zeros((A_train.shape[1], 1))
    # We need to precalculate as much as possible to save computation time
    # at each iteration. We will precalculate the new predictions, in terms
    # of the previous predictions and the update matrix. We take advantage
    # of the fact that the effect of each update on the current predictions
    # (and errors) is simply additive.
    #
    # It would be more stable (but much slower) to re-calculate these every
    # time!
    #
    # Precalculate the update matrices
    # Each column is the same shape as h_current
    # There are two times as many columns as rows
    # Each one is a plus or minus update of one coefficient
    # So this is just two identity matrices put together, but multiplied by
    # plus or minus step_size
    update_matrix = np.concatenate([
        step_size * np.eye(len(h_current)),
        -step_size * np.eye(len(h_current))], axis=1)
    # Determine the amount that predicted output will change by, given each
    # possible coefficient update
    bpred_update = np.dot(A_train, update_matrix)
    # If necessary, precalculate the update on the prediction on the test set
    # for each possible update
    if do_test:
        bpred_update_test = np.dot(A_test, update_matrix)
    # Initialize the error signals
    # Each column is the error between the current filter + all possible updates,
    # and the true b.
    # Since the current filter is zero, the first error is just bpred_update - b
    errs = bpred_update - b_train
    if do_test:
        errs_test = bpred_update_test - b_test
    # Variable to store results in
    best_update_l, metric_train_l, metric_test_l, h_all = [], [], [], []
    # Iterate
    for niter in range(niters):
        # Find the RSS for each possible update (sum of squared errors for
        # each possible update).
        # This is the inner loop but I do not see any way to optimize it!
        rss = np.sum(errs**2, axis=0)
        # Find the update that minimizes the RSS and store
        best_update = np.argmin(rss)
        metric_train_l.append(rss[best_update])
        # Update the errors by including the newest coefficient change
        errs += bpred_update[:, best_update][:, None]
        # Store performance on test set
        # NOTE(review): the test metric is read before errs_test is
        # updated, so train/test metrics are offset by one update --
        # confirm this is the intended bookkeeping
        if do_test:
            metric_test_l.append(np.sum(errs_test[:, best_update]**2))
        # Update errors on test set by including the effect of the newest
        # update.
        if do_test:
            errs_test += bpred_update_test[:, best_update][:, None]
        # Update h with the new best update
        best_update_l.append(best_update)
        h_current += update_matrix[:, best_update][:, None]
        h_all.append(h_current.copy())
    if return_metrics:
        return h_all, metric_test_l, metric_train_l
    else:
        return h_all
def iterboost_slow(A_train, b_train, A_test, b_test, niters=100, step_size=None):
    """Simple reference implementation of boosting for Ax = b.

    Unlike iterboost_fast, this recomputes the full prediction matrix at
    every iteration instead of updating errors incrementally, so it is
    easier to verify at the cost of speed. The original version was
    flagged broken: it never initialized h_current/update_matrix, ignored
    `niters`, depended on an undefined `myutils`, and returned nothing.

    A_train, b_train : training set
    A_test, b_test : test set
    niters : number of boosting iterations
    step_size : per-iteration coefficient change; defaults to the same
        heuristic as iterboost_fast

    Returns: (h_all, metric_test_l, metric_train_l)
        h_all : list of the coefficient vector after each iteration
        metric_test_l / metric_train_l : sum-of-squared-error on the
        test / training set at each iteration
    """
    # Default step size, matching iterboost_fast
    if step_size is None:
        step_size = .01 * np.sqrt(b_train.var() / A_train.var())
    n_coeffs = A_train.shape[1]
    # Start from a zero filter
    h_current = np.zeros((n_coeffs, 1))
    # Candidate updates: +/- step_size applied to each single coefficient
    update_matrix = np.concatenate([
        step_size * np.eye(n_coeffs),
        -step_size * np.eye(n_coeffs)], axis=1)
    best_update_l, metric_train_l, metric_test_l, h_all = [], [], [], []
    for niter in range(niters):
        # Predictions on the training set for every candidate update
        b_train_predictions = np.dot(A_train, h_current + update_matrix)
        # Sum of squared errors for each candidate
        candidate_errors = np.sum(
            (b_train_predictions - b_train) ** 2, axis=0)
        # Choose the candidate with the lowest training error
        best_update = np.argmin(candidate_errors)
        best_update_l.append(best_update)
        h_updated = h_current + update_matrix[:, best_update][:, None]
        # Error on training set after applying the chosen update
        metric_train_l.append(candidate_errors[best_update])
        # Error on test set after applying the chosen update
        test_residual = b_test - np.dot(A_test, h_updated)
        metric_test_l.append(np.sum(test_residual ** 2))
        # Accept the update
        h_current = h_updated
        h_all.append(h_updated)
    return h_all, metric_test_l, metric_train_l
# Define the algorithms
def fit_lstsq(A, b):
    """Ordinary least-squares solution to Ax = b."""
    solution = np.linalg.lstsq(A, b)
    return solution[0]
def fit_ridge_by_SVD(A, b, alpha=None, precomputed_SVD=None, check_SVD=False,
    normalize_alpha=True, keep_dimensions_up_to=None):
    """Calculate the regularized solution to Ax = b.

    A, b : input and output matrices
    alpha : ridge parameter to apply to the singular values
    precomputed_SVD : tuple of u, s, vh for A
    normalize_alpha : if True, normalize alpha to be roughly equivalent to
        the size of the singular values
    keep_dimensions_up_to : set all singular values to zero beyond the Nth,
        where the first N singular values account for this much of the variance

    Returns the fitted coefficient matrix x.
    NOTE(review): Python 2 only as written (`print` statement, builtin
    `reduce`); raises TypeError when alpha is None and normalize_alpha.
    """
    # Calculate the SVD of A
    if precomputed_SVD is None:
        # Find SVD such that u * np.diag(s) * vh = A
        print "doing SVD"
        u, s, vh = np.linalg.svd(A, full_matrices=False)
    else:
        # If the SVD was provided, optionally check that it is correct
        u, s, vh = precomputed_SVD
        if check_SVD:
            # This implements u * diag(s) * vh in matrix multiplication
            check = reduce(np.dot, [u, np.diag(s), vh])
            if not np.allclose(check, A):
                raise ValueError("A does not agree with SVD")
    # optionally normalize alpha
    if normalize_alpha:
        alpha = np.sqrt(alpha) * len(vh)
    # Project the output onto U. ub = u.T * b
    ub = np.dot(u.T, b)
    # Optionally set some singular values to zero
    if keep_dimensions_up_to is not None:
        if (keep_dimensions_up_to < 0) or (keep_dimensions_up_to > 1):
            raise ValueError("keep_dimensions_up_to must be between 0 and 1")
        s = s.copy()
        # Find the index for the first singular value beyond the amount required
        sidx = np.where(
            (np.cumsum(s**2) / (s**2).sum()) >= keep_dimensions_up_to)[0][0]
        # Set the values beyond this to zero
        if sidx + 1 < len(s):
            s[sidx + 1:] = 0
    # (pseudo-)invert s to get d
    # Note that this explodes dimensions with low variance (small s)
    # so we can replace diag(1/s) with diag(s/(s**2 + alpha**2))
    # d and s are one-dimensional, so this is element-wise
    d = s / (s**2 + alpha**2)
    # Account for 0/0
    if keep_dimensions_up_to is not None:
        d[sidx + 1:] = 0
    # solve for w = ub * diag(1/s)
    w = ub * d[:, None]
    # Project w onto vh to find the solution
    x_svdfit = np.dot(vh.T, w)
    return x_svdfit
def fit_ridge(A, b, alpha=None, ATA=None, normalize_alpha=True):
    """Ridge regression solution to Ax = b.

    alpha : ridge penalty; None means a plain (unregularized) solve
    ATA : optionally precomputed A.T * A, to reuse across calls
    normalize_alpha : if True, scale alpha by len(ATA)**2 so its
        magnitude is comparable across problem sizes
    """
    # Calculate ATA if necessary
    if ATA is None:
        ATA = np.dot(A.T, A)
    if alpha is None:
        # No ridge, just pinv
        to_invert = ATA
    else:
        if normalize_alpha:
            alpha = alpha * (len(ATA) ** 2)
        # Ridge at alpha: add the penalty to the diagonal
        to_invert = ATA + alpha * np.eye(len(ATA))
    # x = (ATA + alpha*I)^-1 * A.T * b
    # NOTE(review): `reduce` is the Python 2 builtin
    # (functools.reduce on Python 3)
    return reduce(np.dot, (np.linalg.inv(to_invert), A.T, b))
def fit_STA(A, b):
    """Spike-triggered average: response-weighted sum of the stimulus
    rows, normalized by the total response."""
    weighted = np.dot(A.T, b)
    return weighted / b.sum()
def check_fit(A, b, X_fit=None, b_pred=None, scale_to_fit=False):
    """Quantify the quality of a linear fit.

    Either pass X_fit (prediction becomes A . X_fit) or a precomputed
    b_pred. If scale_to_fit, the prediction is rescaled to match the
    variance of b.

    Returns: (error variance, error mean, prediction variance,
              error variance / signal variance, cross-correlation)
    """
    if b_pred is None:
        b_pred = np.dot(A, X_fit)
    if scale_to_fit:
        b_pred = b_pred * np.sqrt(b.var() / b_pred.var())
    err = b - b_pred
    # z-score both signals; their normalized inner product is then the
    # zero-lag cross-correlation
    b_z = ((b - b.mean()) / b.std()).flatten()
    pred_z = ((b_pred - b_pred.mean()) / b_pred.std()).flatten()
    xcorr = np.inner(b_z, pred_z) / float(len(b_z))
    return err.var(), err.mean(), b_pred.var(), err.var() / b.var(), xcorr
def jackknife_with_params(A, b, params, n_jacks=5, keep_fits=False, warn=True,
    kwargs_by_jack=None, meth_returns_list=False, **kwargs):
    """Solve Ax=b with jack-knifing over specified method.

    The data will be split into training and testing sets, in `n_jacks`
    different ways. For each set, each analysis in `params` will be run.
    Fit metrics are calculated and returned for each run.

    Parameters
    ----------
    A : M, N
    b : N, 1
    params : DataFrame specifying analyses to run
        Has the following columns:
            name : name of the analysis
            meth : handle to function to call for analysis
            kwargs : dict of keyword arguments to pass on that analysis
    keep_fits : if False, do not return anything for fits
    warn : if True, check that A and b are zero-meaned
    meth_returns_list : If None, assumes that `meth` returns a
        single fit.
        Otherwise, this argument should be a list of strings, and `meth`
        should return a list of fits that is the same length.
        Each string in meth_returns_list will be appended to `name` and
        attached to the corresponding fit.
        Example: meth_returns_list=('step1', 'step2', 'step3')
        for an iterative method returning all of its fits.
        NOTE(review): the default is False but the branch below tests
        `is not None`, so the default takes the list branch and crashes
        on len(False) -- confirm the default should be None.
    kwargs_by_jack : list of length n_jacks
        **kwargs_by_jack[n_jack] is passed to `meth` on that jack.
    Any remaining keyword arguments are passed on every call to `meth`.

    Returns: jk_metrics, jk_fits
    jk_metrics : DataFrame, length n_jacks * len(params)
        Has the following columns:
            name : name of the analysis, from params.name
            n_jack : jack-knife number, ranges from 0 to n_jacks
            evar : variance of the error
            ebias : bias of the error
            predvar : variance of the prediction
            eratio : variance of error / variance of true result
            xcorr : cross correlation between prediction and result
            evar_cv, ebias_cv, predvar_cv, eratio_cv, xcorr_cv :
                Same as above but on the testing set.
    jk_fits : list of length n_jacks * len(params)
        The calculated fit ('x') for each analysis
    """
    # warn
    # NOTE(review): Python 2 print statements
    if warn:
        if not np.allclose(0, A.mean(axis=0)):
            print "warning: A is not zero meaned"
        if not np.allclose(0, b.mean(axis=0)):
            print "warning: b is not zero meaned"
    # set up the jackknife
    # NOTE(review): relies on Python 2 integer division; use // for Py3
    jk_len = len(A) / n_jacks
    jk_starts = np.arange(0, len(A) - jk_len + 1, jk_len)
    # where to put results
    results = []
    # Jack the knifes
    for n_jack, jk_start in enumerate(jk_starts):
        # Set up test and train sets: a contiguous held-out slice
        jk_idxs = np.arange(jk_start, jk_start + jk_len)
        # NOTE(review): np.bool is removed from modern numpy (use bool)
        jk_mask = np.zeros(len(A), dtype=np.bool)
        jk_mask[jk_idxs] = 1
        A_test, A_train = A[jk_mask], A[~jk_mask]
        b_test, b_train = b[jk_mask], b[~jk_mask]
        # Iterate over analyses to do
        for na, (name, meth, run_kwargs) in params.iterrows():
            # Add the universal kwargs to the run kwargs
            dkwargs = run_kwargs.copy()
            if kwargs_by_jack is not None:
                dkwargs.update(kwargs_by_jack[n_jack])
            dkwargs.update(kwargs)
            # Call the fit
            if meth_returns_list is not None:
                # meth returns one fit per entry in meth_returns_list
                X_fit_l = meth(A_train, b_train, **dkwargs)
                assert len(meth_returns_list) == len(X_fit_l)
                for xxfit, xxfitname in zip(X_fit_l, meth_returns_list):
                    # Get the metrics
                    train_metrics = check_fit(A_train, b_train, xxfit)
                    test_metrics = check_fit(A_test, b_test, xxfit)
                    # Append the results
                    results.append([name+str(xxfitname), n_jack, xxfit] +
                        list(train_metrics) + list(test_metrics))
            else:
                X_fit = meth(A_train, b_train, **dkwargs)
                # Get the metrics
                train_metrics = check_fit(A_train, b_train, X_fit)
                test_metrics = check_fit(A_test, b_test, X_fit)
                # Append the results
                results.append([name, n_jack, X_fit] +
                    list(train_metrics) + list(test_metrics))
    # Form data frames to return
    metrics = pandas.DataFrame(results,
        columns=['name', 'n_jack', 'fit',
            'evar', 'ebias', 'predvar', 'eratio', 'xcorr',
            'evar_cv', 'ebias_cv', 'predvar_cv', 'eratio_cv', 'xcorr_cv'])
    # NOTE(review): DataFrame.sort was removed from pandas; sort_values
    # is the modern equivalent
    metrics = metrics.sort(['name', 'n_jack'])
    fits = metrics.pop('fit')
    return metrics, fits
def jackknife_over_alpha(A, b, alphas, n_jacks=5, meth=fit_ridge,
    keep_fits=False, warn=True):
    """Jack-knifed ridge fits of Ax=b, sweeping the ridge parameter.

    alphas : iterable of ridge parameters; each becomes one analysis
        named 'ridge%010.05f'
    n_jacks : number of contiguous train/test splits
    meth : fitting function, called as meth(A_train, b_train, alpha=...)
    keep_fits : if True, also collect the fitted coefficients per jack
    warn : if True, check that A and b are zero-meaned

    Returns: (jk_metrics, jk_fits) -- one metrics DataFrame per jack,
    and (when keep_fits) one Series of fits per jack.
    """
    # warn
    # NOTE(review): Python 2 print statements
    if warn:
        if not np.allclose(0, A.mean(axis=0)):
            print "warning: A is not zero meaned"
        if not np.allclose(0, b.mean(axis=0)):
            print "warning: b is not zero meaned"
    # Set up the analyses, one per ridge parameter
    analyses = [
        ['ridge%010.05f' % alpha, meth, {'alpha': alpha}]
        for alpha in alphas]
    # set up the jackknife
    # NOTE(review): relies on Python 2 integer division; use // for Py3
    jk_len = len(A) / n_jacks
    jk_starts = np.arange(0, len(A) - jk_len + 1, jk_len)
    # set up jk_results
    jk_metrics = []
    jk_fits = []
    # Jack the knifes
    for n, jk_start in enumerate(jk_starts):
        # Set up test and train sets: a contiguous held-out slice
        jk_idxs = np.arange(jk_start, jk_start + jk_len)
        # NOTE(review): np.bool is removed from modern numpy (use bool)
        jk_mask = np.zeros(len(A), dtype=np.bool)
        jk_mask[jk_idxs] = 1
        A_test, A_train = A[jk_mask], A[~jk_mask]
        b_test, b_train = b[jk_mask], b[~jk_mask]
        # Run the analyses
        results = []
        for name, meth, kwargs in analyses:
            X_fit = meth(A_train, b_train, **kwargs)
            # metrics on the training set
            evar, ebias, predvar, eratio, xcorr = check_fit(
                A_train, b_train, X_fit)
            # metrics on the held-out set ('sc' suffix)
            evarsc, ebiassc, predvarsc, eratiosc, xcorrsc = check_fit(
                A_test, b_test, X_fit)
            results.append((name, X_fit, evar, ebias, predvar, eratio, xcorr,
                evarsc, ebiassc, predvarsc, eratiosc, xcorrsc))
        # DataFrame it
        metrics = pandas.DataFrame(results,
            columns=['name', 'fit', 'evar', 'ebias', 'predvar', 'eratio', 'xcorr',
                'evarsc', 'ebiassc', 'predvarsc', 'eratiosc', 'xcorrsc'])
        metrics = metrics.set_index('name')
        fits = metrics.pop('fit')
        #~ # Make predictions
        #~ preds = pandas.Series([np.dot(A_test, fit) for fit in fits],
        #~ index=fits.index)
        # Store
        jk_metrics.append(metrics)
        if keep_fits:
            jk_fits.append(fits)
        #~ jk_preds.append(preds)
    return jk_metrics, jk_fits
def allclose_2d(a):
    """Returns True if entries in `a` are close to equal.

    a : iterable, convertable to array
    If the entries are arrays of different size, returns False.
    If len(a) == 1, returns True.
    """
    # Return true if only one
    if len(a) == 1:
        return True
    # Return false if entries have different lengths.
    # (A set of lengths works on both Python 2 and 3; the original
    # np.unique(map(len, a)) breaks on Python 3's lazy map object.)
    if len(set(len(entry) for entry in a)) > 1:
        return False
    # Otherwise use allclose
    a = np.asarray(a)
    if a.ndim == 0:
        raise ValueError("input to allclose_2d cannot be 0d")
    return np.all([np.allclose(a[0], aa) for aa in a[1:]])
def concatenate_and_reshape_timefreq(timefreq_list, n_delays,
    blanking_value=-np.inf):
    """Concatenate and reshape spectrograms for STRF estimation.

    Returned value is one large spectogram of shape (n_freqs*n_delays,
    total_time). Each column contains the current value and the previous
    n_delays.

    Arugments:
    timefreq_list : list of 2d arrays
        Each array has shape (n_freqs, n_timepoints)
    n_delays : int
        Number of blank timepoints to insert between spectrograms.
    blanking_value : float
        What value to insert at the blank timepoints.

    Puts (n_freqs, n_delays) blanking_values before each stimulus.
    Then reshapes each column to include delays:
        column_n = concatenated_specgram[:, n:n-n_delays:-1].flatten()
    The original slice corresponding to column n:
        concatenated_specgram[:, n:n-n_delays:-1]
    can be recovered as:
        reshaped_specgram[:, n].reshape(specgram.shape[0], n_delays)

    There are n_delays blanks in between each stimuli, but the total length
    of the returned value is the sum of the total length of the provided
    stimuli because those blanks are folded into the features.
    """
    reshaped_l = []
    for spec in timefreq_list:
        n_freqs = spec.shape[0]
        # prepend n_delays columns of blanking_value to this stimulus
        padding = blanking_value * np.ones((n_freqs, n_delays))
        padded = np.concatenate([padding, spec], axis=1)
        # build one feature column per original timepoint: the current
        # column plus the previous n_delays - 1, most recent first
        columns = [padded[:, n:n - n_delays:-1].flatten()
            for n in range(n_delays, padded.shape[1])]
        reshaped_l.append(np.transpose(np.array(columns)))
    return np.concatenate(reshaped_l, axis=1)
class Experiment:
    """Object encapsulating STRF estimation for a specific dataset"""
    def __init__(self, path=None, file_schema=None, timefreq_path=None, **file_schema_kwargs):
        """Create a new object to estimate a STRF from a dataset.

        There are many computation steps which must be done in order.
        Here is a full pipeline illustrating its use:

        # Get the files
        expt_path = expt_path_l[0]
        expt = STRF.base.Experiment(expt_path)
        expt.file_schema.timefreq_path = timefreq_dir
        expt.file_schema.populate()

        # Load the timefreq and concatenate
        expt.read_all_timefreq()
        expt.compute_full_stimulus_matrix()

        # Load responses and bin
        expt.read_all_responses()
        expt.compute_binned_responses()

        # Grab the stimulus and responses
        fsm = expt.compute_full_stimulus_matrix()
        frm = expt.compute_full_response_matrix()
        """
        # Location of data
        self.path = path
        if file_schema is None:
            # NOTE(review): a provided file_schema is never stored, so
            # self.file_schema stays unset in that case; also,
            # STRFlabFileSchema.__init__ accepts only a directory, so
            # non-empty file_schema_kwargs would raise -- confirm
            self.file_schema = STRFlabFileSchema(self.path, **file_schema_kwargs)
            # Hack to make it load the timefreq files
            self.file_schema.timefreq_path = timefreq_path
            self.file_schema.populate()
        # How to read timefreq files
        self.timefreq_file_reader = io.read_timefreq_from_matfile
    def read_timefreq(self, label):
        # Load (Pxx, freqs, t) for one stimulus label
        filename = self.file_schema.timefreq_filename[label]
        return self.timefreq_file_reader(filename)
    def read_all_timefreq(self, store_intermediates=True):
        """Read timefreq from disk. Store and return.

        Reads all timefreq from self.file_schema.
        Each consists of Pxx, freqs, t.
        If the freqs is the same for all, then stores in self.freqs.
        Otherwise, self.freqs is None.
        Same for t.

        Returns:
        List of Pxx, list of freqs, list of t
        """
        # Load all
        Pxx_l, freqs_l, t_l = zip(*[self.read_timefreq(label)
            for label in self.file_schema.timefreq_file_labels])
        # Optionally store
        if store_intermediates:
            self.timefreq_list = Pxx_l
            self.freqs_l = freqs_l
            self.t_l = t_l
        # Test for freqs consistency
        self.freqs = None
        if allclose_2d(freqs_l):
            self.freqs = np.mean(freqs_l, axis=0)
        # Test for t consistency
        self.t = None
        if allclose_2d(t_l):
            self.t = np.mean(t_l, axis=0)
        return Pxx_l, freqs_l, t_l
    def read_response(self, label):
        # Folded spike times for one stimulus label
        folded = io.read_single_stimulus(self.file_schema.spike_path, label)
        return folded
    def read_all_responses(self):
        """Reads all response files and stores in self.response_l"""
        # Read in all spikes, recentering
        dfolded = io.read_directory(self.file_schema.spike_path,
            subtract_off_center=True)
        # Order by label so responses align with the timefreq lists
        response_l = []
        for label in self.file_schema.spike_file_labels:
            response_l.append(dfolded[label])
        self.response_l = response_l
        return response_l
    def compute_binned_responses(self, dilation_before_binning=.99663):
        """Bins the stored responses in the same way as the stimuli.

        The bins are inferred from the binwidth of the timefreq, as stored
        in self.t_l, independently for each stimulus.

        Optionally, a dilation is applied to these bins to convert them into
        the neural timebase.

        Finally, the values in self.response_l are binned and stored in
        self.binned_response_l

        I also store self.trials_l to identify how many repetitions of each
        timepoint occurred.
        """
        self.binned_response_l = []
        self.trials_l = []
        # Iterate over stimuli
        for folded, t_stim in zip(self.response_l, self.t_l):
            # Get bins from t_stim by recovering original edges
            t_stim_width = np.mean(np.diff(t_stim))
            edges = np.linspace(0, len(t_stim) * t_stim_width, len(t_stim) + 1)
            # Optionally apply a kkpandas dilation
            # Spike times are always shorter than behavior times
            edges = edges * dilation_before_binning
            # Bin each, using the same number of bins as in t
            binned = kkpandas.Binned.from_folded(folded, bins=edges)
            # Save the results
            self.binned_response_l.append(binned.rate.values.flatten())
            self.trials_l.append(binned.trials.values.flatten())
    def compute_concatenated_stimuli(self):
        """Returns concatenated spectrograms as (N_freqs, N_timepoints).

        This is really only for visualization, not computation, because
        it doesn't include the delays.
        """
        return np.concatenate(self.timefreq_list, axis=1)
    def compute_concatenated_responses(self):
        """Returns a 1d array of concatenated binned responses"""
        return np.concatenate(self.binned_response_l)
    def compute_full_stimulus_matrix(self, n_delays=3, timefreq_list=None,
        blanking_value=-np.inf):
        """Given a list of spectrograms, returns the full stimulus matrix.

        See concatenate_and_reshape_timefreq for the implementation details.
        This function actually returns a transposed version, more suitable
        for fitting. The shape is (N_timepoints, N_freqs * N_delays),
        ie, (N_constraints, N_inputs)
        """
        # Determine what list to operate on: explicit argument, then the
        # cached copy, then a fresh read from disk
        if timefreq_list is None:
            timefreq_list = self.timefreq_list
        if timefreq_list is None:
            timefreq_list = self.read_all_timefreq()[0]
        if timefreq_list is None:
            raise ValueError("cannot determine timefreq lists")
        # Concatenate and reshape
        self.full_stimulus_matrix = concatenate_and_reshape_timefreq(
            timefreq_list,
            n_delays=n_delays, blanking_value=blanking_value).T
        # Write out
        return self.full_stimulus_matrix
    def compute_full_response_matrix(self):
        """Returns a response matrix, suitable for fitting.

        Returned array has shape (N_timepoints, 1)
        """
        self.full_response_matrix = \
            self.compute_concatenated_responses()[:, None]
        return self.full_response_matrix
def clean_up_stimulus(whole_stimulus, silence_value='min_row', z_score=True):
    """Replace -inf (silence/blanking) entries and optionally z-score rows.

    silence_value == 'min_row' : minimum value in the row
    silence_value == 'min_whole' : minimum value in the whole stimulus
    silence_value == 'mean_row', 'mean_whole' : mean
    silence_value == 'median_row', 'median_whole' : median
    Any other value is assigned directly to the -inf entries.

    Returns a cleaned COPY of `whole_stimulus`; the input is never mutated.

    You might want to check a histogram of the returned values and pick what
    looks best. On the one hand, silence is best represented by minimal
    power. On the other hand, this distorts the histogram due to silence
    in the actual signals, and/or the blanking periods. It also means that
    this silence will play a part in the linear fit.
    Most models treat the rows independently, so if you set the silence
    row-independently, it will mesh nicely. On the other hand, why should
    silence in one frequency band be treated differently from others?
    """
    # Always operate on a copy: the original's *_row branches wrote into
    # the caller's rows in place, silently mutating the input.
    cleaned = whole_stimulus.copy()
    neg_inf = np.isneginf(cleaned)
    # BUGFIX: compare strings with ==, not `is` (identity of string
    # literals is an implementation detail, not a guarantee).
    if silence_value == 'min_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = cleaned[n, ~msk].min()
    elif silence_value == 'mean_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = cleaned[n, ~msk].mean()
    elif silence_value == 'median_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = np.median(cleaned[n, ~msk])
    elif silence_value == 'min_whole':
        cleaned[neg_inf] = np.min(cleaned[np.isfinite(cleaned)])
    elif silence_value == 'mean_whole':
        cleaned[neg_inf] = np.mean(cleaned[np.isfinite(cleaned)])
    elif silence_value == 'median_whole':
        cleaned[neg_inf] = np.median(cleaned[np.isfinite(cleaned)])
    else:
        # blindly assign `silence_value` to the munged values
        cleaned[neg_inf] = silence_value
    if z_score:
        # Normalize each row to zero mean, unit std (matches original: a
        # tiny std only warns, the division still happens).
        for n in range(cleaned.shape[0]):
            s = np.std(cleaned[n, :])
            if s < 10**-6:
                print("warning, std too small")
            cleaned[n, :] = (cleaned[n, :] - cleaned[n, :].mean()) / s
    return cleaned
class RidgeFitter:
    """Like DirectFitter, but operates on Experiment objects.

    Intended to pull the full matrices from the experiment, clean them if
    necessary, and store the results. The fitting itself is not
    implemented yet.
    """

    def __init__(self, expt=None):
        # Experiment object to fit; may also be attached after construction
        self.expt = expt

    def fit(self):
        """Placeholder: fitting is not implemented yet."""
        # X = self.expt
        return None
class DirectFitter:
    """Calculates an STRF from a stimulus matrix and a response matrix."""

    def __init__(self, X=None, Y=None):
        """New fitter.

        X : (n_timepoints, n_features)
            Stimulus matrix. I think this works better if the mean of rows
            and columns is zero.
        Y : (n_timepoints, 1)
            Response matrix, generally 0s and 1s.

        Raises ValueError if X and Y disagree on n_timepoints.
        """
        self.X = X
        self.Y = Y
        self.XTX = None  # X'X, lazily cached on first whitened_STA call
        if self.X.shape[0] != self.Y.shape[0]:
            raise ValueError("n_timepoints (dim0) is not the same!")
        # BUGFIX: the original compared shape[1] with itself, which is never
        # true; the intended check is features vs timepoints.
        if self.X.shape[1] > self.X.shape[0]:
            print("warning: more features than timepoints, possibly transposed")

    def STA(self):
        """Returns spike-triggered average of stimulus X and response Y.

        Therefore the STA is given by np.dot(X.transpose(), Y) / Y.sum().
        """
        # `float` replaces the np.float alias removed in numpy >= 1.24
        return np.dot(self.X.transpose(), self.Y).astype(float) / \
            self.Y.sum()

    def whitened_STA(self, ridge_parameter=0.):
        """Return the whitened (decorrelated) STA with optional ridge term.

        ridge_parameter scales an identity matrix added to X'X before the
        inverse, stabilizing it for ill-conditioned stimuli.
        """
        if self.XTX is None:
            self.XTX = np.dot(self.X.transpose(), self.X)
        ridge_mat = ridge_parameter * len(self.XTX)**2 * np.eye(len(self.XTX))
        STA = self.STA()
        return np.dot(np.linalg.inv(self.XTX + ridge_mat), STA) * self.X.shape[0]
| {"/__init__.py": ["/io.py", "/base.py"]} |
59,867 | cxrodgers/STRF | refs/heads/master | /STRF_tools.py | import numpy as np
import glob
import os.path
import matplotlib.mlab as mlab
import wave
import struct
import matplotlib.pyplot as plt
from myutils import Spectrogrammer
import myutils
def my_imshow(C, x=None, y=None, ax=None):
    """Display matrix C with row 0 at the bottom (data-like orientation).

    x, y : optional coordinate vectors; only their endpoints are used to
        set the axis extent.
    ax : axes to draw into; a new figure/axes is created when omitted.
    """
    if x is None:
        x = range(C.shape[1])
    if y is None:
        y = range(C.shape[0])
    extent = x[0], x[-1], y[0], y[-1]
    if ax is None:
        f = plt.figure()
        ax = f.add_subplot(111)
    # BUGFIX: draw into `ax` explicitly; plt.imshow targets the *current*
    # axes and silently ignored a caller-supplied `ax`.
    ax.imshow(np.flipud(C), interpolation='nearest', extent=extent)
    ax.axis('auto')
    plt.show()
def clean_up_stimulus(whole_stimulus, silence_value='min_row', z_score=True):
    """Replace -inf (silence/blanking) entries and optionally z-score rows.

    silence_value == 'min_row' : minimum value in the row
    silence_value == 'min_whole' : minimum value in the whole stimulus
    silence_value == 'mean_row', 'mean_whole' : mean
    silence_value == 'median_row', 'median_whole' : median
    Any other value is assigned directly to the -inf entries.

    Returns a cleaned COPY of `whole_stimulus`; the input is never mutated.

    You might want to check a histogram of the returned values and pick what
    looks best. On the one hand, silence is best represented by minimal
    power. On the other hand, this distorts the histogram due to silence
    in the actual signals, and/or the blanking periods.
    """
    if np.any(np.isnan(whole_stimulus)):
        print("warning: what to do with NaNs?")
    # Always operate on a copy: the original's *_row branches wrote into
    # the caller's rows in place, silently mutating the input.
    cleaned = whole_stimulus.copy()
    neg_inf = np.isneginf(cleaned)
    # BUGFIX: compare strings with ==, not `is` (identity of string
    # literals is an implementation detail, not a guarantee).
    if silence_value == 'min_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = cleaned[n, ~msk].min()
    elif silence_value == 'mean_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = cleaned[n, ~msk].mean()
    elif silence_value == 'median_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = np.median(cleaned[n, ~msk])
    elif silence_value == 'min_whole':
        cleaned[neg_inf] = np.min(cleaned[np.isfinite(cleaned)])
    elif silence_value == 'mean_whole':
        cleaned[neg_inf] = np.mean(cleaned[np.isfinite(cleaned)])
    elif silence_value == 'median_whole':
        cleaned[neg_inf] = np.median(cleaned[np.isfinite(cleaned)])
    else:
        # blindly assign `silence_value` to the munged values
        cleaned[neg_inf] = silence_value
    if z_score:
        # Normalize each row to zero mean, unit std (matches original: a
        # tiny std only warns, the division still happens).
        for n in range(cleaned.shape[0]):
            s = np.std(cleaned[n, :])
            if s < 10**-6:
                print("warning, std too small")
            cleaned[n, :] = (cleaned[n, :] - cleaned[n, :].mean()) / s
    return cleaned
class STRF_experiment:
    """Class that holds links to stimulus and response files"""
    # Filename conventions: stimuli are 'stimNNN.wav', spikes are 'spikeNNN'
    stim_file_label = 'stim'
    spike_file_label = 'spike'
    stim_file_regex = r'stim(\d+)\.wav'
    spike_file_regex = r'spike(\d+)'
    def __init__(self, stim_dir=None, spike_dir=None, waveform_transformer=None,
        waveform_loader=None):
        """Initialize object to hold stimulus and response data.
        stim_dir : directory holding files of name 'stim(\d+).wav', one
            wave file per trial, sampled at 200KHz.
        spike_dir : directory holding plaintext files of name 'spike(\d+)',
            one per trial, consisting of spike times separated by spaces
            in ms all on a single line, aligned to time zero the start of
            the corresponding wave file.
        waveform_transformer : defaults to Spectrogrammer()
        waveform_loader : defaults to self.load_waveform_from_wave_file
            But you can set this to be something else if you do not have
            wave files. It needs to be a function taking a filename as
            argument and return (waveform, fs)
        """
        self.stim_dir = stim_dir
        self.spike_dir = spike_dir
        self.waveform_transformer = None
        # When True, _set_list_of_files checks that stim/spike filenames pair up
        self.error_check_filenames = True
        # list of files, to be set later automatically or by user
        self.wave_file_list = None
        self.spike_file_list = None
        # Waveform loaders and transformer objects
        if waveform_transformer is None:
            # Use a Spectrogrammer with reasonable default parameters
            self.waveform_transformer = Spectrogrammer()
        else:
            self.waveform_transformer = waveform_transformer
        if waveform_loader is None:
            self.waveform_loader = self.load_waveform_from_wave_file
        else:
            self.waveform_loader = waveform_loader
    def transform_all_stimuli(self, assert_sampling_rate=None, truncate=None):
        """Calculates spectrograms of each stimulus
        First loads using self.waveform_loader. Then transforms using
        self.waveform_transformer.
        Finally stores in self.t_list, self.freqs_list, and self.specgm_list
        If the time base is consistent, will also assign self.t and self.freqs
        to be the unique value for all stimuli. Otherwise these are left as
        None.
        assert_sampling_rate : if not None, assert that I got this sampling
            rate when loading the waveform
        truncate : Drop all data after this time in seconds for all stimuli
        Saves in attributes `specgm_list`, `t_list`, and `freqs_list`.
        Also attempts to store unique `t` and `freq` for all.
        Returns spectrograms in (n_freqs, n_timepoints) shape
        """
        # Get list of files to transform
        self._set_list_of_files()
        # where data goes
        self.specgm_list = []
        self.t_list = []
        self.freqs_list = []
        self.t = None
        self.freqs = None
        # load and transform each file
        for wave_file in self.wave_file_list:
            waveform, fs = self.waveform_loader(wave_file)
            if assert_sampling_rate:
                assert fs == assert_sampling_rate
            specgm, freqs, t = self.waveform_transformer.transform(waveform)
            if truncate:
                # drop all time bins past the truncation time
                inds = (t > truncate)
                t = t[~inds]
                specgm = specgm[:, ~inds]
            self.specgm_list.append(specgm)
            self.t_list.append(t)
            self.freqs_list.append(freqs)
        # Store unique values of t and freqs (if possible)
        if len(self.t_list) > 0 and np.all(
            [np.all(tt == self.t_list[0]) for tt in self.t_list]):
            self.t = self.t_list[0]
        if len(self.freqs_list) > 0 and np.all(
            [ff == self.freqs_list[0] for ff in self.freqs_list]):
            self.freqs = self.freqs_list[0]
    def load_waveform_from_wave_file(self, filename, dtype=np.float):
        """Opens wave file and reads, assuming signed shorts"""
        # NOTE(review): np.float was removed in numpy >= 1.24; builtin
        # `float` is the drop-in replacement if this default ever breaks.
        wr = wave.Wave_read(filename)
        fs = wr.getframerate()
        # unpack all frames as native-order signed 16-bit samples
        sig = np.array(struct.unpack('%dh' % wr.getnframes(),
            wr.readframes(wr.getnframes())), dtype=dtype)
        wr.close()
        return sig, fs
    def _set_list_of_files(self):
        """Reads stimulus and response filenames from disk, if necessary.
        If the attributes are already set, do not reload from disk (so
        you can overload this behavior).
        In any case, error check that the lists are the same length and end
        with the same sequence of digits, eg [spike003, spike007] and
        [stim003, stim007].
        """
        if self.wave_file_list is None:
            # Find sorted list of wave files
            self.wave_file_list = sorted(glob.glob(os.path.join(self.stim_dir,
                self.stim_file_label + '*.wav')))
        if self.spike_file_list is None:
            # Find sorted list of spike files
            self.spike_file_list = sorted(glob.glob(os.path.join(self.spike_dir,
                self.spike_file_label + '*')))
        # Error checking
        if self.error_check_filenames:
            assert len(self.spike_file_list) == len(self.wave_file_list)
            for wave_file, spike_file in zip(self.wave_file_list,
                self.spike_file_list):
                # extract numbers on end of wave and spike files
                wave_num = glob.re.search(self.stim_file_regex,
                    wave_file).groups()[0]
                spike_num = glob.re.search(self.spike_file_regex,
                    spike_file).groups()[0]
                # test string equality (3 != 003)
                assert wave_num == spike_num
    def get_full_stimulus_matrix(self, n_delays, blanking_value=-np.inf):
        """Concatenate and reshape spectrograms for STRF estimation.
        Returned value is one large spectogram of shape (n_freqs*n_delays,
        total_time). Each column contains the current value and the previous
        n_delays.
        Puts (n_freqs, n_delays) blanking_values before each stimulus.
        Then reshapes each column to include delays:
        column_n = concatenated_specgram[:, n:n-n_delays:-1].flatten()
        The original slice corresponding to column n:
        concatenated_specgram[:, n:n-n_delays:-1]
        can be recovered as:
        reshaped_specgram[:, n].reshape(specgram.shape[0], n_delays)
        There are n_delays blanks in between each stimuli, but the total length
        of the returned value is the sum of the total length of the provided
        stimuli because those blanks are folded into the features. That is, the
        first entry contains the first time sample and the rest blanks; and
        the last sample contains the last n_delays samples.
        """
        if len(self.specgm_list) == 0:
            print "nothing to concatenate, have you run transform_all_stimuli?"
            return
        # put blanks in front of each stimulus and concatenate
        concatenated_specgram_list = []
        for specgram in self.specgm_list:
            # first prepend blanks
            specgram_with_prepended_blanks = np.concatenate([
                blanking_value * np.ones((specgram.shape[0], n_delays)),
                specgram], axis=1)
            # now reshape and include delays in each feature
            reshaped_specgram_list = []
            for n in range(n_delays, specgram_with_prepended_blanks.shape[1]):
                reshaped_specgram_list.append(
                    specgram_with_prepended_blanks[:, n:n-n_delays:-1].flatten())
            reshaped_specgram = np.transpose(np.array(reshaped_specgram_list))
            concatenated_specgram_list.append(reshaped_specgram)
        concatenated_specgram = np.concatenate(concatenated_specgram_list, axis=1)
        return concatenated_specgram
    def get_concatenated_stimulus_matrix(self):
        """Returns a concatenated (non-reshaped) matrix of stimuli."""
        return np.concatenate(self.specgm_list, axis=1)
    def get_concatenated_response_matrix(self, dtype=np.float,
        sampling_rate=1000., truncate=None):
        """Loads spike files from disk, returns concatenated responses.
        You must run transform_all_stimuli first, or otherwise set self.t_list,
        so that I know how to bin the spikes.
        truncate : if a value, throw away all spikes greater than this
            if None, throw away all spikes beyond the end of the stimulus
            for this response
        Returns in shape (1, N_timepoints)
        """
        # Set list of filenames and error check
        self._set_list_of_files()
        # load each one and histogram
        concatenated_psths = []
        for respfile, bin_centers in zip(self.spike_file_list, self.t_list):
            # store responses
            #~ try:
            #~     # flatten() handles the case of only one value
            #~     st = np.loadtxt(respfile).flatten()
            #~ except IOError:
            #~     # this handles the case of no data
            #~     st = np.array([])
            #~ st = st / 1000.0
            # NOTE(review): `file()` is Python-2-only; `open()` would be the
            # Python 3 equivalent.
            s = file(respfile).readlines()
            st = []
            for line in s:
                # spike times are in sample units; convert to seconds
                tmp = myutils.parse_space_sep(line, dtype=np.float)
                tmp = np.asarray(tmp) / sampling_rate
                if truncate:
                    tmp = tmp[tmp <= truncate]
                else:
                    tmp = tmp[tmp <= bin_centers.max()]
                st.append(tmp)
            # convert bin centers to bin edges
            bin_edges = bin_centers[:-1] + 0.5 * np.diff(bin_centers)
            bin_edges = np.concatenate([[-np.inf], bin_edges, [np.inf]])
            # now histogram
            counts = []
            for line in st:
                counts.append(np.histogram(line, bin_edges)[0])
            counts = np.mean(counts, axis=0)
            # Append to growing list and check that size matches up trial-by-trial
            concatenated_psths.append(counts)
            assert len(counts) == len(bin_centers)
        # Return a concatenated array of response from this recording
        self.psth_list = concatenated_psths
        return np.concatenate(concatenated_psths).astype(dtype)[np.newaxis,:]
#~ class Spectrogrammer:
#~ """Turns a waveform into a spectrogram"""
#~ def __init__(self, NFFT=256, downsample_ratio=5, new_bin_width_sec=None,
#~ max_freq=40e3, min_freq=5e3, Fs=200e3, normalization=1.0):
#~ """Initialize object to turn waveforms to spectrograms.
#~ Stores parameter choices, so you can batch analyze waveforms using
#~ the `transform` method.
#~ If you specify new_bin_width_sec, this chooses the closest integer
#~ downsample_ratio and that parameter is actually saved and used.
#~ TODO: catch other kwargs and pass to specgram.
#~ """
#~ # figure out downsample_ratio
#~ if new_bin_width_sec is not None:
#~ self.downsample_ratio = int(np.rint(new_bin_width_sec * Fs / NFFT))
#~ else:
#~ self.downsample_ratio = int(downsample_ratio)
#~ assert self.downsample_ratio > 0
#~ # store other defaults
#~ self.NFFT = NFFT
#~ self.max_freq = max_freq
#~ self.min_freq = min_freq
#~ self.Fs = Fs
#~ self.normalization = normalization
#~ def transform(self, waveform):
#~ """Converts a waveform to a suitable spectrogram.
#~ Removes high and low frequencies, rebins in time (via median)
#~ to reduce data size. Returned times are the midpoints of the new bins.
#~ Returns: Pxx, freqs, t
#~ Pxx is an array of dB power of the shape (len(freqs), len(t)).
#~ It will be real but may contain -infs due to log10
#~ """
#~ # For now use NFFT of 256 to get appropriately wide freq bands, then
#~ # downsample in time
#~ Pxx, freqs, t = mlab.specgram(waveform, NFFT=self.NFFT, Fs=self.Fs)
#~ Pxx = Pxx * np.tile(freqs ** self.normalization, (1, Pxx.shape[1]))
#~ # strip out unused frequencies
#~ Pxx = Pxx[(freqs < self.max_freq) & (freqs > self.min_freq), :]
#~ freqs = freqs[(freqs < self.max_freq) & (freqs > self.min_freq)]
#~ # Rebin in size "downsample_ratio". If last bin is not full, discard.
#~ Pxx_rebinned = []
#~ t_rebinned = []
#~ for n in range(0, len(t) - self.downsample_ratio + 1,
#~ self.downsample_ratio):
#~ Pxx_rebinned.append(
#~ np.median(Pxx[:, n:n+self.downsample_ratio], axis=1).flatten())
#~ t_rebinned.append(
#~ np.mean(t[n:n+self.downsample_ratio]))
#~ # Convert to arrays
#~ Pxx_rebinned_a = np.transpose(np.array(Pxx_rebinned))
#~ t_rebinned_a = np.array(t_rebinned)
#~ # log it and deal with infs
#~ Pxx_rebinned_a_log = -np.inf * np.ones_like(Pxx_rebinned_a)
#~ Pxx_rebinned_a_log[np.nonzero(Pxx_rebinned_a)] = \
#~ 10 * np.log10(Pxx_rebinned_a[np.nonzero(Pxx_rebinned_a)])
#~ return Pxx_rebinned_a_log, freqs, t_rebinned_a
def clean_up_stimulus(whole_stimulus, silence_value='min_row', z_score=True):
    """Replace -inf (silence/blanking) entries and optionally z-score rows.

    silence_value == 'min_row' : minimum value in the row
    silence_value == 'min_whole' : minimum value in the whole stimulus
    silence_value == 'mean_row', 'mean_whole' : mean
    silence_value == 'median_row', 'median_whole' : median
    Any other value is assigned directly to the -inf entries.

    Returns a cleaned COPY of `whole_stimulus`; the input is never mutated.

    You might want to check a histogram of the returned values and pick what
    looks best. On the one hand, silence is best represented by minimal
    power. On the other hand, this distorts the histogram due to silence
    in the actual signals, and/or the blanking periods. It also means that
    this silence will play a part in the linear fit.
    Most models treat the rows independently, so if you set the silence
    row-independently, it will mesh nicely. On the other hand, why should
    silence in one frequency band be treated differently from others?
    """
    # Always operate on a copy: the original's mean_row/median_row branches
    # wrote into the caller's rows in place, silently mutating the input.
    cleaned = whole_stimulus.copy()
    neg_inf = np.isneginf(cleaned)
    # BUGFIX: compare strings with ==, not `is` (identity of string
    # literals is an implementation detail, not a guarantee).
    if silence_value == 'min_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = cleaned[n, ~msk].min()
    elif silence_value == 'mean_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = cleaned[n, ~msk].mean()
    elif silence_value == 'median_row':
        for n in range(len(cleaned)):
            msk = neg_inf[n]
            cleaned[n, msk] = np.median(cleaned[n, ~msk])
    elif silence_value == 'min_whole':
        cleaned[neg_inf] = np.min(cleaned[np.isfinite(cleaned)])
    elif silence_value == 'mean_whole':
        cleaned[neg_inf] = np.mean(cleaned[np.isfinite(cleaned)])
    elif silence_value == 'median_whole':
        cleaned[neg_inf] = np.median(cleaned[np.isfinite(cleaned)])
    else:
        # blindly assign `silence_value` to the munged values
        cleaned[neg_inf] = silence_value
    if z_score:
        # Normalize each row to zero mean, unit std (matches original: a
        # tiny std only warns, the division still happens).
        for n in range(cleaned.shape[0]):
            s = np.std(cleaned[n, :])
            if s < 10**-6:
                print("warning, std too small")
            cleaned[n, :] = (cleaned[n, :] - cleaned[n, :].mean()) / s
    return cleaned
class DirectFitter:
    """Calculates an STRF from a stimulus matrix and a response matrix."""

    def __init__(self, X=None, Y=None):
        """New fitter.

        X : (n_timepoints, n_features)
            Stimulus matrix. I think this works better if the mean of rows
            and columns is zero.
        Y : (n_timepoints, 1)
            Response matrix, generally 0s and 1s.

        Raises ValueError if X and Y disagree on n_timepoints.
        """
        self.X = X
        self.Y = Y
        self.XTX = None  # X'X, lazily cached on first whitened_STA call
        if self.X.shape[0] != self.Y.shape[0]:
            raise ValueError("n_timepoints (dim0) is not the same!")
        # BUGFIX: the original compared shape[1] with itself, which is never
        # true; the intended check is features vs timepoints.
        if self.X.shape[1] > self.X.shape[0]:
            print("warning: more features than timepoints, possibly transposed")

    def STA(self):
        """Returns spike-triggered average of stimulus X and response Y.

        Therefore the STA is given by np.dot(X.transpose(), Y) / Y.sum().
        """
        # `float` replaces the np.float alias removed in numpy >= 1.24
        return np.dot(self.X.transpose(), self.Y).astype(float) / \
            self.Y.sum()

    def whitened_STA(self, ridge_parameter=0.):
        """Return the whitened (decorrelated) STA with optional ridge term.

        ridge_parameter scales an identity matrix added to X'X before the
        inverse, stabilizing it for ill-conditioned stimuli.
        """
        if self.XTX is None:
            self.XTX = np.dot(self.X.transpose(), self.X)
        ridge_mat = ridge_parameter * len(self.XTX)**2 * np.eye(len(self.XTX))
        STA = self.STA()
        return np.dot(np.linalg.inv(self.XTX + ridge_mat), STA) * self.X.shape[0]
| {"/__init__.py": ["/io.py", "/base.py"]} |
class SYLFKException(Exception):
    """Base exception for the framework, carrying a code and a message."""

    def __init__(self, code='', message='Error'):
        self.code = code        # exception code
        self.message = message  # human-readable description

    def __str__(self):
        # when used as a string, show the message
        return self.message
# Raised when an endpoint name is already registered
class EndpointExistsError(SYLFKException):
    """Raised when a handler endpoint name is already registered."""

    def __init__(self, message='Endpoint exists'):
        # BUGFIX: pass as keyword; the positional argument used to land in
        # SYLFKException's `code` parameter, so str(e) stayed 'Error'.
        super(EndpointExistsError, self).__init__(message=message)
# Raised when a URL rule is already registered
class URLExistsError(SYLFKException):
    """Raised when a URL rule is already registered."""

    def __init__(self, message='URL exists'):
        # BUGFIX: the first parameter was misspelled `sekf`, leaving `self`
        # undefined (NameError on instantiation); also pass message as a
        # keyword so it does not land in SYLFKException's `code` parameter.
        super(URLExistsError, self).__init__(message=message)
| {"/main.py": ["/sylfk/__init__.py"]} |
59,869 | Mrxiahelong/sylfk | refs/heads/master | /main.py | from sylfk import SYLFK
# Create the framework application instance
app=SYLFK()
# Start serving requests (host/port come from the SYLFK defaults)
app.run()
| {"/main.py": ["/sylfk/__init__.py"]} |
59,870 | Mrxiahelong/sylfk | refs/heads/master | /sylfk/__init__.py | from werkzeug.serving import run_simple
# `os` is needed by SYLFK.dispatch_static (os.path.exists) but was missing
import os

# BUGFIX: `from sylfk.exceptions as exceptions` is a SyntaxError; the
# `as` alias form requires `import ... as ...`.
import sylfk.exceptions as exceptions
from sylfk.helper import parse_static_key
from sylfk.wsgi_adapter import wsgi_app
from werkzeug.wrappers import Response
# Canned response bodies for common service errors, keyed by status code string
ERROR_MAP={
    '401':Response('<h1>401 Unknow or unsupport method</h1>',content_type='text/html;charset=UTF-8',status=401),
    '404':Response('<h1>404 Source Not Found</h1>',content_type='text/html;charset=UTF-8',status=404),
    # BUGFIX: keyword was misspelled `conent_type`, which raised a TypeError
    # when this module was imported
    '503':Response('<h1>503 Unknown function type</h1>',content_type='text/html;charset=UTF-8',status=503)
}
# Map file extension -> MIME type for static file responses
TYPE_MAP={
    'css':'text/css',
    # BUGFIX: 'text/js' is not a registered MIME type
    'js':'text/javascript',
    'png':'image/png',
    'jpg':'image/jpeg',
    'jpeg':'image/jpeg'
}
class ExecFunc:
    """Bundle a request handler with its type and extra options."""

    def __init__(self, func, func_type, **options):
        self.func = func            # handler function
        self.options = options      # extra keyword options
        self.func_type = func_type  # handler type (e.g. 'view', 'static')
class SYLFK:
    """Minimal WSGI web framework application."""

    def __init__(self, static_folder='static'):
        # BUGFIX: the original signature was (self, static, folder='static')
        # but the body read an undefined name `static_folder`; main.py also
        # calls SYLFK() with no arguments.
        self.host = '127.0.0.1'  # default host
        self.port = 8086  # default port
        self.url_map = {}  # URL -> endpoint name
        self.static_map = {}  # URL -> static resource
        self.function_map = {}  # endpoint name -> ExecFunc handler
        # local directory for static assets, default 'static' under the app dir
        self.static_folder = static_folder

    def dispatch_request(self, request):
        """Handle one request; currently returns a fixed demo page."""
        status = 200
        headers = {
            'Server': 'Shiyanlou Framework'
        }
        return Response('<h1>hello framework</h1>', content_type='text/html',
            headers=headers, status=status)

    def run(self, host=None, port=None, **options):
        """Start the werkzeug development server."""
        for key, value in options.items():
            if value is not None:
                # BUGFIX: was `self.__setattr_(...)` (missing underscore),
                # which raised AttributeError
                setattr(self, key, value)
        if host:
            self.host = host
        if port:
            self.port = port
        # register the built-in static file handler
        self.function_map['static'] = ExecFunc(func=self.dispatch_static,
            func_type='static')
        run_simple(hostname=self.host, port=self.port, application=self,
            **options)

    def __call__(self, environ, start_response):
        # WSGI entry point: delegate to the adapter
        return wsgi_app(self, environ, start_response)

    # add a routing rule
    def add_url_rule(self, url, func, func_type, endpoint=None, **options):
        """Register a URL rule bound to a handler function.

        Raises URLExistsError / EndpointExistsError on duplicates.
        """
        # if the endpoint is unnamed, use the handler function's name
        if endpoint is None:
            endpoint = func.__name__
        # the URL must not already be registered
        if url in self.url_map:
            raise exceptions.URLExistsError
        # non-static endpoints must be unique
        if endpoint in self.function_map and func_type != 'static':
            raise exceptions.EndpointExistsError
        # record URL -> endpoint and endpoint -> handler mappings
        self.url_map[url] = endpoint
        self.function_map[endpoint] = ExecFunc(func, func_type, **options)

    # static resource routing
    def dispatch_static(self, static_path):
        """Serve a static file, or the canned 404 response if it is missing."""
        if os.path.exists(static_path):
            # BUGFIX: variable was misspelled `stayic_path` and the line
            # ended with a stray colon (SyntaxError in the original).
            key = parse_static_key(static_path)
            # look up the MIME type from the extension, default to plain text
            doc_type = TYPE_MAP.get(key, 'text/plain')
            # read the file contents
            with open(static_path, 'rb') as f:
                rep = f.read()
            # wrap and return the response body
            return Response(rep, content_type=doc_type)
        else:
            # return the canned 404 response
            return ERROR_MAP['404']
| {"/main.py": ["/sylfk/__init__.py"]} |
class SYLFKException(Exception):
    """Base exception for the framework, carrying a code and a message."""

    def __init__(self, code='', message='Error'):
        self.code = code        # exception code
        self.message = message  # human-readable description

    def __str__(self):
        # when used as a string, show the message
        return self.message
class EndpointExistsError(SYLFKException):
    """Raised when a handler endpoint name is already registered."""

    def __init__(self, message='Endpoint exists'):
        # BUGFIX: pass as keyword; the positional argument used to land in
        # SYLFKException's `code` parameter, so str(e) stayed 'Error'.
        super(EndpointExistsError, self).__init__(message=message)
class URLExistsError(SYLFKException):
    """Raised when a URL rule is already registered."""

    def __init__(self, message='URL exists'):
        # BUGFIX: pass as keyword; the positional argument used to land in
        # SYLFKException's `code` parameter, so str(e) stayed 'Error'.
        super(URLExistsError, self).__init__(message=message)
| {"/main.py": ["/sylfk/__init__.py"]} |
59,872 | Mrxiahelong/sylfk | refs/heads/master | /sylfk/helper/__init__.py | #以“”分割文件名字,获取文件后缀类型
def parse_static_key(filename):
return filename.spilt(".")[-1]
| {"/main.py": ["/sylfk/__init__.py"]} |
59,873 | WilfredLemus/antivirus | refs/heads/master | /antivirus/apps/home/models.py | from django.db import models
class Categoria(models.Model):
    """Product category with an auto-generated slug."""
    nombre = models.CharField(max_length=50)
    slug = models.SlugField(editable=False)

    def save(self, *args, **kwargs):
        # BUGFIX: `slugify` was never imported anywhere in this module, so
        # saving raised NameError. Imported locally to keep the module's
        # top-level imports untouched.
        from django.utils.text import slugify
        if not self.id:
            # generate the slug only on first save
            self.slug = slugify(self.nombre)
        super(Categoria, self).save(*args, **kwargs)

    def __unicode__(self):
        return self.nombre
class antivirus(models.Model):
    """Antivirus product listing with price and system requirements."""
    # NOTE(review): class name should be CamelCase ("Antivirus") by
    # convention, but renaming would break existing references/migrations.
    nombre = models.CharField(max_length=100)
    slug = models.SlugField(editable=False)
    descripcion = models.CharField(max_length=150)
    imagen = models.ImageField(upload_to = 'ImgAntivirus')
    precio = models.DecimalField(max_digits=5, decimal_places=2, default=0.00)
    categoria = models.ForeignKey(Categoria)
    SistemaOperativo = models.CharField(max_length=60)
    arquitectura = models.CharField(max_length=60)
    ram = models.IntegerField()  # required RAM (units not specified here)
    # NOTE(review): with save() commented out, `slug` is never populated
    # automatically (unlike Categoria) — confirm whether that is intended.
    # def save(self, *args, **kwargs):
    # if not self.id:
    # self.slug = slugify(self.nombre)
    # super(antivirus, self).save(*args, **kwargs)
    # def __unicode__(self):
    # return self.nombre
59,874 | WilfredLemus/antivirus | refs/heads/master | /antivirus/apps/home/migrations/0003_auto_20150602_0337.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: redefines antivirus.imagen as an
    # ImageField uploading into 'ImgAntivirus'. Do not edit by hand.
    dependencies = [
        ('home', '0002_auto_20150602_0319'),
    ]
    operations = [
        migrations.AlterField(
            model_name='antivirus',
            name='imagen',
            field=models.ImageField(upload_to=b'ImgAntivirus'),
        ),
    ]
| {"/antivirus/apps/home/admin.py": ["/antivirus/apps/home/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.