index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
52,634 | aaalgo/aardvark | refs/heads/master | /zoo/net3d.py | import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('re_weight', 0.0001, 'regularization weight')
def unet (X, is_training):
    """Build a 3-D U-Net over the input volume tensor X.

    Args:
        X: 5-D tensor, assumed (batch, Z, Y, X, channels) -- TODO confirm with callers.
        is_training: bool tensor; forwarded to batch normalization (used only
            when the hard-coded BN switch below is enabled).

    Returns:
        (net, 8): output feature tensor and the network's total downsample
        factor (8 == 2**3 max-poolings, undone by three transposed convs).
    """
    BN = False  # hard-coded switch: set True to add batch norm after each conv
    net = X
    stack = []  # skip connections saved on the way down, popped on the way up
    with tf.name_scope('myunet'):
        regularizer = tf.contrib.layers.l2_regularizer(scale=FLAGS.re_weight)
        def conv (input, channels, filter_size=3, stride=1):
            # SAME-padded 3D conv + ReLU (batch norm between them when BN is on)
            if BN:
                input = tf.layers.conv3d(input, channels, filter_size, stride, padding='SAME', activation=None, kernel_regularizer=regularizer)
                input = tf.layers.batch_normalization(input, training=is_training)
                return tf.nn.relu(input)
            return tf.layers.conv3d(input, channels, filter_size, stride, padding='SAME', activation=tf.nn.relu, kernel_regularizer=regularizer)
        def max_pool (input, filter_size=3, stride=2):
            # halves each spatial dimension
            return tf.layers.max_pooling3d(input, filter_size, stride, padding='SAME')
        def conv_transpose (input, channels, filter_size=4, stride=2):
            # doubles each spatial dimension
            if BN:
                input = tf.layers.conv3d_transpose(input, channels, filter_size, stride, padding='SAME', activation=None, kernel_regularizer=regularizer)
                input = tf.layers.batch_normalization(input, training=is_training)
                return tf.nn.relu(input)
            return tf.layers.conv3d_transpose(input, channels, filter_size, stride, padding='SAME', activation=tf.nn.relu, kernel_regularizer=regularizer)
        # ---- encoder ----
        net = conv(net, 32)
        net = conv(net, 32)
        stack.append(net) # skip at full resolution (1/1)
        net = conv(net, 64)
        net = conv(net, 64)
        net = max_pool(net)
        stack.append(net) # skip at 1/2
        net = conv(net, 128)
        net = conv(net, 128)
        net = max_pool(net)
        stack.append(net) # skip at 1/4
        net = conv(net, 256)
        net = conv(net, 256)
        net = max_pool(net)
        # bottleneck at 1/8
        net = conv(net, 512)
        net = conv(net, 512)
        # ---- decoder ----
        net = conv_transpose(net, 128)
        # back at 1/4: fuse with the matching encoder features
        net = tf.concat([net, stack.pop()], 4)  # axis 4 == channel axis
        net = conv_transpose(net, 64)
        net = conv(net, 64)
        # at 1/2
        net = tf.concat([net, stack.pop()], 4)
        net = conv_transpose(net, 32)
        net = conv(net, 32)
        # at full resolution
        net = tf.concat([net, stack.pop()], 4)
        net = conv(net, 16)
        assert len(stack) == 0  # every skip connection must have been consumed
        return net, 8
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,635 | aaalgo/aardvark | refs/heads/master | /cxray/import.py | #!/usr/bin/env python3
import numpy as np
from sklearn.model_selection import StratifiedKFold
import cv2
from chest import *
import picpac
def load_file (path):
    """Read the file at *path* and return its raw bytes."""
    with open(path, 'rb') as src:
        data = src.read()
    return data
def import_db (path, tasks):
    """Write (image_path, label) pairs into a picpac database.

    Side effects:
      - creates/overwrites the picpac db at `path`
      - writes a companion '<path>.list' CSV of "image_path,label" rows
    Each image is read from disk, downscaled by 2x and stored JPEG-encoded
    with a float label.
    """
    with open(path + '.list', 'w') as f:
        db = picpac.Writer(path, picpac.OVERWRITE)
        # NOTE(review): tqdm is not imported in this file -- presumably
        # re-exported by `from chest import *`; verify.
        for p, l in tqdm(list(tasks)):
            f.write('%s,%d\n' % (p, l))
            image = cv2.imread(p, -1)
            image = cv2.resize(image, None, fx=0.5, fy=0.5)
            # tobytes() replaces ndarray.tostring(), which is deprecated and
            # removed in recent NumPy releases; output is byte-identical.
            image_buffer = cv2.imencode('.jpg', image)[1].tobytes()
            db.append(float(l), image_buffer)
# Build the (path, label) lists from the NIH ChestX-ray14 metadata CSV,
# keeping only images that carry exactly one finding.
X = []
Y = []
with open('data/Data_Entry_2017.csv', 'r') as f:
    f.readline()  # skip the CSV header row
    for l in f:
        bname, labels, _ = l.strip().split(',', 2)
        labels = [x.strip() for x in labels.split('|')]
        if len(labels) != 1:
            # multi-label images are excluded from this single-label task
            continue
        label = labels[0]
        # LABEL_LOOKUP / image_path come from `from chest import *` -- assumed; TODO confirm
        l = LABEL_LOOKUP.get(label, -1)
        path = image_path(bname)
        if path is None or l == -1:
            continue
        X.append(path)
        Y.append(l)
        #db.append(l, load_file(path))
print('Found %d images.' % len(X))
X = np.array(X)
Y = np.array(Y)
# stratified 5-fold split; only the FIRST fold is materialized (note the break)
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=2017)
for train_index, val_index in skf.split(np.zeros(len(X)), Y):
    import_db('scratch/train.db', zip(X[train_index], Y[train_index]))
    import_db('scratch/val.db', zip(X[val_index], Y[val_index]))
    break
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,636 | aaalgo/aardvark | refs/heads/master | /zoo/sss/FC_DenseNet_Tiramisu.py | from __future__ import division
import os,time,cv2
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
def preact_conv(inputs, n_filters, kernel_size=[3, 3], dropout_p=0.2):
    """
    Pre-activation layer for DenseNets.

    Applies, in order: BatchNormalization, ReLU, Convolution, and Dropout
    (only when dropout_p > 0).
    """
    activated = tf.nn.relu(slim.batch_norm(inputs, fused=True))
    out = slim.conv2d(activated, n_filters, kernel_size,
                      activation_fn=None, normalizer_fn=None)
    if dropout_p != 0.0:
        out = slim.dropout(out, keep_prob=(1.0 - dropout_p))
    return out
def DenseBlock(stack, n_layers, growth_rate, dropout_p, scope=None):
    """
    Dense block for DenseNet and FC-DenseNet.

    Repeatedly applies preact_conv, concatenating each new set of feature
    maps onto the running stack along the channel axis.

    Arguments:
      stack: input 4D tensor
      n_layers: number of internal layers
      growth_rate: number of feature maps produced per internal layer
    Returns:
      (stack, new_features): the grown stack, and a 4D tensor holding only
      the feature maps created inside this block.
    """
    with tf.name_scope(scope) as sc:
        created = []
        for _ in range(n_layers):
            maps = preact_conv(stack, growth_rate, dropout_p=dropout_p)
            created.append(maps)
            stack = tf.concat([stack, maps], axis=-1)
        return stack, tf.concat(created, axis=-1)
def TransitionDown(inputs, n_filters, dropout_p=0.2, scope=None):
    """
    Transition Down (TD) for FC-DenseNet: a 1x1 pre-activation conv
    (BN + ReLU + conv) followed by 2x2 max pooling.
    """
    with tf.name_scope(scope) as sc:
        reduced = preact_conv(inputs, n_filters, kernel_size=[1, 1],
                              dropout_p=dropout_p)
        return slim.pool(reduced, [2, 2], stride=[2, 2], pooling_type='MAX')
def TransitionUp(block_to_upsample, skip_connection, n_filters_keep, scope=None):
    """
    Transition Up (TU) for FC-DenseNet: upsample block_to_upsample by a
    factor of 2 with a transposed conv, then concatenate it with the skip
    connection along the channel axis.
    """
    with tf.name_scope(scope) as sc:
        upsampled = slim.conv2d_transpose(block_to_upsample, n_filters_keep,
                                          kernel_size=[3, 3], stride=[2, 2],
                                          activation_fn=None)
        return tf.concat([upsampled, skip_connection], axis=-1)
def build_fc_densenet(inputs, num_classes, preset_model='FC-DenseNet56', n_filters_first_conv=48, n_pool=5, growth_rate=12, n_layers_per_block=4, dropout_p=0.2, scope=None):
    """
    Builds the FC-DenseNet (Tiramisu) model.
    Arguments:
      inputs: the input tensor
      num_classes: number of output classes
      preset_model: one of 'FC-DenseNet56' / 'FC-DenseNet67' / 'FC-DenseNet103'.
        NOTE: the preset OVERRIDES the n_pool / growth_rate / n_layers_per_block
        arguments below; any other value raises ValueError.
      n_filters_first_conv: number of filters for the first convolution applied
      n_pool: number of pooling layers = number of transition down = number of transition up
      growth_rate: number of new feature maps created by each layer in a dense block
      n_layers_per_block: number of layers per block. Can be an int or a list of size 2 * n_pool + 1
      dropout_p: dropout rate applied after each convolution (0. for not using)
    Returns:
      FC-DenseNet logits tensor (no softmax is applied, despite the section label)
    """
    if preset_model == 'FC-DenseNet56':
        n_pool=5
        growth_rate=12
        n_layers_per_block=4
    elif preset_model == 'FC-DenseNet67':
        n_pool=5
        growth_rate=16
        n_layers_per_block=5
    elif preset_model == 'FC-DenseNet103':
        n_pool=5
        growth_rate=16
        n_layers_per_block=[4, 5, 7, 10, 12, 15, 12, 10, 7, 5, 4]
    else:
        raise ValueError("Unsupported FC-DenseNet model '%s'. This function only supports FC-DenseNet56, FC-DenseNet67, and FC-DenseNet103" % (preset_model))
    # normalize n_layers_per_block to a list: one entry per dense block
    # (n_pool down blocks + 1 bottleneck + n_pool up blocks)
    if type(n_layers_per_block) == list:
        assert (len(n_layers_per_block) == 2 * n_pool + 1)
    elif type(n_layers_per_block) == int:
        n_layers_per_block = [n_layers_per_block] * (2 * n_pool + 1)
    else:
        raise ValueError
    with tf.variable_scope(scope, preset_model, [inputs]) as sc:
        #####################
        # First Convolution #
        #####################
        # We perform a first convolution.
        stack = slim.conv2d(inputs, n_filters_first_conv, [3, 3], scope='first_conv', activation_fn=None)
        n_filters = n_filters_first_conv
        #####################
        # Downsampling path #
        #####################
        skip_connection_list = []
        for i in range(n_pool):
            # Dense Block
            stack, _ = DenseBlock(stack, n_layers_per_block[i], growth_rate, dropout_p, scope='denseblock%d' % (i+1))
            n_filters += growth_rate * n_layers_per_block[i]
            # At the end of the dense block, the current stack is stored in the skip_connections list
            skip_connection_list.append(stack)
            # Transition Down
            stack = TransitionDown(stack, n_filters, dropout_p, scope='transitiondown%d'%(i+1))
        # reverse so skip_connection_list[0] is the deepest skip (consumed first on the way up)
        skip_connection_list = skip_connection_list[::-1]
        #####################
        #     Bottleneck    #
        #####################
        # Dense Block
        # We will only upsample the new feature maps
        stack, block_to_upsample = DenseBlock(stack, n_layers_per_block[n_pool], growth_rate, dropout_p, scope='denseblock%d' % (n_pool + 1))
        #######################
        #   Upsampling path   #
        #######################
        for i in range(n_pool):
            # Transition Up ( Upsampling + concatenation with the skip connection)
            n_filters_keep = growth_rate * n_layers_per_block[n_pool + i]
            stack = TransitionUp(block_to_upsample, skip_connection_list[i], n_filters_keep, scope='transitionup%d' % (n_pool + i + 1))
            # Dense Block
            # We will only upsample the new feature maps
            stack, block_to_upsample = DenseBlock(stack, n_layers_per_block[n_pool + i + 1], growth_rate, dropout_p, scope='denseblock%d' % (n_pool + i + 2))
        #####################
        #      Softmax      #
        #####################
        net = slim.conv2d(stack, num_classes, [1, 1], activation_fn=None, scope='logits')
        return net
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,637 | aaalgo/aardvark | refs/heads/master | /kitti2d/kitti.py | #!/usr/bin/env python3
import sys
import os
import cv2
import numpy as np
class Object:
    """Plain attribute container for one KITTI label record.

    Attributes (cat, trunc, occl, alpha, bbox, dim, loc, rot) are attached
    dynamically by load_label().
    """
    def __init__ (self):
        pass
def load_label (path):
    """Parse a KITTI-format 2D label file into a list of Object records.

    Each line holds 15 space-separated fields:
    category, truncation, occlusion, alpha, bbox(4), dimensions(3),
    location(3), rotation_y.
    """
    records = []
    with open(path, 'r') as f:
        for raw in f:
            fields = raw.strip().split(' ')
            obj = Object()
            obj.cat = fields[0]
            obj.trunc = float(fields[1])
            obj.occl = int(fields[2])
            obj.alpha = float(fields[3])
            obj.bbox = [float(v) for v in fields[4:8]]   # left, top, right, bottom
            obj.dim = [float(v) for v in fields[8:11]]   # height, width, length
            obj.loc = [float(v) for v in fields[11:14]]  # camera-frame x, y, z
            obj.rot = float(fields[14])
            records.append(obj)
    return records
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,638 | aaalgo/aardvark | refs/heads/master | /mura/import14.py | #!/usr/bin/env python3
import os
import picpac
# MURA body-part directory name -> part index.
# The picpac record label is computed as: part_index * 2 + abnormality_bit.
PARTS = {
    'XR_ELBOW': 0,
    'XR_FINGER': 1,
    'XR_FOREARM': 2,
    'XR_HAND': 3,
    'XR_HUMERUS': 4,
    'XR_SHOULDER': 5,
    'XR_WRIST': 6
}
def load_file (path):
    """Return the raw bytes of the file at *path*."""
    with open(path, 'rb') as fh:
        return fh.read()
def import_db (db_path, list_path):
    """Import MURA images listed in *list_path* into a picpac db at *db_path*.

    Each list line is a relative path like
    'MURA-v1.1/<part>/.../study1_positive/image1.png'; the stored label
    encodes both the body part and the abnormality:
        label = PARTS[part] * 2 + (1 if positive else 0)

    Raises:
        ValueError: if a path is neither 'positive' nor 'negative'.
        KeyError: if the body-part component is not in PARTS.
    (Explicit exceptions replace the original `assert`s, which silently
    disappear under `python -O`.)
    """
    db = picpac.Writer(db_path, picpac.OVERWRITE)
    with open(list_path, 'r') as f:
        for line in f:
            path = line.strip()
            part = path.split('/')[2]
            if 'positive' in path:
                l = 1
            elif 'negative' in path:
                l = 0
            else:
                raise ValueError('path is neither positive nor negative: %s' % path)
            if part not in PARTS:
                raise KeyError('unknown body part %r in %s' % (part, path))
            k = PARTS[part]
            label = k * 2 + l
            db.append(label, load_file('data/' + path), path.encode('ascii'))
# Earlier runs imported the full train/val splits; kept for reference:
#import_db('scratch/train.db', 'train.list')
#import_db('scratch/val.db', 'val.list')
# Current run only (re)builds the held-out validation db.
import_db('scratch/val0.db', 'val0.list')
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,639 | aaalgo/aardvark | refs/heads/master | /rpn3d.py | #!/usr/bin/env python3
import os
import math
import sys
from abc import abstractmethod
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils
import aardvark
import cv2
from tf_utils import *
import cpp
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('rpn_priors', 'rpn_priors', 'param prior config file')
flags.DEFINE_integer('rpn_params', 3, 'number of parameters per shape')
flags.DEFINE_integer('rpn_stride', 1, 'downsize factor of rpn output')
flags.DEFINE_float('rpn_logits_weight', 1.0, 'loss weight')
flags.DEFINE_float('rpn_params_weight', 1.0, 'loss weight')
class BasicRPN3D:
    """Base class for a 3-D region-proposal network (RPN) head.

    Subclasses must implement rpn_backbone / rpn_logits / rpn_params;
    build_rpn then wires them to ground-truth placeholders and registers
    the objectness and parameter-regression losses.
    """
    def __init__ (self):
        priors = []
        # read in priors
        # what RPN estimates is the delta between priors and the real
        # regression target.
        if os.path.exists(FLAGS.rpn_priors):
            with open(FLAGS.rpn_priors, 'r') as f:
                for l in f:
                    if l[0] == '#':  # skip comment lines
                        continue
                    vs = [float(v) for v in l.strip().split(' ')]
                    assert len(vs) == FLAGS.rpn_params
                    priors.append(vs)
                    pass
                pass
            pass
        if len(priors) == 0:
            # no prior file: fall back to a single all-ones prior
            priors.append([1.0] * FLAGS.rpn_params)
            pass
        aardvark.print_red("PRIORS %s" % str(priors))
        # priors: float32 array of shape (n_priors, n_params)
        self.priors = np.array(priors, dtype=np.float32)
        pass
    def rpn_backbone (self, volume, is_training, stride):
        # abstract: must return the shared backbone feature volume
        assert False
    def rpn_logits (self, net, is_training, channels):
        # abstract: must return per-voxel objectness logits
        assert False
    def rpn_params (self, net, is_training, channels):
        # abstract: must return per-voxel shape-parameter regressions
        assert False
    def rpn_generate_shapes (self, shape, anchor_params, priors, n_priors):
        # abstract: decode predicted parameters back into shapes
        assert False
    def build_rpn (self, volume, is_training, shape=None):
        # volume: input volume tensor
        # shape must be provided and divisible by the RPN output stride
        Z,Y,X = shape
        assert max(Z % FLAGS.rpn_stride, Y % FLAGS.rpn_stride, X % FLAGS.rpn_stride) == 0
        oZ = Z // FLAGS.rpn_stride
        oY = Y // FLAGS.rpn_stride
        oX = X // FLAGS.rpn_stride
        n_priors = self.priors.shape[0]
        n_params = self.priors.shape[1]
        # ground-truth placeholders: one value per output voxel per prior
        self.gt_anchors = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
        self.gt_anchors_weight = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
        # parameter of that location
        self.gt_params = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors, n_params))
        self.gt_params_weight = tf.placeholder(tf.float32, shape=(None, oZ, oY, oX, n_priors))
        self.backbone = self.rpn_backbone(volume, is_training, FLAGS.rpn_stride)
        logits = self.rpn_logits(self.backbone, is_training, n_priors)
        logits = tf.identity(logits, name='logits')
        self.logits = logits
        self.probs = tf.sigmoid(logits, name='probs')
        params = self.rpn_params(self.backbone, is_training, n_priors * n_params)
        params = tf.identity(params, name='params')
        self.params = params
        # setup losses
        # 1. losses for logits
        logits1 = tf.reshape(logits, (-1,))
        gt_anchors = tf.reshape(self.gt_anchors, (-1,))
        gt_anchors_weight = tf.reshape(self.gt_anchors_weight, (-1,))
        xe = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits1, labels=tf.cast(gt_anchors, tf.float32))
        # weighted mean; the epsilon guards against an all-zero weight mask
        xe = tf.reduce_sum(xe * gt_anchors_weight) / (tf.reduce_sum(gt_anchors_weight) + 0.00001)
        xe = tf.identity(xe, name='xe')
        # NOTE(review): if self.metrics does not exist this appends to a
        # throwaway list and the metric is silently dropped -- subclasses are
        # presumably expected to define self.metrics first; confirm.
        getattr(self, 'metrics', []).append(xe)
        tf.losses.add_loss(xe * FLAGS.rpn_logits_weight)
        # 2. losses for parameters
        priors = tf.constant(self.priors[np.newaxis, :, :], dtype=tf.float32)
        params = tf.reshape(params, (-1, n_priors, n_params))
        gt_params = tf.reshape(self.gt_params, (-1, n_priors, n_params))
        # regression targets are normalized by the priors before the Huber loss
        l1 = tf.losses.huber_loss(params, gt_params / priors, reduction=tf.losses.Reduction.NONE, loss_collection=None)
        l1 = tf.reduce_sum(l1, axis=2)
        # l1: ? * n_priors
        l1 = tf.reshape(l1, (-1,))
        gt_params_weight = tf.reshape(self.gt_params_weight, (-1,))
        l1 = tf.reduce_sum(l1 * gt_params_weight) / (tf.reduce_sum(gt_params_weight) + 0.00001)
        l1 = tf.identity(l1, name='l1')
        getattr(self, 'metrics', []).append(l1)
        tf.losses.add_loss(l1 * FLAGS.rpn_params_weight)
        pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,640 | aaalgo/aardvark | refs/heads/master | /zoo/fuck_slim.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils, resnet_v2
def patch_resnet_arg_scope (is_training):
    """Return a replacement for slim's resnet_arg_scope with `is_training`
    baked in, so batch norm and dropout follow the caller's training flag.
    The returned factory matches the (weight_decay) signature slim expects.
    """
    def resnet_arg_scope (weight_decay=0.0001):
        print('\033[91m' + 'Using patched resnet arg scope' + '\033[0m')
        # batch-norm hyperparameters tuned by the author (see comments below)
        batch_norm_decay=0.9
        batch_norm_epsilon=5e-4
        batch_norm_scale=False
        activation_fn=tf.nn.relu
        use_batch_norm=True
        batch_norm_params = {
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
            'scale': batch_norm_scale,
            'updates_collections': tf.GraphKeys.UPDATE_OPS,
            # don't know what it does, but seems improves cifar10 a bit
            #'fused': None, # Use fused batch norm if possible.
            'is_training': is_training
        }
        with slim.arg_scope(
                [slim.conv2d, slim.conv2d_transpose],
                weights_regularizer=slim.l2_regularizer(weight_decay),
                #Removing following 2 improves cifar10 performance
                #weights_initializer=slim.variance_scaling_initializer(),
                activation_fn=activation_fn,
                normalizer_fn=slim.batch_norm if use_batch_norm else None,
                normalizer_params=batch_norm_params):
            with slim.arg_scope([slim.batch_norm], **batch_norm_params):
                with slim.arg_scope([slim.max_pool2d], padding='SAME'):
                    with slim.arg_scope([slim.dropout], is_training=is_training) as arg_sc:
                        return arg_sc
    return resnet_arg_scope
def patch (is_training):
    """Swap the arg scope of every registered resnet/densenet model for the
    patched one returned by patch_resnet_arg_scope(is_training)."""
    scope_fn = patch_resnet_arg_scope(is_training)
    for name in list(nets_factory.arg_scopes_map.keys()):
        if 'resnet_' in name or 'densenet' in name:
            nets_factory.arg_scopes_map[name] = scope_fn
def resnet_v2_14_nmist (inputs,
                        num_classes=None,
                        is_training=True,
                        global_pool=True,
                        output_stride=None,
                        reuse=None,
                        include_root_block=False,
                        spatial_squeeze=True,
                        scope='resnet_v2_14_nist',
                        reduction=2):
    """Small ResNet v2 (3 blocks x 2 units) for MNIST-sized inputs.

    `reduction` divides the standard channel widths (default: half width).
    NOTE(review): the function name says 'nmist' and the scope default says
    'nist' -- both look like typos for 'mnist'. Renaming would break
    checkpoints keyed on the scope string, so flagging only.
    """
    resnet_v2_block = resnet_v2.resnet_v2_block
    blocks = [
        resnet_v2_block('block1', base_depth=64//reduction, num_units=2, stride=2),
        resnet_v2_block('block2', base_depth=128//reduction, num_units=2, stride=2),
        resnet_v2_block('block3', base_depth=256//reduction, num_units=2, stride=1),
    ]
    return resnet_v2.resnet_v2(
        inputs,
        blocks,
        num_classes,
        is_training,
        global_pool,
        output_stride,
        include_root_block=include_root_block,
        spatial_squeeze=spatial_squeeze,
        reuse=reuse,
        scope=scope)
def resnet_v2_18 (inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  reuse=None,
                  include_root_block=True,
                  spatial_squeeze=True,
                  scope='resnet_v2_18',
                  reduction=1):
    """ResNet v2 with the 18-layer layout (4 blocks x 2 units).

    `reduction` divides the standard channel widths (1 = full width).
    See resnet_v2.resnet_v2() for argument and return descriptions.
    """
    resnet_v2_block = resnet_v2.resnet_v2_block
    blocks = [
        resnet_v2_block('block1', base_depth=64//reduction, num_units=2, stride=2),
        resnet_v2_block('block2', base_depth=128//reduction, num_units=2, stride=2),
        resnet_v2_block('block3', base_depth=256//reduction, num_units=2, stride=2),
        resnet_v2_block('block4', base_depth=512//reduction, num_units=2, stride=1),
    ]
    return resnet_v2.resnet_v2(
        inputs,
        blocks,
        num_classes,
        is_training,
        global_pool,
        output_stride,
        include_root_block=include_root_block,
        spatial_squeeze=spatial_squeeze,
        reuse=reuse,
        scope=scope)
def resnet_v2_18_cifar (inputs, num_classes=None, is_training=True, global_pool=False, output_stride=None,
                        reuse=None, scope='resnet_v2_18_cifar', spatial_squeeze=True):
    """ResNet-v2-18 configured for CIFAR-sized inputs: no root block, no
    global pooling by default."""
    return resnet_v2_18(
        inputs,
        num_classes,
        is_training,
        global_pool=global_pool,
        output_stride=output_stride,
        reuse=reuse,
        include_root_block=False,
        scope=scope,
        spatial_squeeze=spatial_squeeze)
def resnet_v2_18_slim (inputs, num_classes=None, is_training=True, global_pool=True, output_stride=None,
                       reuse=None, scope='resnet_v2_18_slim', spatial_squeeze=True):
    """Half-width ResNet-v2-18 (reduction=2) with the standard root block."""
    return resnet_v2_18(
        inputs,
        num_classes,
        is_training,
        global_pool=global_pool,
        output_stride=output_stride,
        reuse=reuse,
        include_root_block=True,
        scope=scope,
        reduction=2,
        spatial_squeeze=spatial_squeeze)
def resnet_v2_50_slim(inputs,
                      num_classes=None,
                      is_training=True,
                      global_pool=True,
                      output_stride=None,
                      spatial_squeeze=True,
                      reuse=None,
                      scope='resnet_v2_50'):
    """Half-width ResNet-50 model of [1] (all base depths divided by 2).
    See resnet_v2() for arg and return description."""
    resnet_v2_block = resnet_v2.resnet_v2_block
    reduction=2  # halves the channel widths relative to the standard ResNet-50
    blocks = [
        resnet_v2_block('block1', base_depth=64//reduction, num_units=3, stride=2),
        resnet_v2_block('block2', base_depth=128//reduction, num_units=4, stride=2),
        resnet_v2_block('block3', base_depth=256//reduction, num_units=6, stride=2),
        resnet_v2_block('block4', base_depth=512//reduction, num_units=3, stride=1),
    ]
    return resnet_v2.resnet_v2(inputs, blocks, num_classes, is_training=is_training,
                               global_pool=global_pool, output_stride=output_stride,
                               include_root_block=True, spatial_squeeze=spatial_squeeze,
                               reuse=reuse, scope=scope)
def extend ():
    """Register the custom resnet variants defined in this module with
    slim's nets_factory (network constructor + stock resnet arg scope)."""
    custom = {
        'resnet_v2_14_nmist': resnet_v2_14_nmist,
        'resnet_v2_18': resnet_v2_18,
        'resnet_v2_18_cifar': resnet_v2_18_cifar,
        'resnet_v2_18_slim': resnet_v2_18_slim,
        'resnet_v2_50_slim': resnet_v2_50_slim,
    }
    for name, builder in custom.items():
        nets_factory.networks_map[name] = builder
        nets_factory.arg_scopes_map[name] = resnet_v2.resnet_arg_scope
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,641 | aaalgo/aardvark | refs/heads/master | /zoo/sss/AdapNet.py | # coding=utf-8
import tensorflow as tf
from tensorflow.contrib import slim
import numpy as np
import resnet_v2
import os, sys
def Upsampling(inputs,scale):
return tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*scale, tf.shape(inputs)[2]*scale])
def ConvBlock(inputs, n_filters, kernel_size=[3, 3], stride=1):
"""
Basic conv block for Encoder-Decoder
Apply successivly Convolution, BatchNormalization, ReLU nonlinearity
"""
net = tf.nn.relu(slim.batch_norm(inputs, fused=True))
net = slim.conv2d(net, n_filters, kernel_size, stride=stride, activation_fn=None, normalizer_fn=None)
return net
def ResNetBlock_1(inputs, filters_1, filters_2):
net = tf.nn.relu(slim.batch_norm(inputs, fused=True))
net = slim.conv2d(net, filters_1, [1, 1], activation_fn=None, normalizer_fn=None)
net = tf.nn.relu(slim.batch_norm(net, fused=True))
net = slim.conv2d(net, filters_1, [3, 3], activation_fn=None, normalizer_fn=None)
net = tf.nn.relu(slim.batch_norm(net, fused=True))
net = slim.conv2d(net, filters_2, [1, 1], activation_fn=None, normalizer_fn=None)
net = tf.add(inputs, net)
return net
def ResNetBlock_2(inputs, filters_1, filters_2, s=1):
net_1 = tf.nn.relu(slim.batch_norm(inputs, fused=True))
net_1 = slim.conv2d(net_1, filters_1, [1, 1], stride=s, activation_fn=None, normalizer_fn=None)
net_1 = tf.nn.relu(slim.batch_norm(net_1, fused=True))
net_1 = slim.conv2d(net_1, filters_1, [3, 3], activation_fn=None, normalizer_fn=None)
net_1 = tf.nn.relu(slim.batch_norm(net_1, fused=True))
net_1 = slim.conv2d(net_1, filters_2, [1, 1], activation_fn=None, normalizer_fn=None)
net_2 = tf.nn.relu(slim.batch_norm(inputs, fused=True))
net_2 = slim.conv2d(net_2, filters_2, [1, 1], stride=s, activation_fn=None, normalizer_fn=None)
net = tf.add(net_1, net_2)
return net
def MultiscaleBlock_1(inputs, filters_1, filters_2, filters_3, p, d):
net = tf.nn.relu(slim.batch_norm(inputs, fused=True))
net = slim.conv2d(net, filters_1, [1, 1], activation_fn=None, normalizer_fn=None)
scale_1 = tf.nn.relu(slim.batch_norm(net, fused=True))
scale_1 = slim.conv2d(scale_1, filters_3 // 2, [3, 3], rate=p, activation_fn=None, normalizer_fn=None)
scale_2 = tf.nn.relu(slim.batch_norm(net, fused=True))
scale_2 = slim.conv2d(scale_2, filters_3 // 2, [3, 3], rate=d, activation_fn=None, normalizer_fn=None)
net = tf.concat((scale_1, scale_2), axis=-1)
net = tf.nn.relu(slim.batch_norm(net, fused=True))
net = slim.conv2d(net, filters_2, [1, 1], activation_fn=None, normalizer_fn=None)
net = tf.add(inputs, net)
return net
def MultiscaleBlock_2(inputs, filters_1, filters_2, filters_3, p, d):
net_1 = tf.nn.relu(slim.batch_norm(inputs, fused=True))
net_1 = slim.conv2d(net_1, filters_1, [1, 1], activation_fn=None, normalizer_fn=None)
scale_1 = tf.nn.relu(slim.batch_norm(net_1, fused=True))
scale_1 = slim.conv2d(scale_1, filters_3 // 2, [3, 3], rate=p, activation_fn=None, normalizer_fn=None)
scale_2 = tf.nn.relu(slim.batch_norm(net_1, fused=True))
scale_2 = slim.conv2d(scale_2, filters_3 // 2, [3, 3], rate=d, activation_fn=None, normalizer_fn=None)
net_1 = tf.concat((scale_1, scale_2), axis=-1)
net_1 = tf.nn.relu(slim.batch_norm(net_1, fused=True))
net_1 = slim.conv2d(net_1, filters_2, [1, 1], activation_fn=None, normalizer_fn=None)
net_2 = tf.nn.relu(slim.batch_norm(inputs, fused=True))
net_2 = slim.conv2d(net_2, filters_2, [1, 1], activation_fn=None, normalizer_fn=None)
net = tf.add(net_1, net_2)
return net
def build_adaptnet(inputs, num_classes):
"""
Builds the AdaptNet model.
Arguments:
inputs: The input tensor=
preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
num_classes: Number of classes
Returns:
AdaptNet model
"""
net = ConvBlock(inputs, n_filters=64, kernel_size=[3, 3])
net = ConvBlock(net, n_filters=64, kernel_size=[7, 7], stride=2)
net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
net = ResNetBlock_2(net, filters_1=64, filters_2=256, s=1)
net = ResNetBlock_1(net, filters_1=64, filters_2=256)
net = ResNetBlock_1(net, filters_1=64, filters_2=256)
net = ResNetBlock_2(net, filters_1=128, filters_2=512, s=2)
net = ResNetBlock_1(net, filters_1=128, filters_2=512)
net = ResNetBlock_1(net, filters_1=128, filters_2=512)
skip_connection = ConvBlock(net, n_filters=12, kernel_size=[1, 1])
net = MultiscaleBlock_1(net, filters_1=128, filters_2=512, filters_3=64, p=1, d=2)
net = ResNetBlock_2(net, filters_1=256, filters_2=1024, s=2)
net = ResNetBlock_1(net, filters_1=256, filters_2=1024)
net = MultiscaleBlock_1(net, filters_1=256, filters_2=1024, filters_3=64, p=1, d=2)
net = MultiscaleBlock_1(net, filters_1=256, filters_2=1024, filters_3=64, p=1, d=4)
net = MultiscaleBlock_1(net, filters_1=256, filters_2=1024, filters_3=64, p=1, d=8)
net = MultiscaleBlock_1(net, filters_1=256, filters_2=1024, filters_3=64, p=1, d=16)
net = MultiscaleBlock_2(net, filters_1=512, filters_2=2048, filters_3=512, p=2, d=4)
net = MultiscaleBlock_1(net, filters_1=512, filters_2=2048, filters_3=512, p=2, d=8)
net = MultiscaleBlock_1(net, filters_1=512, filters_2=2048, filters_3=512, p=2, d=16)
net = ConvBlock(net, n_filters=12, kernel_size=[1, 1])
net = Upsampling(net, scale=2)
net = tf.add(skip_connection, net)
net = Upsampling(net, scale=8)
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
return net
def mean_image_subtraction(inputs, means=[123.68, 116.78, 103.94]):
inputs=tf.to_float(inputs)
num_channels = inputs.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(axis=3, num_or_size_splits=num_channels, value=inputs)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(axis=3, values=channels) | {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,642 | aaalgo/aardvark | refs/heads/master | /pyramid.py | #!/usr/bin/env python3
import numpy as np
import cv2
def pyramid_helper (canvas, mask, rois, canvas_offset, image, horizontal, threshold):
    """Recursively paint successively-halved copies of `image` into `canvas`.

    Each level is blitted at the top-left of the current canvas view, its
    footprint recorded in `mask` (value = index into `rois`) and appended to
    `rois` as [x0, y0, w, h] in full-canvas coordinates.  The canvas view is
    then narrowed (alternating right edge / bottom edge via `horizontal`)
    and the half-size image is placed there, until the shorter side drops
    below `threshold`.  Mutates canvas, mask and rois in place.
    """
    H, W = canvas.shape[:2]
    h, w = image.shape[:2]
    if min(h, w) < threshold:
        return
    x0, y0 = canvas_offset
    canvas[:h, :w, :] = image
    mask[:h, :w] = len(rois)  # current depth index, before the append below
    rois.append([x0, y0, w, h])
    image = cv2.resize(image, None, fx=0.5, fy=0.5)
    h2, w2 = image.shape[:2]
    if horizontal:
        # shift the view so the next level lands flush with the right edge
        o = W-w2
        canvas = canvas[:,o:,:]
        mask = mask[:,o:]
        x0 += o
    else:
        # shift the view so the next level lands flush with the bottom edge
        o = H-h2
        canvas = canvas[o:, :, :]
        mask = mask[o:, :]
        y0 += o
    pyramid_helper(canvas, mask, rois, (x0, y0), image, not horizontal, threshold)
    pass
class Pyramid:
    """Image pyramid packed into a single 'spiral' canvas.

    Attributes set by __init__:
      image   -- the original (pre-resize) input image
      pyramid -- canvas holding all pyramid levels side by side
      mask    -- int32 map of which level owns each canvas pixel (index into rois)
      rois    -- rois[depth] = [x, y, w, h] of each level inside the canvas;
                 rois[0] is a dummy entry meaning 'background'
    """
    def __init__ (self, image, threshold=64, stride=16, min_size=600):
        self.image = image
        # returns canvas, mask, rois
        # canvas, the image sprial
        # mask: the depth of each pixel
        # rois[depth] = (x, y, W, H)
        h, w = image.shape[:2]
        m = min(h, w)
        if m < min_size:
            # upscale small inputs so the shortest side reaches min_size
            ratio = min_size / m
            image = cv2.resize(image, None, fx=ratio, fy=ratio)
            h, w = image.shape[:2]
        C = 1
        if len(image.shape) == 3:
            C = image.shape[2]
        #else:
        #    image = np.reshape(a, (H, W, 1))
        # round canvas dims up to a multiple of stride; double the width to
        # leave room for the recursively placed smaller levels
        H = (h + stride -1) // stride * stride
        W = (w * 2 + stride - 1) // stride * stride
        canvas = np.zeros((H, W, C), image.dtype)
        mask = np.zeros((H, W), np.int32)
        rois = [(0, 0, 0, 0)] # rois[0] is not used
        pyramid_helper(canvas, mask, rois, (0, 0), image, True, threshold)
        if C == 1:
            canvas = canvas[:, :, 0]
        self.pyramid = canvas
        self.mask = mask
        self.rois = rois
        pass
    def find_depth (self, box):
        # majority vote: return the pyramid level owning most pixels under box
        x1, y1, x2, y2 = np.round(box).astype(np.int32)
        if x1 < 0:
            x1 = 0
        if y1 < 0:
            y1 = 0
        roi = self.mask[y1:(y2+1), x1:(x2+1)]
        uniq, cnts = np.unique(roi, return_counts=True)
        return uniq[np.argmax(cnts)]
    def combine (self, boxes):
        # map boxes detected on the packed canvas back to image coordinates,
        # dropping boxes that fall mostly on background (depth 0)
        R = []
        h, w = self.image.shape[:2]
        for box in boxes:
            d = self.find_depth(box)
            if d == 0:
                continue
            x0, y0, w0, h0 = self.rois[d]
            x1, y1, x2, y2 = box
            x1 = (x1 - x0) * w / w0
            x2 = (x2 - x0) * w / w0
            y1 = (y1 - y0) * h / h0
            y2 = (y2 - y0) * h / h0
            R.append([x1, y1, x2, y2])
            pass
        return R
if __name__ == '__main__':
    # Smoke test: build a pyramid from a sample image and dump the sprite
    # plus its depth mask (normalized to 0-255) for visual inspection.
    image = cv2.imread('lenna.png', -1)
    sp = Pyramid(image, 16)
    cv2.imwrite('pyramid.png', sp.pyramid)
    cv2.normalize(sp.mask, sp.mask, 0, 255, cv2.NORM_MINMAX)
    cv2.imwrite('pyramid_mask.png', sp.mask)
    pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,643 | aaalgo/aardvark | refs/heads/master | /zoo/sss/MobileUNet.py | import os,time,cv2
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
def ConvBlock(inputs, n_filters, kernel_size=[3, 3]):
    """Pointwise convolution -> fused batch norm -> ReLU.

    NOTE: `kernel_size` is accepted for API symmetry with the other blocks
    but the convolution here is always 1x1 (pointwise).
    """
    conv = slim.conv2d(inputs, n_filters, kernel_size=[1, 1], activation_fn=None)
    return tf.nn.relu(slim.batch_norm(conv, fused=True))
def DepthwiseSeparableConvBlock(inputs, n_filters, kernel_size=[3, 3]):
    """Depthwise-separable conv block for MobileNets.

    A depthwise 3x3 convolution followed by a pointwise 1x1 projection to
    n_filters channels; each stage is followed by fused batch norm and ReLU.
    """
    # Depthwise stage: num_outputs=None makes slim skip the pointwise part.
    depthwise = slim.separable_convolution2d(inputs, num_outputs=None, depth_multiplier=1, kernel_size=[3, 3], activation_fn=None)
    depthwise = tf.nn.relu(slim.batch_norm(depthwise, fused=True))
    # Pointwise stage projects to the requested channel count.
    pointwise = slim.conv2d(depthwise, n_filters, kernel_size=[1, 1], activation_fn=None)
    return tf.nn.relu(slim.batch_norm(pointwise, fused=True))
def conv_transpose_block(inputs, n_filters, kernel_size=[3, 3]):
    """Stride-2 transposed convolution -> batch norm -> ReLU (2x upsampling).

    NOTE: `kernel_size` is accepted for API symmetry; the op always uses a
    3x3 kernel with stride 2.
    """
    upsampled = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[2, 2], activation_fn=None)
    return tf.nn.relu(slim.batch_norm(upsampled))
def build_mobile_unet(inputs, preset_model, num_classes):
    """Build the MobileUNet segmentation network.

    inputs:       input image tensor
    preset_model: "MobileUNet" (plain) or "MobileUNet-Skip" (additive skip
                  connections between encoder and decoder stages)
    num_classes:  channel count of the final 1x1 logits layer
    Returns the un-normalized logits tensor.
    """
    if preset_model == "MobileUNet":
        has_skip = False
    elif preset_model == "MobileUNet-Skip":
        has_skip = True
    else:
        raise ValueError("Unsupported MobileUNet model '%s'. This function only supports MobileUNet and MobileUNet-Skip" % (preset_model))

    #####################
    # Downsampling path #
    #####################
    net = ConvBlock(inputs, 64)
    net = DepthwiseSeparableConvBlock(net, 64)

    skips = []
    # Each entry lists the widths of the depthwise-separable convs in a stage;
    # every stage starts with a 2x2 max-pool whose output is saved as a skip.
    for widths in ([128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]):
        net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
        skips.append(net)
        for width in widths:
            net = DepthwiseSeparableConvBlock(net, width)
    # Bottleneck pooling (no skip saved for the deepest level).
    net = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')

    #####################
    # Upsampling path   #
    #####################
    # Each entry: (transpose-conv width, decoder conv widths, matching skip).
    up_stages = [
        (512, [512, 512, 512], skips[3]),
        (512, [512, 512, 256], skips[2]),
        (256, [256, 256, 128], skips[1]),
        (128, [128, 64], skips[0]),
    ]
    for t_width, widths, skip in up_stages:
        net = conv_transpose_block(net, t_width)
        for width in widths:
            net = DepthwiseSeparableConvBlock(net, width)
        if has_skip:
            net = tf.add(net, skip)
    net = conv_transpose_block(net, 64)
    net = DepthwiseSeparableConvBlock(net, 64)
    net = DepthwiseSeparableConvBlock(net, 64)

    #####################
    #      Softmax      #
    #####################
    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,644 | aaalgo/aardvark | refs/heads/master | /zoo/sss/ICNet.py | import tensorflow as tf
from tensorflow.contrib import slim
import numpy as np
import resnet_v2
import os, sys
def Upsampling_by_shape(inputs, feature_map_shape):
    """Bilinearly resize `inputs` to the given (height, width) shape."""
    resized = tf.image.resize_bilinear(inputs, size=feature_map_shape)
    return resized
def Upsampling_by_scale(inputs, scale):
    """Bilinearly upsample `inputs` by an integer `scale` factor."""
    shape = tf.shape(inputs)
    return tf.image.resize_bilinear(inputs, size=[shape[1] * scale, shape[2] * scale])
def ConvUpscaleBlock(inputs, n_filters, kernel_size=[3, 3], scale=2):
    """Transposed convolution (stride 2) followed by fused batch norm and ReLU.

    NOTE: `kernel_size` and `scale` are accepted for API compatibility but
    the op always uses a 3x3 kernel with stride 2.
    """
    deconv = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[2, 2], activation_fn=None)
    return tf.nn.relu(slim.batch_norm(deconv, fused=True))
def ConvBlock(inputs, n_filters, kernel_size=[3, 3]):
    """Convolution -> fused batch norm -> ReLU (Encoder-Decoder basic block)."""
    conv = slim.conv2d(inputs, n_filters, kernel_size, activation_fn=None, normalizer_fn=None)
    return tf.nn.relu(slim.batch_norm(conv, fused=True))
def InterpBlock(net, level, feature_map_shape, pooling_type):
    """One branch of the pyramid pooling module.

    Pools the feature map down to approximately `level` x `level`, projects
    to 512 channels with a 1x1 conv + batch norm + ReLU, then bilinearly
    upsamples back to `feature_map_shape`.

    Fix: `pooling_type` was previously ignored (pooling was hard-coded to
    'MAX'); it is now forwarded to slim.pool.  Callers defaulting to "MAX"
    see no behavior change.
    """
    # Kernel == stride, so the pooled output is feature_map_shape / kernel,
    # i.e. roughly level x level (rounded to the nearest integer).
    kernel_size = [int(np.round(float(feature_map_shape[0]) / float(level))), int(np.round(float(feature_map_shape[1]) / float(level)))]
    stride_size = kernel_size
    net = slim.pool(net, kernel_size, stride=stride_size, pooling_type=pooling_type)
    net = slim.conv2d(net, 512, [1, 1], activation_fn=None)
    net = slim.batch_norm(net, fused=True)
    net = tf.nn.relu(net)
    net = Upsampling_by_shape(net, feature_map_shape)
    return net
def PyramidPoolingModule_ICNet(inputs, feature_map_shape, pooling_type):
    """Build the Pyramid Pooling Module.

    Sums the input with four pooled-and-upsampled branches (pyramid levels
    1, 2, 3 and 6).

    Fix: tf.add takes exactly two tensors; summing a list of tensors
    requires tf.add_n (the old tf.add([...]) call raised at graph build).
    """
    interp_block1 = InterpBlock(inputs, 1, feature_map_shape, pooling_type)
    interp_block2 = InterpBlock(inputs, 2, feature_map_shape, pooling_type)
    interp_block3 = InterpBlock(inputs, 3, feature_map_shape, pooling_type)
    interp_block6 = InterpBlock(inputs, 6, feature_map_shape, pooling_type)
    res = tf.add_n([inputs, interp_block6, interp_block3, interp_block2, interp_block1])
    return res
def CFFBlock(F1, F2, num_classes):
    """Cascade Feature Fusion block of ICNet.

    Upsamples the coarser branch F1 by 2x, producing an auxiliary
    `num_classes`-channel output (F1_out) and fusing the upsampled features
    with a projection of the finer branch F2 (F2_out).

    Fix: tf.add on a list of tensors is invalid; use tf.add_n.
    NOTE(review): F1_big (2048 ch) and F2_proj (512 ch) must have matching
    shapes for the sum — verify the projection widths against the caller.
    """
    F1_big = Upsampling_by_scale(F1, scale=2)
    F1_out = slim.conv2d(F1_big, num_classes, [1, 1], activation_fn=None)
    F1_big = slim.conv2d(F1_big, 2048, [3, 3], rate=2, activation_fn=None)
    F1_big = slim.batch_norm(F1_big, fused=True)
    F2_proj = slim.conv2d(F2, 512, [1, 1], rate=1, activation_fn=None)
    F2_proj = slim.batch_norm(F2_proj, fused=True)
    F2_out = tf.add_n([F1_big, F2_proj])
    F2_out = tf.nn.relu(F2_out)
    return F1_out, F2_out
def build_icnet(inputs, label_size, num_classes, preset_model='ICNet-Res50', pooling_type = "MAX",
                weight_decay=1e-5, is_training=True, pretrained_dir="models"):
    """
    Builds the ICNet model.

    Arguments:
      inputs: The input tensor
      label_size: Size of the final label tensor. We need to know this for proper upscaling
      preset_model: Which model you want to use. Select which ResNet model to use for feature extraction
      num_classes: Number of classes
      pooling_type: Max or Average pooling

    Returns:
      ICNet model and the checkpoint init_fn for the pre-trained ResNet.

    Fixes applied: the pyramid pooling call used the undefined name
    PyramidPoolingModule (helper is PyramidPoolingModule_ICNet); CFFBlock
    was called without its required num_classes argument and with the
    undefined name psp_32; the final concat referenced the undefined
    out_final and lacked an axis.
    """
    inputs_4 = tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*4, tf.shape(inputs)[2]*4])
    inputs_2 = tf.image.resize_bilinear(inputs, size=[tf.shape(inputs)[1]*2, tf.shape(inputs)[2]*2])
    inputs_1 = inputs

    if preset_model == 'ICNet-Res50':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits_32, end_points_32 = resnet_v2.resnet_v2_50(inputs_4, is_training=is_training, scope='resnet_v2_50')
            logits_16, end_points_16 = resnet_v2.resnet_v2_50(inputs_2, is_training=is_training, scope='resnet_v2_50')
            logits_8, end_points_8 = resnet_v2.resnet_v2_50(inputs_1, is_training=is_training, scope='resnet_v2_50')
            resnet_scope='resnet_v2_50'
            # ICNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_50.ckpt'), slim.get_model_variables('resnet_v2_50'))
    elif preset_model == 'ICNet-Res101':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits_32, end_points_32 = resnet_v2.resnet_v2_101(inputs_4, is_training=is_training, scope='resnet_v2_101')
            logits_16, end_points_16 = resnet_v2.resnet_v2_101(inputs_2, is_training=is_training, scope='resnet_v2_101')
            logits_8, end_points_8 = resnet_v2.resnet_v2_101(inputs_1, is_training=is_training, scope='resnet_v2_101')
            resnet_scope='resnet_v2_101'
            # ICNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_101.ckpt'), slim.get_model_variables('resnet_v2_101'))
    elif preset_model == 'ICNet-Res152':
        with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
            logits_32, end_points_32 = resnet_v2.resnet_v2_152(inputs_4, is_training=is_training, scope='resnet_v2_152')
            logits_16, end_points_16 = resnet_v2.resnet_v2_152(inputs_2, is_training=is_training, scope='resnet_v2_152')
            logits_8, end_points_8 = resnet_v2.resnet_v2_152(inputs_1, is_training=is_training, scope='resnet_v2_152')
            resnet_scope='resnet_v2_152'
            # ICNet requires pre-trained ResNet weights
            init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, 'resnet_v2_152.ckpt'), slim.get_model_variables('resnet_v2_152'))
    else:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, ResNet 101, and ResNet 152" % (preset_model))

    feature_map_shape = [int(x / 32.0) for x in label_size]
    block_32 = PyramidPoolingModule_ICNet(end_points_32['pool3'], feature_map_shape=feature_map_shape, pooling_type=pooling_type)

    out_16, block_16 = CFFBlock(block_32, end_points_16['pool3'], num_classes)
    out_8, block_8 = CFFBlock(block_16, end_points_8['pool3'], num_classes)
    out_4 = Upsampling_by_scale(out_8, scale=2)
    out_4 = slim.conv2d(out_4, num_classes, [1, 1], activation_fn=None)

    out_full = Upsampling_by_scale(out_4, scale=2)

    out_full = slim.conv2d(out_full, num_classes, [1, 1], activation_fn=None, scope='logits')

    # Concatenate the side outputs along the channel axis.
    # NOTE(review): the side outputs have different spatial sizes — verify
    # the intended fusion against the ICNet reference before relying on it.
    net = tf.concat([out_16, out_8, out_4, out_full], axis=3)

    return net, init_fn
def mean_image_subtraction(inputs, means=[123.68, 116.78, 103.94]):
    """Subtract per-channel means (ImageNet RGB means by default) from a
    NHWC image batch; raises if len(means) does not match the channels."""
    inputs = tf.to_float(inputs)
    num_channels = inputs.get_shape().as_list()[-1]
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')
    channels = tf.split(axis=3, num_or_size_splits=num_channels, value=inputs)
    shifted = [channel - mean for channel, mean in zip(channels, means)]
    return tf.concat(axis=3, values=shifted)
52,645 | aaalgo/aardvark | refs/heads/master | /cxray/chest.py | #!/usr/bin/env python3
import os
import sys
from tqdm import tqdm
from glob import glob
import pickle
# [count, name] per finding — apparently the NIH chest X-ray label set
# (TODO confirm).  The count is the number of images carrying the label,
# kept for reference / class weighting.  Rare classes are disabled.
CATEGORIES = [
        [60361, 'No Finding'],
        [11559, 'Atelectasis'],
        #[1, 'bels'],
        [2776, 'Cardiomegaly'],
        [4667, 'Consolidation'],
        [2303, 'Edema'],
        [13317, 'Effusion'],
        [2516, 'Emphysema'],
        [1686, 'Fibrosis'],
        #[227, 'Hernia'],
        [19894, 'Infiltration'],
        [5782, 'Mass'],
        [6331, 'Nodule'],
        [3385, 'Pleural_Thickening'],
        [1431, 'Pneumonia'],
        [5302, 'Pneumothorax'],
]
CLASSES = len(CATEGORIES)
# LABEL_LOOKUP maps a finding name to its integer label (its position in
# CATEGORIES).
LABEL_LOOKUP = {}
for l, (_, v) in enumerate(CATEGORIES):
    LABEL_LOOKUP[v] = l
    pass
LOOKUP_PATH = 'lookup.pickle'
# lookup maps an image basename to the index of the data/images_%03d
# directory that contains it; the scan is cached in lookup.pickle so the
# glob over all 12 directories only happens once.
if os.path.exists(LOOKUP_PATH):
    with open(LOOKUP_PATH, 'rb') as f:
        lookup = pickle.load(f)
else:
    lookup = {}
    print("Scanning images...")
    for i in range(1, 13):
        C = 0
        for p in glob('data/images_%03d/*.png' % i):
            bname = os.path.basename(p)
            #print(bname)
            lookup[bname] = i
            C += 1
        print('%d found for directory %d' % (C, i))
        pass
    with open(LOOKUP_PATH, 'wb') as f:
        pickle.dump(lookup, f)
    pass
def image_path (bname):
    """Return the on-disk path of image `bname`, or None if it is unknown.

    Fix: reuse the directory index already fetched with .get() instead of
    performing a second, redundant dict lookup.
    """
    n = lookup.get(bname, None)
    if n is None:
        return None
    return 'data/images_%03d/%s' % (n, bname)
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,646 | aaalgo/aardvark | refs/heads/master | /train-frcnn.py | #!/usr/bin/env python3
import os
import math
import sys
# C++ code, python3 setup.py build
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), 'build/lib.linux-x86_64-3.5'))
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'zoo/slim'))
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils
import aardvark
import cv2
from rpn import FRCNN
import cpp
# Command-line flags: checkpoint to fine-tune from, the slim backbone
# architecture to use, and its output stride.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('finetune', None, '')
flags.DEFINE_string('backbone', 'resnet_v2_50', 'architecture')
flags.DEFINE_integer('backbone_stride', 16, '')
class Model (FRCNN):
    """FRCNN specialization whose RPN feature extractor is a stock slim
    network selected by FLAGS.backbone."""
    def __init__ (self):
        super().__init__(FLAGS.backbone_stride)
        pass
    def rpn_backbone (self, images):
        # Build the shared feature extractor; kept on self for the RPN heads.
        self.backbone = aardvark.create_stock_slim_network(FLAGS.backbone, images, self.is_training, global_pool=False, stride=FLAGS.backbone_stride, scope='bb1')
        self.backbone_stride = FLAGS.backbone_stride
        pass
    def rpn_logits (self, channels, stride):
        # Upscale backbone features from backbone_stride to the requested
        # stride with a transposed conv (kernel = 2x the upscale factor).
        upscale = self.backbone_stride // stride
        with slim.arg_scope(aardvark.default_argscope(self.is_training)):
            return slim.conv2d_transpose(self.backbone, channels, 2*upscale, upscale, activation_fn=None)
        pass
    def rpn_params (self, channels, stride):
        # Same head shape as rpn_logits, but with separate weights for the
        # box-parameter regression branch.
        upscale = self.backbone_stride // stride
        with slim.arg_scope(aardvark.default_argscope(self.is_training)):
            return slim.conv2d_transpose(self.backbone, channels, 2*upscale, upscale, activation_fn=None)
        pass
def main (_):
    """Entry point: construct the model and hand it to the aardvark trainer."""
    aardvark.train(Model())
if __name__ == '__main__':
    # tf.app.run parses flags and invokes main; Ctrl-C exits quietly.
    try:
        tf.app.run()
    except KeyboardInterrupt:
        pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,647 | aaalgo/aardvark | refs/heads/master | /faster_rcnn.py | #!/usr/bin/env python3
import os
import math
import sys
from abc import abstractmethod
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils
import aardvark
import cv2
from tf_utils import *
import cpp
flags = tf.app.flags
FLAGS = flags.FLAGS
# RPN / proposal hyper-parameters: priors file, anchor grid stride, ROI
# pooling size, activation threshold, NMS limits, and ground-truth matcher
# thresholds.
flags.DEFINE_string('priors', 'priors', '')
flags.DEFINE_integer('anchor_stride', 4, '')
flags.DEFINE_integer('pooling_size', 7, '')
flags.DEFINE_float('anchor_th', 0.5, '')
flags.DEFINE_integer('nms_max', 128, '')
flags.DEFINE_float('nms_th', 0.5, '')
flags.DEFINE_float('match_th', 0.5, '')
flags.DEFINE_integer('max_masks', 128, '')
flags.DEFINE_float('lower_th', 0.1, '')
flags.DEFINE_float('upper_th', 0.5, '')
# optimizer settings
flags.DEFINE_float('rpn_act_weight', 1.0, '')
flags.DEFINE_float('rpn_params_weight', 1.0, '')
flags.DEFINE_float('xe_weight2', 1.0, '')
flags.DEFINE_float('pl_weight2', 1.0, '')
flags.DEFINE_boolean('rpnonly', False, '')
flags.DEFINE_boolean('rcnnonly', False, '')
def params_loss_rpn (params, gt_params, priors):
    """Per-anchor Huber loss on prior-normalized box parameters.

    params:    ? * n_priors * 4 predictions
    gt_params: ? * n_priors * 4 ground truth (in pixels, divided by priors)
    priors:    1 * n_priors * 4 (w, h tiled twice)
    Returns a ? * n_priors tensor of summed per-anchor losses.
    """
    normalized_gt = gt_params / priors
    huber = tf.losses.huber_loss(params, normalized_gt, reduction=tf.losses.Reduction.NONE, loss_collection=None)
    return tf.reduce_sum(huber, axis=2)
def anchors2boxes (shape, anchor_params, priors, n_priors):
    """Decode dense anchor parameters into clipped [x1,y1,x2,y2] boxes.

    shape: (B, H, W, ...) of the anchor layer; anchor positions are the
    H x W grid scaled by FLAGS.anchor_stride.
    Returns (boxes, offset) where offset[i] is the mini-batch index of
    box i (flattening order: batch, row, col, prior).
    """
    # anchor parameters are: dx, dy, w, h
    # anchor_params: n * n_priors * 4
    # priors: 1 * priors * 2
    B = shape[0]
    H = shape[1]
    W = shape[2]
    # one batch-index entry per grid cell per prior, in flattening order
    offset = tf_repeat(tf.range(B), [H * W * n_priors])
    if True:    # generate array of box centers
        x0 = tf.cast(tf.range(W) * FLAGS.anchor_stride, tf.float32)
        y0 = tf.cast(tf.range(H) * FLAGS.anchor_stride, tf.float32)
        x0, y0 = tf.meshgrid(x0, y0)
        x0 = tf.reshape(x0, (-1,))
        y0 = tf.reshape(y0, (-1,))
        # repeat per prior, then tile per batch item, matching offset order
        x0 = tf.tile(tf_repeat(x0, [n_priors]), [B])
        y0 = tf.tile(tf_repeat(y0, [n_priors]), [B])
    # un-normalize parameters by the priors before decoding
    anchor_params = tf.reshape(anchor_params * priors, (-1, 4))
    dx, dy, w, h = [tf.squeeze(x, axis=1) for x in tf.split(anchor_params, [1,1,1,1], 1)]
    W = tf.cast(W * FLAGS.anchor_stride, tf.float32)
    H = tf.cast(H * FLAGS.anchor_stride, tf.float32)
    max_X = W-1
    max_Y = H-1
    x1 = x0 + dx - w/2
    y1 = y0 + dy - h/2
    x2 = x1 + w
    y2 = y1 + h
    # clip to the image extent
    x1 = tf.clip_by_value(x1, 0, max_X)
    y1 = tf.clip_by_value(y1, 0, max_Y)
    x2 = tf.clip_by_value(x2, 0, max_X)
    y2 = tf.clip_by_value(y2, 0, max_Y)
    boxes = tf.stack([x1, y1, x2, y2], axis=1)
    return boxes, offset
def transform_bbox (roi, gt_box):
    """Encode gt_box relative to roi as (dx, dy, dw, dh).

    dx, dy are center offsets in units of the roi's width/height; dw, dh
    are raw size ratios (log-encoding is applied later in params_loss).
    Both inputs are N x 4 [x1, y1, x2, y2] tensors.
    """
    def _center_size(boxes):
        # width/height use the inclusive-pixel convention (+1)
        w = boxes[:, 2] - boxes[:, 0] + 1
        h = boxes[:, 3] - boxes[:, 1] + 1
        cx = boxes[:, 0] + 0.5 * w
        cy = boxes[:, 1] + 0.5 * h
        return cx, cy, w, h
    cx, cy, w, h = _center_size(roi)
    CX, CY, W, H = _center_size(gt_box)
    dx = (CX - cx) / w
    dy = (CY - cy) / h
    dw = W / w
    dh = H / h
    return tf.stack([dx, dy, dw, dh], axis=1)
def refine_bbox (roi, params):
    """Apply regression parameters (dx, dy, log dw, log dh) to roi boxes,
    returning refined N x 4 [x1, y1, x2, y2] boxes (inverse of
    transform_bbox with log-encoded sizes)."""
    w = roi[:, 2] - roi[:, 0] + 1
    h = roi[:, 3] - roi[:, 1] + 1
    cx = roi[:, 0] + 0.5 * w
    cy = roi[:, 1] + 0.5 * h
    # shift centers by the predicted fractional offsets
    CX = params[:, 0] * w + cx
    CY = params[:, 1] * h + cy
    # sizes are predicted in log space
    W = tf.exp(params[:, 2]) * w
    H = tf.exp(params[:, 3]) * h
    return tf.stack([CX - 0.5 * W, CY - 0.5 * H, CX + 0.5 * W, CY + 0.5 * H], axis=1)
def normalize_boxes (shape, boxes):
    """Convert pixel boxes [x1,y1,x2,y2] into [y1,x1,y2,x2] normalized to
    [0,1] — the ordering expected by tf.image.crop_and_resize.

    shape: the NHWC image shape tensor (height = shape[1], width = shape[2]).
    """
    max_X = tf.cast(shape[2]-1, tf.float32)
    max_Y = tf.cast(shape[1]-1, tf.float32)
    x1 = boxes[:, 0] / max_X
    y1 = boxes[:, 1] / max_Y
    x2 = boxes[:, 2] / max_X
    y2 = boxes[:, 3] / max_Y
    return tf.stack([y1, x1, y2, x2], axis=1)
def shift_boxes (boxes, offset):
    """Translate each box by its mini-batch index times 2 * FLAGS.max_size.

    This pushes boxes from different images into disjoint coordinate bands
    so a single non_max_suppression call never mixes boxes across images.
    """
    # boxes N * [x1, y1, x2, y2]; offset N (batch index per box)
    shift = tf.cast(tf.expand_dims(offset * FLAGS.max_size * 2, axis=1), dtype=tf.float32)
    return boxes + shift
def params_loss (params, gt_params):
    """Second-stage box regression loss: Huber on (dx, dy) plus Huber on
    log(w, h) — the network predicts log sizes, ground truth holds raw
    ratios.  Returns a per-box loss vector."""
    dxy, log_wh = tf.split(params, [2,2], 1)
    dxy_gt, wh_gt = tf.split(gt_params, [2,2], 1)
    # guard against log of non-positive ground-truth sizes
    log_wh_gt = tf.check_numerics(tf.log(wh_gt), name='log_wh_gt', message='xxx')
    loss = tf.losses.huber_loss(dxy, dxy_gt, reduction=tf.losses.Reduction.NONE, loss_collection=None) \
         + tf.losses.huber_loss(log_wh, log_wh_gt, reduction=tf.losses.Reduction.NONE, loss_collection=None)
    return tf.reduce_sum(loss, axis=1)
dump_cnt = 0
class FasterRCNN (aardvark.Model2D):
def __init__ (self, min_size=1):
super().__init__()
self.gt_matcher = cpp.GTMatcher(FLAGS.match_th, FLAGS.max_masks, min_size)
self.priors = []
if os.path.exists(FLAGS.priors):
with open(FLAGS.priors, 'r') as f:
for l in f:
if l[0] == '#':
continue
s, r = l.strip().split(' ')
s, r = float(s), float(r)
# w * h = s * s
# w / h = r
w = math.sqrt(s * s * r)
h = math.sqrt(s * s / r)
self.priors.append([w, h])
pass
pass
pass
aardvark.print_red("PRIORS %s" % str(self.priors))
# TODO: need a better way to generalize this to multiple priors and 0 priors
self.n_priors = len(self.priors)
if self.n_priors == 0:
self.n_priors = 1
pass
def feed_dict (self, record, is_training = True):
global dump_cnt
_, images, _, gt_anchors, gt_anchors_weight, \
gt_params, gt_params_weight, gt_boxes = record
assert np.all(gt_anchors < 2)
gt_boxes = np.reshape(gt_boxes, [-1, 7]) # make sure shape is correct
if dump_cnt < 20:
# dump images for sanity check
for i in range(images.shape[0]):
cv2.imwrite('picpac_dump2/%d_a_image.png' % dump_cnt, images[i])
for j in range(gt_anchors.shape[3]):
cv2.imwrite('picpac_dump2/%d_b_%d_anchor.png' % (dump_cnt, j), gt_anchors[i,:,:,j]*255)
cv2.imwrite('picpac_dump2/%d_c_%d_mask.png' % (dump_cnt, j), gt_anchors_weight[i,:,:,j]*255)
cv2.imwrite('picpac_dump2/%d_d_%d_weight.png' % (dump_cnt, j), gt_params_weight[i,:,:,j]*255)
dump_cnt += 1
if len(gt_boxes.shape) > 1:
assert np.all(gt_boxes[:, 1] < FLAGS.classes)
assert np.all(gt_boxes[:, 1] > 0)
return {self.is_training: is_training,
self.images: images,
self.gt_anchors: gt_anchors,
self.gt_anchors_weight: gt_anchors_weight,
self.gt_params: gt_params,
self.gt_params_weight: gt_params_weight,
self.gt_boxes: gt_boxes}
@abstractmethod
def rpn_backbone (self, images):
pass
@abstractmethod
def rpn_activation (self, channels, stride):
pass
@abstractmethod
def rpn_parameters (self, channels, stride):
pass
def build_graph (self):
# Set up model inputs
# parameters
self.is_training = tf.placeholder(tf.bool, name="is_training")
anchor_th = tf.constant(FLAGS.anchor_th, dtype=tf.float32, name="anchor_th")
nms_max = tf.constant(FLAGS.nms_max, dtype=tf.int32, name="nms_max")
nms_th = tf.constant(FLAGS.nms_th, dtype=tf.float32, name="nms_th")
# input images
self.images = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
# the reset are for training only
# whether a location should be activated
self.gt_anchors = tf.placeholder(tf.float32, shape=(None, None, None, self.n_priors))
self.gt_anchors_weight = tf.placeholder(tf.float32, shape=(None, None, None, self.n_priors))
# parameter of that location
self.gt_params = tf.placeholder(tf.float32, shape=(None, None, None, self.n_priors * 4))
self.gt_params_weight = tf.placeholder(tf.float32, shape=(None, None, None, self.n_priors))
self.gt_boxes = tf.placeholder(tf.float32, shape=(None, 7))
if len(self.priors) > 0:
priors = tf.expand_dims(tf.constant(self.priors, dtype=tf.float32), axis=0)
else:
priors = tf.constant([[[1,1]]], dtype=tf.float32)
# 1 * priors * 2
priors2 = tf.tile(priors,[1,1,2])
self.rpn_backbone(self.images)
if FLAGS.dice:
anchors = self.rpn_activation(self.n_priors, FLAGS.anchor_stride)
anchors = tf.sigmoid(anchors)
dice, dice_chs = weighted_dice_loss_by_channel(self.gt_anchors, anchors, self.gt_anchors_weight, self.n_priors)
activation_loss = tf.identity(dice, name='di')
prob = tf.reshape(anchors, (-1,))
if not FLAGS.rcnnonly:
#tf.losses.add_loss(dice * FLAGS.di_weight1)
'''
self.metrics.append(tf.identity(dice_chs[0], name='c0'))
self.metrics.append(tf.identity(dice_chs[1], name='c1'))
self.metrics.append(tf.identity(dice_chs[2], name='c2'))
'''
pass
else:
logits = self.rpn_activation(self.n_priors * 2, FLAGS.anchor_stride)
logits2 = tf.reshape(logits, (-1, 2)) # ? * 2
gt_anchors = tf.reshape(self.gt_anchors, (-1, ))
gt_anchors_weight = tf.reshape(self.gt_anchors_weight, (-1,))
xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits2, labels=tf.cast(gt_anchors, dtype=tf.int32))
xe = tf.reduce_sum(xe * gt_anchors_weight) / (tf.reduce_sum(gt_anchors_weight) + 0.0001)
activation_loss = tf.identity(xe, name='xe')
prob = tf.squeeze(tf.slice(tf.nn.softmax(logits2), [0, 1], [-1, 1]), axis=1)
'''
self.metrics.append(tf.reduce_mean(anchors, name='o'))
self.metrics.append(tf.reduce_sum(self.gt_anchors * anchors, name='s1'))
self.metrics.append(tf.reduce_sum(self.gt_anchors, name='s2'))
self.metrics.append(tf.reduce_sum(anchors, name='s3'))
self.metrics.append(tf.reduce_mean(self.gt_params_weight, name='o'))
'''
params = self.rpn_parameters(4 * self.n_priors, FLAGS.anchor_stride)
anchor_layer_shape = tf.shape(params)
params = tf.reshape(params, (-1, self.n_priors, 4)) # ? * 4
gt_params = tf.reshape(self.gt_params, (-1, self.n_priors, 4))
gt_params_weight = tf.reshape(self.gt_params_weight, (-1, self.n_priors))
pl = weighted_loss_by_channel(params_loss_rpn(params, gt_params, priors2), gt_params_weight, self.n_priors)
pl = tf.check_numerics(pl, 'p1', name='p1') # params-loss
tf.identity(prob, name='rpn_all_probs')
if not FLAGS.rcnnonly:
tf.losses.add_loss(activation_loss * FLAGS.rpn_act_weight)
self.metrics.append(activation_loss)
tf.losses.add_loss(pl * FLAGS.rpn_params_weight)
self.metrics.append(pl)
#prob = tf.reshape(anchors, (-1,))
# index is index within mini batch
boxes, index = anchors2boxes(anchor_layer_shape, params, priors2, self.n_priors)
with tf.device('/cpu:0'):
# fuck tensorflow, these lines fail on GPU
# pre-filtering by threshold so we put less stress on non_max_suppression
sel = tf.greater_equal(prob, anchor_th)
# sel is a boolean mask
# select only boxes with prob > th for nms
prob = tf.boolean_mask(prob, sel)
#params = tf.boolean_mask(params, sel)
boxes = tf.boolean_mask(boxes, sel)
index = tf.boolean_mask(index, sel)
#self.metrics.append(tf.identity(tf.cast(tf.shape(boxes)[0], dtype=tf.float32), name='o'))
sel = tf.image.non_max_suppression(shift_boxes(boxes, index), prob, nms_max, iou_threshold=nms_th)
# sel is a list of indices
rpn_probs = tf.gather(prob, sel, name='rpn_probs')
rpn_boxes = tf.gather(boxes, sel, name='rpn_boxes')
rpn_index = tf.gather(index, sel, name='rpn_index')
n_hits, rpn_hits, gt_hits = tf.py_func(self.gt_matcher.apply, [rpn_boxes, rpn_index, self.gt_boxes], [tf.float32, tf.int32, tf.int32])
self.metrics.append(tf.identity(tf.cast(tf.shape(rpn_boxes)[0], dtype=tf.float32), name='n'))
self.metrics.append(tf.identity(tf.cast(n_hits, dtype=tf.float32), name='h'))
# % boxes found
precision = n_hits / (tf.cast(tf.shape(rpn_boxes)[0], tf.float32) + 0.0001);
recall = n_hits / (tf.cast(tf.shape(self.gt_boxes)[0], tf.float32) + 0.0001);
self.metrics.append(tf.identity(precision, name='p'))
self.metrics.append(tf.identity(recall, name='r'))
# setup prediction
# normalize boxes to [0-1]
boxes = normalize_boxes(tf.shape(self.images), rpn_boxes)
# we need to add extra samples from training boxes only
if False:
mask_size = FLAGS.pooling_size * 2
net = tf.image.crop_and_resize(backbone, boxes, rpn_index, [mask_size, mask_size])
net = slim.max_pool2d(net, [2,2], padding='SAME')
#
net = slim.conv2d(net, FLAGS.feature_channels, 3, 1)
net = tf.reshape(net, [-1, FLAGS.pooling_size * FLAGS.pooling_size * FLAGS.feature_channels])
net = slim.fully_connected(net, 4096)
net = slim.dropout(net, keep_prob=0.5, is_training=self.is_training)
net = slim.fully_connected(net, 4096)
net = slim.dropout(net, keep_prob=0.5, is_training=self.is_training)
logits = slim.fully_connected(net, FLAGS.classes, activation_fn=None)
params = slim.fully_connected(net, FLAGS.classes * 4, activation_fn=None)
params = tf.reshape(params, [-1, FLAGS.classes, 4])
else: # my simplified simplementation
if FLAGS.rcnnonly:
backbone = tf.stop_gradient(backbone)
mask_size = FLAGS.pooling_size * 2
net = tf.image.crop_and_resize(self.backbone, boxes, rpn_index, [mask_size, mask_size])
net = slim.conv2d(net, 256, 3, 1)
net = slim.conv2d(net, 256, 3, 1)
net = slim.max_pool2d(net, [2,2], padding='SAME')
net = slim.conv2d(net, 512, 3, 1)
net = slim.conv2d(net, 512, 3, 1)
#net = slim.conv2d(patches, 64, 3, 1)
net = tf.reduce_mean(net, [1, 2], keep_dims=False)
logits = slim.fully_connected(net, FLAGS.classes, activation_fn=None)
#net = slim.conv2d(patches, 128, 3, 1)
#net = patches
#net = tf.reduce_mean(net, [1, 2], keep_dims=False)
params = slim.fully_connected(net, FLAGS.classes * 4, activation_fn=None)
params = tf.reshape(params, [-1, FLAGS.classes, 4])
if FLAGS.classes == 2:
logits = tf.clip_by_value(logits, -10, 10)
tf.nn.softmax(logits, name='probs')
cls = tf.argmax(logits, axis=1, name='cls')
if True: # for inference stage
onehot = tf.expand_dims(tf.one_hot(tf.cast(cls, tf.int32), depth=FLAGS.classes, on_value=1.0, off_value=0.0), axis=2)
# onehot: N * C * 1
# params : N * C * 4
params_onehot = tf.reduce_sum(params * onehot, axis=1)
refined_boxes = refine_bbox(rpn_boxes, params_onehot)
tf.identity(refined_boxes, name='boxes')
rpn_boxes = tf.gather(rpn_boxes, rpn_hits)
logits = tf.gather(logits, rpn_hits)
params = tf.gather(params, rpn_hits)
'''
self.metrics.append(tf.reduce_sum(tf.nn.l2_loss(logits), name='U'))
self.metrics.append(tf.reduce_sum(tf.nn.l2_loss(params), name='V'))
'''
matched_gt_boxes = tf.gather(self.gt_boxes, gt_hits)
matched_gt_labels = tf.cast(tf.squeeze(tf.slice(matched_gt_boxes, [0, 1], [-1, 1]), axis=1), tf.int32)
matched_gt_boxes = transform_bbox(rpn_boxes, tf.slice(matched_gt_boxes, [0, 3], [-1, 4]))
onehot = tf.expand_dims(tf.one_hot(matched_gt_labels, depth=FLAGS.classes, on_value=1.0, off_value=0.0), axis=2)
params = tf.reduce_sum(params * onehot, axis=1)
xe = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=matched_gt_labels)
xe = tf.check_numerics(tf.reduce_sum(xe)/(n_hits + 0.0001), 'x2', name='x2')
if not FLAGS.rpnonly:
tf.losses.add_loss(xe * FLAGS.xe_weight2)
self.metrics.append(xe)
pl = params_loss(params, matched_gt_boxes)
pl = tf.reduce_sum(pl) / (n_hits + 0.0001)
pl = tf.check_numerics(pl, 'p2', name='p2') # params-loss
if not FLAGS.rpnonly:
tf.losses.add_loss(pl*FLAGS.pl_weight2)
self.metrics.append(pl)
pass
def extra_stream_config (self, is_training):
    """Return the picpac stream configuration for this detection model.

    Builds the standard resize/augment/clip pipeline and appends the
    dense-anchor, box-feature and rasterize transforms.
    """
    if len(self.priors) > 0:
        aardvark.print_red('priors %s' % str(self.priors))
    augments = aardvark.load_augments(is_training)
    # Random clip shifting is a training-time augmentation only.
    shift = FLAGS.clip_shift if is_training else 0
    transforms = [{"type": "resize", "max_size": FLAGS.max_size}]
    transforms.extend(augments)
    transforms.extend([
        {"type": "clip", "shift": shift, "width": FLAGS.fix_width, "height": FLAGS.fix_height, "round": FLAGS.clip_stride},
        {"type": "anchors.dense.box", 'downsize': FLAGS.anchor_stride, 'lower_th': FLAGS.lower_th, 'upper_th': FLAGS.upper_th, 'weighted': False, 'priors': self.priors, 'params_default': 1.0},
        {"type": "box_feature"},
        {"type": "rasterize"},
    ])
    return {"annotate": [1], "transforms": transforms}
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,648 | aaalgo/aardvark | refs/heads/master | /zoo/sss/GCN.py | import tensorflow as tf
from tensorflow.contrib import slim
import resnet_v2
import os, sys
def Upsampling(inputs, scale):
    """Bilinearly upsample *inputs* (NHWC) by an integer *scale* factor."""
    shape = tf.shape(inputs)
    return tf.image.resize_bilinear(inputs, size=[shape[1] * scale, shape[2] * scale])
def ConvUpscaleBlock(inputs, n_filters, kernel_size=[3, 3], scale=2):
    """
    Basic deconv block for GCN.

    Apply transposed convolution for feature-map upscaling.

    Fix: the original hard-coded kernel_size=[3, 3] and stride=[2, 2],
    silently ignoring the *kernel_size* and *scale* parameters; they are
    now honored (the defaults preserve the original behavior).
    """
    net = slim.conv2d_transpose(inputs, n_filters, kernel_size=kernel_size,
                                stride=[scale, scale], activation_fn=None)
    return net
def BoundaryRefinementBlock(inputs, n_filters, kernel_size=[3, 3]):
    """
    Boundary Refinement Block for GCN: a two-conv residual refinement branch
    added back onto the input.
    """
    branch = slim.conv2d(inputs, n_filters, kernel_size, activation_fn=None, normalizer_fn=None)
    branch = tf.nn.relu(branch)
    branch = slim.conv2d(branch, n_filters, kernel_size, activation_fn=None, normalizer_fn=None)
    # Residual connection.
    return inputs + branch
def GlobalConvBlock(inputs, n_filters=21, size=3):
    """
    Global Conv Block for GCN: two separable (k x 1)/(1 x k) branches summed,
    approximating a large k x k convolution cheaply.
    """
    left = slim.conv2d(inputs, n_filters, [size, 1], activation_fn=None, normalizer_fn=None)
    left = slim.conv2d(left, n_filters, [1, size], activation_fn=None, normalizer_fn=None)
    right = slim.conv2d(inputs, n_filters, [1, size], activation_fn=None, normalizer_fn=None)
    right = slim.conv2d(right, n_filters, [size, 1], activation_fn=None, normalizer_fn=None)
    return left + right
def build_gcn(inputs, num_classes, preset_model='GCN-Res101', weight_decay=1e-5, is_training=None, upscaling_method="bilinear", pretrained_dir="models"):
    """
    Builds the GCN model.

    Arguments:
      inputs: the input tensor
      num_classes: number of output classes
      preset_model: 'GCN-Res50' | 'GCN-Res101' | 'GCN-Res152' — selects the
        ResNet-v2 backbone used for feature extraction
      weight_decay: L2 weight decay applied inside the backbone arg scope
      is_training: required; controls backbone batch norm
      pretrained_dir: directory holding the pre-trained ResNet checkpoints

    Returns:
      (net, init_fn): per-pixel logits and a function restoring the
      pre-trained backbone weights into a session.
    """
    assert is_training is not None
    # Map preset name -> (backbone constructor, variable scope). Replaces the
    # original copy-pasted if/elif chain, whose error message also wrongly
    # claimed ResNet-50 was unsupported.
    backbones = {
        'GCN-Res50': (resnet_v2.resnet_v2_50, 'resnet_v2_50'),
        'GCN-Res101': (resnet_v2.resnet_v2_101, 'resnet_v2_101'),
        'GCN-Res152': (resnet_v2.resnet_v2_152, 'resnet_v2_152'),
    }
    if preset_model not in backbones:
        raise ValueError("Unsupported ResNet model '%s'. This function only supports ResNet 50, 101 and 152" % (preset_model))
    backbone_fn, resnet_scope = backbones[preset_model]
    with slim.arg_scope(resnet_v2.resnet_arg_scope(weight_decay=weight_decay)):
        logits, end_points = backbone_fn(inputs, is_training=is_training, scope=resnet_scope)
    # GCN requires pre-trained ResNet weights.
    init_fn = slim.assign_from_checkpoint_fn(os.path.join(pretrained_dir, resnet_scope + '.ckpt'), slim.get_model_variables(resnet_scope))

    res = [end_points['pool5'], end_points['pool4'],
           end_points['pool3'], end_points['pool2']]

    # Top-down refinement pyramid: each level refines, merges the level
    # above, refines again, and upscales by 2.
    down_5 = GlobalConvBlock(res[0], n_filters=21, size=3)
    down_5 = BoundaryRefinementBlock(down_5, n_filters=21, kernel_size=[3, 3])
    down_5 = ConvUpscaleBlock(down_5, n_filters=21, kernel_size=[3, 3], scale=2)

    down_4 = GlobalConvBlock(res[1], n_filters=21, size=3)
    down_4 = BoundaryRefinementBlock(down_4, n_filters=21, kernel_size=[3, 3])
    down_4 = tf.add(down_4, down_5)
    down_4 = BoundaryRefinementBlock(down_4, n_filters=21, kernel_size=[3, 3])
    down_4 = ConvUpscaleBlock(down_4, n_filters=21, kernel_size=[3, 3], scale=2)

    down_3 = GlobalConvBlock(res[2], n_filters=21, size=3)
    down_3 = BoundaryRefinementBlock(down_3, n_filters=21, kernel_size=[3, 3])
    down_3 = tf.add(down_3, down_4)
    down_3 = BoundaryRefinementBlock(down_3, n_filters=21, kernel_size=[3, 3])
    down_3 = ConvUpscaleBlock(down_3, n_filters=21, kernel_size=[3, 3], scale=2)

    down_2 = GlobalConvBlock(res[3], n_filters=21, size=3)
    down_2 = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    down_2 = tf.add(down_2, down_3)
    down_2 = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    down_2 = ConvUpscaleBlock(down_2, n_filters=21, kernel_size=[3, 3], scale=2)

    # Final refinement back to full resolution, then 1x1 classifier.
    net = BoundaryRefinementBlock(down_2, n_filters=21, kernel_size=[3, 3])
    net = ConvUpscaleBlock(net, n_filters=21, kernel_size=[3, 3], scale=2)
    net = BoundaryRefinementBlock(net, n_filters=21, kernel_size=[3, 3])
    net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
    return net, init_fn
def mean_image_subtraction(inputs, means=[123.68, 116.78, 103.94]):
    """Subtract per-channel means (ImageNet RGB by default) from *inputs*."""
    inputs = tf.to_float(inputs)
    num_channels = inputs.get_shape().as_list()[-1]
    if len(means) != num_channels:
        raise ValueError('len(means) must match the number of channels')
    channels = tf.split(axis=3, num_or_size_splits=num_channels, value=inputs)
    shifted = [channel - mean for channel, mean in zip(channels, means)]
    return tf.concat(axis=3, values=shifted)
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,649 | aaalgo/aardvark | refs/heads/master | /train-fcn-slim.py | #!/usr/bin/env python3
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'zoo/slim'))
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory, resnet_utils
import aardvark
from tf_utils import *
from zoo import fuck_slim
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('finetune', None, '')
flags.DEFINE_string('backbone', 'resnet_v2_50', 'architecture')
flags.DEFINE_integer('backbone_stride', 16, '')
flags.DEFINE_integer('reduction', 1, '')
flags.DEFINE_integer('multistep', 0, '')
class Model (aardvark.SegmentationModel):
    """FCN segmentation model built on a stock slim backbone."""

    def __init__ (self):
        super().__init__()

    def inference (self, images, classes, is_training):
        """Build the network; returns per-pixel logits at input resolution."""
        # Clip sizes must be divisible by the backbone stride.
        assert FLAGS.clip_stride % FLAGS.backbone_stride == 0
        backbone = aardvark.create_stock_slim_network(
                FLAGS.backbone, images, is_training,
                global_pool=False, stride=FLAGS.backbone_stride)
        if FLAGS.finetune:
            # When fine-tuning, freeze the backbone and train only the head.
            backbone = tf.stop_gradient(backbone)
        with slim.arg_scope(aardvark.default_argscope(self.is_training)):
            if FLAGS.multistep > 0:
                if FLAGS.multistep == 1:
                    aardvark.print_red("multistep = 1 doesn't converge well")
                net = slim_multistep_upscale(backbone, FLAGS.backbone_stride,
                                             FLAGS.reduction, FLAGS.multistep)
                logits = slim.conv2d(net, classes, 3, 1,
                                     activation_fn=None, padding='SAME')
            else:
                # Single transposed conv straight back to input resolution.
                logits = slim.conv2d_transpose(backbone, classes,
                                               FLAGS.backbone_stride * 2,
                                               FLAGS.backbone_stride,
                                               activation_fn=None, padding='SAME')
        if FLAGS.finetune:
            assert FLAGS.colorspace == 'RGB'
            def is_trainable (x):
                # Everything outside the backbone scope stays trainable.
                return not x.startswith(FLAGS.backbone)
            self.init_session, self.variables_to_train = aardvark.setup_finetune(FLAGS.finetune, is_trainable)
        return logits
def main (_):
    """Entry point: build the model and hand it to the aardvark trainer."""
    aardvark.train(Model())
if __name__ == '__main__':
    # Allow clean Ctrl-C termination during long training runs.
    try:
        tf.app.run(main=main)
    except KeyboardInterrupt:
        pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,650 | aaalgo/aardvark | refs/heads/master | /mura/predict14.py | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import meta_graph
import picpac
class Model:
    """Wraps a frozen graph: maps a placeholder onto its input and exposes
    the 'probs' tensor plus a loader that restores the checkpoint."""

    def __init__ (self, X, path, name):
        meta = meta_graph.read_meta_graph_file(path + '.meta')
        # Inference only: hard-wire is_training to False.
        not_training = tf.constant(False, dtype=tf.bool)
        self.probs, = tf.import_graph_def(
                meta.graph_def, name=name,
                input_map={'images:0': X, 'is_training:0': not_training},
                return_elements=['probs:0'])
        self.saver = tf.train.Saver(saver_def=meta.saver_def, name=name)
        self.loader = lambda sess: self.saver.restore(sess, path)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', None, '')
flags.DEFINE_float('cth', 0.5, '')
flags.DEFINE_integer('channels', 1, '')
flags.DEFINE_integer('max_size', 400, '')
flags.DEFINE_integer('fix_size', 400, '')
flags.DEFINE_integer('max', 0, '')
def main (_):
    """Run the classifier over the validation stream, average probabilities
    per case directory, and write predict.csv / predict.csv.full."""
    X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels))
    model = Model(X, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    stream = picpac.ImageStream({
        'db': 'scratch/val.db',
        'cache': False,
        'loop': False,
        'channels': FLAGS.channels,
        'shuffle': False,
        'batch': 1,
        'raw': [1],
        'colorspace': 'RGB',
        'transforms': [
            {"type": "resize", "max_size": FLAGS.max_size},
            {"type": "clip", "width": FLAGS.fix_size, "height": FLAGS.fix_size},
        ]})
    with tf.Session(config=config) as sess:
        model.loader(sess)
        lookup = {}     # case dir -> [image count, summed 14-way probs]
        done = 0
        for meta, batch in tqdm(stream, total=stream.size()):
            path = meta.raw[0][0].decode('ascii')
            probs = sess.run(model.probs, feed_dict={X: batch})
            case_key = '/'.join(path.split('/')[:-1]) + '/'
            entry = lookup.setdefault(case_key, [0, np.zeros((14,), dtype=np.float32)])
            entry[0] = entry[0] + 1
            entry[1] += probs[0]
            done += 1
            if FLAGS.max > 0 and done >= FLAGS.max:
                break
    with open('predict.csv', 'w') as f, \
         open('predict.csv.full', 'w') as f2:
        for k, v in lookup.items():
            # Average over the case's images; the 14 outputs pair up into
            # 7 (negative, positive) splits — sum the positive column.
            probs = np.reshape(v[1] / v[0], (7, 2))
            prob = np.sum(probs[:, 1])
            label = 1 if prob > 0.5 else 0
            f.write('%s,%d\n' % (k, label))
            f2.write('%s,%g\n' % (k, prob))
if __name__ == '__main__':
    tf.app.run(main=main)
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,651 | aaalgo/aardvark | refs/heads/master | /train-fcn-unet.py | #!/usr/bin/env python3
import tensorflow as tf
import aardvark
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('re_weight', 0.0001, 'regularization weight')
class Model (aardvark.SegmentationModel):
    """U-net based FCN segmentation model."""

    def __init__ (self):
        super().__init__()

    def inference (self, images, classes, is_training):
        """Build the U-net and a final deconv head; returns per-pixel logits.

        Fix: use the `images` / `is_training` arguments instead of reaching
        for self.images / self.is_training, matching this method's signature
        and the sibling slim-based model (the attributes are presumably the
        same tensors in aardvark — the arguments are authoritative).
        """
        # Center pixel values roughly around zero before the network.
        self.backbone, backbone_stride = myunet(images - 127.0, is_training)
        assert FLAGS.clip_stride % backbone_stride == 0
        return tf.layers.conv2d_transpose(self.backbone, classes, 3, 1, activation=None, padding='SAME')
def myunet (X, is_training):
    """U-net style encoder/decoder with batch norm.

    Returns (features, total_stride); the final feature map is at full
    input resolution, total_stride reports 16 (deepest downsampling).
    """
    BN = True
    with tf.name_scope('myunet'):
        regularizer = tf.contrib.layers.l2_regularizer(scale=FLAGS.re_weight)

        def conv2d (x, channels, ksize, stride):
            if BN:
                x = tf.layers.conv2d(x, channels, ksize, stride, padding='SAME', activation=None, kernel_regularizer=regularizer)
                x = tf.layers.batch_normalization(x, training=is_training)
                return tf.nn.relu(x)
            return tf.layers.conv2d(x, channels, ksize, stride, padding='SAME', kernel_regularizer=regularizer, activation=tf.nn.relu)

        def max_pool2d (x, ksize, stride):
            return tf.layers.max_pooling2d(x, ksize, stride, padding='SAME')

        def conv2d_transpose (x, channels, ksize, stride):
            if BN:
                x = tf.layers.conv2d_transpose(x, channels, ksize, stride, padding='SAME', activation=None, kernel_regularizer=regularizer)
                x = tf.layers.batch_normalization(x, training=is_training)
                return tf.nn.relu(x)
            return tf.layers.conv2d_transpose(x, channels, ksize, stride, padding='SAME', kernel_regularizer=regularizer, activation=tf.nn.relu)

        skips = []
        # --- encoder ---
        net = conv2d(X, 32, 3, 2)
        net = conv2d(net, 32, 3, 1)
        skips.append(net)               # 1/2 resolution
        net = conv2d(net, 64, 3, 1)
        net = conv2d(net, 64, 3, 1)
        net = max_pool2d(net, 2, 2)
        skips.append(net)               # 1/4
        net = conv2d(net, 128, 3, 1)
        net = conv2d(net, 128, 3, 1)
        net = max_pool2d(net, 2, 2)
        skips.append(net)               # 1/8
        net = conv2d(net, 256, 3, 1)
        net = conv2d(net, 256, 3, 1)
        net = max_pool2d(net, 2, 2)     # 1/16
        # --- bottleneck ---
        net = conv2d(net, 256, 3, 1)
        net = conv2d(net, 256, 3, 1)
        # --- decoder with skip connections ---
        net = conv2d_transpose(net, 128, 5, 2)      # back to 1/8
        net = tf.concat([net, skips.pop()], 3)
        net = conv2d_transpose(net, 64, 5, 2)       # 1/4
        net = tf.concat([net, skips.pop()], 3)
        net = conv2d_transpose(net, 32, 5, 2)       # 1/2
        net = tf.concat([net, skips.pop()], 3)
        net = conv2d_transpose(net, 16, 5, 2)       # full resolution
        assert not skips
    return net, 16
def main (_):
    """Train the U-net segmentation model via aardvark."""
    aardvark.train(Model())
if __name__ == '__main__':
    # Allow clean Ctrl-C termination during long training runs.
    try:
        tf.app.run(main=main)
    except KeyboardInterrupt:
        pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,652 | aaalgo/aardvark | refs/heads/master | /gate/predict_gate.py | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time
from tqdm import tqdm
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import meta_graph
import picpac
from gallery import Gallery
import cv2
from glob import glob
class Model:
    """Wraps a frozen classification graph and exposes its 'probs' output
    along with a checkpoint-restoring loader."""

    def __init__ (self, X, path, name):
        meta = meta_graph.read_meta_graph_file(path + '.meta')
        # Inference only: batch-norm/dropout switched to eval mode.
        not_training = tf.constant(False, dtype=tf.bool)
        self.probs, = tf.import_graph_def(
                meta.graph_def, name=name,
                input_map={'images:0': X, 'is_training:0': not_training},
                return_elements=['probs:0'])
        self.saver = tf.train.Saver(saver_def=meta.saver_def, name=name)
        self.loader = lambda sess: self.saver.restore(sess, path)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', None, '')
flags.DEFINE_string('image', None, '')
flags.DEFINE_float('cth', 0.5, '')
flags.DEFINE_integer('channels', 3, '')
flags.DEFINE_integer('max_size', 400, '')
flags.DEFINE_integer('fix_width', 200, '')
flags.DEFINE_integer('fix_height', 112, '')
def main (_):
    """Classify every */*.jpg under FLAGS.image and copy each image into
    the gallery for its predicted class (gallery_0/1/2).

    Improvements: the three copy-pasted `if cls == k` branches are replaced
    by a class->gallery dict, and the dead commented-out code is removed.
    Behavior is unchanged: classes outside 0..2 are silently skipped.
    """
    X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels))
    model = Model(X, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        model.loader(sess)
        # One output gallery per predicted class.
        galleries = {c: Gallery('gallery_%d' % c, ext='png') for c in range(3)}
        for img in glob(os.path.join(FLAGS.image, "*/*.jpg")):
            filename = img.split("/")[-1]
            image = cv2.imread(img, cv2.IMREAD_COLOR)
            batch = np.expand_dims(image, axis=0).astype(dtype=np.float32)
            probs = sess.run(model.probs, feed_dict={X: batch})
            cls = int(np.argmax(probs[0]))
            gal = galleries.get(cls)
            if gal is not None:
                cv2.imwrite(gal.next(filename=filename), image)
        for gal in galleries.values():
            gal.flush()
if __name__ == '__main__':
    tf.app.run(main=main)
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,653 | aaalgo/aardvark | refs/heads/master | /cxray/predict-cls-vis.py | #!/usr/bin/env python3
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
sys.path.append('..')
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.python.framework import meta_graph
from mold import Scaling
from gallery import Gallery
from chest import *
class Model:
    """Imports a frozen classifier graph exposing both the 'probs' and
    'heatmap' tensors, plus a checkpoint-restoring loader."""

    def __init__ (self, X, path, name):
        meta = meta_graph.read_meta_graph_file(path + '.meta')
        not_training = tf.constant(False)
        self.probs, self.heatmap = tf.import_graph_def(
                meta.graph_def, name=name,
                input_map={'images:0': X, 'is_training:0': not_training},
                return_elements=['probs:0', 'heatmap:0'])
        self.saver = tf.train.Saver(saver_def=meta.saver_def, name=name)
        self.loader = lambda sess: self.saver.restore(sess, path)
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', None, '')
flags.DEFINE_integer('stride', 16, '')
flags.DEFINE_integer('channels', 1, '')
flags.DEFINE_string('list', 'scratch/val-nz.list', '')
flags.DEFINE_integer('max', 10, '')
flags.DEFINE_integer('resize', 256, '')
def save_prediction_image (gal, image, label, probs, heatmap):
    """Write a side-by-side visualization to the gallery: annotated
    heatmap overlay on the left, untouched original on the right."""
    pred = np.argmax(probs)
    canvas = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR).astype(np.float32)
    orig = np.copy(canvas)
    # Ground truth annotated in green, prediction in red.
    cv2.putText(canvas, 'gt %d: %.3f %s' % (label, probs[label], CATEGORIES[label][1]), (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    cv2.putText(canvas, 'inf %d: %.3f %s' % (pred, probs[pred], CATEGORIES[pred][1]), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    # Blend the class heatmaps into the green/red channels.
    canvas[:, :, 1] += heatmap[:, :, label] * 128
    canvas[:, :, 2] += heatmap[:, :, pred] * 128
    side_by_side = np.concatenate([canvas, orig], axis=1)
    cv2.imwrite(gal.next(), np.clip(side_by_side, 0, 255))
def main (_):
    """Visualize model predictions for up to FLAGS.max images listed in
    FLAGS.list (lines of 'path,label'), writing results to ./output."""
    X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
    model = Model(X, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    mold = Scaling(stride=FLAGS.stride)
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        model.loader(sess)
        gal = Gallery('output', ext='.png')
        done = 0
        if FLAGS.list:
            with open(FLAGS.list, 'r') as f:
                for line in f:
                    # Fix off-by-one: the original tested `CC > FLAGS.max`
                    # and therefore processed FLAGS.max + 1 images.
                    if done >= FLAGS.max:
                        break
                    path, label = line.strip().split(',')
                    label = int(label)
                    print(path)
                    if FLAGS.channels == 3:
                        image = cv2.imread(path, cv2.IMREAD_COLOR)
                    elif FLAGS.channels == 1:
                        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
                    else:
                        assert False
                    image = cv2.resize(image, (FLAGS.resize, FLAGS.resize))
                    probs, heatmap = sess.run([model.probs, model.heatmap],
                                              feed_dict={X: mold.batch_image(image)})
                    probs = probs[0]
                    heatmap = mold.unbatch_prob(image, heatmap)
                    save_prediction_image(gal, image, label, probs, heatmap)
                    done += 1
        gal.flush()
if __name__ == '__main__':
    tf.app.run(main=main)
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,654 | aaalgo/aardvark | refs/heads/master | /zoo/cls_nets.py | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.contrib.slim.nets import resnet_v2
#import resnet_v2
def resnet_v2_18_impl (inputs,
                       num_classes=None,
                       is_training=True,
                       global_pool=True,
                       output_stride=None,
                       reuse=None,
                       include_root_block=True,
                       scope='resnet_v2_18'):
    """ResNet-v2-18: four stages of two units each, built on slim's resnet_v2."""
    block = resnet_v2.resnet_v2_block
    # Depths double per stage; the final stage keeps stride 1.
    blocks = [
        block('block1', base_depth=64, num_units=2, stride=2),
        block('block2', base_depth=128, num_units=2, stride=2),
        block('block3', base_depth=256, num_units=2, stride=2),
        block('block4', base_depth=512, num_units=2, stride=1),
    ]
    return resnet_v2.resnet_v2(inputs, blocks,
                               num_classes=num_classes,
                               is_training=is_training,
                               global_pool=global_pool,
                               output_stride=output_stride,
                               include_root_block=include_root_block,
                               reuse=reuse,
                               scope=scope)
def resnet_18_cifar (inputs, is_training, num_classes):
    """ResNet-18 without the root block, for CIFAR-sized inputs."""
    logits, _ = resnet_v2_18_impl(inputs, num_classes=num_classes, is_training=is_training, include_root_block=False)
    # resnet output is (N, 1, 1, C); drop the spatial singleton dims.
    return tf.identity(tf.squeeze(logits, [1, 2]), name='logits')
def resnet_18 (inputs, is_training, num_classes):
    """Standard ResNet-18 classifier head."""
    logits, _ = resnet_v2_18_impl(inputs, num_classes=num_classes, is_training=is_training)
    # resnet output is (N, 1, 1, C); drop the spatial singleton dims.
    return tf.identity(tf.squeeze(logits, [1, 2]), name='logits')
def resnet_50 (inputs, is_training, num_classes):
    """ResNet-v2-50 classifier head."""
    logits, _ = resnet_v2.resnet_v2_50(inputs, num_classes=num_classes, is_training=is_training)
    # resnet output is (N, 1, 1, C); drop the spatial singleton dims.
    return tf.identity(tf.squeeze(logits, [1, 2]), name='logits')
def resnet_101 (inputs, is_training, num_classes):
    """ResNet-v2-101 classifier head.

    Fix: forward is_training — the original ignored the argument, leaving
    batch norm permanently in training mode at inference — consistent with
    resnet_50 above.
    """
    logits, _ = resnet_v2.resnet_v2_101(inputs, num_classes=num_classes, is_training=is_training)
    # resnet output is (N, 1, 1, C); drop the spatial singleton dims.
    logits = tf.squeeze(logits, [1, 2])
    return tf.identity(logits, name='logits')
'''
def inception (inputs, num_classes):
from tensorflow.contrib.slim.nets import inception_v2
logits, _ = inception_v2.inception_v2(inputs, num_classes)
return tf.identity(logits, name='logits')
'''
def vgg16 (inputs, is_training, num_classes):
    """VGG-16 (configuration D) classifier head.

    Fixes: forward is_training — the original dropped the argument, leaving
    dropout permanently active at inference — and remove the leftover debug
    print of the module path.
    """
    from tensorflow.contrib.slim.nets import vgg
    logits, _ = vgg.vgg_d(inputs, num_classes, is_training=is_training)
    return tf.identity(logits, name='logits')
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,655 | aaalgo/aardvark | refs/heads/master | /zoo/sss/FRRN.py | import tensorflow as tf
from tensorflow.contrib import slim
import resnet_v1
def Upsampling(inputs, scale):
    """Nearest-neighbor upsample of *inputs* (NHWC) by an integer *scale*."""
    shape = tf.shape(inputs)
    return tf.image.resize_nearest_neighbor(inputs, size=[shape[1] * scale, shape[2] * scale])
def Unpooling(inputs, scale):
    """Bilinear upsample of *inputs* by an integer *scale* (acts as unpooling)."""
    shape = tf.shape(inputs)
    return tf.image.resize_bilinear(inputs, size=[shape[1] * scale, shape[2] * scale])
def ResidualUnit(inputs, n_filters=48, filter_size=3):
    """
    A local residual unit: conv-BN-relu-conv-BN (the skip connection is
    added by the caller).

    Arguments:
        inputs: the input tensor
        n_filters: output feature maps for each conv
        filter_size: convolution kernel size
    Returns:
        Output of the local residual block.
    """
    out = slim.conv2d(inputs, n_filters, filter_size, activation_fn=None)
    out = slim.batch_norm(out, fused=True)
    out = tf.nn.relu(out)
    out = slim.conv2d(out, n_filters, filter_size, activation_fn=None)
    out = slim.batch_norm(out, fused=True)
    return out
def FullResolutionResidualUnit(pool_stream, res_stream, n_filters_3, n_filters_1, pool_scale):
    """
    A full resolution residual unit (FRRU).

    Arguments:
        pool_stream: inputs from the (downsampled) pooling stream
        res_stream: inputs from the full-resolution residual stream
        n_filters_3: output feature maps for each 3x3 conv
        n_filters_1: output feature maps for the 1x1 projection conv
        pool_scale: pooling window size / stride aligning the two streams
    Returns:
        (pool_stream_out, res_stream_out)
    """
    # Downsample the residual stream to the pooling stream's resolution, fuse.
    pooled_res = slim.pool(res_stream, [pool_scale, pool_scale], stride=[pool_scale, pool_scale], pooling_type='MAX')
    merged = tf.concat([pool_stream, pooled_res], axis=-1)
    out = slim.conv2d(merged, n_filters_3, kernel_size=3, activation_fn=None)
    out = slim.batch_norm(out, fused=True)
    out = tf.nn.relu(out)
    out = slim.conv2d(out, n_filters_3, kernel_size=3, activation_fn=None)
    out = slim.batch_norm(out, fused=True)
    pool_stream_out = tf.nn.relu(out)
    # Project back down to the residual stream's width and resolution.
    proj = slim.conv2d(pool_stream_out, n_filters_1, kernel_size=1, activation_fn=None)
    proj = Upsampling(proj, scale=pool_scale)
    res_stream_out = tf.add(res_stream, proj)
    return pool_stream_out, res_stream_out
def build_frrn(inputs, num_classes, preset_model='FRRN-A'):
"""
Builds the Full Resolution Residual Network model.
Arguments:
inputs: The input tensor
preset_model: Which model you want to use. Select FRRN-A or FRRN-B
num_classes: Number of classes
Returns:
FRRN model
"""
if preset_model == 'FRRN-A':
#####################
# Initial Stage
#####################
net = slim.conv2d(inputs, 48, kernel_size=5, activation_fn=None)
net = slim.batch_norm(net, fused=True)
net = tf.nn.relu(net)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = ResidualUnit(net, n_filters=48, filter_size=3)
#####################
# Downsampling Path
#####################
pool_stream = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
res_stream = slim.conv2d(net, 32, kernel_size=1, activation_fn=None)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX')
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX')
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=8)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=8)
pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX')
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=16)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=16)
#####################
# Upsampling Path
#####################
pool_stream = Unpooling(pool_stream, 2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=8)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=8)
pool_stream = Unpooling(pool_stream, 2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream = Unpooling(pool_stream, 2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream = Unpooling(pool_stream, 2)
#####################
# Final Stage
#####################
net = tf.concat([pool_stream, res_stream], axis=-1)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
return net
elif preset_model == 'FRRN-B':
#####################
# Initial Stage
#####################
net = slim.conv2d(inputs, 48, kernel_size=5, activation_fn=None)
net = slim.batch_norm(net, fused=True)
net = tf.nn.relu(net)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = ResidualUnit(net, n_filters=48, filter_size=3)
#####################
# Downsampling Path
#####################
pool_stream = slim.pool(net, [2, 2], stride=[2, 2], pooling_type='MAX')
res_stream = slim.conv2d(net, 32, kernel_size=1, activation_fn=None)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX')
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX')
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=8)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=8)
pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX')
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=16)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=16)
pool_stream = slim.pool(pool_stream, [2, 2], stride=[2, 2], pooling_type='MAX')
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=32)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=384, n_filters_1=32, pool_scale=32)
#####################
# Upsampling Path
#####################
pool_stream = Unpooling(pool_stream, 2)
# BUG FIX: pool_scale was 17 (typo). After unpooling from 1/32 the pool
# stream is at 1/16 resolution, mirroring the downsampling stage that used
# pool_scale=16 (and matching the sibling unit on the next line).
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=16)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=16)
pool_stream = Unpooling(pool_stream, 2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=8)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=8)
pool_stream = Unpooling(pool_stream, 2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=192, n_filters_1=32, pool_scale=4)
pool_stream = Unpooling(pool_stream, 2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream, res_stream = FullResolutionResidualUnit(pool_stream=pool_stream, res_stream=res_stream, n_filters_3=96, n_filters_1=32, pool_scale=2)
pool_stream = Unpooling(pool_stream, 2)
#####################
# Final Stage
#####################
net = tf.concat([pool_stream, res_stream], axis=-1)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = ResidualUnit(net, n_filters=48, filter_size=3)
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, scope='logits')
return net
else:
raise ValueError("Unsupported FRRN model '%s'. This function only supports FRRN-A and FRRN-B" % (preset_model))
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,656 | aaalgo/aardvark | refs/heads/master | /predict-fcn.py | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.python.framework import meta_graph
import picpac
from gallery import Gallery
class Model:
    """Wraps a frozen FCN metagraph for inference.

    Splices the caller's image placeholder into the exported graph,
    hard-wires is_training to False, and exposes the 'prob' output tensor.
    """
    def __init__ (self, X, path, name):
        # path + '.meta' is the exported metagraph; `path` itself is the checkpoint prefix
        mg = meta_graph.read_meta_graph_file(path + '.meta')
        # inference only: the graph's is_training placeholder is pinned to False
        is_training = tf.constant(False)
        self.prob, = \
            tf.import_graph_def(mg.graph_def, name=name,
                                input_map={'images:0': X, 'is_training:0': is_training},
                                return_elements=['prob:0'])
        self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name)
        # call with a live session to restore checkpoint weights
        self.loader = lambda sess: self.saver.restore(sess, path)
        pass
    pass
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', None, '')
flags.DEFINE_integer('clip_stride', 16, '')
flags.DEFINE_integer('max_size', 2000, '')
flags.DEFINE_integer('channels', 3, '')
flags.DEFINE_string('colorspace', 'RGB', '')
flags.DEFINE_string('db', None, '')
flags.DEFINE_string('list', None, '')
flags.DEFINE_integer('max', 50, '')
def save_prediction_image (gal, image, prob):
    """Append two gallery cells: the raw image and its probability map
    rendered into the blue channel of an otherwise black image."""
    cv2.imwrite(gal.next(), image)
    overlay = np.zeros_like(image, dtype=np.float32)
    overlay[:, :, 0] = prob * 255
    cv2.imwrite(gal.next(), np.clip(overlay, 0, 255))
def main (_):
    """Run a frozen FCN over images from a list file and/or a picpac db,
    writing (input, probability-map) pairs into an HTML gallery under ./output."""
    X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
    model = Model(X, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        model.loader(sess)
        gal = Gallery('output', cols=2, ext='.jpg')
        CC = 0  # images processed so far, shared across both input modes
        if FLAGS.list:
            with open(FLAGS.list, 'r') as f:
                for path in f:
                    if CC > FLAGS.max:
                        break
                    path = path.strip()
                    print(path)
                    if FLAGS.channels == 3:
                        image = cv2.imread(path, cv2.IMREAD_COLOR)
                    elif FLAGS.channels == 1:
                        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
                        image = np.expand_dims(image, axis=3)
                    else:
                        assert False
                    H, W = image.shape[:2]
                    if max(H, W) > FLAGS.max_size:
                        # downscale so the longer edge is FLAGS.max_size
                        # NOTE(review): 'f' rebinds the open-file name; the loop
                        # iterator is unaffected, but rename this if ever touched.
                        f = FLAGS.max_size / max(H, W)
                        image = cv2.resize(image, None, fx=f, fy=f)
                        H, W = image.shape[:2]
                    '''BEGIN INFERENCE'''
                    # clip edge
                    H = H // FLAGS.clip_stride * FLAGS.clip_stride
                    W = W // FLAGS.clip_stride * FLAGS.clip_stride
                    image = image[:H, :W].astype(np.float32)
                    # change from BGR to RGB
                    if FLAGS.channels == 3 and FLAGS.colorspace == 'RGB':
                        image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                        batch = np.expand_dims(image_rgb, axis=0)
                    else:
                        batch = np.expand_dims(image, axis=0)
                    prob = sess.run(model.prob, feed_dict={X: batch})
                    '''END INFERENCE'''
                    save_prediction_image(gal, image, prob[0])
                    CC += 1
        if FLAGS.db:
            # picpac performs resizing/edge-clipping via its transform pipeline
            stream = picpac.ImageStream({'db': FLAGS.db, 'loop': False, 'channels': FLAGS.channels, 'colorspace': FLAGS.colorspace, 'threads': 1, 'shuffle': False,
                                         'transforms': [{"type": "resize", "max_size": FLAGS.max_size},
                                                        {"type": "clip", "round": FLAGS.clip_stride}]})
            for meta, batch in stream:
                if CC > FLAGS.max:
                    break
                print(meta.ids)
                image = batch[0]
                if FLAGS.channels == 3 and FLAGS.colorspace == 'RGB':
                    # convert back to BGR for cv2.imwrite
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                prob = sess.run(model.prob, feed_dict={X: batch})
                '''END INFERENCE'''
                save_prediction_image(gal, image, prob[0])
                CC += 1
        gal.flush()
    pass
if __name__ == '__main__':
    tf.app.run()
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,657 | aaalgo/aardvark | refs/heads/master | /predict-frcnn.py | #!/usr/bin/env python3
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import time
from tqdm import tqdm
import numpy as np
import cv2
from skimage import measure
# RESNET: import these for slim version of resnet
import tensorflow as tf
from tensorflow.python.framework import meta_graph
class Model:
    """Wraps a frozen faster-rcnn metagraph for inference.

    Splices the caller's placeholders/constants (images, thresholds, NMS
    parameters, is_training) into the exported graph and exposes the RPN
    and final-box output tensors as self.predictions.
    """
    def __init__ (self, X, anchor_th, nms_max, nms_th, is_training, path, name):
        mg = meta_graph.read_meta_graph_file(path + '.meta')
        self.predictions = tf.import_graph_def(mg.graph_def, name=name,
                                input_map={'images:0': X,
                                           'anchor_th:0': anchor_th,
                                           'nms_max:0': nms_max,
                                           'nms_th:0': nms_th,
                                           'is_training:0': is_training,
                                           },
                                return_elements=['rpn_probs:0', 'rpn_shapes:0', 'rpn_index:0', 'boxes:0'])
        self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name)
        # call with a live session to restore checkpoint weights
        self.loader = lambda sess: self.saver.restore(sess, path)
        pass
    pass
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', None, '')
flags.DEFINE_string('input', None, '')
flags.DEFINE_string('input_db', None, '')
flags.DEFINE_integer('stride', 16, '')
flags.DEFINE_float('anchor_th', 0.5, '')
flags.DEFINE_integer('nms_max', 100, '')
flags.DEFINE_float('nms_th', 0.2, '')
flags.DEFINE_float('max', None, 'max images from db')
def save_prediction_image (path, image, preds):
    """Draw RPN proposals (red) and final detection boxes (green) on the
    image (given in RGB, written as BGR) and save it to `path`.

    preds: (rpn_probs, rpn_boxes, rpn_index, boxes) as produced by Model.
    """
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    rpn_probs, rpn_boxes, rpn_index, boxes = preds
    # single-image batch expected: every proposal must index image 0
    assert np.all(rpn_index == 0)
    rpn_boxes = np.round(rpn_boxes).astype(np.int32)
    for i in range(rpn_boxes.shape[0]):
        x1, y1, x2, y2 = rpn_boxes[i]
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 0, 255))
    # BUG FIX: final boxes were drawn with float coordinates;
    # cv2.rectangle requires integer points (the rounding existed only in
    # dead commented-out code).
    boxes = np.round(boxes).astype(np.int32)
    for i in range(boxes.shape[0]):
        x1, y1, x2, y2 = boxes[i]
        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0))
    cv2.imwrite(path, image)
def main (_):
    """Run the frozen faster-rcnn model over a single image (--input) and/or
    a picpac database (--input_db), saving annotated images."""
    X = tf.placeholder(tf.float32, shape=(None, None, None, 3), name="images")
    is_training = tf.constant(False, name="is_training")
    # detection thresholds baked in as graph constants from flags
    anchor_th = tf.constant(FLAGS.anchor_th, tf.float32)
    nms_max = tf.constant(FLAGS.nms_max, tf.int32)
    nms_th = tf.constant(FLAGS.nms_th, tf.float32)
    model = Model(X, anchor_th, nms_max, nms_th, is_training, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    with tf.Session(config=config) as sess:
        model.loader(sess)
        if FLAGS.input:
            # single-image mode
            assert os.path.exists(FLAGS.input)
            image = cv2.imread(FLAGS.input, cv2.IMREAD_COLOR)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            batch = np.expand_dims(image, axis=0).astype(dtype=np.float32)
            preds = sess.run(model.predictions, feed_dict={X: batch})
            save_prediction_image(FLAGS.input + '.prob.png', image, preds)
        if FLAGS.input_db:
            # batch mode over a picpac database
            assert os.path.exists(FLAGS.input_db)
            import picpac
            from gallery import Gallery
            picpac_config = {"db": FLAGS.input_db,
                             "loop": False,
                             "shuffle": False,
                             "reshuffle": False,
                             "annotate": False,
                             "channels": 3,
                             "stratify": False,
                             "dtype": "float32",
                             "colorspace": "RGB",
                             "batch": 1,
                             "transforms": []
                             }
            stream = picpac.ImageStream(picpac_config)
            gal = Gallery('output')
            C = 0
            for _, images in stream:
                # NOTE(review): is_training is a tf.constant yet appears in
                # feed_dict here — presumably tolerated by TF1; confirm.
                preds = sess.run(model.predictions, feed_dict={X: images, is_training: False})
                save_prediction_image(gal.next(), images[0], preds)
                C += 1
                if FLAGS.max and C >= FLAGS.max:
                    break
                pass
            pass
            gal.flush()
    pass
if __name__ == '__main__':
    tf.app.run()
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,658 | aaalgo/aardvark | refs/heads/master | /zoo/resnet.py | from tensorflow import variable_scope
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers import conv2d, max_pool2d, flatten, fully_connected, batch_norm
# https://arxiv.org/pdf/1605.07146.pdf
# https://arxiv.org/abs/1603.05027
def original_conv2d (net, depth, filter_size):
    # post-activation ordering: conv -> BN (ReLU applied by conv2d's default activation)
    return conv2d(net, depth, filter_size, normalizer_fn=batch_norm)
def rewired_conv2d (net, depth, filter_size):
    """Pre-activation convolution: BN -> ReLU -> conv.

    Ordering per https://arxiv.org/abs/1603.05027 (original is conv-BN-ReLU).
    """
    net = batch_norm(net)
    net = tf.nn.relu(net)
    # BUG FIX: tf.conv2d does not exist; use the contrib.layers conv2d
    # imported at the top of the file (same helper original_conv2d uses),
    # with normalization/activation disabled since both were applied above.
    net = conv2d(net, depth, filter_size, normalizer_fn=None, activation_fn=None)
    return net
myconv2d = rewired_conv2d
def block_basic (net):
    """Basic residual block: two 3x3 convs with an identity shortcut."""
    # BUG FIX: tf.shape(net)[3] is a runtime tensor; the conv layer needs a
    # static python int for its channel count.
    depth = net.get_shape().as_list()[3]
    branch = net
    branch = myconv2d(branch, depth, 3)
    branch = myconv2d(branch, depth, 3)
    return net + branch
def block_bottleneck (net):
    """Bottleneck residual block: 1x1 -> 3x3 -> 1x1 convs with identity shortcut."""
    # BUG FIX: static channel count (tf.shape gives a runtime tensor).
    depth = net.get_shape().as_list()[3]
    branch = net
    # BUG FIX: myconv2d takes no normalizer_fn kwarg (normalization is
    # handled inside myconv2d); passing it raised TypeError.
    branch = myconv2d(branch, depth, 1)
    branch = myconv2d(branch, depth, 3)
    branch = myconv2d(branch, depth, 1)
    return net + branch
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,659 | aaalgo/aardvark | refs/heads/master | /kitti2d/import.py | #!/usr/bin/env python3
import picpac
from tqdm import tqdm
import simplejson as json
from kitti import *
json.encoder.FLOAT_REPR = lambda f: ("%.4f" % f)
json.encoder.c_make_encoder = None
def read_list (path):
    """Parse a KITTI split file: one integer frame number per line."""
    with open(path, 'r') as src:
        return [int(line.strip()) for line in src]
def load_file (path):
    """Return the entire contents of `path` as bytes."""
    with open(path, 'rb') as handle:
        return handle.read()
def import_db (db_path, list_path):
    """Build a picpac database of KITTI 'Car' bounding boxes.

    Reads frame numbers from list_path, loads each training image and its
    label file, and stores the image bytes plus a JSON annotation of
    size-normalized rectangles.
    """
    db = picpac.Writer(db_path, picpac.OVERWRITE)
    tasks = read_list(list_path)
    for number in tqdm(tasks):
        image_path = os.path.join('data/training/image_2', '%06d.png' % number)
        label_path = os.path.join('data/training/label_2', '%06d.txt' % number)
        image = cv2.imread(image_path, -1)
        label = load_label(label_path)
        H, W = image.shape[:2]
        shapes = []
        for obj in label:
            # only the 'Car' category is imported
            if obj.cat != 'Car':
                continue
            #print(obj.bbox)
            x1, y1, x2, y2 = obj.bbox
            # picpac geometry is relative to image size
            x = x1 / W
            y = y1 / H
            w = (x2 - x1)/ W
            h = (y2 - y1)/ H
            shapes.append({'type': 'rect', 'geometry': {'x': x, 'y': y, 'width': w, 'height': h}})
        anno = {'shapes': shapes, 'number': number}
        anno_buf = json.dumps(anno).encode('ascii')
        #print(anno_buf)
        # label 0: single-class detection database
        db.append(0, load_file(image_path), anno_buf)
        pass

import_db('scratch/train.db', 'train.txt')
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,660 | aaalgo/aardvark | refs/heads/master | /mold.py | import numpy as np
import cv2
class Padding:
    """Zero-pads images up to the next multiple of `stride` and packs them
    into a 1-element NHWC float32 batch; probability maps are cropped back."""

    def __init__ (self, stride):
        self.stride = stride

    def batch_image (self, image):
        """Return a (1, H, W, C) float32 batch with H/W rounded up to stride."""
        h, w = image.shape[:2]
        s = self.stride
        padded_h = (h + s - 1) // s * s
        padded_w = (w + s - 1) // s * s
        if image.ndim == 3:
            batch = np.zeros((1, padded_h, padded_w, image.shape[2]), dtype=np.float32)
            batch[0, :h, :w, :] = image
        elif image.ndim == 2:
            # grayscale: add a singleton channel axis
            batch = np.zeros((1, padded_h, padded_w, 1), dtype=np.float32)
            batch[0, :h, :w, 0] = image
        else:
            assert False
        return batch

    def unbatch_prob (self, image, prob_batch):
        """Crop the single probability map back to the original image size."""
        assert prob_batch.shape[0] == 1
        h, w = image.shape[:2]
        return prob_batch[0, :h, :w]
class Scaling:
    """Resizes images by `ratio` (or to a `fixed` square size) with both
    dimensions rounded up to a multiple of `stride`, packing the result into
    a 1-element NHWC float32 batch; probability maps are resized back."""

    def __init__ (self, stride, ratio=1.0, fixed = None):
        self.stride = stride
        # BUG FIX: was `self.fixed = None`, silently discarding the caller's
        # `fixed` argument so fixed-size batching never took effect.
        self.fixed = fixed
        self.ratio = ratio
        pass

    def batch_image (self, image):
        # convert image into a batch, with proper stride
        h, w = image.shape[:2]
        H = (int(round(h * self.ratio)) + self.stride - 1) // self.stride * self.stride
        W = (int(round(w * self.ratio)) + self.stride - 1) // self.stride * self.stride
        if not self.fixed is None:
            # fixed overrides the ratio-derived size entirely
            H = self.fixed
            W = self.fixed
        if len(image.shape) == 3:
            C = image.shape[2]
            batch = np.zeros((1, H, W, C), dtype=np.float32)
            batch[0, :, :, :] = cv2.resize(image, (W, H))
        elif len(image.shape) == 2:
            batch = np.zeros((1, H, W, 1), dtype=np.float32)
            batch[0, :, :, 0] = cv2.resize(image, (W, H))
        else:
            assert False
        return batch

    def unbatch_prob (self, image, prob_batch):
        # resize the (single) probability map back to the original image size
        h, w = image.shape[:2]
        return cv2.resize(prob_batch[0], (w, h))
    pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,661 | aaalgo/aardvark | refs/heads/master | /train-fcn-selim.py | #!/usr/bin/env python3
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'zoo/dsb_selim'))
import numpy as np
import tensorflow as tf
import keras
from keras.optimizers import Adam
from keras.callbacks import LambdaCallback
import aardvark
from models.model_factory import make_model
flags = tf.app.flags
flags.DEFINE_string('net', 'resnet50_2', 'architecture')
FLAGS = flags.FLAGS
def acc (a, b): # just for shorter name
    """Alias for keras sparse categorical accuracy (shorter metric label)."""
    return keras.metrics.sparse_categorical_accuracy(a, b)
def prep (record):
    """Drop the meta field from a picpac record: keras fit_generator wants
    (images, labels) pairs."""
    _, images, labels = record
    return images, labels
def build_model ():
    """Create and compile the keras segmentation model selected by FLAGS.net."""
    # a fixed input geometry is required by the keras model factory
    assert FLAGS.fix_width > 0
    assert FLAGS.fix_height > 0
    model = make_model(FLAGS.net, [FLAGS.fix_height, FLAGS.fix_width, FLAGS.channels])
    model.compile(optimizer=Adam(lr=0.0001),
                  loss='sparse_categorical_crossentropy',
                  metrics=[acc])
    return model
def main (_):
    """Train a keras segmentation model using aardvark's picpac streams."""
    from keras.backend import set_image_data_format
    from keras.backend.tensorflow_backend import set_session
    set_image_data_format('channels_last')
    # let TF grab GPU memory incrementally
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    set_session(tf.Session(config=config))
    model = build_model()
    sm = aardvark.SegmentationModel()
    train_stream = sm.create_stream(FLAGS.db, True)
    val_stream = sm.create_stream(FLAGS.val_db, False)
    # we need to reset val_stream at the end of each epoch
    callbacks = [keras.callbacks.LambdaCallback(on_epoch_end=lambda epoch, logs: val_stream.reset()),
                 keras.callbacks.ModelCheckpoint('%s.{epoch:03d}-{val_loss:.2f}.hdf5' % FLAGS.model, period=FLAGS.ckpt_epochs),
                 ]
    hist = model.fit_generator(map(prep, train_stream),
                               steps_per_epoch=train_stream.size()//FLAGS.batch,
                               epochs=FLAGS.max_epochs,
                               validation_data=map(prep, val_stream),
                               validation_steps=val_stream.size()//FLAGS.batch,
                               callbacks=callbacks)
    model.save_weights(FLAGS.model)
    pass
if __name__ == '__main__':
    try:
        tf.app.run()
    except KeyboardInterrupt:
        pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,662 | aaalgo/aardvark | refs/heads/master | /train-cls-slim.py | #!/usr/bin/env python3
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'zoo/slim'))
import tensorflow as tf
import tensorflow.contrib.slim as slim
from nets import nets_factory
import aardvark
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('finetune', None, '')
flags.DEFINE_string('net', 'resnet_v2_50', 'architecture')
class Model (aardvark.ClassificationModel):
    """Image classifier backed by a stock tf-slim architecture (FLAGS.net)."""
    def __init__ (self):
        super().__init__()
        pass
    def inference (self, images, classes, is_training):
        # build the slim network with a global-pool classification head
        logits = aardvark.create_stock_slim_network(FLAGS.net, images, is_training, num_classes=classes, global_pool=True)
        if FLAGS.finetune:
            # pretrained checkpoints expect RGB input
            assert FLAGS.colorspace == 'RGB'
            # restore pretrained weights; train only variables whose names
            # contain 'logits'
            self.init_session, self.variables_to_train = aardvark.setup_finetune(FLAGS.finetune, lambda x: 'logits' in x)
        return logits
    pass
def main (_):
    """Entry point: build the classifier and hand it to aardvark's trainer."""
    aardvark.train(Model())
if __name__ == '__main__':
try:
tf.app.run()
except KeyboardInterrupt:
pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,663 | aaalgo/aardvark | refs/heads/master | /gallery.py | #!/usr/bin/env python3
import os
from jinja2 import Environment, FileSystemLoader
TMPL_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'./templates')
env = Environment(loader=FileSystemLoader(searchpath=TMPL_DIR))
tmpl = env.get_template('gallery.html')
class Gallery:
    """Accumulates image/text cells and renders them as an HTML index page.

    Cells are laid out `cols` per row; `next()` reserves an output path for
    the caller to write an image to, `flush()` writes index.html.
    """
    def __init__ (self, path, cols = 1, header = None, ext = '.png'):
        self.next_id = 0
        self.path = path
        self.cols = cols
        self.header = header
        self.ext = ext
        self.images = []
        if path != '.':
            # BUG FIX: was a bare try/except pass, which silently swallowed
            # every makedirs failure (e.g. permission denied), not just
            # "directory already exists"; exist_ok handles only the latter.
            os.makedirs(path, exist_ok=True)

    def text (self, tt, br = False):
        """Append a text cell; with br=True pad the rest of the row with
        empty cells so the next item starts a new row."""
        self.images.append({
            'text': tt})
        if br:
            for i in range(1, self.cols):
                self.images.append({
                    'text': ''})
        pass

    def next (self, text=None, link=None, ext=None, path=None):
        """Reserve the next image slot and return the filesystem path the
        caller should write the image to."""
        if ext is None:
            ext = self.ext
        if path is None:
            path = '%03d%s' % (self.next_id, ext)
        self.images.append({
            'image': path,
            'text': text,
            'link': link})
        self.next_id += 1
        return os.path.join(self.path, path)

    def flush (self):
        """Render index.html listing all accumulated cells, cols per row."""
        with open(os.path.join(self.path, 'index.html'), 'w') as f:
            images = [self.images[i:i+self.cols] for i in range(0, len(self.images), self.cols)]
            f.write(tmpl.render(images=images, header=self.header))
        pass
    pass
if __name__ == '__main__':
    # CLI mode: build an index.html gallery of every file with the given
    # extension in the current directory.
    import argparse
    from glob import glob
    parser = argparse.ArgumentParser()
    parser.add_argument("--ext", default='.jpg')
    args = parser.parse_args()
    gal = Gallery('.')
    for path in glob('*' + args.ext):
        print(path)
        gal.next(path=path)
        pass
    gal.flush()
52,664 | aaalgo/aardvark | refs/heads/master | /train-fcn-sss.py | #!/usr/bin/env python3
import os
import sys
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'zoo/sss'))
import tensorflow as tf
import aardvark
from FC_DenseNet_Tiramisu import build_fc_densenet
from Encoder_Decoder import build_encoder_decoder
from RefineNet import build_refinenet
from FRRN import build_frrn
from MobileUNet import build_mobile_unet
from PSPNet import build_pspnet
from GCN import build_gcn
from DeepLabV3 import build_deeplabv3
from DeepLabV3_plus import build_deeplabv3_plus
from AdapNet import build_adaptnet
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('net', 'MobileUNet', 'architecture')
class Model (aardvark.SegmentationModel):
    """Segmentation model that dispatches FLAGS.net to one of the
    Semantic-Segmentation-Suite network builders imported above."""

    def __init__ (self):
        super().__init__()
        pass

    def init_session (self, sess):
        # some builders (RefineNet/PSPNet/GCN/DeepLab) return an init_fn
        # that loads pretrained ResNet weights into the session
        if not self.init_fn is None:
            self.init_fn(sess)
        pass

    def inference (self, net_input, num_classes, is_training):
        # NOTE(review): `slim`, `fuck_slim` and `FLAGS.patch_slim` are not
        # defined/imported in this file; presumably provided elsewhere — confirm.
        if FLAGS.patch_slim:
            fuck_slim.patch(is_training)
        network = None
        init_fn = None
        if FLAGS.net == "FC-DenseNet56" or FLAGS.net == "FC-DenseNet67" or FLAGS.net == "FC-DenseNet103":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                network = build_fc_densenet(net_input, preset_model = FLAGS.net, num_classes=num_classes)
        elif FLAGS.net == "RefineNet-Res50" or FLAGS.net == "RefineNet-Res101" or FLAGS.net == "RefineNet-Res152":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                # RefineNet requires pre-trained ResNet weights
                network, init_fn = build_refinenet(net_input, preset_model = FLAGS.net, num_classes=num_classes, is_training=is_training)
        elif FLAGS.net == "FRRN-A" or FLAGS.net == "FRRN-B":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                network = build_frrn(net_input, preset_model = FLAGS.net, num_classes=num_classes)
        elif FLAGS.net == "Encoder-Decoder" or FLAGS.net == "Encoder-Decoder-Skip":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                network = build_encoder_decoder(net_input, preset_model = FLAGS.net, num_classes=num_classes)
        elif FLAGS.net == "MobileUNet" or FLAGS.net == "MobileUNet-Skip":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                network = build_mobile_unet(net_input, preset_model = FLAGS.net, num_classes=num_classes)
        elif FLAGS.net == "PSPNet-Res50" or FLAGS.net == "PSPNet-Res101" or FLAGS.net == "PSPNet-Res152":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                # Image size is required for PSPNet
                # PSPNet requires pre-trained ResNet weights
                # NOTE(review): `args` is undefined in this file — this branch
                # will NameError as written; likely should come from FLAGS.
                network, init_fn = build_pspnet(net_input, label_size=[args.crop_height, args.crop_width], preset_model = FLAGS.net, num_classes=num_classes, is_training=is_training)
        elif FLAGS.net == "GCN-Res50" or FLAGS.net == "GCN-Res101" or FLAGS.net == "GCN-Res152":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                # GCN requires pre-trained ResNet weights
                network, init_fn = build_gcn(net_input, preset_model = FLAGS.net, num_classes=num_classes, is_training=is_training)
        elif FLAGS.net == "DeepLabV3-Res50" or FLAGS.net == "DeepLabV3-Res101" or FLAGS.net == "DeepLabV3-Res152":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                # DeepLabV requires pre-trained ResNet weights
                network, init_fn = build_deeplabv3(net_input, preset_model = FLAGS.net, num_classes=num_classes, is_training=is_training)
        elif FLAGS.net == "DeepLabV3_plus-Res50" or FLAGS.net == "DeepLabV3_plus-Res101" or FLAGS.net == "DeepLabV3_plus-Res152":
            # DeepLabV3+ requires pre-trained ResNet weights
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                network, init_fn = build_deeplabv3_plus(net_input, preset_model = FLAGS.net, num_classes=num_classes, is_training=is_training)
        elif FLAGS.net == "AdapNet":
            with slim.arg_scope(aardvark.default_argscope(is_training)):
                network = build_adaptnet(net_input, num_classes=num_classes)
        else:
            # BUG FIX: message used "%d" with a string AND referenced the
            # undefined name `preset_model` (NameError); use %s and FLAGS.net.
            raise ValueError("Error: the model %s is not available. Try checking which models are available using the command python main.py --help" % (FLAGS.net,))
        self.init_fn = init_fn
        return network
def main (_):
    """Entry point: build the segmentation model and train it via aardvark."""
    aardvark.train(Model())
if __name__ == '__main__':
try:
tf.app.run()
except KeyboardInterrupt:
pass
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,665 | aaalgo/aardvark | refs/heads/master | /zoo/dsb_selim/models/model_factory.py | from models.unets import resnet152_fpn, resnet101_fpn, resnet50_fpn, xception_fpn, densenet_fpn, inception_resnet_v2_fpn
def make_model(network, input_shape):
    """Instantiate one of the known FPN segmentation architectures.

    network: architecture key (see table below).
    input_shape: [H, W, C] input geometry passed to the builder.
    Raises ValueError for an unknown key.

    NOTE: the original if/elif chain listed 'resnet50_2' twice with the same
    result (second branch unreachable); the table keeps a single entry.
    """
    builders = {
        'resnet101_softmax': lambda: resnet101_fpn(input_shape, channels=3, activation="softmax"),
        'resnet152_2': lambda: resnet152_fpn(input_shape, channels=2, activation="sigmoid"),
        'resnet101_2': lambda: resnet101_fpn(input_shape, channels=2, activation="sigmoid"),
        'resnet50_2': lambda: resnet50_fpn(input_shape, channels=2, activation="sigmoid"),
        'resnetv2': lambda: inception_resnet_v2_fpn(input_shape, channels=2, activation="sigmoid"),
        'resnetv2_3': lambda: inception_resnet_v2_fpn(input_shape, channels=3, activation="sigmoid"),
        'densenet169': lambda: densenet_fpn(input_shape, channels=2, activation="sigmoid"),
        'densenet169_softmax': lambda: densenet_fpn(input_shape, channels=3, activation="softmax"),
        'resnet101_unet_2': lambda: resnet101_fpn(input_shape, channels=2, activation="sigmoid"),
        'xception_fpn': lambda: xception_fpn(input_shape, channels=2, activation="sigmoid"),
    }
    try:
        builder = builders[network]
    except KeyError:
        raise ValueError('unknown network ' + network)
    return builder()
52,666 | aaalgo/aardvark | refs/heads/master | /predict-basic-keypoints.py | #!/usr/bin/env python3
import os
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), 'build/lib.linux-x86_64-3.5'))
import time
from tqdm import tqdm
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.python.framework import meta_graph
import picpac, cpp
class Model:
    """Frozen keypoint model restored from an exported meta-graph.

    Splices the graph saved at ``path + '.meta'`` into the current graph,
    remapping its 'images' and 'is_training' placeholders to the tensors
    supplied by the caller, and exposes the 'prob' and 'offsets' outputs
    plus a ``loader`` callable that restores checkpoint weights.
    """
    def __init__ (self, X, is_training, path, name):
        # Read the serialized MetaGraphDef saved next to the checkpoint.
        mg = meta_graph.read_meta_graph_file(path + '.meta')
        # Import the stored graph, feeding our placeholders in place of the
        # ones recorded at export time; grab the two output tensors.
        self.prob, self.offsets = tf.import_graph_def(mg.graph_def, name=name,
                    input_map={'images:0': X,
                               'is_training:0': is_training},
                    return_elements=['prob:0', 'offsets:0'])
        # Saver rebuilt from the stored saver_def; loader restores weights
        # into the given session from the checkpoint at `path`.
        self.saver = tf.train.Saver(saver_def=mg.saver_def, name=name)
        self.loader = lambda sess: self.saver.restore(sess, path)
        pass
    pass
# Command-line configuration via TensorFlow's flag wrapper.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('model', None, '')  # checkpoint path prefix to restore (required)
flags.DEFINE_integer('channels', 3, '')  # number of input image channels
flags.DEFINE_string('input', None, '')  # single-image input path (not implemented below)
flags.DEFINE_string('input_db', None, '')  # picpac database to run prediction over
flags.DEFINE_integer('stride', 4, '')  # output stride of the prob/offset maps
flags.DEFINE_integer('backbone_stride', 16, '')  # images are clipped to a multiple of this
flags.DEFINE_integer('max', 50, '')  # stop after this many images (0 = unlimited)
flags.DEFINE_integer('max_size', 20000, '')  # resize cap on the longest image side
flags.DEFINE_float('anchor_th', 0.5, '')  # anchor threshold; not referenced in this file
def save_prediction_image (path, image, kp, mask, prob):
    """Render keypoint predictions onto `image` and write the result to `path`.

    The probability map is upsampled and added to the blue channel, the
    ground-truth mask brightens the red channel, and each predicted keypoint
    is drawn as a green circle annotated with its score.

    Args:
        path:  output file path for cv2.imwrite.
        image: HxWxC image (single-channel input is expanded to BGR);
               assumed float-valued here (picpac dtype float32) -- the
               in-place channel arithmetic below would fail on uint8.
        kp:    iterable of (x, y, class, score) predictions.
        mask:  HxW label map; nonzero pixels are highlighted.
        prob:  low-resolution probability map, resized up to (W, H).
    """
    if image.shape[2] == 1:
        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    H, W = image.shape[:2]
    # Overlay the (upsampled) probability heatmap on the blue channel.
    blue = image[:, :, 0]
    blue += 255 * cv2.resize(prob, (W, H))
    # Highlight labeled pixels in the red channel.
    red = image[:, :, 2]
    mask = mask > 0
    red[mask] *= 0.5
    red[mask] += 127
    for x, y, c, score in kp:
        # FIX: OpenCV drawing primitives require integer pixel coordinates;
        # keypoints decoded from the offset maps are generally fractional.
        xi, yi = int(round(x)), int(round(y))
        cv2.circle(image, (xi, yi), 3, (0,255,0), 2)
        cv2.putText(image, '%.4f'%score, (xi, yi+40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0), 1)
    # FIX: cast to uint8 after clipping so imwrite stores a proper 8-bit
    # image instead of a truncated float buffer.
    cv2.imwrite(path, np.clip(image, 0, 255).astype(np.uint8))
    pass
def main (_):
    """Restore the keypoint model and run prediction over a picpac database.

    Builds the input placeholders, splices in the frozen model, then streams
    images from --input_db, decodes keypoints from the prob/offset maps, and
    writes one visualization per image into the 'out' gallery.
    """
    X = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
    is_training = tf.placeholder(tf.bool, name="is_training")
    model = Model(X, is_training, FLAGS.model, 'xxx')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    with tf.Session(config=config) as sess:
        model.loader(sess)
        if FLAGS.input:
            # Single-image mode is not implemented for keypoints.
            assert False
            '''
            assert os.path.exists(FLAGS.input)
            image = cv2.imread(FLAGS.input, cv2.IMREAD_COLOR)
            batch = np.expand_dims(image, axis=0).astype(dtype=np.float32)
            boxes, probs = sess.run([model.boxes, model.probs], feed_dict={X: batch, is_training: False})
            save_prediction_image(FLAGS.input + '.prob.png', image, boxes, probs)
            '''
        if FLAGS.input_db:
            assert os.path.exists(FLAGS.input_db)
            from gallery import Gallery
            # FIX: the original dict literal listed "annotate" twice (False,
            # then [1]); only the later [1] ever took effect, so the dead
            # first entry has been removed.
            picpac_config = {"db": FLAGS.input_db,
                "loop": False,
                "shuffle": False,
                "reshuffle": False,
                "channels": FLAGS.channels,
                "colorspace": "RGB",
                "stratify": False,
                "dtype": "float32",
                "batch": 1,
                "annotate": [1],
                "transforms": [
                    {"type": "resize", "max_size": FLAGS.max_size},
                    {"type": "clip", "round": FLAGS.backbone_stride},
                    {"type": "keypoints.basic", 'downsize': 1, 'classes': 1, 'radius': 25},
                    {"type": "drop"}, # remove original annotation
                    ]
                }
            stream = picpac.ImageStream(picpac_config)
            gal = Gallery('out')
            C = 0
            for meta, images, _, label, _ in stream:
                shape = list(images.shape)
                shape[1] //= FLAGS.stride
                shape[2] //= FLAGS.stride
                shape[3] = 1
                prob, offsets = sess.run([model.prob, model.offsets], feed_dict={X: images, is_training: False})
                # Decode (x, y, class, score) keypoints from the maps.
                kp = cpp.predict_basic_keypoints(prob[0], offsets[0], FLAGS.stride, 0.1)
                print(images.shape, prob.shape, offsets.shape, kp)
                save_prediction_image(gal.next(), images[0], kp, label[0, :, :, 0], prob[0, :, :, 0])
                C += 1
                if FLAGS.max and C >= FLAGS.max:
                    break
                pass
            pass
            # FIX: flush moved inside the if-block; at the original
            # indentation `gal` was a NameError whenever --input_db was unset.
            gal.flush()
    pass
# Entry point: tf.app.run() parses the flags above and invokes main().
if __name__ == '__main__':
    tf.app.run()
| {"/aardvark.py": ["/tf_utils.py"], "/train-basic-keypoints.py": ["/aardvark.py", "/tf_utils.py"], "/rpn3d.py": ["/aardvark.py", "/tf_utils.py"], "/train-frcnn.py": ["/aardvark.py"], "/faster_rcnn.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-slim.py": ["/aardvark.py", "/tf_utils.py"], "/train-fcn-unet.py": ["/aardvark.py"], "/gate/predict_gate.py": ["/gallery.py"], "/cxray/predict-cls-vis.py": ["/mold.py", "/gallery.py"], "/predict-fcn.py": ["/gallery.py"], "/predict-frcnn.py": ["/gallery.py"], "/train-fcn-selim.py": ["/aardvark.py"], "/train-cls-slim.py": ["/aardvark.py"], "/train-fcn-sss.py": ["/aardvark.py"], "/predict-basic-keypoints.py": ["/gallery.py"]} |
52,668 | ECNU-Studio/emoc | refs/heads/master | /apps/questionnaire/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 12:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the questionnaire app.

    Creates Questionnaire, Question, Choice, Answer and RunInfo, plus the
    unmanaged QuestionnaireStatistics model mapped onto an existing
    'questionnaire_statistics' table/view, then wires the foreign keys.
    Depends on the legacy 'nengli8' app for CourseOld/UserOld.
    (Auto-generated by Django -- do not hand-edit the field definitions.)
    """
    initial = True
    dependencies = [
        ('nengli8', '0002_userold'),
    ]
    operations = [
        migrations.CreateModel(
            name='QuestionnaireStatistics',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('questionnaire', models.IntegerField()),
                ('name', models.CharField(max_length=128, verbose_name='\u95ee\u5377\u6807\u9898')),
                ('question', models.IntegerField()),
                ('question_text', models.CharField(max_length=128, verbose_name='\u95ee\u9898')),
                ('qsort', models.IntegerField()),
                ('type', models.CharField(max_length=32)),
                ('choice', models.IntegerField()),
                ('choice_text', models.CharField(max_length=128, verbose_name='\u9009\u9879')),
                ('csort', models.IntegerField()),
                ('sum', models.IntegerField()),
                ('percent', models.IntegerField()),
            ],
            options={
                'db_table': 'questionnaire_statistics',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question', models.IntegerField()),
                ('choice', models.IntegerField(blank=True, null=True)),
                ('text', models.TextField(blank=True, null=True)),
                ('create_time', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sortnum', models.IntegerField(default=1, verbose_name='\u5e8f\u53f7')),
                ('text', models.CharField(max_length=128, verbose_name='\u9009\u9879')),
                ('tags', models.CharField(blank=True, editable=False, max_length=64, verbose_name='Tags')),
            ],
            options={
                'verbose_name': '\u9009\u9879',
                'verbose_name_plural': '\u9009\u9879',
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sortnum', models.IntegerField(default=1, verbose_name='\u5e8f\u53f7')),
                ('type', models.CharField(choices=[(b'radio', '\u5355\u9009'), (b'checkbox', '\u591a\u9009'), (b'star', '\u6253\u661f'), (b'text', '\u95ee\u7b54')], max_length=32, verbose_name='\u9898\u578b')),
                ('text', models.CharField(max_length=128, verbose_name='\u95ee\u9898')),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('update_time', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': '\u95ee\u9898',
                'verbose_name_plural': '\u95ee\u9898',
            },
        ),
        migrations.CreateModel(
            name='Questionnaire',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_published', models.BooleanField(default=False, verbose_name='\u662f\u5426\u53d1\u5e03')),
                ('take_nums', models.IntegerField(default=0, verbose_name='\u53c2\u4e0e\u4eba\u6570')),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('update_time', models.DateTimeField(auto_now=True)),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nengli8.CourseOld', verbose_name='\u95ee\u5377')),
            ],
            options={
                'verbose_name': '\u95ee\u5377',
                'verbose_name_plural': '\u95ee\u5377',
                'permissions': (('export', 'Can export questionnaire answers'), ('management', 'Management Tools')),
            },
        ),
        migrations.CreateModel(
            name='RunInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='\u95ee\u5377\u65f6\u95f4')),
                ('questionnaire', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questionnaire.Questionnaire', verbose_name='\u95ee\u5377')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questionnaire_user_id', to='nengli8.UserOld', verbose_name='\u95ee\u5377\u7528\u6237')),
            ],
            options={
                'verbose_name': '\u8bb0\u5f55',
                'verbose_name_plural': '\u8bb0\u5f55',
            },
        ),
        migrations.AddField(
            model_name='question',
            name='questionnaire',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questionnaire.Questionnaire', verbose_name='\u95ee\u5377'),
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questionnaire.Question'),
        ),
        migrations.AddField(
            model_name='answer',
            name='runinfo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='questionnaire.RunInfo'),
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,669 | ECNU-Studio/emoc | refs/heads/master | /apps/companys/apps.py | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.apps import AppConfig
class CompanysConfig(AppConfig):
    """Django app config for the companys app (admin label 'Enterprise')."""
    name = 'companys'
    verbose_name = u'企业'
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,670 | ECNU-Studio/emoc | refs/heads/master | /apps/courses/adminx.py | # _*_ coding:utf-8 _*_
import xadmin
from .models import Courses
# from teacheres.models import Teacheres
# class TeachersChoice(object):
# model = Teacheres
# extra = 0
# Course admin
class CoursesAdmin(object):
    """xadmin list/search options for the Courses model."""
    list_display = ['name', 'coursesAbstract', 'teacherid']
    search_fields = ['name']
    list_filter = ['name']
    # editable directly on the change-list page
    list_editable = ['name']
    model_icon = 'fa fa-graduation-cap'
    # inlines = [TeachersChoice]
xadmin.site.register(Courses, CoursesAdmin)
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,671 | ECNU-Studio/emoc | refs/heads/master | /apps/nengli8/models.py | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext as _
# Create your models here.
class CourseOld(models.Model):
    """Unmanaged mapping onto the legacy 'courses' table (read via nengli8)."""
    # id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=52, verbose_name='课程名字')
    def __unicode__(self):
        # Display the course name in admin/choice widgets.
        return self.name
    class Meta:
        verbose_name = '课程'
        verbose_name_plural = verbose_name
        # managed=False: Django never creates/migrates this table.
        managed = False
        db_table = 'courses'
class UserOld(models.Model):
    """Unmanaged mapping onto the legacy 'users' table."""
    # id = models.IntegerField(primary_key=True)
    username = models.CharField(max_length=52, verbose_name='账号', db_column='username')
    def __unicode__(self):
        # Display the account name in admin/choice widgets.
        return self.username
    class Meta:
        managed = False
        db_table = 'users'
52,672 | ECNU-Studio/emoc | refs/heads/master | /apps/classes/models.py | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
#from courses.models import Courses
# Create your models here.
# Training-class model
class Classes(models.Model):
    """A scheduled class: a course delivered to a company at a time/place.

    companyid/coursesid are stored as plain CharFields rather than foreign
    keys (the FK relations are kept commented out below).
    """
    companyid = models.CharField(max_length=45, verbose_name=_(u"公司id"))
    coursesid = models.CharField(max_length=45, verbose_name=_(u"课程id"))
    schoolTime = models.DateTimeField( verbose_name=_(u"上课时间"))
    address = models.CharField(max_length=100, verbose_name=_(u"上课地点"))
    # NOTE(review): BooleanField takes no meaningful max_length -- confirm intent.
    state = models.BooleanField(max_length=1, verbose_name=_(u"状态"))
    period = models.CharField(max_length=45, verbose_name=_(u"周期"))
    hour = models.IntegerField(default=0 , verbose_name=_(u"学时"))
    # classStudent = models.ForeignKey(ClassStudent, verbose_name=_(u"上次课程的学生"))
    # courses = models.ForeignKey(Courses, verbose_name=_(u"此班级要上的课程"))
    # companys = models.ForeignKey(Companys, verbose_name=_(u"上此课程的公司"))
    # classModels = models.ForeignKey(ClassModels, verbose_name=_(u"课程模块"))
    # comment = models.ForeignKey(Comment, verbose_name=_(u"评论"))
    # classAddress = models.ForeignKey(ClassAddress, default="", verbose_name=_(u"班级地址"))
    class Meta:
        verbose_name = '班级'
        verbose_name_plural = verbose_name
        # managed = False
        # db_table = 'class'
    # def teacher(self):
    #     course = Courses.objects.filter(id=self.coursesid)
    #     return course.teacher
    def __unicode__(self):
        # Displayed as the raw course id string.
        return self.coursesid
52,673 | ECNU-Studio/emoc | refs/heads/master | /apps/companys/adminx.py | # _*_ coding:utf-8 _*_
import xadmin
from .models import Companys,Users
class UsersChoice(object):
    """Inline editor showing a company's Users on the Companys change page."""
    model = Users
    extra = 0
# Enterprise admin
class CompanysAdmin(object):
    """xadmin options for the Companys model, with inline user editing."""
    list_display = ['name', 'account', 'email', 'legalperson', 'address']
    search_fields = ['name']
    # list_filter = ['name']
    # editable directly on the change-list page
    list_editable = ['name']
    model_icon = 'fas fa-clipboard-list'
    inlines = [UsersChoice]
xadmin.site.register(Companys, CompanysAdmin)
# class DemoAdmin(object):
#     list_display = ['name']
#     search_fields = ['name']
#     model_icon = 'fas fa-clipboard-list'
# Users admin
class UsersAdmin(object):
    """xadmin options for the Users model."""
    list_display = ['name', 'username', 'password', 'tel', 'department', 'position', 'email', 'total_class']
    search_fields = ['name']
    # list_filter = ['name']
    # editable directly on the change-list page
    list_editable = ['name']
    model_icon = 'fas fa-clipboard-list'
xadmin.site.register(Users, UsersAdmin)
52,674 | ECNU-Studio/emoc | refs/heads/master | /apps/questionnaire/adminx.py | # _*_ coding:utf-8 _*_
import xadmin
from .models import Questionnaire, Question, RunInfo, Choice
class QuestionInline(object):
    """Inline editor for a questionnaire's questions."""
    model = Question
    extra = 0
class ChoiceInline(object):
    """Inline editor for a question's choices."""
    model = Choice
    extra = 0
class QuestionnaireAdmin(object):
    """Admin for draft (unpublished) questionnaires only."""
    list_display = ['course', 'edit_questionnaire', 'show_questionnaire']
    search_fields = ['course']
    list_filter = ['course']
    # editable directly on the change-list page
    list_editable = ['course']
    model_icon = 'fas fa-clipboard-list'
    # hidden form fields
    exclude = ['take_nums']
    # newest first by update time
    ordering = ['-update_time']
    def queryset(self):
        # Restrict this admin to unpublished questionnaires.
        qs = super(QuestionnaireAdmin, self).queryset()
        qs = qs.filter(is_published=False)
        return qs
class PublishedQuestionnaireAdmin(object):
    """Read-mostly admin for questionnaires that have been published."""
    list_display = ['name', 'show_statistics']
    search_fields = ['name']
    list_filter = ['name']
    # hidden form fields
    exclude = ['is_published']
    # read-only fields
    readonly_fields = ['name', 'take_nums']
    model_icon = 'fas fa-clipboard-list'
    # newest first by update time
    ordering = ['-update_time']
    def queryset(self):
        # Restrict this admin to published questionnaires.
        qs = super(PublishedQuestionnaireAdmin, self).queryset()
        qs = qs.filter(is_published=True)
        return qs
class QuestionAdmin(object):
    """Admin for individual questions, with inline choice editing."""
    list_display = ['questionnaire', 'text', 'type']
    search_fields = ['text']
    # list_filter = ['type']
    # read-only fields
    readonly_fields = ['sortnum']
    model_icon = 'fas fa-question'
    # hidden form fields
    # exclude = ['sortnum']
    relfield_style = 'fk_ajax'
    inlines = [ChoiceInline]
# class ChoiceAdmin(object):
# list_display = ['question', 'text']
# search_fields = ['text']
# # list_filter = ['question', 'text']
# readonly_fields = ['sortnum']
# model_icon = 'fas fa-question'
# # 不显示字段
# # exclude = ['sortnum']
# relfield_style = 'fk_ajax'
class RunInfoAdmin(object):
    """Read-only admin listing which user answered which questionnaire when."""
    list_display = ['questionnaire', 'user', 'create_time']
    search_fields = ['questionnaire', 'user']
    list_filter = ['questionnaire', 'user', 'create_time']
    model_icon = 'fas fa-history' #far fa-chart-bar'
    readonly_fields = ['questionnaire', 'user', 'create_time']
# Statistics admin (currently not registered below)
class QuestionnaireStatisticsAdmin(object):
    """Admin over the unmanaged questionnaire_statistics view."""
    list_display = ['question']
    search_fields = ['question']
    list_filter = ['question']
    model_icon = 'far fa-chart-bar'
xadmin.site.register(Questionnaire, QuestionnaireAdmin)
# xadmin.site.register(PublishedQuestionnaire, PublishedQuestionnaireAdmin)
# xadmin.site.register(Question, QuestionAdmin)
# xadmin.site.register(Choice, ChoiceAdmin)
xadmin.site.register(RunInfo, RunInfoAdmin)
# xadmin.site.register(QuestionnaireStatistics, QuestionnaireStatisticsAdmin)
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,675 | ECNU-Studio/emoc | refs/heads/master | /extra_apps/xadmin/plugins/mdeditor.py | import xadmin
from xadmin.views import BaseAdminPlugin, CreateAdminView, UpdateAdminView
from mdeditor.fields import MDTextField
from mdeditor.widgets import MDEditorWidget
class XadminMDEditorWidget(MDEditorWidget):
    """MDEditorWidget variant adapted for rendering inside xadmin forms.

    Remembers the editor options for later inspection and suppresses the
    widget's own JS media (xadmin loads its assets itself).
    """
    def __init__(self, **kwargs):
        self.mdeditor_options = kwargs
        # xadmin bundles its own scripts; drop the widget's JS media list
        # so the editor assets are not loaded twice.
        self.Media.js = None
        # FIX: expand kwargs into the parent constructor. The original
        # passed the dict positionally (`__init__(kwargs)`), binding the
        # whole options dict to MDEditorWidget's first parameter instead of
        # forwarding the individual keyword options. Mirrors the sibling
        # xadmin ueditor plugin, which calls super(...).__init__(**kwargs).
        super(XadminMDEditorWidget, self).__init__(**kwargs)
class MDeditorPlugin(BaseAdminPlugin):
    """xadmin plugin that swaps in the markdown editor for 'mdeditor' fields."""
    def get_field_style(self, attrs, db_field, style, **kwargs):
        # Only intervene when the field is styled 'mdeditor' and is an
        # actual MDTextField; otherwise pass the attrs through untouched.
        if style == 'mdeditor':
            if isinstance(db_field, MDTextField):
                widget = db_field.formfield().widget
                # Merge the stock widget's settings and HTML attrs into one
                # options dict for our xadmin-adapted widget.
                # NOTE(review): reads `widget.mdeditor_settings` from the
                # stock MDEditorWidget, while XadminMDEditorWidget above
                # stores `mdeditor_options` -- confirm the attribute name
                # against the installed django-mdeditor version.
                param = {}
                param.update(widget.mdeditor_settings)
                param.update(widget.attrs)
                return {'widget': XadminMDEditorWidget(**param)}
        return attrs
# Apply the plugin on both the edit and the create admin views.
xadmin.site.register_plugin(MDeditorPlugin, UpdateAdminView)
xadmin.site.register_plugin(MDeditorPlugin, CreateAdminView)
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,676 | ECNU-Studio/emoc | refs/heads/master | /apps/companys/migrations/0002_auto_20180421_1712.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 17:12
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the Users model and the ComtoUsers company-user link table.

    (Auto-generated by Django -- do not hand-edit the field definitions.)
    """
    dependencies = [
        ('companys', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='ComtoUsers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('companys', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='companys.Companys', verbose_name='\u516c\u53f8')),
            ],
        ),
        migrations.CreateModel(
            name='Users',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=45, verbose_name='\u7528\u6237\u8d26\u53f7')),
                ('name', models.CharField(max_length=45, verbose_name='\u59d3\u540d')),
                ('password', models.CharField(max_length=45, verbose_name='\u5bc6\u7801')),
                ('department', models.CharField(max_length=45, verbose_name='\u90e8\u95e8')),
                ('position', models.CharField(max_length=45, verbose_name='\u804c\u4f4d')),
                ('tel', models.CharField(max_length=45, verbose_name='\u7535\u8bdd')),
                ('email', models.CharField(max_length=45, verbose_name='\u90ae\u7bb1')),
                ('total_class', models.IntegerField(default=0, verbose_name='\u5b66\u4e60\u8bfe\u7a0b')),
                ('companyid', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='companys.Companys', verbose_name='\u516c\u53f8')),
            ],
            options={
                'verbose_name': '\u7528\u6237',
                'verbose_name_plural': '\u7528\u6237',
            },
        ),
        migrations.AddField(
            model_name='comtousers',
            name='users',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='companys.Users', verbose_name='\u7528\u6237'),
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,677 | ECNU-Studio/emoc | refs/heads/master | /apps/useradmin/views.py | # _*_ coding:utf-8 _*_
import json
from django.shortcuts import render
from django.views.generic import View
from django.http import HttpResponse
from django.shortcuts import render
from courses.models import *
class manage_courses(View):
    """Admin view rendering the course-management page with all courses."""
    def get(self, request, courses_id=None, preview=1):
        """Render admin_courses.html with every course and the course count.

        Args:
            request: the incoming HttpRequest.
            courses_id: unused URL capture (kept for URLconf compatibility).
            preview: pass-through flag for the template.
        """
        # FIX: the original referenced an undefined lowercase `courses` name
        # (`courses.objects.all()` / `courses.count()`); the model class is
        # `Courses` (see courses/adminx.py), and the count must come from
        # the queryset, not the model class.
        all_courses = Courses.objects.all()
        org_nums = all_courses.count()
        return render(request, 'templates/admin_courses.html', {
            'org_nums': org_nums,
            'all_courses': all_courses,
            'preview': preview
        })
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,678 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/migrations/0003_auto_20180422_1615.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-22 16:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds random-selection support to Examination (is_random, question_num).

    (Auto-generated by Django -- do not hand-edit the field definitions.)
    """
    dependencies = [
        ('examination', '0002_auto_20180421_1756'),
    ]
    operations = [
        migrations.AddField(
            model_name='examination',
            name='is_random',
            field=models.BooleanField(default=False, verbose_name='\u662f\u5426\u968f\u673a'),
        ),
        migrations.AddField(
            model_name='examination',
            name='question_num',
            field=models.IntegerField(default=0, verbose_name='\u9898\u76ee\u6570\u91cf'),
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,679 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/migrations/0002_auto_20180421_1756.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 17:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoints TakeInfo.user at the legacy nengli8.UserOld model.

    (Auto-generated by Django -- do not hand-edit the field definitions.)
    """
    dependencies = [
        ('examination', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='takeinfo',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='nengli8.UserOld', verbose_name='\u95ee\u5377\u7528\u6237'),
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,680 | ECNU-Studio/emoc | refs/heads/master | /apps/teacheres/apps.py | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.apps import AppConfig
class TeacheresConfig(AppConfig):
    """Django app config for the teacheres app (admin label 'Trainer management')."""
    name = 'teacheres'
    verbose_name = u'培训师管理'
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,681 | ECNU-Studio/emoc | refs/heads/master | /apps/companys/models.py | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
# Create your models here.
# Enterprise (company) model
class Companys(models.Model):
    """A client company with its login account and contact details."""
    name = models.CharField(max_length=45, verbose_name=_(u"名称"))
    account = models.CharField(max_length=45, verbose_name=_(u"账户"))
    password = models.CharField(max_length=45, verbose_name=_(u"密码"))
    email = models.CharField(max_length=45,blank=True, null=True, verbose_name=_(u"邮箱"))
    legalperson = models.CharField(max_length=45, blank=True, null=True,verbose_name=_(u"法人"))
    address = models.CharField(max_length=45,blank=True, null=True, verbose_name=_(u"企业地址"))
    cover = models.CharField(max_length=45, blank=True, null=True,verbose_name=_(u"企业封面"))
    memo = models.CharField(max_length=45, blank=True, null=True,verbose_name=_(u"备注"))
    # NOTE(review): BooleanField takes no meaningful max_length -- confirm intent.
    state = models.BooleanField(max_length=45, default=0, verbose_name=_(u"是否有效"))
    # add_time = models.DateTimeField(default=datetime.now, verbose_name=_(u"添加时间"))
    class Meta:
        # managed = False
        # db_table = 'companys'
        verbose_name = '企业'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        # Displayed as the company name.
        return self.name
# Basic user-profile model
class Users(models.Model):
    """A learner account belonging to a company (many fields deferred/commented)."""
    username = models.CharField(max_length=45, verbose_name=_(u"用户账号"))
    name = models.CharField(max_length=45, verbose_name=_(u"姓名"))
    # photo = models.CharField(max_length=1000, verbose_name=_(u"头像"))
    password = models.CharField(max_length=45, verbose_name=_(u"密码"))
    # companyID = models.IntegerField(default=0, verbose_name=_(u"公司ID"))
    department = models.CharField(max_length=45, blank=True, null=True , verbose_name=_(u"部门"))
    position = models.CharField(max_length=45,blank=True, null=True , verbose_name=_(u"职位"))
    # openid = models.CharField(max_length=45, verbose_name=_(u"微信openid"))
    # qq = models.CharField(max_length=45, verbose_name=_(u"QQ"))
    tel = models.CharField(max_length=45, blank=True, null=True ,verbose_name=_(u"电话"))
    email = models.CharField(max_length=45,blank=True, null=True , verbose_name=_(u"邮箱"))
    # notice_wenda = models.BooleanField(max_length=45, verbose_name=_(u"问答通知"))
    # notice_pinglun = models.BooleanField(max_length=45, verbose_name=_(u"评论通知"))
    # notice_sendmail = models.BooleanField(max_length=45, verbose_name=_(u"问答评论发送邮箱"))
    # total_hours = models.IntegerField(default=0, verbose_name=_(u"累计学时"))
    total_class = models.IntegerField(default=0, blank=True, null=True , verbose_name=_(u"学习课程"))
    # total_day = models.IntegerField(default=0, verbose_name=_(u"累计天数"))
    # classID = models.IntegerField(default=0, verbose_name=_(u"班级id"))
    # dayBefor = models.DateTimeField( verbose_name=_(u"上次时间"))
    # dayFirst = models.DateTimeField( verbose_name=_(u"连续登陆,第一次登陆"))
    # total_score = models.FloatField(max_length=12, verbose_name=_(u"总成绩"))
    # class_finish = models.IntegerField(default=0, verbose_name=_(u"已完成课程"))
    # state = models.BooleanField(max_length=1, verbose_name=_(u"是否有效"))
    # new_ans = models.BooleanField(max_length=1, verbose_name=_(u"是否有新的回复"))
    # classStudent = models.CharField(max_length=45, verbose_name=_(u"班级"))
    companyid = models.ForeignKey(Companys, default=0, to_field='id',verbose_name=_(u"公司"))
    # language = models.CharField(max_length=45, verbose_name=_(u"语言(1中文,2英文)"))
    class Meta:
        verbose_name = '用户'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        # Displayed as the user's real name.
        return self.name
# Many-to-many link between companies and users.
class ComtoUsers(models.Model):
    companys = models.ForeignKey(Companys, to_field='id',verbose_name= _(u"公司"))
    users = models.ForeignKey(Users, to_field='id', verbose_name=_(u"用户"))
52,682 | ECNU-Studio/emoc | refs/heads/master | /apps/classes/adminx.py | # _*_ coding:utf-8 _*_
import xadmin
from .models import Classes
# Training-class admin
class ClassesAdmin(object):
    """xadmin options for the Classes model."""
    list_display = ['companyid', 'coursesid']
    search_fields = ['companyid']
    list_filter = ['companyid']
    # editable directly on the change-list page
    list_editable = ['companyid']
    model_icon = 'fa fa-users'
xadmin.site.register(Classes, ClassesAdmin)
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,683 | ECNU-Studio/emoc | refs/heads/master | /apps/questionnaire/apps.py | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.apps import AppConfig
class QuestionnaireConfig(AppConfig):
    """Django app config for the questionnaire app (admin label 'Questionnaire')."""
    name = 'questionnaire'
    verbose_name = u'问卷'
    # label = u'问卷'
52,684 | ECNU-Studio/emoc | refs/heads/master | /apps/classes/apps.py | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.apps import AppConfig
class ClassesConfig(AppConfig):
    """Django app config for the classes app (admin label 'Class management')."""
    name = 'classes'
    verbose_name = u'班级管理'
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,685 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 17:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the `examination` app.

    Do not hand-edit the operations below; Django replays them verbatim.
    """
    initial = True
    dependencies = [
        # `nengli8` must exist first because Examination FKs CourseOld there.
        ('nengli8', '0002_userold'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Unmanaged model: maps an externally maintained table/view that
        # aggregates per-choice answer counts (no DDL is emitted for it).
        migrations.CreateModel(
            name='ExaminationStatistics',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course', models.IntegerField()),
                ('name', models.CharField(max_length=128, verbose_name='\u6807\u9898')),
                ('question', models.IntegerField()),
                ('question_text', models.CharField(max_length=128, verbose_name='\u95ee\u9898')),
                ('qsort', models.IntegerField()),
                ('type', models.CharField(max_length=32)),
                ('choice', models.IntegerField()),
                ('choice_text', models.CharField(max_length=128, verbose_name='\u9009\u9879')),
                ('csort', models.IntegerField()),
                ('sum', models.IntegerField()),
                ('percent', models.IntegerField()),
            ],
            options={
                'db_table': 'examination_statistics',
                'managed': False,
            },
        ),
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Raw Question id, not a ForeignKey.
                ('question', models.IntegerField()),
                ('choice', models.IntegerField(blank=True, null=True)),
                ('text', models.TextField(blank=True, null=True)),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('update_time', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sortnum', models.IntegerField(default=1, verbose_name='\u5e8f\u53f7')),
                ('is_answer', models.BooleanField(default=False, verbose_name='\u662f\u5426\u6b63\u786e\u7b54\u6848')),
                ('text', models.CharField(max_length=128, verbose_name='\u9009\u9879')),
                ('tags', models.CharField(blank=True, editable=False, max_length=64, verbose_name='Tags')),
            ],
            options={
                'verbose_name': '\u9009\u9879',
                'verbose_name_plural': '\u9009\u9879',
            },
        ),
        migrations.CreateModel(
            name='Examination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_published', models.BooleanField(default=False, verbose_name='\u662f\u5426\u53d1\u5e03')),
                ('take_nums', models.IntegerField(default=0, verbose_name='\u53c2\u4e0e\u4eba\u6570')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='examination_course_id', to='nengli8.CourseOld', verbose_name='\u95ee\u5377')),
            ],
            options={
                'verbose_name': '\u95ee\u5377',
                'verbose_name_plural': '\u95ee\u5377',
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sortnum', models.IntegerField(default=1, verbose_name='\u5e8f\u53f7')),
                ('type', models.CharField(choices=[(b'radio', '\u5355\u9009'), (b'checkbox', '\u591a\u9009'), (b'star', '\u6253\u661f'), (b'text', '\u95ee\u7b54')], max_length=32, verbose_name='\u9898\u578b')),
                ('text', models.CharField(max_length=128, verbose_name='\u95ee\u9898')),
                ('is_use', models.BooleanField(default=False, verbose_name='\u662f\u5426\u4f7f\u7528')),
                ('create_time', models.DateTimeField(auto_now_add=True)),
                ('update_time', models.DateTimeField(auto_now=True)),
                ('examination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.Examination', verbose_name='\u8bd5\u5377')),
            ],
            options={
                'verbose_name': '\u95ee\u9898',
                'verbose_name_plural': '\u95ee\u9898',
            },
        ),
        migrations.CreateModel(
            name='TakeInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('score', models.IntegerField(blank=True, null=True)),
                ('start_time', models.DateTimeField(blank=True, null=True, verbose_name='\u5f00\u59cb\u65f6\u95f4')),
                ('end_time', models.DateTimeField(blank=True, null=True, verbose_name='\u7ed3\u675f\u65f6\u95f4')),
                ('examination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.Examination', verbose_name='\u8bfe\u7a0b')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='examination_user_id', to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237')),
            ],
            options={
                'verbose_name': '\u8bb0\u5f55',
                'verbose_name_plural': '\u8bb0\u5f55',
            },
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.Question'),
        ),
        migrations.AddField(
            model_name='answer',
            name='takeinfo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.TakeInfo'),
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,686 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/migrations/0002_auto_20180420_1536.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-20 15:36
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated follow-up migration for the `examination` app."""
    # NOTE(review): `initial = True` looks wrong here -- this migration
    # depends on ('examination', '0001_initial'), so it is not initial.
    # Confirm before touching; changing migration flags affects replays.
    initial = True
    dependencies = [
        ('examination', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.AddField(
            model_name='takeinfo',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='examination_user_id', to=settings.AUTH_USER_MODEL, verbose_name='\u7528\u6237'),
        ),
        migrations.AddField(
            model_name='question',
            name='course',
            # NOTE(review): 'examination.CourseOld' does not appear to be a
            # model of this app (CourseOld lives in nengli8) -- TODO confirm.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.CourseOld', verbose_name='\u8bfe\u7a0b'),
        ),
        migrations.AddField(
            model_name='examination',
            name='course',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.CourseOld', verbose_name='\u8bfe\u7a0b'),
        ),
        migrations.AddField(
            model_name='examination',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.Question', verbose_name='\u95ee\u9898'),
        ),
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.Question'),
        ),
        migrations.AddField(
            model_name='answer',
            name='takeinfo',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='examination.TakeInfo'),
        ),
        # Proxy model: no table of its own, reuses courseold's table.
        migrations.CreateModel(
            name='PublishedExamination',
            fields=[
            ],
            options={
                'verbose_name': '\u7edf\u8ba1',
                'proxy': True,
                'verbose_name_plural': '\u7edf\u8ba1',
            },
            bases=('examination.courseold',),
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,687 | ECNU-Studio/emoc | refs/heads/master | /apps/teacheres/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 11:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the `teacheres` app."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Teacheres',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=45, verbose_name='\u6559\u5e08\u767b\u5f55\u540d')),
                # NOTE(review): password stored as a plain CharField on a
                # non-auth model -- presumably legacy data; confirm hashing
                # happens elsewhere before relying on this.
                ('password', models.CharField(max_length=45, verbose_name='\u5bc6\u7801')),
                ('name', models.CharField(max_length=45, verbose_name='\u6559\u5e08\u59d3\u540d')),
                ('email', models.CharField(max_length=45, verbose_name='\u90ae\u7bb1')),
                ('phone', models.CharField(max_length=45, verbose_name='\u624b\u673a')),
                ('weixin', models.CharField(blank=True, max_length=45, null=True, verbose_name='\u5fae\u4fe1')),
                ('introduce', models.TextField(max_length=500, verbose_name='\u4ecb\u7ecd')),
            ],
            options={
                'verbose_name': '\u57f9\u8bad\u5e08',
                'verbose_name_plural': '\u57f9\u8bad\u5e08',
            },
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,688 | ECNU-Studio/emoc | refs/heads/master | /apps/useradmin/urls.py | # _*_ coding:utf-8 _*_
from django.conf.urls import *
from .views import *
urlpatterns = [
    # Admin dashboard home page.
    # url(r'manage/courses$', manage_courses),
    # Course-management view; captures the course id from the URL.
    url(r'manage/(?P<courses_id>[0-9]+)/$',manage_courses.as_view(), name='courses'),
]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,689 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/views.py | # _*_ coding:utf-8 _*_
from django.views.generic import View
from django.shortcuts import render
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from examination.models import *
from users.models import UserProfile
import random
import json
from hashlib import md5
from datetime import *
import time
class ExaminationShow(View):
    """Preview an exam paper.

    Looks up the examination attached to the given course and renders
    its in-use questions with the per-type template fragment attached.
    """
    def get(self, request, course_id=None, preview=1):
        course = get_object_or_404(CourseOld, id=int(course_id))
        examination = get_object_or_404(Examination, course=course)
        if examination:
            questions = examination.questions_use()
            for q in questions:
                # Cache the option queryset on the instance (shadowing the
                # bound method) and pick the template for this question type.
                q.choices = q.choices()
                q.template = "question_type/exam-%s.html" % q.type
        context = {
            'course': course,
            'examination': examination,
            'questions': questions,
            'preview': preview,
        }
        return render(request, 'show_examination.html', context)
class StatisticsShow(View):
    """
    Look up the statistics of the current exam paper and display them.
    """
    def get(self, request, course_id=None):
        course = get_object_or_404(CourseOld, id=int(course_id))
        examination = get_object_or_404(Examination, course=course)
        if examination:
            questions = examination.questions_use()
            for question in questions:
                if question.type == 'text':
                    # Free-text questions: show sample answers instead of counts.
                    question.answer_texts = question.get_answer_texts()
                else:
                    # Shadows the bound method with its own result queryset.
                    question.statistics = question.statistics()
                question.template = "statistics_type/%s.html" % question.type
            takeinfos = TakeInfo.objects.filter(examination=examination).order_by('score')
        # Render the statistics page.
        return render(request, 'examination_statistics.html', {
            'examination': examination,
            'questions': questions,
            'takeinfos': takeinfos
        })
class ShowTakeinfoDetail(View):
    """
    Look up and display the answers of one exam sitting (TakeInfo).
    """
    def get(self, request, takeinfo_id=None):
        takeinfo = get_object_or_404(TakeInfo, id=int(takeinfo_id))
        examination = get_object_or_404(Examination, id=takeinfo.examination_id)
        if examination:
            questions = examination.questions_use()
            # Attach the count so the template does not re-query.
            questions.count = questions.count()
            for question in questions:
                # Assume correct until a wrong selected choice is found.
                question.result = True
                # All choices of this question.
                choices = question.choices()
                for choice in choices:
                    if choice.is_answer:
                        question.right_answer = choice.text
                    if Answer.objects.filter(takeinfo=takeinfo.id, question=question.id, choice=choice.id).exists():
                        # The student selected this choice.
                        choice.checked = True
                        if not choice.is_answer:
                            question.result = False
                question.choices = choices
                question.template = "takeinfo_detail_type/%s.html" % question.type
        # Render the per-sitting detail page.
        return render(request, 'show_takeinfo_detail.html', {
            'takeinfo': takeinfo,
            'questions': questions,
            'examination': examination
        })
class QuestionEdit(View):
    """
    Edit an exam paper: load (or lazily create) the examination of the
    course and serialize its questions for the form-builder widget.
    """
    def get(self, request, course_id=None):
        course = get_object_or_404(CourseOld, id=int(course_id))
        examination_set = Examination.objects.filter(course=course)[0:1]
        if not examination_set:
            # First visit: create an empty, unpublished paper.
            examination = Examination()
            examination.course = course
            examination.is_published = False
            examination.take_nums = 0
            examination.save()
        else:
            examination = list(examination_set)[0]
        questions = examination.questions()
        # Build the JSON structure consumed by the front-end form builder.
        question_list = []
        for question in questions:
            question_obj = {}
            question_obj['label'] = question.text
            question_obj['field_type'] = question.type
            question_obj['field_options'] = {}
            choices = question.choices()
            options = []
            for choice in choices:
                option = {}
                option['label'] = choice.text
                option['checked'] = choice.is_answer
                options.append(option)
            question_obj['field_options']['options'] = options
            question_list.append(question_obj)
        return render(request, 'edit_examination.html', {
            'course': course,
            'examination': examination,
            'question_list': json.dumps(question_list)
        })
class SaveQuestion(View):
    """
    Save the paper: rebuild all questions/choices from the posted JSON
    payload, then select which questions are in use (fixed or random).
    """
    def post(self, request):
        res = dict()
        examination_id = int(request.POST.get('examination_id', 0))
        examination = Examination.objects.get(id=examination_id)
        if examination:
            # Delete the existing question records (done below, after the
            # paper settings are saved); choices cascade with them.
            payload = json.loads(request.POST.get('payload'))
            question_list = payload['fields']
            question_count = int(request.POST.get('question_count', 0))
            is_random = request.POST.get('is_random')
            examination.is_random = is_random
            examination.question_count = question_count
            examination.save()
            Question.objects.filter(examination=examination).delete()
            for index1, value1 in enumerate(question_list):
                question = Question()
                question.examination = examination
                question.sortnum = index1 + 1
                question.type = value1['field_type']
                question.text = value1['label']
                question.save()
                # If the question has options, rebuild the choice rows too.
                if 'options' in value1['field_options'].keys():
                    for index2, value2 in enumerate(value1['field_options']['options']):
                        choice_obj = Choice()
                        choice_obj.question = question
                        choice_obj.is_answer = value2['checked']
                        choice_obj.sortnum = index2 + 1
                        choice_obj.text = value2['label']
                        choice_obj.save()
            if is_random == 'false':
                # Fixed paper: take the first question_count in sort order.
                questions = Question.objects.filter(examination=examination).order_by('sortnum')[:question_count]
            else:
                # Random paper: sample question_count questions.
                # NOTE(review): random.sample raises ValueError when
                # question_count exceeds the number of questions -- confirm
                # the front end prevents that.
                questionAll = list(Question.objects.filter(examination=examination))
                questions = random.sample(questionAll, question_count)
            for question in questions:
                question.is_use = True
                question.save()
            res['status'] = 'success'
            res['msg'] = '保存成功'
        else:
            res = dict()
            res['status'] = 'failed'
            res['msg'] = '课程未创建'
        return HttpResponse(json.dumps(res), content_type='application/json')
class CancelExamination(View):
    """Unpublish an examination (POST param: examination_id)."""
    def post(self, request):
        exam = get_object_or_404(
            Examination, id=int(int(request.POST.get('examination_id', 0))))
        if exam:
            exam.is_published = False
            exam.save()
        payload = {'status': 'success', 'msg': '已取消'}
        return HttpResponse(json.dumps(payload), content_type='application/json')
class PublishExamination(View):
    """Publish an examination (POST param: examination_id)."""
    def post(self, request):
        exam = get_object_or_404(
            Examination, id=int(int(request.POST.get('examination_id', 0))))
        if exam:
            exam.is_published = True
            exam.save()
        payload = {'status': 'success', 'msg': '发布成功'}
        return HttpResponse(json.dumps(payload), content_type='application/json')
class SubmitExamination(View):
    """Persist one student's submitted exam.

    Creates the TakeInfo row, stores every Answer, grades the paper and
    bumps the examination's take_nums counter.
    """
    def save_takeinfo(self, examination, user, start_time, end_time, stu_name, stu_num):
        """Create and return the TakeInfo row for this sitting.

        start_time/end_time are "%Y-%m-%d %H:%M:%S" strings from the client.
        """
        takeinfo = TakeInfo()
        takeinfo.user = user
        takeinfo.examination = examination
        takeinfo.start_time = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
        takeinfo.end_time = datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")
        takeinfo.num = stu_num
        takeinfo.name = stu_name
        takeinfo.save()
        return takeinfo
    def post(self, request):
        # The submitting user; defaults to id 1 -- presumably a guest
        # account, TODO confirm.
        user_id = int(request.POST.get('user_id', 1))
        user = get_object_or_404(UserOld, id=user_id)
        examination_id = int(request.POST.get('examination_id', 0))
        examination = get_object_or_404(Examination, id=int(examination_id))
        start_time = request.POST.get('start_time', '')
        end_time = request.POST.get('end_time', '')
        stu_name = request.POST.get('stu_name', '')
        stu_num = request.POST.get('stu_num', '')
        if examination:
            takeinfo = self.save_takeinfo(examination, user, start_time, end_time, stu_name, stu_num)
            # Submitted answers: [{"question_id":..,"choice":"1,2","text":..}, ...]
            submitted_answers = json.loads(request.POST.get('answerStr'))
            right_num = 0
            for answer_obj in submitted_answers:
                question_id = answer_obj["question_id"]
                choices = answer_obj["choice"].split(',')
                # BUG FIX: the correct-answer ids used to be collected into a
                # variable named `answers`, shadowing the submission list, and
                # were compared as ordered lists even though neither side has
                # a guaranteed order. Compare as sets of id strings instead.
                correct_ids = set(
                    str(row.get('id'))
                    for row in Choice.objects.filter(question_id=int(question_id), is_answer=True).values('id'))
                if correct_ids == set(choices):
                    right_num = right_num + 1
                for choice in choices:
                    answer = Answer()
                    answer.question = question_id
                    # Skip blank entries (text/star questions submit '').
                    if choice.strip():
                        answer.choice = int(choice)
                    answer.text = answer_obj["text"]
                    answer.takeinfo = takeinfo
                    answer.save()
            # BUG FIX: (100/count)*right truncated per-question under integer
            # division (a perfect 3-question paper scored 99). Multiply first,
            # and guard against a paper with question_count == 0.
            count = examination.question_count
            takeinfo.score = (100 * right_num) / count if count else 0
            takeinfo.save()
            examination.take_nums += 1
            examination.save()
        res = dict()
        res['status'] = 'success'
        res['msg'] = '完成'
        return HttpResponse(json.dumps(res), content_type='application/json')
52,690 | ECNU-Studio/emoc | refs/heads/master | /apps/courses/models.py | # _*_ coding:utf-8 _*_
import sys
from django.db import models
from django.utils.translation import ugettext as _
from teacheres.models import Teacheres
# Create your models here.
#课程
class Courses(models.Model):
    """A course: name, abstract, cover image and its teacher."""
    name = models.CharField(max_length=45, verbose_name=_(u"课程名称"))
    coursesAbstract = models.TextField(max_length=45, verbose_name=_(u"课程简介"))
    cover = models.ImageField(upload_to='images/%Y/%m', verbose_name=_(u"封面"))
    # NOTE(review): despite the name `teacherid`, this is a ForeignKey to
    # Teacheres (the object, not a raw id).
    teacherid = models.ForeignKey(Teacheres, verbose_name=_(u"讲师id"))
    # state = models.CharField(max_length=45, verbose_name=_(u"是否有效"))
    # honor = models.CharField(max_length=45, verbose_name=_(u"勋章图"))
    # abstractFile = models.CharField(max_length=1000,blank=True, null=True, verbose_name=_(u"简介附件"))
    # abstractFileSize = models.CharField(max_length=500, verbose_name=_(u"简介附件文件大小"))
    # abstractFileName = models.CharField(max_length=500, verbose_name=_(u"简介附件名称"))
    # teacher = models.ForeignKey(Teacheres , verbose_name=_(u"此课程的上课教师"))
    # class = models.ForeignKey(Classes, verbose_name=_(u"上此课程的班级"))
    # catalog = models.ForeignKey(Catalog , verbose_name=_(u"课程目录"))
    class Meta:
        verbose_name = '课程'
        verbose_name_plural = verbose_name
        # managed = False
        # db_table = 'courses'
    def __unicode__(self):
        return self.name
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,691 | ECNU-Studio/emoc | refs/heads/master | /apps/questionnaire/views.py | # _*_ coding:utf-8 _*_
from django.views.generic import View
from django.shortcuts import render
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from questionnaire.models import *
# from users.models import UserProfile
from nengli8.models import *
import json
from hashlib import md5
class QuestionnaireEdit(View):
    """
    Edit a questionnaire: load (or lazily create) the questionnaire of
    the course and serialize its questions for the form-builder widget.
    """
    def get(self, request, course_id=None):
        course = get_object_or_404(CourseOld, id=int(course_id))
        questionnaire_set = Questionnaire.objects.filter(course=course)[0:1]
        if not questionnaire_set:
            # First visit: create an empty, unpublished questionnaire.
            questionnaire = Questionnaire()
            questionnaire.course = course
            questionnaire.is_published = False
            questionnaire.take_nums = 0
            questionnaire.save()
        else:
            questionnaire = list(questionnaire_set)[0]
        questions = questionnaire.questions()
        # Build the JSON structure consumed by the front-end form builder.
        question_list = []
        for question in questions:
            question_obj = {}
            question_obj['label'] = question.text
            question_obj['field_type'] = question.type
            question_obj['field_options'] = {}
            choices = question.choices()
            options = []
            for choice in choices:
                option = {}
                option['label'] = choice.text
                # Questionnaires have no "correct" choices.
                option['checked'] = False
                options.append(option)
            question_obj['field_options']['options'] = options
            question_list.append(question_obj)
        return render(request, 'edit_questionnaire.html', {
            'questionnaire': questionnaire,
            'question_list': json.dumps(question_list)
        })
class StatisticsShow(View):
    """
    Look up the statistics of the current questionnaire and display them.
    """
    def get(self, request, course_id=None):
        course = get_object_or_404(CourseOld, id=int(course_id))
        questionnaire = get_object_or_404(Questionnaire, course=course)
        if questionnaire:
            questions = questionnaire.questions()
            for question in questions:
                if question.type == 'text':
                    # Free-text questions: show sample answers instead of counts.
                    question.answer_texts = question.get_answer_texts()
                else:
                    # Shadows the bound method with its own result queryset.
                    question.statistics = question.statistics()
                question.template = "statistics_type/%s.html" % question.type
            # Only the first 10 runs are listed.
            runinfos = RunInfo.objects.filter(questionnaire=questionnaire)[:10]
        # Render the statistics page.
        return render(request, 'questionnaire_statistics.html', {
            'questionnaire': questionnaire,
            'questions': questions,
            'runinfos': runinfos
        })
class ShowRuninfoDetail(View):
    """
    Look up and display the answers of one questionnaire run (RunInfo).
    """
    def get(self, request, runinfo_id=None):
        runinfo = get_object_or_404(RunInfo, id=int(runinfo_id))
        questionnaire = get_object_or_404(Questionnaire, id=runinfo.questionnaire_id)
        if questionnaire:
            questions = questionnaire.questions()
            for question in questions:
                choices = question.choices()
                for choice in choices:
                    # Mark choices this run actually selected.
                    if Answer.objects.filter(runinfo=runinfo.id, question=question.id, choice=choice.id).exists():
                        choice.checked = True
                question.choices = choices
                question.template = "runinfo_detail_type/%s.html" % question.type
        # Render the per-run detail page.
        return render(request, 'show_runinfo_detail.html', {
            'questions': questions
        })
class QuestionnaireShow(View):
    """
    Look up the current questionnaire and display it.
    """
    def get(self, request, course_id=None, preview=1):
        course = get_object_or_404(CourseOld, id=int(course_id))
        questionnaire = get_object_or_404(Questionnaire, course=course)
        if questionnaire:
            questions = questionnaire.questions()
            # Attach the count so the template does not re-query.
            questions.count = questions.count()
            for question in questions:
                # Cache the option queryset and pick the per-type template.
                question.choices = question.choices()
                question.template = "question_type/%s.html" % question.type
        # Render the questionnaire page.
        return render(request, 'show_questionnaire.html', {
            'course': course,
            'questionnaire': questionnaire,
            'questions': questions,
            'preview': preview
        })
class CancelQuestionnaire(View):
    """Unpublish a questionnaire (POST param: questionnaire_id)."""
    def post(self, request):
        qn = get_object_or_404(
            Questionnaire, id=int(request.POST.get('questionnaire_id', 0)))
        if qn:
            qn.is_published = False
            qn.save()
        payload = {'status': 'success', 'msg': '已取消 '}
        return HttpResponse(json.dumps(payload), content_type='application/json')
class PublishQuestionnaire(View):
    """Publish a questionnaire (POST param: questionnaire_id)."""
    def post(self, request):
        qn = get_object_or_404(
            Questionnaire, id=int(request.POST.get('questionnaire_id', 0)))
        if qn:
            qn.is_published = True
            qn.save()
        payload = {'status': 'success', 'msg': '发布成功'}
        return HttpResponse(json.dumps(payload), content_type='application/json')
class SubmitQuestionnaire(View):
    """Persist one user's questionnaire submission (run + answers)."""
    # Create the RunInfo row for this submission.
    def save_runinfo(self, questionnaire, user):
        runinfo = RunInfo()
        runinfo.user = user
        runinfo.questionnaire = questionnaire
        runinfo.save()
        return runinfo
    def post(self, request):
        # The submitting user; defaults to id 1 -- presumably a guest
        # account, TODO confirm.
        user_id = int(request.POST.get('user_id', 1))
        user = get_object_or_404(UserOld, id=user_id)
        questionnaire_id = int(request.POST.get('questionnaire_id', 0))
        questionnaire = get_object_or_404(Questionnaire, id=questionnaire_id)
        if questionnaire:
            runinfo = self.save_runinfo(questionnaire, user)
            # Submitted answers: [{"question_id":..,"choice":"1,2","text":..}, ...]
            answers = json.loads(request.POST.get('answerStr'))
            for answer_obj in answers:
                choices = answer_obj["choice"].split(',')
                for choice in choices:
                    answer = Answer()
                    answer.question = answer_obj["question_id"]
                    # Skip blank entries (text/star questions submit '').
                    if choice.strip():
                        answer.choice = int(choice)
                    answer.text = answer_obj["text"]
                    answer.runinfo = runinfo
                    answer.save()
            questionnaire.take_nums += 1
            questionnaire.save()
        res = dict()
        res['status'] = 'success'
        res['msg'] = '完成'
        return HttpResponse(json.dumps(res), content_type='application/json')
class SaveQuestionnaire(View):
    """Save a questionnaire: rebuild all questions/choices from the
    posted JSON payload."""
    def post(self, request):
        res = dict()
        questionnaire_id = int(request.POST.get('questionnaire_id', 0))
        questionnaire = Questionnaire.objects.get(id=questionnaire_id)
        if questionnaire:
            # Delete the existing question records (choices cascade).
            Question.objects.filter(questionnaire=questionnaire).delete()
            payload = json.loads(request.POST.get('payload'))
            question_list = payload['fields']
            for index1, value1 in enumerate(question_list):
                question = Question()
                question.questionnaire = questionnaire
                question.sortnum = index1 + 1
                # question.type = value1['field_type'].split('-')[0]
                question.type = value1['field_type']
                question.text = value1['label']
                question.save()
                # If the question has options, rebuild the choice rows too.
                if 'options' in value1['field_options'].keys():
                    for index2, value2 in enumerate(value1['field_options']['options']):
                        choice_obj = Choice()
                        choice_obj.question = question
                        choice_obj.sortnum = index2 + 1
                        choice_obj.text = value2['label']
                        choice_obj.save()
            res['status'] = 'success'
            res['msg'] = '保存成功'
        else:
            res = dict()
            res['status'] = 'failed'
            res['msg'] = '问卷未创建'
        return HttpResponse(json.dumps(res), content_type='application/json')
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,692 | ECNU-Studio/emoc | refs/heads/master | /apps/users/models.py | # -*- coding: utf-8 -*-
# 引入python自带的模块
from datetime import datetime
# 引入第三方库的模块
from django.db import models
from django.contrib.auth.models import AbstractUser
# 引入自定义的模块
# Create your models here.
# 继承原始的user类
class UserProfile(AbstractUser):
    """Site user; extends Django's AbstractUser with profile fields."""
    nick_name = models.CharField(max_length=50, verbose_name='昵称', default='')
    birthday = models.DateField(null=True, blank=True, verbose_name='生日')
    gender = models.CharField(max_length=6, choices=(('male', '男'), ('female', '女')), default='female', verbose_name='性别')
    address = models.CharField(max_length=100, default='', verbose_name='地址')
    mobile = models.CharField(max_length=11, null=True, blank=True, verbose_name='手机号')
    # NOTE(review): default 'image?default.png' looks like a typo for
    # 'image/default.png' (compare upload_to='image/%Y/%m') -- confirm
    # before changing; fixing it requires a migration.
    image = models.ImageField(max_length=100, upload_to='image/%Y/%m', default='image?default.png', verbose_name='头像')
    class Meta:
        verbose_name = '用户信息'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return self.username
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,693 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/models.py | # -*- coding: utf-8 -*-
from django.db import models
from users.models import UserProfile
from django.utils.translation import ugettext as _
from nengli8.models import *
CHOICES_TYPE = [('radio', u'单选'), ('checkbox', u'多选'), ('star', u'打星'), ('text', u'问答')]
class Examination(models.Model):
    """An exam paper attached to one course."""
    course = models.ForeignKey(CourseOld, verbose_name=_(u"问卷"), related_name='examination_course_id')
    is_published = models.BooleanField(default=False, verbose_name=u'是否发布')
    take_nums = models.IntegerField(default=0, verbose_name=u'参与人数')
    is_random = models.BooleanField(default=False, verbose_name=u'是否随机')
    question_count = models.IntegerField(default=0, verbose_name=u'题目数量')
    def questions(self):
        """All questions of this paper, in display order."""
        return Question.objects.filter(examination=self).order_by('sortnum')
    def questions_use(self):
        """Only the questions actually selected for the paper."""
        return Question.objects.filter(examination=self, is_use=True).order_by('sortnum')
    def statistics(self):
        """Per-question statistics rows for this paper.

        BUG FIX: the filter used the nonexistent field name `questionnaire`
        (copied from the questionnaire app) and raised FieldError;
        ExaminationStatistics stores the paper id in `examination`.
        """
        return ExaminationStatistics.objects.filter(examination=self.id).order_by('qsort')
    def __unicode__(self):
        return self.course.name
    class Meta:
        verbose_name = '问卷'
        verbose_name_plural = verbose_name
class Question(models.Model):
    """One question of an exam paper."""
    examination = models.ForeignKey(Examination, verbose_name=_(u"试卷"))
    sortnum = models.IntegerField(default=1, verbose_name=_(u"序号"))
    type = models.CharField(max_length=32, choices=CHOICES_TYPE, verbose_name=_(u"题型"))
    text = models.TextField(verbose_name=_(u"问题"))
    is_use = models.BooleanField(default=False, verbose_name=u'是否使用')
    create_time = models.DateTimeField(auto_now_add=True)
    update_time = models.DateTimeField(auto_now=True)
    def choices(self):
        """All choices of this question, in display order."""
        return Choice.objects.filter(question=self).order_by('sortnum')
    def get_answers(self):
        """Ids of the correct choices (as {'id': ...} dicts)."""
        choices = Choice.objects.filter(question=self, is_answer=True).values('id').order_by('sortnum')
        return choices
    def statistics(self):
        """Per-choice answer counts for this question (views may shadow
        this method with its own result on instances)."""
        return ExaminationStatistics.objects.values('choice', 'choice_text', 'sum', 'percent').filter(question=self.id).order_by('csort')
    def get_answer_texts(self):
        """Up to 5 free-text answers for this question."""
        return Answer.objects.values('text').filter(question=self.id).order_by('id')[:5]
    class Meta:
        verbose_name = '问题'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return u'[%s] (%d) %s' % (self.examination, self.sortnum, self.text)
class Choice(models.Model):
    """One selectable option of a Question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    sortnum = models.IntegerField(default=1, verbose_name=_(u"序号"))
    is_answer = models.BooleanField(default=False, verbose_name=u'是否正确答案')
    text = models.TextField(verbose_name=_(u"选项"))
    # Not user-editable; purpose not evident from this file -- confirm.
    tags = models.CharField(u"Tags", max_length=64, blank=True, editable=False)
    class Meta:
        verbose_name = '选项'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return u'(%s) %d. %s' % (self.question.sortnum, self.sortnum, self.text)
class TakeInfo(models.Model):
    """One student's sitting of an examination (docstring originally said
    'questionnaire runs' -- copied from the questionnaire app)."""
    user = models.ForeignKey(UserOld, verbose_name=_(u"问卷用户"))
    num = models.CharField(blank=True, null=True, max_length=128, verbose_name=_(u"学号"))
    name = models.CharField(blank=True, null=True, max_length=128, verbose_name=_(u"姓名"))
    examination = models.ForeignKey(Examination, verbose_name=_(u"课程"))
    # Graded score, set after submission; nullable until graded.
    score = models.IntegerField(blank=True, null=True)
    start_time = models.DateTimeField(blank=True, null=True, verbose_name=_(u"开始时间"))
    end_time = models.DateTimeField(blank=True, null=True, verbose_name=_(u"结束时间"))
    def __unicode__(self):
        return "%s: %s" % (self.user.username, self.examination.course.name)
    class Meta:
        verbose_name = '记录'
        verbose_name_plural = verbose_name
class Answer(models.Model):
    """One stored answer of a sitting; `question`/`choice` hold raw ids,
    not ForeignKeys."""
    takeinfo = models.ForeignKey(TakeInfo)
    question = models.IntegerField()
    choice = models.IntegerField(blank=True, null=True)
    text = models.TextField(blank=True, null=True)
    create_time = models.DateTimeField(auto_now_add=True)
    update_time = models.DateTimeField(auto_now=True)
    def __unicode__(self):
        # BUG FIX: `question` is an IntegerField, so the old
        # `self.question.sortnum` / `self.question.text` raised
        # AttributeError; show the raw question id instead.
        return "Answer(%s: %s)" % (self.question, self.text)
# 统计
class ExaminationStatistics(models.Model):
    """Per-choice answer statistics.

    Unmanaged: maps the externally maintained `examination_statistics`
    table/view; all id columns are raw integers.
    """
    examination = models.IntegerField()
    name = models.CharField(max_length=128, verbose_name=_(u"标题"))
    question = models.IntegerField()
    question_text = models.CharField(max_length=128, verbose_name=_(u"问题"))
    qsort = models.IntegerField()
    type = models.CharField(max_length=32)
    choice = models.IntegerField()
    choice_text = models.CharField(max_length=128, verbose_name=_(u"选项"))
    csort = models.IntegerField()
    # Count of answers for this choice and its share in percent.
    sum = models.IntegerField()
    percent = models.IntegerField()
    class Meta:
        managed = False
db_table = "examination_statistics" | {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,694 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/migrations/0004_auto_20180422_1618.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-22 16:18
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.8): rename Examination.question_num to question_count."""
    dependencies = [
        ('examination', '0003_auto_20180422_1615'),
    ]
    operations = [
        migrations.RenameField(
            model_name='examination',
            old_name='question_num',
            new_name='question_count',
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,695 | ECNU-Studio/emoc | refs/heads/master | /apps/courses/migrations/0002_coursestoteachers.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 17:58
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.8): create the Courses<->Teacheres join model."""
    dependencies = [
        ('teacheres', '0001_initial'),
        ('courses', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='CoursestoTeachers',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('courses', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='courses.Courses', verbose_name='\u8bfe\u7a0b')),
                ('teacheres', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacheres.Teacheres', verbose_name='\u57f9\u8bad\u5e08')),
            ],
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,696 | ECNU-Studio/emoc | refs/heads/master | /apps/questionnaire/models.py | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext as _
from nengli8.models import *
CHOICES_TYPE = [('radio', u'单选'), ('checkbox', u'多选'), ('star', u'打星'), ('text', u'问答')]
class Questionnaire(models.Model):
    """A per-course questionnaire with publish state and participation count."""
    course = models.ForeignKey(CourseOld, verbose_name=_(u"问卷"), related_name='questionnaire_course_id')
    is_published = models.BooleanField(default=False, verbose_name=u'是否发布')
    take_nums = models.IntegerField(default=0, verbose_name=u'参与人数')
    def questions(self):
        """Return this questionnaire's questions ordered by ``sortnum``."""
        return Question.objects.filter(questionnaire=self).order_by('sortnum')
    def statistics(self):
        """Return aggregated statistics rows (unmanaged view) ordered by question."""
        return QuestionnaireStatistics.objects.filter(questionnaire=self.id).order_by('qsort')
    def __unicode__(self):
        return self.course.name
    class Meta:
        verbose_name = '问卷'
        verbose_name_plural = verbose_name
class Question(models.Model):
    """One question on a questionnaire (radio/checkbox/star/text, see CHOICES_TYPE)."""
    questionnaire = models.ForeignKey(Questionnaire, verbose_name=_(u"问卷"))
    # Display order within the questionnaire.
    sortnum = models.IntegerField(default=1, verbose_name=_(u"序号"))
    type = models.CharField(max_length=32, choices=CHOICES_TYPE, verbose_name=_(u"题型"))
    text = models.CharField(max_length=128, verbose_name=_(u"问题"))
    create_time = models.DateTimeField(auto_now_add=True)
    update_time = models.DateTimeField(auto_now=True)
    def choices(self):
        """Return this question's choices ordered by ``sortnum``."""
        return Choice.objects.filter(question=self).order_by('sortnum')
    def statistics(self):
        """Return per-choice aggregate rows (count and percent) for this question."""
        return QuestionnaireStatistics.objects.values('choice', 'choice_text', 'sum', 'percent').filter(question=self.id).order_by('csort')
    def get_answer_texts(self):
        """Return up to five free-text answers given to this question."""
        return Answer.objects.values('text').filter(question=self.id).order_by('id')[:5]
    class Meta:
        verbose_name = '问题'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return u'[%s] (%d) %s' % (self.questionnaire, self.sortnum, self.text)
class Choice(models.Model):
    """One selectable option belonging to a questionnaire Question."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    # Display order of this choice within its question.
    sortnum = models.IntegerField(default=1, verbose_name=_(u"序号"))
    text = models.CharField(max_length=128, verbose_name=_(u"选项"))
    tags = models.CharField(u"Tags", max_length=64, blank=True, editable=False)
    class Meta:
        verbose_name = '选项'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        return u'(%s) %d. %s' % (self.question.sortnum, self.sortnum, self.text)
class RunInfo(models.Model):
    """One user's submission record for a questionnaire (active/waiting runs)."""
    user = models.ForeignKey(UserOld, verbose_name=_(u"问卷用户"))
    questionnaire = models.ForeignKey(Questionnaire, verbose_name=_(u"问卷"))
    create_time = models.DateTimeField(auto_now_add=True, verbose_name=_(u"问卷时间"))
    class Meta:
        verbose_name = '记录'
        verbose_name_plural = verbose_name
class Answer(models.Model):
    """A single response to one questionnaire question."""
    runinfo = models.ForeignKey(RunInfo)
    # Plain integer IDs (not ForeignKeys) of the question/choice answered.
    question = models.IntegerField()
    choice = models.IntegerField(blank=True, null=True)
    text = models.TextField(blank=True, null=True)
    create_time = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        # Bug fix: ``question`` is an IntegerField, so the previous
        # ``self.question.sortnum`` / ``self.question.text`` raised
        # AttributeError whenever this repr was evaluated.
        return "Answer(%s: %s, %s)" % (self.question, self.choice, self.text)
# 效率统计
class QuestionnaireStatistics(models.Model):
questionnaire = models.IntegerField()
name = models.CharField(max_length=128, verbose_name=_(u"问卷标题"))
question = models.IntegerField()
question_text = models.CharField(max_length=128, verbose_name=_(u"问题"))
qsort = models.IntegerField()
type = models.CharField(max_length=32)
choice = models.IntegerField()
choice_text = models.CharField(max_length=128, verbose_name=_(u"选项"))
csort = models.IntegerField()
sum = models.IntegerField()
percent = models.IntegerField()
class Meta:
managed = False
db_table = "questionnaire_statistics" | {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,697 | ECNU-Studio/emoc | refs/heads/master | /apps/nengli8/migrations/0002_userold.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 12:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.8): add the unmanaged UserOld model over the legacy ``users`` table."""
    dependencies = [
        ('nengli8', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserOld',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_column=b'name', max_length=52, verbose_name=b'\xe7\x94\xa8\xe6\x88\xb7\xe5\x90\x8d')),
            ],
            options={
                'db_table': 'users',
                'managed': False,
            },
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,698 | ECNU-Studio/emoc | refs/heads/master | /apps/questionnaire/urls.py | # _*_ coding:utf-8 _*_
from django.conf.urls import *
from questionnaire.views import *
urlpatterns = [
    # questionnaire app routes (named for reverse() lookups)
    url(r'edit/(?P<course_id>[0-9]+)/$', QuestionnaireEdit.as_view(), name='edit_questionnaire'),
    # ``preview`` is a 0/1 flag selecting preview mode.
    url(r'take/(?P<course_id>[0-9]+)/(?P<preview>[0|1])/$', QuestionnaireShow.as_view(), name='show_questionnaire'),
    url(r'statistics/(?P<course_id>[0-9]+)/$', StatisticsShow.as_view(), name='show_statistics'),
    url(r'submit/$', SubmitQuestionnaire.as_view(), name='submit_questionnaire'),
    url(r'publish/$', PublishQuestionnaire.as_view(), name='publish_questionnaire'),
    url(r'cancel/$', CancelQuestionnaire.as_view(), name='cancel_questionnaire'),
    url(r'save/$', SaveQuestionnaire.as_view(), name='save_questionnaire'),
    url(r'show/(?P<runinfo_id>[0-9]+)/$', ShowRuninfoDetail.as_view(), name='show_runinfo_detail'),
]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,699 | ECNU-Studio/emoc | refs/heads/master | /apps/teacheres/models.py | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.db import models
# Create your models here.
#课程
class Courses(models.Model):
    """Unmanaged stand-in mapped onto the courses app's ``courses_courses`` table."""
    class Meta:
        db_table = 'courses_courses'
        managed = False
        verbose_name = '课程'
#培训师表单
class Teacheres(models.Model):
    """A trainer account: login credentials, contact details and introduction.

    NOTE(review): ``password`` is stored as a plain CharField - consider
    hashing it before this goes to production.
    """
    username = models.CharField(max_length=45, verbose_name=_(u"教师登录名"))
    password = models.CharField(max_length=45, verbose_name=_(u"密码"))
    name = models.CharField(max_length=45, verbose_name=_(u"教师姓名"))
    email = models.CharField(max_length=45, verbose_name=_(u"邮箱"))
    phone = models.CharField(max_length=45, verbose_name=_(u"手机"))
    weixin = models.CharField(max_length=45, blank=True, null=True ,verbose_name=_(u"微信"))
    # header = models.CharField(max_length=1000, verbose_name=_(u"头像"))
    introduce = models.TextField(max_length=500, verbose_name=_(u"介绍"))
    # Fields below were sketched but never enabled; kept for reference.
    # courses = models.ForeignKey(Courses, to_field="id", verbose_name=_(u"课程"))
    # cv = models.CharField(max_length=500, verbose_name=_(u"简历"))
    # openid = models.CharField(max_length=45, verbose_name=_(u"openid"))
    # state = models.BooleanField(choices=(("true", "有效"), ("false", "无效")), verbose_name=_(u"是否有效"))
    # notice_wenda = models.CharField(max_length=45, verbose_name=_(u"问答通知"))
    # notice_pinglun = models.CharField(max_length=45, verbose_name=_(u"评论通知"))
    # notice_sendmail = models.CharField(max_length=45, verbose_name=_(u"问答评论发送邮箱"))
    # new_ans = models.CharField(max_length=45, verbose_name=_(u"回复"))
    # language = models.CharField(max_length=45, verbose_name=_(u"语言(1中文,2英文)"))
    class Meta:
        verbose_name = '培训师'
        verbose_name_plural = verbose_name
        # managed = False
        # db_table = 'teacheres'
    def __unicode__(self):
        return self.name
class CoursestoTeachers(models.Model):
    """Join table linking trainers to the courses they teach."""
    teacheres = models.ForeignKey(Teacheres, to_field="id" , verbose_name=_(u"培训师"))
    courses = models.ForeignKey(Courses, to_field="id" , verbose_name=_(u"课程"))
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,700 | ECNU-Studio/emoc | refs/heads/master | /apps/users/views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
def user_login(request):
    """Render the login page on GET; POST handling is not implemented yet.

    NOTE(review): the POST branch falls through and returns None, which
    Django rejects as a response - finish it before wiring this view up.
    """
    if request.method == 'POST':
        # TODO: authenticate the submitted credentials
        pass
    elif request.method == 'GET':
        # render(request, template_name, context)
        return render(request, 'login.html', {})
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,701 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/adminx.py | # _*_ coding:utf-8 _*_
import xadmin
from .models import *
# _*_ coding:utf-8 _*_
import xadmin
from .models import CourseOld, Question, Choice
class ChoiceInline(object):
    """Inline editor for a Question's choices; no extra blank rows."""
    model = Choice
    extra = 0
class CourseOldAdmin(object):
    """xadmin options for the legacy CourseOld model."""
    list_display = ['name', 'manage_question', 'show_examination', 'show_statistics']
    search_fields = ['name']
    list_filter = ['name']
    # read-only fields
    readonly_fields = ['name']
    model_icon = 'fa fa-calendar'
class ExaminationAdmin(object):
    """xadmin options for examinations that are not yet published (see queryset)."""
    list_display = ['course', 'show_examination']
    search_fields = []
    list_filter = []
    # hidden fields
    exclude = ['take_nums']
    relfield_style = 'fk_ajax'
    model_icon = 'far fa-calendar-check'
    # newest first, by update time
    ordering = ['-update_time']
    def queryset(self):
        """Restrict this view to unpublished examinations."""
        qs = super(ExaminationAdmin, self).queryset()
        qs = qs.filter(is_published=False)
        return qs
class PublishedExaminationAdmin(object):
    """xadmin options for examinations that have been published (see queryset)."""
    list_display = ['course', 'show_statistics']
    search_fields = []
    list_filter = []
    # hidden fields
    exclude = ['is_published']
    # NOTE(review): 'question_nums' does not match the field renamed to
    # 'question_count' in migration 0004 - confirm this name is still valid.
    readonly_fields = ['course', 'type', 'question_nums', 'take_nums']
    model_icon = 'fas fa-clipboard-list'
    # newest first, by update time
    ordering = ['-update_time']
    def queryset(self):
        """Restrict this view to published examinations."""
        qs = super(PublishedExaminationAdmin, self).queryset()
        qs = qs.filter(is_published=True)
        return qs
class QuestionAdmin(object):
    """xadmin options for examination questions, with choices edited inline."""
    list_display = ['course', 'text', 'type']
    search_fields = ['text']
    # list_filter = ['type']
    # read-only fields
    readonly_fields = ['sortnum']
    model_icon = 'fas fa-question'
    # hidden fields
    # exclude = ['sortnum']
    relfield_style = 'fk_ajax'
    inlines = [ChoiceInline]
# Wire the admin classes into xadmin; the commented registrations are disabled.
xadmin.site.register(CourseOld, CourseOldAdmin)
# xadmin.site.register(Examination, ExaminationAdmin)
# xadmin.site.register(PublishedExamination, PublishedExaminationAdmin)
# xadmin.site.register(Question, QuestionAdmin) | {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,702 | ECNU-Studio/emoc | refs/heads/master | /apps/courses/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 11:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.8) initial migration: create the Courses model."""
    initial = True
    dependencies = [
        ('teacheres', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Courses',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45, verbose_name='\u8bfe\u7a0b\u540d\u79f0')),
                ('coursesAbstract', models.TextField(max_length=45, verbose_name='\u8bfe\u7a0b\u7b80\u4ecb')),
                ('cover', models.ImageField(upload_to=b'images/%Y/%m', verbose_name='\u5c01\u9762')),
                ('teacherid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='teacheres.Teacheres', verbose_name='\u8bb2\u5e08id')),
            ],
            options={
                'verbose_name': '\u8bfe\u7a0b',
                'verbose_name_plural': '\u8bfe\u7a0b',
            },
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,703 | ECNU-Studio/emoc | refs/heads/master | /apps/nengli8/apps.py | from __future__ import unicode_literals
from django.apps import AppConfig
class Nengli8Config(AppConfig):
    """AppConfig for the legacy ``nengli8`` app."""
    name = 'nengli8'
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,704 | ECNU-Studio/emoc | refs/heads/master | /apps/companys/__init__.py | default_app_config = "companys.apps.CompanysConfig" | {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,705 | ECNU-Studio/emoc | refs/heads/master | /apps/teacheres/adminx.py | # _*_ coding:utf-8 _*_
import xadmin
from .models import Teacheres
from courses.models import Courses
class AddCourses(object):
    """Inline editor for a trainer's courses; no extra blank rows."""
    model = Courses
    extra = 0
#培训师
class TeacheresAdmin(object):
    """xadmin options for trainers.

    NOTE(review): the plaintext ``password`` column is shown in the list
    view - consider removing it.
    """
    list_display = ['name', 'username', 'email', 'phone', 'weixin', 'password']
    search_fields = ['name']
    list_filter = ['name']
    # editable directly in the list view
    list_editable = ['name']
    model_icon = 'fa fa-user'
    inlines = [AddCourses]
xadmin.site.register(Teacheres, TeacheresAdmin) | {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,706 | ECNU-Studio/emoc | refs/heads/master | /apps/companys/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 11:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.8) initial migration: create the Companys model."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Companys',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=45, verbose_name='\u540d\u79f0')),
                ('account', models.CharField(max_length=45, verbose_name='\u8d26\u6237')),
                ('password', models.CharField(max_length=45, verbose_name='\u5bc6\u7801')),
                ('email', models.CharField(blank=True, max_length=45, null=True, verbose_name='\u90ae\u7bb1')),
                ('legalperson', models.CharField(blank=True, max_length=45, null=True, verbose_name='\u6cd5\u4eba')),
                ('address', models.CharField(blank=True, max_length=45, null=True, verbose_name='\u4f01\u4e1a\u5730\u5740')),
                ('cover', models.CharField(blank=True, max_length=45, null=True, verbose_name='\u4f01\u4e1a\u5c01\u9762')),
                ('memo', models.CharField(blank=True, max_length=45, null=True, verbose_name='\u5907\u6ce8')),
                ('state', models.BooleanField(default=0, max_length=45, verbose_name='\u662f\u5426\u6709\u6548')),
            ],
            options={
                'verbose_name': '\u4f01\u4e1a',
                'verbose_name_plural': '\u4f01\u4e1a',
            },
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,707 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/urls.py | # _*_ coding:utf-8 _*_
from django.conf.urls import *
from examination.views import *
urlpatterns = [
    # examination app routes (named for reverse() lookups)
    url(r'edit/(?P<course_id>[0-9]+)/$', QuestionEdit.as_view(), name='edit_question'),
    # ``preview`` is a 0/1 flag selecting preview mode.
    url(r'take/(?P<course_id>[0-9]+)/(?P<preview>[0|1])/$', ExaminationShow.as_view(), name='show_examination'),
    url(r'statistics/(?P<course_id>[0-9]+)/$', StatisticsShow.as_view(), name='show_statistics'),
    url(r'submit/$', SubmitExamination.as_view(), name='submit_examination'),
    url(r'publish/$', PublishExamination.as_view(), name='publish_examination'),
    url(r'cancel/$', CancelExamination.as_view(), name='cancel_examination'),
    url(r'save/$', SaveQuestion.as_view(), name='save_question'),
    url(r'show/(?P<takeinfo_id>[0-9]+)/$', ShowTakeinfoDetail.as_view(), name='show_takeinfo_detail'),
]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,708 | ECNU-Studio/emoc | refs/heads/master | /emoc/__init__.py | # _*_ coding:utf-8 _*_
import pymysql
pymysql.install_as_MySQLdb() | {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,709 | ECNU-Studio/emoc | refs/heads/master | /apps/nengli8/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 12:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.8) initial migration: unmanaged models over legacy tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='CourseOld',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=52, verbose_name=b'\xe8\xaf\xbe\xe7\xa8\x8b\xe5\x90\x8d\xe5\xad\x97')),
            ],
            options={
                'verbose_name': '\u8bfe\u7a0b',
                'db_table': 'courses',
                'managed': False,
                'verbose_name_plural': '\u8bfe\u7a0b',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_column=b'name', max_length=52, verbose_name=b'\xe7\x94\xa8\xe6\x88\xb7\xe5\x90\x8d')),
            ],
            options={
                'db_table': 'users',
                'managed': False,
            },
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,710 | ECNU-Studio/emoc | refs/heads/master | /apps/classes/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-21 12:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.8) initial migration: create the Classes model."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Classes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('companyid', models.CharField(max_length=45, verbose_name='\u516c\u53f8id')),
                ('coursesid', models.CharField(max_length=45, verbose_name='\u8bfe\u7a0bid')),
                ('schoolTime', models.DateTimeField(verbose_name='\u4e0a\u8bfe\u65f6\u95f4')),
                ('address', models.CharField(max_length=100, verbose_name='\u4e0a\u8bfe\u5730\u70b9')),
                ('state', models.BooleanField(max_length=1, verbose_name='\u72b6\u6001')),
                ('period', models.CharField(max_length=45, verbose_name='\u5468\u671f')),
                ('hour', models.IntegerField(default=0, verbose_name='\u5b66\u65f6')),
            ],
            options={
                'verbose_name': '\u73ed\u7ea7',
                'verbose_name_plural': '\u73ed\u7ea7',
            },
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,711 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/migrations/0005_auto_20180430_2012.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-04-30 20:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.8): add TakeInfo.name/num and widen text fields."""
    dependencies = [
        ('examination', '0004_auto_20180422_1618'),
    ]
    operations = [
        migrations.AddField(
            model_name='takeinfo',
            name='name',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='\u59d3\u540d'),
        ),
        migrations.AddField(
            model_name='takeinfo',
            name='num',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='\u5b66\u53f7'),
        ),
        migrations.AlterField(
            model_name='choice',
            name='text',
            field=models.TextField(verbose_name='\u9009\u9879'),
        ),
        migrations.AlterField(
            model_name='question',
            name='text',
            field=models.TextField(verbose_name='\u95ee\u9898'),
        ),
    ]
| {"/apps/courses/adminx.py": ["/apps/courses/models.py"], "/apps/companys/adminx.py": ["/apps/companys/models.py"], "/apps/questionnaire/adminx.py": ["/apps/questionnaire/models.py"], "/apps/classes/adminx.py": ["/apps/classes/models.py"], "/apps/useradmin/urls.py": ["/apps/useradmin/views.py"], "/apps/examination/adminx.py": ["/apps/examination/models.py"], "/apps/teacheres/adminx.py": ["/apps/teacheres/models.py"]} |
52,712 | ECNU-Studio/emoc | refs/heads/master | /apps/examination/apps.py | # _*_ coding:utf-8 _*_
from __future__ import unicode_literals
from django.apps import AppConfig
class ExaminationConfig(AppConfig):
    """AppConfig for the examination app; display name is '测试'."""
    name = 'examination'
    verbose_name = u'测试'
    # label = u'问卷'
52,715 | shubhygups/python_flask_docker_restful_api | refs/heads/master | /run.py | #!/usr/bin/python3
from employee_registry import app
app.run(host='0.0.0.0', port=80) | {"/run.py": ["/employee_registry/__init__.py"]} |
52,716 | shubhygups/python_flask_docker_restful_api | refs/heads/master | /employee_registry/__init__.py | import markdown
import os
import shelve
# Import the framework
from flask import Flask,g
from flask_restful import Resource, Api, reqparse
# Create a instance of Flask
app = Flask(__name__)
# Create the API
api = Api(app)
def get_db():
    """Return the request-scoped shelve database, opening it on first access."""
    if getattr(g, '_database', None) is None:
        # Lazily open the shelf and cache it on the app context.
        g._database = shelve.open("employees.db")
    return g._database
@app.teardown_appcontext
def teardown_db(exception):
    """Close the request-scoped shelve database, if one was opened."""
    handle = getattr(g, '_database', None)
    if handle is not None:
        handle.close()
@app.route("/")
def index():
"""Present some documentation"""
# Open README file
with open(os.path.dirname(app.root_path) + '/README.md', 'r') as markdown_file:
# Read the content of the file
content = markdown_file.read()
# Convert to HTML
return markdown.markdown(content)
class EmployeeList(Resource):
    """REST resource for the employee collection (/employees)."""

    def get(self):
        """Return every registered employee."""
        shelf = get_db()
        employees = [shelf[key] for key in shelf.keys()]
        return {'message': 'Success', 'data': employees}, 200

    def post(self):
        """Register a new employee from the posted fields (all required)."""
        parser = reqparse.RequestParser()
        for field_name in ('employee_id', 'name', 'age', 'department', 'location'):
            parser.add_argument(field_name, required=True)
        # Parse the request into a dict-like args object.
        args = parser.parse_args()
        shelf = get_db()
        shelf[args['employee_id']] = args
        return {'message': 'Employee Registered', 'data': args}, 201
class Employee(Resource):
    """REST resource for a single employee (/employees/<employee_id>)."""

    def get(self, employee_id):
        """Return the employee stored under *employee_id*, or 404."""
        shelf = get_db()
        if not (employee_id in shelf):
            # Fix: response key was misspelled 'mesage'; now consistent with
            # the other endpoints' 'message' key.
            return {'message': 'Employee not found', 'data': {}}, 404
        return {'message': 'Employee found', 'data': shelf[employee_id]}, 200

    def delete(self, employee_id):
        """Delete the employee stored under *employee_id*; 404 if absent."""
        shelf = get_db()
        if not (employee_id in shelf):
            return {'message': 'Employee not found', 'data': {}}, 404
        del shelf[employee_id]
        return '', 204
api.add_resource(EmployeeList, '/employees')
api.add_resource(Employee, '/employees/<string:employee_id>') | {"/run.py": ["/employee_registry/__init__.py"]} |
52,719 | oruxl/angrymetalpy | refs/heads/master | /angrymetalpy/__init__.py | from .angrymetalpy import *
from .timing import *
# Public API of the package.
# NOTE(review): 'reviews_from_txt' is exported here, but the example scripts
# call amp.reviews_from_json - confirm this list is still current.
__all__ = ['site_score_mapping', 'Review', 'Reviewer', 'reviews_from_txt', \
        'reviewers_from_reviews', 'months_between', 'date_range']
# Plotting helpers are optional: only exported when matplotlib is installed.
try:
    import matplotlib.pyplot
    __all__.append('set_month_axis')
except ImportError:
    print("Matplotlib not found. Some plotting functions will not be available")
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,720 | oruxl/angrymetalpy | refs/heads/master | /examples/timeline.py | #
# timeline.py
# create a 2d histogram of review count vs time for each AMG reviewer
#
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import angrymetalpy as amp
if __name__ == '__main__':
    reviews = amp.reviews_from_json('data_20180422.txt')
    amg_reviewers = amp.reviewers_from_reviews(reviews)
    # Least prolific reviewers first, so rows are ordered by review count.
    amg_reviewers = sorted(amg_reviewers, key=lambda x: len(x.reviews))
    fig = plt.figure(figsize=(5, 8), dpi=100)
    ax = fig.add_subplot(111)
    min_date, max_date = amp.date_range(reviews)
    n_months = amp.months_between(min_date, max_date)
    # Timeline of reviewer activity: x = months since the first review,
    # y = the reviewer's row index.
    xs = []
    ys = []
    for i, reviewer in enumerate(amg_reviewers):
        for review in reviewer.reviews:
            xs.append(amp.months_between(min_date, review.date))
            ys.append(i)
    # One bin per month and one row per reviewer.
    xbins = np.arange(0, n_months + 1, 1)
    ybins = np.arange(0, len(amg_reviewers) + 1, 1)
    ax.hist2d(xs, ys, bins=[xbins, ybins])
    ax.set_ylim(0, len(amg_reviewers) - 0.5)
    # y axis should line up reviewer names with rows
    ylabels = [x.name for x in amg_reviewers]
    ytickpos = ybins + 0.5
    ax.set_yticks(ytickpos)
    ax.set_yticklabels(ylabels)
    amp.set_month_axis(ax, min_date, max_date, step=24)
    plt.savefig('timeline.png', transparent=False, dpi=100)
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,721 | oruxl/angrymetalpy | refs/heads/master | /examples/score_hist.py | import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import angrymetalpy as amp
if __name__ == '__main__':
    # Histogram of all review scores in 0.5-point bins.
    reviews = amp.reviews_from_json('data_20180422.txt')
    min_date, max_date = amp.date_range(reviews)
    # NOTE(review): num_months is computed but never used below.
    num_months = amp.months_between(min_date, max_date) + 1
    sc = []
    sc_past = []
    # Roughly six months back from today (365/12 days per month).
    six_months_ago = dt.datetime.today() - dt.timedelta(6*365/12)
    for rev in reviews:
        sc.append(rev.score)
        # NOTE(review): sc_past (scores from the last ~6 months) is collected
        # but not plotted anywhere in this script.
        if rev.date > six_months_ago:
            sc_past.append(rev.score)
    print(np.mean(sc), np.median(sc))
    fig_hist = plt.figure(figsize=(5,4), dpi=100)
    axhist = fig_hist.add_subplot(111)
    axhist.hist(sc, bins=np.arange(0, 6, step=0.5))
    axhist.set_ylabel('Counts')
    axhist.set_xlabel('Score')
    axhist.set_xlim(0, 5.25)
    axhist.set_title('All Scores')
    # Shift tick labels by a quarter bin so each label sits under its bar.
    xtickpos = 0.25 + np.arange(0, 6, step=0.5)
    plt.xticks(xtickpos, np.arange(0, 6, step=0.5))
    plt.savefig('hist.png', transparent=False, dpi=100)
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,722 | oruxl/angrymetalpy | refs/heads/master | /examples/score_history.py | from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from matplotlib import lines
import numpy as np
import angrymetalpy as amp
if __name__ == '__main__':
    # Plot reviews-per-month over time, annotating the months that contained
    # a perfect (5.0) review with the album name(s) in green.
    reviews = amp.reviews_from_json('data_20180422.txt')
    print(len(reviews))
    min_date, max_date = amp.date_range(reviews)
    num_months = amp.months_between(min_date, max_date) + 1
    # Per-month accumulators: total score, review count, and 5.0 albums.
    scores = np.zeros(num_months)
    counts = np.zeros(num_months)
    perfect_albums = [[] for _ in range(num_months)]
    for rev in reviews:
        idx = amp.months_between(min_date, rev.date)
        scores[idx] += rev.score
        counts[idx] += 1
        if rev.score == 5.0:
            perfect_albums[idx].append(rev.album)
    scores /= counts # average scores per month
    # Poisson-style relative uncertainty: sqrt(N)/N per month.
    score_unc = np.sqrt(counts) / counts
    fig = plt.figure(figsize=(5,4), dpi=100)
    rect = (1, 1, 1, 1)
    #ax2 = fig.add_axes(rect, label='axis2')
    ax1 = fig.add_axes(rect, label='axis1')
    ax1.set_xlim(0, num_months)
    #ax2.set_xlim(0, num_months)
    ax1.yaxis.set_ticks_position('left')
    #ax2.yaxis.set_ticks_position('right')
    #ax2.yaxis.set_label_position('right')
    #ax2.xaxis.set_major_formatter(NullFormatter())
    #ax2.xaxis.set_ticks_position('none')
    xs = np.arange(start=0, stop=num_months, step=1)
    #ax1.plot(xs, scores, '-', lw=2, color='r', label='Avg. Score')
    #ax1.fill_between(xs, scores - score_unc, scores + score_unc, lw=0, alpha=0.5)
    ax1.plot(xs, counts, '-', lw=2, color='b', label='Reviews per Month')
    perf_x = []
    # Tracks the x position of the previous annotation so close-together
    # labels get pushed apart (and connected back with a short arrow).
    prev_perfect = -10
    #flip = True
    for time, albums in enumerate(perfect_albums):
        txt = ' & '.join([album.decode('utf-8') for album in albums])
        if txt == '':
            continue
        perf_x.append(time)
        print(time-prev_perfect)
        # If this month is within 2 months of the previous label, offset the
        # text to the right of the previous label instead of directly above.
        x = time if time - prev_perfect > 2 else prev_perfect + 4
        y = 90.65 if x == time else 93
        align = 'bottom'#'top' if flip else 'bottom'
        ax1.plot([time, time], [0, 92], '-', color='g')
        if x != time:
            # Connect the offset label back to its actual month.
            ax1.annotate('', xy=(time, 89.9), xytext=(x, 92.55),
                arrowprops=dict(arrowstyle="-", color='g', alpha=1.0, lw=1, ls='-'))
        #ax1.plot([time, x], [0, 92], '--', color='g')
        ax1.text(x, y, txt, color='g',
            fontsize=10, horizontalalignment='center', verticalalignment=align, rotation='vertical')
        prev_perfect = x#time #if not flip else prev_perfect
    ax1.set_ylim(0,90)
    #ax1.plot(perf_x, counts[perf_x], '*', color='b', lw=0)
    amp.set_month_axis(ax1, min_date, max_date, step=24)
    #ax1.yaxis.set_tick_params(labelcolor='r', color='r')
    #ax2.yaxis.set_tick_params(labelcolor='b', color='b')
    ax1.set_ylabel('Reviews per Month')#, color='r')
    #ax2.set_ylabel('Reviews per Month', color='b')
    #ax.set_title('Brutality vs. Time')
    plt.savefig('avg_score_v_time.png', transparent=False, dpi=100)
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,723 | oruxl/angrymetalpy | refs/heads/master | /angrymetalpy/timing.py | import datetime as dt
import numpy as np
# Useful time-related functions for plotting AMG data
def date_range(review_list):
    """ Find the date range of a set of reviews.

    Returns an (earliest, latest) tuple of the `date` attributes of the
    reviews, or (None, None) for an empty list.
    """
    if not review_list:
        return (None, None)
    dates = [rev.date for rev in review_list]
    return (min(dates), max(dates))
def months_between(min_date, max_date):
    """ Return number of months between two datetime objects.

    Counts calendar-month boundaries only (days are ignored), so two dates
    in the same month yield 0 and Dec->Jan of the next year yields 1.
    """
    year_span = max_date.year - min_date.year
    month_span = max_date.month - min_date.month
    return 12 * year_span + month_span
def set_month_axis(ax, min_date, max_date, step=12):
    """ Given a Matplotlib axis object, set the x axis to display months.

    Data is assumed to be binned one-unit-per-month starting at min_date's
    month; ticks are centred on each bin and labelled 'Mon-YY'. Labels are
    aligned so that ticks land on January, every `step` months.
    """
    num_months = months_between(min_date, max_date) + 1
    # One centred tick position per month bin.
    tick_positions = np.arange(start=0, stop=num_months, step=1) + 0.5
    labels = []
    current = dt.datetime(min_date.year, min_date.month, 1)
    for _ in range(num_months + 1):
        labels.append(dt.datetime.strftime(current, '%b-%y'))
        # Advance to the first of the next month.
        if current.month == 12:
            current = dt.datetime(current.year + 1, 1, 1)
        else:
            current = dt.datetime(current.year, current.month + 1, 1)
    # start labels lined up with january of each year
    offset = 13 - min_date.month
    ax.set_xticks(tick_positions[offset::step])
    ax.set_xticklabels(labels[offset::step])
    ax.set_xlim(-1, num_months + 1)
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,724 | oruxl/angrymetalpy | refs/heads/master | /examples/to_csv.py | import angrymetalpy as amp
if __name__ == '__main__':
    # Round-trip check: dump every review to CSV, then read it back.
    print("Reading from JSON, writing to CSV")
    reviews = amp.reviews_from_json('data_20180422.txt')
    for r in reviews:
        print(r)
        # NOTE(review): append mode means re-running the script duplicates
        # rows in tst.csv; also the bytes + str concatenation below is
        # Python 2 only.
        with open('tst.csv', 'a') as f:
            f.write(r.csv().encode('utf8') + "\n")
    print("Reading from CSV")
    reviews = amp.reviews_from_csv('tst.csv')
    # Spot-check the first few round-tripped reviews.
    for r in reviews[:10]:
        print(r)
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,725 | oruxl/angrymetalpy | refs/heads/master | /examples/tag_correlation.py | import numpy as np
import matplotlib.pyplot as plt
import angrymetalpy as amp
if __name__ == '__main__':
    # Co-occurrence heat map for the 40 most frequent tags.
    reviews = amp.reviews_from_json('data_20180422.txt')
    # we want to build a correlation plot for pairs of tags
    # start by finding all tags we are dealing with
    all_tags = set()
    for rev in reviews:
        for tag in rev.tags:
            all_tags.add(tag)
    # create a 2d matrix of zeros to be filled
    all_tags = list(all_tags)
    tag_counts = np.zeros(len(all_tags), dtype=int)
    for rev in reviews:
        for tag in rev.tags:
            tag_counts[all_tags.index(tag)] += 1
    # Keep only the 40 most common tags.
    all_tags = sorted(zip(tag_counts, all_tags), key=lambda x: x[0], reverse=True)[:40]
    _, all_tags = zip(*all_tags)
    # alphabetize the list
    all_tags = sorted(all_tags)
    arr = np.zeros(shape=(len(all_tags), len(all_tags)), dtype=int)
    # fill the histogram
    for rev in reviews:
        for tag1 in rev.tags:
            for tag2 in rev.tags:
                try:
                    i = all_tags.index(tag1)
                    j = all_tags.index(tag2)
                except ValueError:
                    # Tag not in the top-40 list; skip the pair.
                    continue
                # Only fill the lower triangle (each unordered pair once).
                if j > i:
                    break
                if i != j:
                    arr[i][j] += 1
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim(0, len(all_tags))
    ax.set_ylim(0, len(all_tags))
    ax.set_xticklabels(all_tags, rotation='vertical')
    ax.set_yticklabels(all_tags)
    xbins = np.arange(0, len(all_tags) + 1, step=1)
    ybins = np.arange(0, len(all_tags) + 1, step=1)
    ax.xaxis.set_ticks_position('top')
    # Centre the tick labels on the heat-map cells.
    xtickpos = xbins + 0.5
    ytickpos = ybins + 0.5
    ax.set_xticks(xtickpos)
    ax.set_yticks(ytickpos)
    pcm = ax.pcolormesh(xbins, ybins, arr)
    fig.colorbar(pcm, ax=ax)
    plt.savefig('tag_correlation.pdf')
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,726 | oruxl/angrymetalpy | refs/heads/master | /examples/score_fit.py | # do a time series analysis of AMG review score data
#
# - detrend time series using scipy optimize to do a linear fit
# - plot acf of residuals
# - look at fits to subsets of reviews by genre
from datetime import datetime
from matplotlib import gridspec
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from scipy import optimize
import numpy as np
import angrymetalpy as amp
def linear_model(p, x):
    """ Evaluate the straight line y = p[0] + p[1] * x at x. """
    intercept = p[0]
    slope = p[1]
    return intercept + slope * x
def residual(p, x, y, err):
    """ Error-weighted residuals of the linear model, for scipy.optimize.leastsq. """
    predicted = linear_model(p, x)
    return (predicted - y) / err
if __name__ == '__main__':
    # Fit a line to the average-score-per-month time series, inspect the
    # residuals/ACF, and compare per-genre slopes against the global slope.
    reviews = amp.reviews_from_json('data_20180422.txt')
    min_date, max_date = amp.date_range(reviews)
    num_months = amp.months_between(min_date, max_date) + 1
    # time series
    t = np.arange(start=0, stop=num_months, step=1)
    scores = np.zeros(num_months)
    counts = np.zeros(num_months)
    for rev in reviews:
        idx = amp.months_between(min_date, rev.date)
        scores[idx] += rev.score
        counts[idx] += 1
    scores /= counts # average scores per month
    # sqrt(N)/N relative uncertainty per month.
    scores_err = np.sqrt(counts) / counts
    # Print the reviews from the worst-scoring month, for curiosity.
    min_idx = np.argmin(scores)
    for rev in reviews:
        idx = amp.months_between(min_date, rev.date)
        if idx == min_idx:
            print(rev.date)
            print(rev.album, rev.artist, rev.score)
    # Figure 1: linear fit
    p0 = [3., -0.005]
    pf, cov, info, mesg, success = optimize.leastsq(residual, p0,
        args=(t, scores, scores_err), full_output=1)
    chisq = sum(info["fvec"]*info["fvec"])
    dof = len(t)-len(pf)
    # Parameter uncertainties from the diagonal of the covariance matrix.
    pferr = [np.sqrt(cov[i,i]) for i in range(len(pf))]
    # (slope, slope uncertainty) of the all-genres fit, used in Figure 4.
    global_fit = (pf[1], pferr[1])
    fig_fit = plt.figure(1, figsize=(5,4), dpi=100)#figsize=(7,5))
    ax = fig_fit.add_subplot(111)
    ax.set_xlim(-1, num_months)
    ax.errorbar(t, scores, yerr=scores_err, fmt='.', color='k', label='Data')
    fit_pts = np.linspace(min(t), max(t), 2)
    ax.plot(fit_pts, linear_model(pf, fit_pts), color='r', label='Fit')
    amp.set_month_axis(ax, min_date, max_date, step=24)
    ax.set_xlabel('Time')
    ax.set_ylabel('Average Review Score per Month')
    plt.savefig('avg_score_fit.png', transparent=False, dpi=100)
    # Figure 2: Residuals
    fig_res = plt.figure(2)
    gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
    gs.update(wspace=0.025, hspace=0.05)
    ax2 = fig_res.add_subplot(gs[0])
    ax2hist = fig_res.add_subplot(gs[1])
    ax2.set_xlim(-1, num_months)
    residuals = scores - linear_model(pf, t)
    ax2.plot(t, residuals, '.')
    ax2hist.hist(residuals, bins=15, alpha=0.5, orientation='horizontal')
    ax2hist.yaxis.set_major_formatter(NullFormatter())
    amp.set_month_axis(ax2, min_date, max_date, step=12)
    ax2.set_xlabel('Time')
    ax2.set_ylabel('Fit residual')
    ax2hist.set_xlabel('Counts')
    plt.savefig('avg_score_res.pdf')
    # Figure 3: Residual ACF
    fig_acf = plt.figure(3, figsize=(5,4), dpi=100)
    ax3 = fig_acf.add_subplot(111)
    ax3.set_xlim(-1, num_months)
    ax3.acorr(residuals, maxlags=20)
    ax3.set_xlim(-1, 20)
    ax3.set_ylim(-0.25, 1.05)
    ax3.set_xlabel('Lag')
    ax3.set_ylabel('Residual ACF')
    plt.savefig('avg_score_acf.png', transparent=False, dpi=100)
    # Figure 4: Genre correlations
    fig_genre = plt.figure(4, figsize=(5,4), dpi=100)
    ax4 = fig_genre.add_subplot(111)
    genres = ['Death Metal', 'Black Metal', 'Doom Metal', 'Progressive Metal',
              'Folk Metal', 'Thrash Metal', 'Heavy Metal', 'Hardcore',
              'Power Metal', 'Hard Rock']
    corrs = []
    corr_err = []
    total_counts = []
    for genre in genres:
        # Rebuild the monthly series restricted to this genre's reviews.
        scores = np.zeros(num_months)
        counts = np.zeros(num_months)
        for rev in reviews:
            if genre not in rev.tags:
                continue
            idx = amp.months_between(min_date, rev.date)
            scores[idx] += rev.score
            counts[idx] += 1
        print('{} | {:.2f} +/- {:.2f}'.format(genre, sum(scores) / sum(counts), np.sqrt(sum(counts))/sum(counts)))
        scores /= counts # average scores per month
        scores_err = np.sqrt(counts) / counts
        # prune out months with no reviews of this genre
        _t = t[~np.isnan(scores)]
        _scores = scores[~np.isnan(scores)]
        _scores_err = scores_err[~np.isnan(scores)]
        # now we fit the scores with a linear model
        p0 = [3., 0.2]
        pf, cov, info, mesg, success = optimize.leastsq(residual, p0,
            args=(_t, _scores, _scores_err), full_output=1)
        pferr = [np.sqrt(cov[i,i]) for i in range(len(pf))]
        corrs.append(pf[1])
        corr_err.append(pferr[1])
        total_counts.append(sum(counts))
    # Sort genres by total review count, most-reviewed first.
    zipped = sorted(zip(genres, corrs, corr_err, total_counts), key=lambda x: x[3], reverse=True)
    genres, corrs, corr_err, total_counts = zip(*zipped)
    ax4.errorbar(range(len(genres)), corrs, yerr=corr_err, fmt='.')
    # Band showing the all-genres slope +/- its uncertainty for comparison.
    ax4.axhspan(global_fit[0] - global_fit[1], global_fit[0] + global_fit[1],
        color='r', alpha=0.5, label='All genres')
    ax4.plot([-0.5, len(genres) - 0.5], [0, 0], 'b--')
    ax4.set_xlim(-0.5, len(genres) - 0.5)
    ax4.set_ylabel('Change in Average Score per Month')
    ax4.set_xticks(range(len(genres)))
    ax4.set_xticklabels(genres, rotation='vertical')
    plt.legend(loc='best')
    plt.savefig('avg_score_corr.png', transparent=False, dpi=100)
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,727 | oruxl/angrymetalpy | refs/heads/master | /examples/reviewer_scores.py | from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import angrymetalpy as amp
if __name__ == '__main__':
    # Plot the monthly average score of each prolific reviewer over time.
    reviews = amp.reviews_from_json('data_20180422.txt')
    amg_reviewers = amp.reviewers_from_reviews(reviews)
    amg_reviewers = sorted(amg_reviewers, key=lambda x: len(x.reviews))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    min_date, max_date = amp.date_range(reviews)
    n_months = amp.months_between(min_date, max_date)
    xs = np.arange(start=0, stop=n_months, step=1)
    # timeline of reviewer activity
    for i, reviewer in enumerate(amg_reviewers):
        ys = np.zeros(n_months)
        ys_counts = np.zeros(n_months)
        for review in reviewer.reviews:
            idx = amp.months_between(min_date, review.date)
            # NOTE(review): for a review in the very first month idx is 0,
            # so idx - 1 wraps to the LAST month's bin — confirm whether the
            # off-by-one shift here is intentional.
            ys[idx - 1] += review.score
            ys_counts[idx - 1] += 1
        # Only average months where the reviewer wrote more than 3 reviews.
        idxs = np.where(ys_counts > 3)
        ys[idxs] /= ys_counts[idxs]
        if len(idxs[0]) > 1:
            ax.plot(xs[idxs], ys[idxs], '.', label=reviewer.name)
    amp.set_month_axis(ax, min_date, max_date, step=12)
    ax.set_ylabel('Average scores of reviewers with > 3 reviews/month')
    ax.set_title('Brutality vs. Time')
    ax.legend(loc='best', ncol=5, fontsize=10)
    plt.savefig('scores.pdf')
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,728 | oruxl/angrymetalpy | refs/heads/master | /angrymetalpy/angrymetalpy.py | import json
import datetime as dt
import numpy as np
import csv
from StringIO import StringIO
# These are the interpretation of the scores listed on the site
# Could be useful for something...
site_score_mapping = {
'perfect': 5.0,
'excellent': 4.5,
'great': 4.0,
'very good': 3.5,
'good': 3.0,
'mixed': 2.5,
'disappointing': 2.0,
'bad': 1.5,
'embarrassing': 1.0,
'pathetic': 0.5,
'worthless': 0.0,
}
class _SetEncoder(json.JSONEncoder):
''' Helper class to allow the json library to serialize set objects '''
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
class Review(object):
    """ A single AMG album review: album/artist/author metadata, the review
    date, a filtered tag set and a numeric score (-1 means unscored). """
    def __init__(self, album, artist, author, date, tags, score, text):
        # found that some albums have an extra ending
        self._album = album.split(' | Angry Metal Guy')[0].strip()
        self._artist = artist
        self._author = author
        self._date = date
        self._tags = tags
        self._score = score
        # some reviews are unscored, so we set a flag
        if self._score == -1:
            self._scored = False
        else:
            self._scored = True
        self._text = text
        self._filter_tags()
    def __repr__(self):
        return 'Review of {} by {}. Reviewer: {} on {}. Score: {}'.format(
            self._album, self._artist, self._author,
            dt.datetime.strftime(self._date, "%Y-%m-%d"), self._score
        )
    def is_valid(self):
        """ True if all fields were filled successfully """
        return self._album != "" and self._artist != "" and \
            self._author != "" and self._date is not None and \
            self._score != -1
    def _filter_tags(self):
        """
        Remove numbers and dates from tag list. Private method because
        this should be done while creating the review object
        """
        newtags = set()
        for tag in self._tags:
            # see if the tag is a number (e.g. the score appears as a tag)
            try:
                tag = float(tag)
                continue
            except ValueError:
                pass
            # cut out generic tags
            if tag.lower() == "review" or tag.lower() == "reviews":
                continue
            if tag.lower() == "release" or tag.lower() == "releases":
                continue
            # cut out tags of the form e.g. Mar2016 or Mar16
            try:
                dt.datetime.strptime(tag, "%b%y")
                continue
            except ValueError:
                pass
            try:
                dt.datetime.strptime(tag, "%b%Y")
                continue
            except ValueError:
                pass
            newtags.add(tag)
        self._tags = newtags
    def json(self):
        """ Serialize this review (minus the text body) as a JSON string. """
        json_dict = {
            'album': self._album,
            'artist': self._artist,
            'author': self._author,
            'date': dt.datetime.strftime(self._date, "%Y-%m-%d"),
            'tags': self._tags,
            'score': self._score,
        }
        return json.dumps(json_dict, cls=_SetEncoder, indent=4, sort_keys=True)
    def csv(self):
        """ Serialize this review as one comma-separated line; string fields
        are wrapped in double quotes and tags joined with ';'. """
        def escape(string):
            # Wrap in quotes so embedded commas survive the CSV round trip.
            esc_string = "\"" + string + "\""
            if type(esc_string) != unicode:
                esc_string = esc_string.decode('utf-8')
            return esc_string
        tag_string = escape(';'.join(self._tags))
        csv_fields = [
            escape(self._album), escape(self._artist),
            escape(self._author),
            dt.datetime.strftime(self._date, "%Y-%m-%d"),
            tag_string, str(self._score)
        ]
        return ','.join(csv_fields)
    @property
    def album(self):
        return self._album
    @property
    def artist(self):
        return self._artist
    @property
    def author(self):
        return self._author
    @property
    def score(self):
        return self._score
    @property
    def tags(self):
        return self._tags
    @author.setter
    def author(self, val):
        self._author = val
    @property
    def date(self):
        return self._date
    @staticmethod
    def from_json(string):
        """ Create a review object from a JSON string.

        Raises ValueError if the string is not a complete/valid record
        (callers rely on this to accumulate multi-line JSON dumps). """
        try:
            json_dict = json.loads(string)
            rev = Review(json_dict['album'].encode('utf-8'),
                         json_dict['artist'].encode('utf-8'),
                         json_dict['author'].encode('utf-8'),
                         dt.datetime.strptime(json_dict['date'], '%Y-%m-%d'),
                         set(json_dict['tags']), json_dict['score'], '')
            return rev
        except Exception as e:
            raise ValueError
    @staticmethod
    def from_csv(string):
        """ Create a review object from a CSV string.

        Raises ValueError if the line cannot be parsed. """
        def unicode_csv_reader(utf8_data, dialect=csv.excel, **kwargs):
            csv_reader = csv.reader(utf8_data, dialect=dialect, **kwargs)
            for row in csv_reader:
                yield [unicode(cell, 'utf-8') for cell in row]
        line = StringIO(string)
        csv_parse = unicode_csv_reader(line, quotechar='"')
        def unescape(_str):
            # Undo csv()'s escape(): strip one pair of surrounding quotes.
            try:
                _str = _str.encode('utf-8')
            except:
                pass
            # BUGFIX: this previously tested `_str[:-1] == '\"'` (a slice,
            # not the last character) and returned `_str[1:-2]`, which both
            # never matched normal fields and dropped a content character
            # when it did. Compare the first/last characters and trim
            # exactly one quote from each end.
            if _str[:1] == '\"' and _str[-1:] == '\"':
                return _str[1:-1]
            return _str
        rev = None
        for info in csv_parse:
            try:
                album = unescape(info[0])
                artist = unescape(info[1])
                author = unescape(info[2])
                date = dt.datetime.strptime(info[3], '%Y-%m-%d')
                tags = unescape(info[4]).split(';')
                score = float(info[5])
                rev = Review(album, artist, author, date, set(tags), score, '')
            except Exception as e:
                raise ValueError
        return rev
class Reviewer(object):
def __init__(self, name):
self._name = name
self._reviews = set()
@property
def name(self):
return self._name
@property
def reviews(self):
return self._reviews
def add_review(self, review):
""" Associate a review with this reviewer """
if review.author != '' and review.author != self._name:
print('Warning: Overwriting review author field.')
review.author = self._name
self._reviews.add(review)
def tag_list(self):
tagset = set()
for review in self._reviews:
for tag in review.tags:
tagset.add(tag)
return list(tagset)
def tag_counts(self, sort='a'):
the_tags = self.tag_list()
tag_counts = np.zeros(len(the_tags), dtype=int)
for review in self._reviews:
for tag in review.tags:
tag_counts[the_tags.index(tag)] += 1
# user may pass 'a' or 'd' to sort ascending or descending
rev = True if sort == 'd' else False
return sorted(zip(the_tags, tag_counts), key=lambda x: x[1], reverse=rev)
def score_list(self):
scorelist = []
for review in self._reviews:
scorelist.append(review.score)
return np.asarray(scorelist)
def score_counts(self):
scorehist = np.zeros(11) # always [0, 5.0] in 0.5 steps
for review in self._reviews:
scorehist[int(review.score * 2)] += 1
return scorehist
def reviews_from_json(fname):
    """ Return a list of reviews from a text file containing JSON dumps of review objects.

    Each review is a multi-line, indented JSON object; lines are accumulated
    until they parse as a complete value. Lines starting with '#' are
    skipped, and records that fail Review.is_valid() are dropped.
    """
    reviews = []
    with open(fname, 'r') as f:
        for line in f:
            # skip header
            if line[0] == '#':
                continue
            while True:
                try:
                    rev = Review.from_json(line)
                    if rev.is_valid():
                        # filter out unscored reviews
                        reviews.append(rev)
                    break
                except ValueError:
                    # Not yet a complete JSON value; pull in the next line.
                    # BUGFIX: a truncated trailing record used to raise an
                    # uncaught StopIteration here — now we just drop it.
                    try:
                        line += next(f)
                    except StopIteration:
                        return reviews
    return reviews
def reviews_from_csv(fname):
    """ Return a list of reviews from a text file containing csv-style review info """
    parsed = []
    with open(fname, 'r') as f:
        for line in f:
            review = Review.from_csv(line)
            # keep only fully-populated, scored reviews
            if review.is_valid():
                parsed.append(review)
    return parsed
def reviewers_from_reviews(rev_list):
    """ Returns a list of reviewers inferred from the author field of
    each review in a review list """
    by_name = {}
    ordered = []
    for rev in rev_list:
        # skip unscored reviews and ones with no album name
        if rev.score == -1 or rev._album == '':
            continue
        reviewer = by_name.get(rev.author)
        if reviewer is None:
            # first time we see this author: create their Reviewer entry
            reviewer = Reviewer(rev.author)
            by_name[rev.author] = reviewer
            ordered.append(reviewer)
        reviewer.add_review(rev)
    return ordered
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,729 | oruxl/angrymetalpy | refs/heads/master | /examples/score_genre.py | from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import angrymetalpy as amp
if __name__ == '__main__':
    # Plot yearly-binned average score per genre over time.
    reviews = amp.reviews_from_json('data_20180422.txt')
    min_date, max_date = amp.date_range(reviews)
    num_months = amp.months_between(min_date, max_date) + 1
    genres = set(['Death Metal', 'Black Metal', 'Doom Metal', 'Progressive Metal', 'Folk Metal', 'Thrash Metal', 'Hardcore', 'Hard Rock'])
    fig = plt.figure(figsize=(7,5))
    ax = fig.add_subplot(111)
    ax.set_xlim(0, num_months)
    for genre in genres:
        # Monthly score totals and counts for reviews tagged with this genre.
        scores = np.zeros(num_months)
        counts = np.zeros(num_months)
        for rev in reviews:
            if genre not in rev.tags:
                continue
            #if len(list(genre & set(rev.tags))) > 1:
            #    continue
            idx = amp.months_between(min_date, rev.date)
            scores[idx] += rev.score
            counts[idx] += 1
        scores /= counts # average scores per month
        xs = np.arange(start=0, stop=num_months, step=1)
        # Re-bin the monthly averages into 12-month windows, ignoring
        # empty (NaN) months inside each window.
        month_bin = 12
        binned_xs = xs[::month_bin][1:]
        binned_scores = np.zeros(len(binned_xs))
        for i in range(len(binned_xs)):
            scores_in_range = [_ for _ in scores[month_bin * i:][:month_bin] if not np.isnan(_)]
            if len(scores_in_range) > 0:
                binned_scores[i] = np.sum(scores_in_range) / len(scores_in_range)
            else:
                binned_scores[i] = np.nan
        ax.plot(binned_xs, binned_scores, '-', lw=2, label=genre)
    #    ax.plot(xs, scores, '-', lw=2, label=genre)
    amp.set_month_axis(ax, min_date, max_date)
    #ax.set_ylabel('Average Review Scores')
    plt.legend(loc='best')
    plt.savefig('genre_score.pdf')
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,730 | oruxl/angrymetalpy | refs/heads/master | /examples/score_tag.py | from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import numpy as np
import angrymetalpy as amp
if __name__ == '__main__':
    # Dual-axis plot: average score per month (left, red) and review
    # volume per month (right, blue) on overlaid axes.
    reviews = amp.reviews_from_json('data_20180422.txt')
    min_date, max_date = amp.date_range(reviews)
    num_months = amp.months_between(min_date, max_date) + 1
    scores = np.zeros(num_months)
    counts = np.zeros(num_months)
    for rev in reviews:
        idx = amp.months_between(min_date, rev.date)
        scores[idx] += rev.score
        counts[idx] += 1
    scores /= counts # average scores per month
    # sqrt(N)/N relative uncertainty per month.
    score_unc = np.sqrt(counts) / counts
    fig = plt.figure(figsize=(7,5))
    # Two axes stacked on the same rectangle; ax1 drawn on top of ax2.
    rect = (1, 1, 1, 1)
    ax2 = fig.add_axes(rect, label='axis2')
    ax1 = fig.add_axes(rect, label='axis1')
    ax1.set_xlim(0, num_months)
    ax2.set_xlim(0, num_months)
    ax1.yaxis.set_ticks_position('left')
    ax2.yaxis.set_ticks_position('right')
    ax2.yaxis.set_label_position('right')
    # Hide ax2's x machinery; ax1 owns the shared month axis.
    ax2.xaxis.set_major_formatter(NullFormatter())
    ax2.xaxis.set_ticks_position('none')
    xs = np.arange(start=0, stop=num_months, step=1)
    ax1.plot(xs, scores, '-', lw=2, color='r', label='Avg. Score')
    # Shaded +/- 1 sigma band around the monthly average.
    ax1.fill_between(xs, scores - score_unc, scores + score_unc, lw=0, alpha=0.5)
    ax2.plot(xs, counts, '-', lw=2, color='b', label='Reviews per Month')
    amp.set_month_axis(ax1, min_date, max_date)
    # Color-code each y axis to match its curve.
    ax1.yaxis.set_tick_params(labelcolor='r', color='r')
    ax2.yaxis.set_tick_params(labelcolor='b', color='b')
    ax1.set_ylabel('Average Review Scores', color='r')
    ax2.set_ylabel('Reviews per Month', color='b')
    #ax.set_title('Brutality vs. Time')
    plt.savefig('avg_score_v_time.pdf')
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,731 | oruxl/angrymetalpy | refs/heads/master | /tools/amg_scrape.py | # Scrape data from AMG review pages.
#
# This can take a long time, so ideally this is only run once
# Also included is an update function which stops when it finds a review that
# is already in a file.
from lxml import html
from lxml.etree import tostring
from itertools import chain
import requests
import sys
from datetime import datetime
import re
import angrymetalpy as amp
def stringify_children(node):
    """ Converts content within a tag to text even if inside another tag """
    pieces = [node.text]
    for child in node.getchildren():
        pieces.extend([child.text, tostring(child), child.tail])
    pieces.append(node.tail)
    # filter removes possible Nones in texts and tails
    return ''.join(filter(None, pieces))
def get_review_url(start_page=1, end_page=None):
    """ Returns a list of review page urls from AMG.

    Scrapes listing pages start_page..end_page-1 (just start_page when
    end_page is omitted). Returns None if any request or parse fails.
    """
    baseurl = 'http://www.angrymetalguy.com/' #category/reviews/'
    if end_page is None:
        # user only wants reviews from a specific page
        end_page = start_page + 1
    urllist = []
    for page_num in range(start_page, end_page):
        URL = baseurl
        # Page 1 is the bare base URL; later pages live under /page/N/.
        if page_num > 1:
            URL += 'page/' + str(page_num) + '/'
        try:
            page = requests.get(URL, timeout=2.0)
        except:
            return None
        try:
            tree = html.fromstring(page.content)
        except:
            return None
        urls = tree.xpath('//a[@class="post-thumb img fix"]/@href')
        urllist.extend(urls)
    return urllist
def get_page_data(URL):
""" Scrape score from each review page """
try:
page = requests.get(URL, timeout=10.0)
except:
return
try:
tree = html.fromstring(page.content)
except:
return
# some reviews use the unicode '-' character, u2013, so we have to replace it before splitting
try:
album_artist = tree.xpath('//title/text()')[0].strip().replace(u'\u2013', '-').split(' Review')[0]
artist = album_artist.split(' - ')[0]
album = album_artist.split(' - ')[1]
except:
with open('log.txt', 'a') as f:
f.write('Could not get album/artist info from {}\n'.format(URL))
artist = ''
album = ''
author = tree.xpath('//a[@rel="author"]/text()')[0] # name of reviewer
date = tree.xpath('//time[@class="date time published updated sc"]/text()')[0] # date of review
date_as_obj = datetime.strptime(date, "%B %d, %Y")
taglist = tree.xpath('//meta[@property="article:tag"]/@content') # all tag fields
scorep = None
scorestr = ''
# try to find score, or skip things you might have missed reviews
for tag in taglist:
score_search = re.search('(\d\.\d)', tag)
if score_search is not None:
scorestr = score_search.group(0)
if 'things you might have missed' in tag.lower():
return None
# if score not in the tag, find it the old fashioned way
if scorestr == '':
# score text box is almost always a <p> preceded by a horizontal rule
try:
scorep = stringify_children(tree.xpath("//hr/following-sibling::p[1]")[0])
except IndexError:
# ... but sometimes it's a div
try:
scorep = stringify_children(tree.xpath("//hr/following-sibling::div[1]")[0])
except IndexError:
# ... and sometimes there is no horizontal rule, so we look for center-justified text
try:
scorep = stringify_children(tree.xpath('//p[@style="text-align: center;"]')[0])
except IndexError:
# otherwise we got nothing
scorestr = '-1'
if scorep is not None:
score_search = re.search('(\d\.\d\/5.0)', scorep)
if score_search is not None:
scorestr = score_search.group(0).split('/5.0')[0]
else:
# try to see if any line in the <p> contains an AMG score keyword that
# we can convert to a score
scorep = scorep.split('<br/>')
scorestr = ''
for elem in scorep:
elem = elem.strip().split('!')[0] # some cleanup of the string...
for keyword in amp.site_score_mapping.keys():
# technically this section is incorrect because "good" will also find "very good"
# however the key list has "very good" first, so it should break before checking "good"
score_search = re.search(keyword, elem, flags=re.IGNORECASE)
if score_search is not None:
scorestr = str(amp.site_score_mapping[score_search.group(0).lower()])
break
if scorestr != '':
break
try:
score = float(scorestr)
except:
with open('log.txt', 'a') as f:
f.write('Could not get score from {}\n'.format(URL))
score = -1
try:
# metal bands use unicode characters
album, artist, author = (_.encode('utf-8') for _ in [album, artist, author])
review = amp.Review(album, artist, author, date_as_obj, taglist, score, '')
return review
except UnicodeEncodeError:
print('Unicode error: {}'.format(URL))
def update(prev_file='', max_page=None):
""" Scrape data. If a filename is specified, only scrape until the program
encounters a review already in the file. """
if prev_file != '':
reviews = amp.reviews_from_csv(prev_file)
review_titles = [_.album for _ in reviews]
filename = 'data_{}.txt'.format(
datetime.strftime(datetime.now(), format='%Y%m%d')) if prev_file == '' else prev_file
with open(filename, 'a') as f:
page_count = 1
found_end = False
while not found_end:
urls = get_review_url(start_page=page_count)
print('found {} reviews on page {}'.format(len(urls), page_count))
for url in urls:
rev = get_page_data(url)
if rev is not None:
if prev_file != '':
# check if this review was already in prev_file
if rev.album in review_titles:
# if yes, don't write it and mark this page as the end
found_end = True
continue
# else write to the file
#print('writing {}'.format(rev.album))
f.write(rev.csv().encode('utf-8') + '\n')
page_count += 1
if max_page is not None:
if page_count > max_page:
found_end = True
if __name__ == '__main__':
if len(sys.argv) > 1:
update(sys.argv[1])
else:
update()
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,732 | oruxl/angrymetalpy | refs/heads/master | /setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='angrymetalpy',
version='1.0',
description='Python classes for representing Angry Metal Guy reviews and reviewers',
author='oruxl',
url='https://www.github.com/oruxl/angrymetalpy',
packages=['angrymetalpy'],
setup_requires=['numpy'],
)
| {"/angrymetalpy/__init__.py": ["/angrymetalpy/angrymetalpy.py", "/angrymetalpy/timing.py"], "/examples/timeline.py": ["/angrymetalpy/__init__.py"], "/examples/score_hist.py": ["/angrymetalpy/__init__.py"], "/examples/score_history.py": ["/angrymetalpy/__init__.py"], "/examples/to_csv.py": ["/angrymetalpy/__init__.py"], "/examples/tag_correlation.py": ["/angrymetalpy/__init__.py"], "/examples/score_fit.py": ["/angrymetalpy/__init__.py"], "/examples/reviewer_scores.py": ["/angrymetalpy/__init__.py"], "/examples/score_genre.py": ["/angrymetalpy/__init__.py"], "/examples/score_tag.py": ["/angrymetalpy/__init__.py"], "/tools/amg_scrape.py": ["/angrymetalpy/__init__.py"]} |
52,734 | danielbrzn/SephoraRecommender | refs/heads/master | /app/api/rest/consumer.py | from app.api.rest.test import Receive
from app.api.rest.product import get_info
import numpy
from PIL import Image
import io
from app.api.rest.autoencoder import model_predict
def Consume(uploadedImage):
image_uploaded = uploadedImage.read()
image_data = image_uploaded
rgb = Image.open(io.BytesIO(image_data)).convert("RGB")
#print ("unresized: ", (numpy.array(rgb)).shape)
resized = rgb.resize((500,500), Image.ANTIALIAS)
numpy_array = numpy.array(resized)
#print("resized: ", numpy_array.shape)
r = model_predict(numpy_array)
return get_info(r)
| {"/app/api/rest/consumer.py": ["/app/api/rest/test.py", "/app/api/rest/product.py", "/app/api/rest/autoencoder.py"], "/app/api/rest/resources.py": ["/app/api/__init__.py", "/app/api/rest/consumer.py"], "/app/api/rest/product.py": ["/app/api/__init__.py"], "/app/__init__.py": ["/app/api/__init__.py"]} |
52,735 | danielbrzn/SephoraRecommender | refs/heads/master | /app/api/rest/test.py | def Receive(image):
return [{'productId': '37', 'variantId': '54', 'confidence': '0.5'},
{'productId': '11411', 'variantId': '7792', 'confidence': '0.8'},
{'productId': '14286', 'variantId': '31501', 'confidence': '0.1'},
{'productId': '15121', 'variantId': '34220', 'confidence': '0.4'},
{'productId': '5552', 'variantId': '9689', 'confidence': '0.7'},
{'productId': '9141', 'variantId': '18641', 'confidence': '0.95'},
{'productId': '11502', 'variantId': '24870', 'confidence': '0.34'},
{'productId': '9934', 'variantId': '20724', 'confidence': '0.56'},
{'productId': '10268', 'variantId': '21840', 'confidence': '0.6'},
{'productId': '6254', 'variantId': '12414', 'confidence': '0.6'},
{'productId': '6176', 'variantId': '12036', 'confidence': '0.7'},
{'productId': '6220', 'variantId': '12208', 'confidence': '0.3'}] | {"/app/api/rest/consumer.py": ["/app/api/rest/test.py", "/app/api/rest/product.py", "/app/api/rest/autoencoder.py"], "/app/api/rest/resources.py": ["/app/api/__init__.py", "/app/api/rest/consumer.py"], "/app/api/rest/product.py": ["/app/api/__init__.py"], "/app/__init__.py": ["/app/api/__init__.py"]} |
52,736 | danielbrzn/SephoraRecommender | refs/heads/master | /app/api/rest/resources.py | from datetime import datetime
from flask import request
from flask_restplus import Api
from app.api.rest.base import BaseResource, SecureResource
from app.api import api_rest
from app.api.rest.consumer import Consume
@api_rest.route('/upload')
class uploadResource(BaseResource):
def post(self):
json_payload = request.json
uploadedImage = request.files['photos']
response = Consume(uploadedImage)
print(response)
return response, 200
| {"/app/api/rest/consumer.py": ["/app/api/rest/test.py", "/app/api/rest/product.py", "/app/api/rest/autoencoder.py"], "/app/api/rest/resources.py": ["/app/api/__init__.py", "/app/api/rest/consumer.py"], "/app/api/rest/product.py": ["/app/api/__init__.py"], "/app/__init__.py": ["/app/api/__init__.py"]} |
52,737 | danielbrzn/SephoraRecommender | refs/heads/master | /app/api/rest/product.py | from flask import request
from flask_restplus import Api
import json
from operator import itemgetter
import requests
from app.api.rest.base import BaseResource, SecureResource
from app.api import api_rest
def get_info(listOfMatches):
# sort the list of dictionaries by confidence in descending order
# take only the first 10 products
sortedList = sorted(listOfMatches, key=itemgetter('confidence'), reverse=True)[:10]
listOfProductInfoDictionaries = [];
# for each of the 10 products, call the sephora endpoint to get product name, brand, price, variant image, variant name
for product in sortedList:
r = requests.get('https://sephora.sg/api/v2/products/' + str(product.get('productId')) + '?include=variants', headers={'Accept-Language':'en-SG', 'Content-Type':'application/json'}).json()
print(r.keys())
print(r)
if 'included' in r.keys():
listOfIncluded = r["included"]
#retrieve the variant dictionary
if len(listOfIncluded) > 0:
variant = next((v for v in listOfIncluded if v['id'] == str(product.get('variantId'))), None)
if variant:
variantName = variant['attributes']['name']
variantPrice = variant['attributes']['price']
variantImage = variant['attributes']['image-url']
productName = variant['attributes']['product-name']
brandName = variant['attributes']['brand-name']
productInfo = {"variantName": variantName, "variantPrice": variantPrice, "variantImage": variantImage, "productName": productName, "brandName": brandName}
listOfProductInfoDictionaries.append(productInfo)
return json.dumps(listOfProductInfoDictionaries)
| {"/app/api/rest/consumer.py": ["/app/api/rest/test.py", "/app/api/rest/product.py", "/app/api/rest/autoencoder.py"], "/app/api/rest/resources.py": ["/app/api/__init__.py", "/app/api/rest/consumer.py"], "/app/api/rest/product.py": ["/app/api/__init__.py"], "/app/__init__.py": ["/app/api/__init__.py"]} |
52,738 | danielbrzn/SephoraRecommender | refs/heads/master | /app/api/rest/autoencoder.py | from keras.models import load_model, Model
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import pickle
PATH_TO_MODEL = 'autoencoder.h5'
PATH_TO_OUT_ENCODINGS = 'out_encodings.pckl'
PATH_TO_PRODUCTIDS = 'productIds.pckl'
PATH_TO_VARIANTIDS = 'variantIds.pckl'
# Load previsouly trained model
autoencoder = load_model(PATH_TO_MODEL)
# Get encoder layer from trained model
model = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('encoded').output)
model._make_predict_function()
with open(PATH_TO_OUT_ENCODINGS, 'rb') as f:
out_encodings = pickle.load(f)
with open(PATH_TO_PRODUCTIDS, 'rb') as f:
productIds = pickle.load(f)
with open(PATH_TO_VARIANTIDS, 'rb') as f:
variantIds = pickle.load(f)
def model_predict(image_as_np_array, model=model, out_encodings=out_encodings, productIds=productIds, variantIds=variantIds):
in_encoding = np.reshape(model.predict(np.array([image_as_np_array])), (1,-1))
scores = np.reshape(cosine_similarity(in_encoding, out_encodings), (-1))
ret = []
for i, score in enumerate(scores):
ret.append(
{
'productId' : productIds[i],
'variantId' : variantIds[i],
'confidence' : score,
}
)
return ret | {"/app/api/rest/consumer.py": ["/app/api/rest/test.py", "/app/api/rest/product.py", "/app/api/rest/autoencoder.py"], "/app/api/rest/resources.py": ["/app/api/__init__.py", "/app/api/rest/consumer.py"], "/app/api/rest/product.py": ["/app/api/__init__.py"], "/app/__init__.py": ["/app/api/__init__.py"]} |
52,739 | danielbrzn/SephoraRecommender | refs/heads/master | /app/__main__.py | import os
import click
import subprocess
from subprocess import Popen
from .config import Config
CLIENT_DIR = Config.CLIENT_DIR
@click.group()
def cli():
""" Flask VueJs Template CLI """
pass
def _bash(cmd, **kwargs):
""" Helper Bash Call"""
click.echo('>>> {}'.format(cmd))
return subprocess.call(cmd, env=os.environ, shell=True, **kwargs)
@cli.command(help='Run Flask Dev Server')
def serve_api():
""" Run Flask Development servers"""
click.echo('Starting Flask dev server...')
cmd = 'python run.py'
_bash(cmd)
@cli.command(help='Run Vue Dev Server')
def serve_client():
""" Run Vue Development Server"""
click.echo('Starting Vue dev server...')
cmd = 'npm run serve'
_bash(cmd, cwd=CLIENT_DIR)
@cli.command(help='Build Vue Application', name='build')
def build():
""" Builds Vue Application """
cmd = 'npm run build'
_bash(cmd, cwd=CLIENT_DIR)
click.echo('Build completed')
if __name__ == '__main__':
cli()
| {"/app/api/rest/consumer.py": ["/app/api/rest/test.py", "/app/api/rest/product.py", "/app/api/rest/autoencoder.py"], "/app/api/rest/resources.py": ["/app/api/__init__.py", "/app/api/rest/consumer.py"], "/app/api/rest/product.py": ["/app/api/__init__.py"], "/app/__init__.py": ["/app/api/__init__.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.