index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
17,104
|
EmanuelAlogna/IAML-Ass03
|
refs/heads/master
|
/As301_cnn.py
|
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhage -->
#<!-- Computer Science Department -->
#<!-- Eye Information Research Group -->
#<!-- Introduction to Image Analysis and Machine Learning Course -->
#<!-- File : As301_classifier.py -->
#<!-- Description: Script to train a car detector based on binary classifier-->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D25 - DK-2300 - Kobenhavn S. -->
#<!-- : narcizo[at]itu[dot]dk -->
#<!-- Responsable: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: No additional information -->
#<!-- Date : 24/04/2018 -->
#<!-- Change : 24/04/2018 - Creation of this script -->
#<!-- Review : 24/04/2018 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2018042401 $"
########################################################################
import cv2
import numpy as np
import pandas as pd
import random
import sklearn
from sklearn import svm
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import sys
import imutils
import matplotlib.pyplot as plt
from glob import glob
from random import shuffle
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import load_model
from skimage import transform as tf
from keras.utils import plot_model
########################################################################
def loadDataset(dataset):
    """
    Load every image file listed in `dataset` and return them as a list of
    NumPy arrays (as produced by cv2.imread).

    Aborts the whole program via sys.exit() when any file cannot be read.
    """
    loaded = []
    for path in dataset:
        img = cv2.imread(path)
        if img is None:
            # Abort early: downstream code assumes every file was readable.
            print("Could not read file: {}".format(path))
            sys.exit()
        loaded.append(img)
    return loaded
def sampleNegativeImages(images, negativeSample, size=(64, 64), N=200):
    """
    The dataset has several images of high resolution without cars,
    i.e. called here as negative images. This function select "N" 64x64 negative
    sub-images randomly from each original negative image.

    Side effects:
      - Appends an up-scaled (1728x1152) copy of every input image back onto
        `images`, so the caller's list is doubled in place.
      - Appends each random crop plus a randomly sheared variant of it to
        `negativeSample`. NOTE(review): tf.warp returns float images in
        [0, 1] while the plain crops stay uint8, so the sample list mixes
        dtypes -- confirm downstream normalisation handles both.
    """
    # Initialize internal state of the random number generator
    # (fixed seed so the crop positions are reproducible run-to-run).
    random.seed(1)
    # Final image resolution.
    w, h = size[0], size[1]
    resizedImages = []
    for image in images:
        res = cv2.resize(image, dsize=(1728, 1152), interpolation=cv2.INTER_CUBIC)
        resizedImages.append(res)
    for image in resizedImages:
        images.append(image)
    # Read all images from the negative list.
    i = 0
    for image in images:
        # After the first five images only take 100 samples per image.
        if i > 4:
            N = 100
        for j in range(N):
            # random.random produced random number in [0,1) range
            y = int(random.random() * (len(image) - h))
            x = int(random.random() * (len(image[0]) - w))
            sample = image[y:y + h, x:x + w].copy()
            negativeSample.append(sample)
            # Create Afine transform (random shear in [-0.2, 0.2])
            afine_tf = tf.AffineTransform(shear = random.uniform(-0.2,0.2))
            # Apply transform to image data
            shearedImage = tf.warp(sample, inverse_map=afine_tf)
            negativeSample.append(shearedImage)
        i = i + 1
    print("Non-car dataset:")
    print(len(negativeSample))
    return
def samplePositiveImages(images, positiveSample, size=(64, 64), N=200):
    """
    The dataset has not enough positive images, so we'll increase it by generating new positive
    images by, first, using linear transormation (rotation and reflection) on the
    available positive subset, and then brightness shifts in HSV space.

    For every input image this appends: the original, a rotated centre crop,
    and horizontal flips of both; afterwards every accumulated sample is
    re-emitted with its HSV values shifted by +10 and by -10.
    `size` and `N` are accepted for interface symmetry but are unused here.
    """
    for image in images:
        # Rotate by a random angle in [-15, 15] degrees (canvas grows to fit).
        rotated = imutils.rotate_bound(image, random.randint(-15,15))
        h, w, channels = rotated.shape
        # Take a 64x64 crop around the centre of the rotated image.
        # NOTE(review): rows are indexed with w and columns with h -- harmless
        # for square inputs, but h/w look swapped otherwise; confirm.
        cropped_img = rotated[w//2 - 64//2:w//2 + 64//2, h//2 - 64//2:h//2 + 64//2]
        positiveSample.append(image);
        positiveSample.append(cropped_img)
        positiveSample.append(np.fliplr(image))
        positiveSample.append(np.fliplr(cropped_img))
    supportList = []
    for img in positiveSample:
        supportList.append(img)
    for img in supportList:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #convert it to hsv
        # NOTE(review): uint8 arithmetic wraps around at 0/255 here rather
        # than saturating -- confirm the overflow is acceptable.
        hsv = hsv + 10
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        positiveSample.append(img)
        hsv = hsv - 20
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        positiveSample.append(img)
    print("Car dataset:")
    print(len(positiveSample))
    return
def getY(positiveImages, negativeImages):
    """
    Build the label vector for a combined (positives-first) dataset.

    Parameters:
        positiveImages: sequence of positive (car) samples.
        negativeImages: sequence of negative (non-car) samples.

    Returns:
        list[int]: +1 for each positive sample followed by -1 for each
        negative sample, matching the order in which the feature lists are
        concatenated by the caller (positives first, then negatives).
    """
    # List multiplication replaces the two manual append loops
    # (and drops the stray trailing semicolon of the original).
    return [1] * len(positiveImages) + [-1] * len(negativeImages)
#<!--------------------------------------------------------------------------->
#<!--------------------------------------------------------------------------->
#<!--------------------------------------------------------------------------->
#<!--------------------------------------------------------------------------->
def main():
    """
    Train/evaluate a CNN car vs. non-car classifier on 64x64 image patches.

    Loads the image datasets, augments them, one-hot encodes the labels and
    evaluates a previously trained Keras model loaded from disk (the original
    training code is kept below, disabled as a bare string literal).
    """
    # Folder where the dataset images are saved.
    folder = "./inputs"
    # Dataset filenames.
    positiveFile = glob("%s/cars/*.png" % folder)
    negativeFile = glob("%s/non-cars/*.png" % folder)
    # Vectors used to train the dataset.
    positiveList = []
    negativeList = []
    negativeSample = []
    positiveSample = []
    labels = []
    X = []
    # As 3.02. (a) : Load our car images dataset.
    positiveList = loadDataset(positiveFile)
    negativeList = loadDataset(negativeFile)
    # As 3.02. (b) : Get a sample of negative images. (returns list in negativeSample)
    sampleNegativeImages(negativeList, negativeSample, size=(64,64), N=200)
    samplePositiveImages(positiveList, positiveSample, size=(64,64), N=200)
    #-----------------------------------------------------------#
    #                                                           #
    # Classification Model using Convolutionary neural network  #
    #                                                           #
    #-----------------------------------------------------------#
    # Labels: +1 for cars, -1 for non-cars (positives first, matching X).
    y = getY(positiveSample, negativeSample)
    for image in positiveSample:
        X.append(image)
    for image in negativeSample:
        X.append(image)
    # 85/15 train/test split; fixed random_state for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.85, random_state=1, shuffle=True)
    X_train = np.array(X_train)
    y_train = np.array(y_train)
    X_test = np.array(X_test)
    y_test = np.array(y_test)
    print (X_train.shape)
    print("Count of non-car/car in training data")
    print(y_train.shape[0])
    print("Count of non-car/car in test data")
    print(y_test.shape[0])
    # the final preprocessing step for the input data is to convert our data
    # type to float 32 and normalize our data values to the range[0, 1]
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255
    # Keep the original -1/+1 labels for the confusion matrices below.
    y_trainBeforeProcessing = y_train
    y_testBeforeProcessing = y_test
    print("Before Categorization")
    print(y_train[np.where(y_train == -1.0)].shape)
    print(y_train[np.where(y_train == 1.0)].shape)
    print(y_train[:5])
    # preprocessing class labels for Keras: one-hot, [1,0]=non-car, [0,1]=car
    y_train_1hot = []
    for y in y_train:
        if y == -1:
            y_train_1hot.append([1,0])
        elif y == 1:
            y_train_1hot.append([0,1])
    y_test_1hot = []
    for y in y_test:
        if y == -1:
            y_test_1hot.append([1,0])
        elif y == 1:
            y_test_1hot.append([0,1])
    y_train = np.array(y_train_1hot)
    y_test = np.array(y_test_1hot)
    #print("After Categorization")
    #print(sum([row[0] for row in y_train]))
    #print(sum([row[1] for row in y_train]))
    # sys.exit()
    # NOTE: the block below is the original training code, disabled by
    # wrapping it in a string literal; remove the quotes to retrain.
    """
    model = Sequential()
    # 32 corresponds to the number of convolution filters to use
    # 3 corresponds to the numbers of rows in each convolution kernel
    # 3 corresponds to the number of columns in each convolution kernel
    model.add(Convolution2D(32, (3, 3), activation='relu', input_shape=(64,64,3)))
    model.add(Convolution2D(32, (3, 3), activation='relu'))
    model.add(Dropout(0.25)) # this layer is important because it prevents overfitting
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(0.25)) # this layer is important because it prevents overfitting
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='sigmoid')) #output layer with size of 2 (2 classes)
    # compile model
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit model on training data
    history = model.fit(X_train, y_train, validation_split=0.1765,
                        batch_size=32, nb_epoch=5, verbose=1)
    # evaluate model on the train data
    print("Train Model Eval: {}".format(model.evaluate(X_train, y_train, verbose=1)))
    # evaluate model on test data
    print("Test Model Eval: {}".format(model.evaluate(X_test, y_test, verbose=1)))
    model.save('./outputs/datamodel5epochsNewDataset.h5')
    """
    # Evaluate a previously trained model instead of retraining.
    model = load_model('./outputs/datamodel5epochsNewDataset.h5')
    print(model.summary())
    # Convert the 2-unit network outputs back to -1/+1 labels (argmax).
    y_test_pred = model.predict(X_test)
    y_test_pred_format = []
    for row in y_test_pred:
        if row[0] > row[1]:
            y_test_pred_format.append(-1)
        else:
            y_test_pred_format.append(1)
    y_train_pred = model.predict(X_train)
    y_train_pred_format = []
    for row in y_train_pred:
        if row[0] > row[1]:
            y_train_pred_format.append(-1)
        else:
            y_train_pred_format.append(1)
    print("Confusion on the test set.")
    print("1st Col/Row: Non-Cars | 2nd Col/Row: Cars")
    cm_test = confusion_matrix(y_testBeforeProcessing, y_test_pred_format)
    print(cm_test)
    print("Confusion on the training set.")
    print("1st Col/Row: Non-Cars | 2nd Col/Row: Cars")
    cm_train = confusion_matrix(y_trainBeforeProcessing, y_train_pred_format)
    print(cm_train)
    accuracyTest = sklearn.metrics.accuracy_score(y_testBeforeProcessing, y_test_pred_format)
    print("Accuracy on the test set: {}".format(accuracyTest))
    accuracyTrain = sklearn.metrics.accuracy_score(y_trainBeforeProcessing, y_train_pred_format)
    print("Accuracy on the training set: {}".format(accuracyTrain))
    # Training-history plots, disabled: they need `history` from the
    # (also disabled) training block above.
    """
    # list all data in history
    print(history.history.keys())
    # summarize history for accuracy
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.ylim((0,1))
    plt.show()
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    """
#<!--------------------------------------------------------------------------->
#<!-- -->
#<!--------------------------------------------------------------------------->
# put executing code in main, so that the defined functions
# can be imported in a separate script without executing
# the code
if __name__ == "__main__":
main()
|
{"/As301_pca_visualization.py": ["/As301_classifier.py"], "/As301_pca_viz_compare.py": ["/As301_classifier.py"], "/As301_detector.py": ["/As301_classifier.py"]}
|
17,105
|
EmanuelAlogna/IAML-Ass03
|
refs/heads/master
|
/As301_pca_visualization.py
|
#<!--------------------------------------------------------------------------->
#<!-- File : As301_pca_visualization -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2018042401 $"
########################################################################
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.decomposition import PCA
import sys
import As301_classifier
########################################################################
########################################################################
# PCA visualization of the HOG feature dataset: project to 2D to see
# whether car / non-car samples separate, and inspect the "non-car"
# samples that land inside the car cluster.
########################################################################
dataset = np.load("./outputs/dataset.npy")
df = pd.read_csv("./outputs/hog_dataset.csv", index_col=0)
indices = df.index.values
features = dataset[:,:-1]
labels = dataset[:,-1]
########################################################################
# We are going to apply PCA to reduce the dimensions of our HOG feature
# to see if there is a obvious separation between the two classes
# car and non-car
########################################################################
pca = PCA(n_components=2)
fig_imgs = plt.figure("PCA Analysis", figsize=(12,4))
colors = ['navy', 'darkorange']
lw = 2
# Split by the label stored in the last column (+1 car, -1 non-car).
positive_set = np.array([row for row in dataset if (row[-1] == 1)])
pos_size = positive_set.shape[0]
negative_set = np.array([row for row in dataset if (row[-1] == -1)])
neg_size = negative_set.shape[0]
# Hoist the duplicated vstack (the original built it twice).
# NOTE(review): the stacked rows still contain the label column, so PCA
# is fitted on features *plus* label -- confirm this is intended.
stacked = np.vstack((positive_set, negative_set))
features_pca = pca.fit(stacked).transform(stacked)
print(features_pca.shape)
print(labels.shape)
# Attach CSV row index and label to each projected point.
# NOTE(review): `indices`/`labels` follow the on-disk dataset order while
# features_pca is ordered positives-then-negatives -- verify they agree.
data_pca = np.vstack((features_pca.T, indices, labels)).T
print(data_pca[:5])
# Percentage of variance explained for each components
print('explained variance ratio (first two components): {0}'
      .format(str(pca.explained_variance_ratio_)))
var_expl_1st, var_expl_2nd = pca.explained_variance_ratio_
sub = fig_imgs.add_subplot(1,3,1)
plt.scatter(features_pca[:pos_size,0], features_pca[:pos_size,1],
            color=colors[0], alpha=.8, lw=lw, label='car')
plt.scatter(features_pca[pos_size:,0], features_pca[pos_size:,1],
            color=colors[1], alpha=.8, lw=lw, label='non-car')
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of Car/Non-Car dataset')
# 1st principal component explains 26% of the total variance
plt.xlabel('1st principal component')
plt.xlim((-4,4))
# 2nd principal component explains 8% of the total variance
plt.ylabel('2nd principal component')
plt.ylim((-4,4))
plt.plot()
# Analyze the corner cases ('non-car' label but value of 1st component less than 0.5)
special_cases = np.array([row for row in data_pca if (row[-1] == -1 and row[0] < 0.5)])
img2_neg_set = np.array([row for row in data_pca if (row[-1] == -1 and row[0] >= 0.5)])
img2_pos_set = np.array([row for row in data_pca if (row[-1] == 1)])
sub = fig_imgs.add_subplot(1,3,2)
plt.scatter(img2_pos_set[:,0], img2_pos_set[:,1],
            color=colors[0], alpha=.8, lw=lw, label='car')
plt.scatter(img2_neg_set[:,0], img2_neg_set[:,1],
            color=colors[1], alpha=.8, lw=lw, label='non-car')
plt.scatter(special_cases[:,0], special_cases[:,1],
            color="red", alpha=.8, lw=lw, label='special cases of non-car')
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('Highlighting instances of Non-Cars\nthat are very similar to Cars', fontsize=8)
plt.xlabel(r"$1^{st}$ principal component")
plt.xlim((-4,4))
plt.ylabel(r"$2^{nd}$ principal component")
plt.ylim((-4,4))
plt.plot()
# analyze the special non-car indices
# BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24
# (it was only an alias of the builtin int) -- use `int` directly.
special_indices = special_cases[:,-2].astype(int)
special_filenames = df.iloc[list(special_indices)]["files"].values
res = cv2.imread(special_filenames[0])
for fname in list(special_filenames)[1:]:
    img = cv2.imread(fname)
    res = np.hstack((res,img))
# create a 5x5 collage of the 64x64 special-case patches
# NOTE(review): assumes exactly 25 special cases exist -- confirm.
collage = np.zeros((5*64,5*64,3), dtype=int)
collage[0:64,0:5*64,:] = res[:,0:5*64,:].copy()
collage[64:128,0:5*64,:] = res[:,5*64:10*64,:].copy()
collage[128:192,0:5*64,:] = res[:,10*64:15*64,:].copy()
collage[192:256,0:5*64,:] = res[:,15*64:20*64,:].copy()
collage[256:320,0:5*64,:] = res[:,20*64:25*64,:].copy()
collage = collage.astype(np.uint8)
sub = fig_imgs.add_subplot(1,3,3)
sub.set_title("Special Case Images")
sub.imshow(collage)
sub.axis("off")
plt.plot()
plt.show()
########################################################################
|
{"/As301_pca_visualization.py": ["/As301_classifier.py"], "/As301_pca_viz_compare.py": ["/As301_classifier.py"], "/As301_detector.py": ["/As301_classifier.py"]}
|
17,106
|
EmanuelAlogna/IAML-Ass03
|
refs/heads/master
|
/As301_pca_viz_compare.py
|
#<!--------------------------------------------------------------------------->
#<!-- File : As301_pca_viz_compare -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2018042401 $"
########################################################################
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import sys
import As301_classifier
########################################################################
# Load the HOG feature dataset (features in all columns but the last,
# label -1/+1 in the last column) and the matching filename CSV.
dataset = np.load("./outputs/dataset.npy")
df = pd.read_csv("./outputs/hog_dataset.csv", index_col=0)
indices = df.index.values
features = dataset[:,:-1]
labels = dataset[:,-1]
positive_set = np.array([row for row in dataset if (row[-1] == 1 )])
pos_size = positive_set.shape[0]
negative_set = np.array([row for row in dataset if (row[-1] == -1) ])
neg_size = negative_set.shape[0]
########################################################################
# Compare 2D PCA of the HOG features and the PCA applied on the
# raw image data (grayscale).
########################################################################
pca = PCA(n_components=2)
fig_imgs = plt.figure("PCA HOG Comparison", figsize=(12,4))
colors = ['navy', 'darkorange']
lw = 2
sub = fig_imgs.add_subplot(1,2,1)
# Left plot: PCA on the raw grayscale pixel values of each stored image.
img_data = []
for fpath in list(df["files"]):
    img = cv2.imread(fpath)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_data.append(gray.flatten())
img_data = np.array(img_data)
# Standardize the pixel features before PCA.
scaler = StandardScaler()
img_data = scaler.fit_transform(img_data)
img_pca = pca.fit(img_data).transform(img_data)
print('explained variance ratio (first two components): {0}'
      .format(str(pca.explained_variance_ratio_)))
# NOTE(review): assumes the CSV rows are ordered positives first, so the
# first pos_size projected points are the car samples -- confirm.
plt.scatter(img_pca[:pos_size,0], img_pca[:pos_size,1],
            color=colors[0], alpha=.8, lw=lw, label='car')
plt.scatter(img_pca[pos_size:,0], img_pca[pos_size:,1],
            color=colors[1], alpha=.8, lw=lw, label='non-car')
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of Car/Non-Car dataset (raw image data)')
# 1st principal component explains 40% of the total variance
plt.xlabel('1st principal component')
plt.xlim((-140,170))
# 2nd principal component explains 10% of the total variance
plt.ylabel('2nd principal component')
plt.ylim((-65,65))
plt.plot()
# Right plot: PCA on the HOG features.
# NOTE(review): full_set still contains the label column, so PCA is
# fitted on features plus label -- confirm this is intended.
full_set = np.vstack((positive_set,negative_set))
features_pca = pca.fit(full_set).transform(full_set)
data_pca = np.vstack((features_pca.T, indices, labels)).T
# Percentage of variance explained for each components
print('explained variance ratio (first two components): {0}'
      .format(str(pca.explained_variance_ratio_)))
var_expl_1st, var_expl_2nd = pca.explained_variance_ratio_
sub = fig_imgs.add_subplot(1,2,2)
plt.scatter(features_pca[:pos_size,0], features_pca[:pos_size,1],
            color=colors[0], alpha=.8, lw=lw, label='car')
plt.scatter(features_pca[pos_size:,0], features_pca[pos_size:,1],
            color=colors[1], alpha=.8, lw=lw, label='non-car')
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of Car/Non-Car dataset (HOG)')
# 1st principal component explains 26% of the total variance
plt.xlabel('1st principal component')
plt.xlim((-4,4))
# 2nd principal component explains 8% of the total variance
plt.ylabel('2nd principal component')
plt.ylim((-4,4))
plt.plot()
plt.show()
########################################################################
|
{"/As301_pca_visualization.py": ["/As301_classifier.py"], "/As301_pca_viz_compare.py": ["/As301_classifier.py"], "/As301_detector.py": ["/As301_classifier.py"]}
|
17,107
|
EmanuelAlogna/IAML-Ass03
|
refs/heads/master
|
/As301_classifier.py
|
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhage -->
#<!-- Computer Science Department -->
#<!-- Eye Information Research Group -->
#<!-- Introduction to Image Analysis and Machine Learning Course -->
#<!-- File : As301_classifier.py -->
#<!-- Description: Script to train a car detector based on binary classifier-->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D25 - DK-2300 - Kobenhavn S. -->
#<!-- : narcizo[at]itu[dot]dk -->
#<!-- Responsable: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: No additional information -->
#<!-- Date : 24/04/2018 -->
#<!-- Change : 24/04/2018 - Creation of this script -->
#<!-- Review : 24/04/2018 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2018042401 $"
########################################################################
import cv2
import numpy as np
import os
import pandas as pd
import random
import sklearn
from sklearn import svm
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split
import sys
import imutils
from glob import glob
########################################################################
def loadDataset(dataset):
    """
    Read every image file named in `dataset` with cv2.imread and return the
    resulting list of NumPy arrays.

    Terminates the program (sys.exit) on the first unreadable file, after
    printing which file failed.
    """
    result = []
    for entry in dataset:
        decoded = cv2.imread(entry)
        if decoded is None:
            # Fail fast: the rest of the pipeline assumes a complete set.
            print("Could not read file: {}".format(entry))
            sys.exit()
        result.append(decoded)
    return result
def sampleNegativeImages(images, negativeSample, size=(64, 64), N=200):
    """
    The dataset has several images of high resolution without cars,
    i.e. called here as negative images. This function selects "N" 64x64
    negative sub-images randomly from each original negative image, plus a
    randomly sheared variant of every crop.

    Side effects:
      - Appends an up-scaled (1728x1152) copy of every input image back
        onto `images`, so the caller's list is doubled in place.
      - Appends crops and sheared crops to `negativeSample`. NOTE(review):
        tf.warp returns float images in [0, 1] while the plain crops stay
        uint8 -- confirm downstream code handles both dtypes.
    """
    # BUGFIX: this module never imported skimage.transform, so the shear
    # code below raised NameError on `tf`. Import locally (mirrors the
    # `from skimage import transform as tf` used by As301_cnn.py) to keep
    # the module's top-level dependencies unchanged.
    from skimage import transform as tf
    # Initialize internal state of the random number generator
    # (fixed seed so the crop positions are reproducible).
    random.seed(1)
    # Final image resolution.
    w, h = size[0], size[1]
    resizedImages = []
    for image in images:
        res = cv2.resize(image, dsize=(1728, 1152), interpolation=cv2.INTER_CUBIC)
        resizedImages.append(res)
    for image in resizedImages:
        images.append(image)
    # Read all images from the negative list.
    i = 0
    for image in images:
        # After the first five images only take 100 samples per image.
        if i > 4:
            N = 100
        for j in range(N):
            # random.random produces a random number in the [0,1) range.
            y = int(random.random() * (len(image) - h))
            x = int(random.random() * (len(image[0]) - w))
            sample = image[y:y + h, x:x + w].copy()
            negativeSample.append(sample)
            # Create affine transform with a random shear.
            afine_tf = tf.AffineTransform(shear = random.uniform(-0.2,0.2))
            # Apply transform to image data.
            shearedImage = tf.warp(sample, inverse_map=afine_tf)
            negativeSample.append(shearedImage)
        i = i + 1
    return
def samplePositiveImages(images, positiveSample, size=(64, 64), N=200):
    """
    The dataset has not enough positive images, so we'll increase it by generating new positive
    images by, first, using linear transormation (rotation and reflection) on the
    available positive subset, and then brightness shifts in HSV space.

    For every input image this appends: the original, a rotated centre
    crop, and horizontal flips of both; afterwards every accumulated
    sample is re-emitted with HSV values shifted by +10 and by -10.
    `size` and `N` are accepted for interface symmetry but are unused.
    """
    for image in images:
        # Rotate by a random angle in [-15, 15] degrees (canvas grows to fit).
        rotated = imutils.rotate_bound(image, random.randint(-15,15))
        h, w, channels = rotated.shape
        # Take a 64x64 crop around the centre of the rotated image.
        # NOTE(review): rows indexed with w, columns with h -- harmless for
        # square inputs, but h/w look swapped otherwise; confirm.
        cropped_img = rotated[w//2 - 64//2:w//2 + 64//2, h//2 - 64//2:h//2 + 64//2]
        positiveSample.append(image);
        positiveSample.append(cropped_img)
        positiveSample.append(np.fliplr(image))
        positiveSample.append(np.fliplr(cropped_img))
    supportList = []
    for img in positiveSample:
        supportList.append(img)
    for img in supportList:
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) #convert it to hsv
        # NOTE(review): uint8 arithmetic wraps at 0/255 rather than
        # saturating -- confirm the overflow is acceptable.
        hsv = hsv + 10
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        positiveSample.append(img)
        hsv = hsv - 20
        img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
        positiveSample.append(img)
    return
def showImages(images):
    """
    Helper function to view images generated in the script without having to store
    them on the disk. Use 'd' to go to the next image, 'a' for the previous
    one and 'q' to quit the viewer.
    """
    idx = 0
    while True:
        cv2.imshow("Image", images[idx])
        # NOTE(review): three separate waitKey() calls per iteration mean a
        # key press is consumed by whichever check happens to run when it
        # arrives -- confirm this is the intended responsiveness.
        if cv2.waitKey(15) & 0xFF == ord("d"):
            if idx+1 >= len(images):
                print("This is the last image in the set.")
            else:
                idx += 1
                print("Viewing image no. {0} / {1}".format(idx+1, len(images)))
        if cv2.waitKey(15) & 0xFF == ord("a"):
            if idx-1 < 0:
                print("This is the first image in the set.")
            else:
                idx -= 1
                print("Viewing image no. {0} / {1}".format(idx+1, len(images)))
        if cv2.waitKey(15) & 0xFF == ord("q"):
            break
def computeHOG(images, hogList, size=(64, 64)):
    """
    This function computes the Histogram of Oriented Gradients (HOG) of each
    image from the dataset.
    [Code from Exercise 10 solution. Could be used for a SVM with HOG Features]

    Appends one HOG descriptor per (sufficiently large) image to `hogList`;
    images smaller than `size` in either dimension are silently skipped.
    """
    # Creates a HOG descriptor with custom parameters
    # (only changed the window size from the default settings to function
    # correctly for our 64x64 input images)
    # see (https://stackoverflow.com/questions/28390614/opencv-hogdescripter-python?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa)
    hog = cv2.HOGDescriptor("./inputs/hog.xml")
    # Read all images from the image list.
    for image in images:
        # Image resolution
        h, w = image.shape[:2]
        # Calculate HOG
        if w >= size[0] and h >= size[1]:
            # Region of Interest: the central size[0] x size[1] patch.
            y = (h - size[1]) // 2
            x = (w - size[0]) // 2
            roi = image[y:y + size[1], x:x + size[0]].copy()
            # Compute HOG on the grayscale patch.
            grayscale = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
            hogList.append(hog.compute(grayscale))
    return
def main():
    """
    Build the HOG feature dataset and train an SVM car classifier.

    Loads car / non-car images, augments both sets, computes HOG features,
    persists the dataset (npy + csv + individual patch images) under
    ./outputs, then trains and evaluates an SVM and dumps the weights
    with joblib.
    """
    # Folder where the dataset images are saved.
    folder = "./inputs"
    # Dataset filenames.
    positiveFile = glob("%s/cars/*.png" % folder)
    negativeFile = glob("%s/non-cars/*.png" % folder)
    # Vectors used to train the dataset.
    positiveList = []
    negativeList = []
    negativeSample = []
    positiveSample = []
    labels = []
    # As 3.02. (a) : Load our car images dataset.
    positiveList = loadDataset(positiveFile)
    negativeList = loadDataset(negativeFile)
    print("Initial size of car set: {0} \t\t (dim: {1})".format(len(positiveList), positiveList[0].shape))
    print("Initial size of non-car set: {0} \t\t (dim: {1})".format(len(negativeList), negativeList[0].shape))
    # As 3.02. (b) : Get a sample of negative images. (returns list in negativeSample)
    sampleNegativeImages(negativeList, negativeSample, size=(64,64), N=200)
    # As 3.02. (c) : [EXTRA] increase the car dataset by generating new positive images
    samplePositiveImages(positiveList, positiveSample, size=(64,64), N=200)
    print("Size of non-car sample set: {0} \t (dim: {1})".format(len(negativeSample), negativeSample[0].shape))
    print("Size of car sample set: {0} \t\t (dim: {1})".format(len(positiveSample), positiveSample[0].shape))
    #--------------------------------------------------#
    #                                                  #
    # Classification Model using SVM with HOG Features #
    #                                                  #
    #--------------------------------------------------#
    # Computing the HOG features for each image (positives first).
    hogList = []
    computeHOG(positiveSample, hogList, size=(64,64))
    computeHOG(negativeSample, hogList, size=(64,64))
    hogList = [vec.flatten() for vec in hogList]
    print("Vector Length of one HOG: {}".format(len(hogList[0])))
    # create the labels (1: car, -1: non-car)
    [labels.append(+1) for _ in range(len(positiveSample))]
    [labels.append(-1) for _ in range(len(negativeSample))]
    # Split into a train/test/validation set (70/15/15)
    np_labels = np.array(labels).reshape(len(labels),1)
    np_hogs = np.array(hogList)
    # Features plus label column, persisted for the visualization scripts.
    dataset = np.hstack((np_hogs,np_labels))
    np.save('./outputs/dataset.npy', dataset)
    # store the 2500 images in a separate output folder
    if not os.path.isdir("./outputs/extra_images/"):
        os.makedirs("./outputs/extra_images/")
    file_names = []
    idx = 0
    for image in (positiveSample + negativeSample):
        fname = "./outputs/extra_images/Cars_" + str(idx) + "_Extra.png"
        cv2.imwrite(fname, image)
        file_names.append(fname)
        idx += 1
    print("Done storing the " + str(len(positiveSample+negativeSample)) + " images.")
    # also store as CSV
    df = pd.DataFrame(data={
        'files' : file_names,
        'HOG' : [row for row in dataset[:,:-1]],
        'label' : dataset[:,-1]})
    df.to_csv("./outputs/hog_dataset.csv")
    # 15% held-out test, then 0.1765 of the remainder as validation
    # (0.85 * 0.1765 ~= 0.15 of the whole set).
    X_train, X_test, y_train, y_test = train_test_split(dataset[:,:-1], dataset[:,-1], test_size=0.15, random_state=1)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1765, random_state=1)
    print("sizes of train/validation/test sets: {0}/{1}/{2}".format(X_train.shape[0],X_val.shape[0],X_test.shape[0]))
    kernel = "rbf" # "linear"
    svc = svm.SVC(kernel=kernel, probability=True, class_weight='balanced')
    svc.fit(X_train,y_train)
    # store prediction results on the validation set
    train_pred = svc.predict(X_train)
    val_pred = svc.predict(X_val)
    test_pred = svc.predict(X_test)
    train_acc = sklearn.metrics.accuracy_score(y_train,train_pred)
    val_acc = sklearn.metrics.accuracy_score(y_val, val_pred)
    test_acc = sklearn.metrics.accuracy_score(y_test, test_pred)
    print("Accuracy on the training set: \t\t {number:.{digit}f}".format(number=train_acc, digit=3))
    print("Accuracy on the validation set: \t {number:.{digit}f}".format(number=val_acc, digit=3))
    # confusion matrix on the validation set
    print("Confusion on the validation set.")
    print("1st Col/Row: Non-Cars | 2nd Col/Row: Cars")
    print(sklearn.metrics.confusion_matrix(y_val, val_pred))
    print("\n\nAccuracy on the test set: \t {number:.{digit}f}".format(number=test_acc, digit=3))
    print("Confusion on the test set.")
    print("1st Col/Row: Non-Cars | 2nd Col/Row: Cars")
    print(sklearn.metrics.confusion_matrix(y_test, test_pred))
    joblib.dump(svc, './inputs/svm_model_weights_' + kernel + '.pkl')
#<!--------------------------------------------------------------------------->
#<!-- -->
#<!--------------------------------------------------------------------------->
# put executing code in main, so that the defined functions
# can be imported in a separate script without executing
# the code
if __name__ == "__main__":
main()
|
{"/As301_pca_visualization.py": ["/As301_classifier.py"], "/As301_pca_viz_compare.py": ["/As301_classifier.py"], "/As301_detector.py": ["/As301_classifier.py"]}
|
17,108
|
EmanuelAlogna/IAML-Ass03
|
refs/heads/master
|
/As301_detector.py
|
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhage -->
#<!-- Computer Science Department -->
#<!-- Eye Information Research Group -->
#<!-- Introduction to Image Analysis and Machine Learning Course -->
#<!-- File : As301_detector.py.py -->
#<!-- Description: Script to detect cars using a binary classifier -->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D25 - DK-2300 - Kobenhavn S. -->
#<!-- : narcizo[at]itu[dot]dk -->
#<!-- Responsable: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: No additional information -->
#<!-- Date : 24/04/2018 -->
#<!-- Change : 24/04/2018 - Creation of this script -->
#<!-- Review : 24/04/2018 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2018042401 $"
########################################################################
import cv2
from enum import Enum
import keras
from keras.models import load_model
import numpy as np
from pathlib import Path
from skimage.transform import pyramid_gaussian
from sklearn.externals import joblib
import sys
import time
import As301_classifier
########################################################################
# Input video for the detector; FILENAME is the stem used for output naming.
INPUT_FILEPATH = Path("./inputs/videos/Cars_05.mov")
FILENAME = INPUT_FILEPATH.stem
# Setup Video
RECORD_VIDEO = True
if RECORD_VIDEO:
    print("Recording a video of " + FILENAME + ".mov")
capture = cv2.VideoCapture(str(INPUT_FILEPATH))
# Get the video frame rate.
fps = int(round(capture.get(cv2.CAP_PROP_FPS)))
# Output frames are written at half the input resolution.
w = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH) * 0.5)
h = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT) * 0.5)
# Check if the fps variable has a correct value (fall back to 30 fps).
fps = fps if fps > 0 else 30
frame_count = 0
isColor = True
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
if RECORD_VIDEO:
    record = cv2.VideoWriter("outputs/" + FILENAME + "_record.mov",
                             fourcc, fps, (w, h), isColor)
# Setup Classifiers
class Classifier(Enum):
    # Selector for which pre-trained model detectCars() should use.
    SVM = 0
    CNN = 1
# pre-load all models
CLF_SVM = joblib.load("./inputs/svm_model_weights_rbf.pkl")
CLF_CNN = load_model("./outputs/datamodel30epochs.h5")
########################################################################
# We are going to apply both Image Pyramids and Sliding Windows
# for our car detector and then use our classification model on the
# image patches
########################################################################
# script for sliding window from "Sliding Windows for Object Detection with Python and OpenCV"
# (see https://www.pyimagesearch.com/2015/03/23/sliding-windows-for-object-detection-with-python-and-opencv/)
def sliding_window(image, stepSize=(8,8), windowSize=(64,64)):
    """Generate (x, y, patch) tuples by sliding a window across *image*.

    stepSize is (row step, column step); windowSize is (width, height).
    Patches at the right/bottom border may be smaller than windowSize.
    (Adapted from the PyImageSearch sliding-window tutorial.)
    """
    step_y, step_x = stepSize
    win_w, win_h = windowSize
    rows, cols = image.shape[0], image.shape[1]
    for top in range(0, rows, step_y):
        for left in range(0, cols, step_x):
            # yield the current window position and its pixel data
            yield (left, top, image[top:top + win_h, left:left + win_w])
# function returns a list of rectangles that were detected as cars
def detectCars(frame, model = Classifier.SVM):
    """Run the sliding-window car classifier over an image pyramid of *frame*.

    Parameters
    ----------
    frame : BGR image (numpy array) to search for cars.
    model : Classifier enum member selecting the SVM or CNN model.

    Returns
    -------
    list of (x, y, w, h) rectangles in the coordinate space of *frame*.
    """
    org_height, org_width = frame.shape[:2]

    # Store the frame at several resolutions and start with the lowest one.
    scaled_frames = list(pyramid_gaussian(frame, downscale=1.5, max_layer=2))
    scaled_frames = list(reversed(scaled_frames))

    detected_cars = []   # (x, y, scaling_factor) for every positive window
    rectangles = []

    # loop over every image scale
    for image_scaled in scaled_frames:
        scaled_height, scaled_width = image_scaled.shape[:2]
        # Average of the height and width ratios maps window coordinates
        # back into the original frame.
        SCALING_FACTOR = (org_height / scaled_height + org_width / scaled_width) / 2.0

        # Skip scales smaller than a single 64x64 window.
        if scaled_height < 64 or scaled_width < 64:
            continue

        # Collect only the windows that are fully contained in this scale.
        windows = list(sliding_window(image_scaled))
        windows = [w for w in windows if (w[0] <= scaled_width - 64 and w[1] <= scaled_height - 64)]
        x = [w[0] for w in windows]
        y = [w[1] for w in windows]
        image_window = np.array([w[2] for w in windows])

        # pyramid_gaussian yields floats in [0, 1]; the classifiers were
        # trained on uint8 images in [0, 255].
        image_window = image_window * 255
        image_window = image_window.astype(np.uint8)

        predictions = []
        if model == Classifier.SVM:
            hogList = []
            # Compute the HOG features of all windows in one batch.
            As301_classifier.computeHOG(image_window, hogList, size=(64,64))
            try:
                hog_features = np.array(hogList)
                num_patches, num_hog_features = hog_features.shape[:2]
                hog_features = hog_features.reshape((num_patches, num_hog_features))
                predictions = CLF_SVM.predict(hog_features)
            except IndexError:
                print("Caught an IndexError")
                print((x, y))
                print(hogList)
                sys.exit()
        elif model == Classifier.CNN:
            # TODO: don't forget to scale the input into [0,1] range from [0,255]
            predictions = CLF_CNN.predict_classes(np.array(image_window))
        else:
            raise Exception("Did not specify a valid model.")

        # Remember every window classified as a car together with the
        # factor needed to project it back onto the original frame.
        for idx, pred in enumerate(predictions):
            if pred == 1:
                detected_cars.append((x[idx], y[idx], SCALING_FACTOR))

    # Convert the per-scale detections into rectangles in frame
    # coordinates.  (An unused `res_image = frame.copy()` that the
    # previous revision created here was removed.)
    for (x, y, scale) in detected_cars:
        rectangles.append((
            int(x * scale),
            int(y * scale),
            int(64 * scale),
            int(64 * scale)
        ))
    return rectangles
# uses background substraction to find ROIs for our classifier
def backgroundDetection(frame):
    """Return bounding boxes (x, y, w, h) of moving regions in *frame*.

    Applies the module-level MOG2 background subtractor `fgbg` and keeps
    every contour whose area is at least 500 pixels.
    """
    mask = fgbg.apply(frame)
    # OpenCV 3 findContours signature: (image, contours, hierarchy).
    _, contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST,
                                              cv2.CHAIN_APPROX_SIMPLE)
    boxes = []
    for contour in contours:
        # discard tiny contours — they are mostly noise
        if cv2.contourArea(contour) < 500:
            continue
        boxes.append(cv2.boundingRect(contour))
    return boxes
# returns True if (x1,y1,a1,b1) encloses (x2,y2,a2,b2) with a certain
# (x,y) = coords of the top left corner point
# (a,b) = coords of the bottom right corner point
# margin of pixel allowance
def rectangleOverlap(rect1 = (0,0,0,0), rect2 = (0,0,0,0), margin=0):
    """Return True when *rect2* lies strictly inside *rect1* grown by *margin*.

    Each rectangle is (x, y, a, b): top-left corner (x, y) and bottom-right
    corner (a, b).  All four sides must satisfy the strict inequality.
    """
    left1, top1, right1, bottom1 = rect1
    left2, top2, right2, bottom2 = rect2
    # Guard clauses: bail out as soon as one side pokes outside.
    if left2 <= left1 - margin:
        return False
    if right2 >= right1 + margin:
        return False
    if top2 <= top1 - margin:
        return False
    if bottom2 >= bottom1 + margin:
        return False
    return True
# https://www.pyimagesearch.com/2014/11/17/non-maximum-suppression-object-detection-python/
# (code from https://stackoverflow.com/questions/37847923/combine-overlapping-rectangles-python)
def non_max_suppression_fast(boxes, overlapThresh):
    """Malisiewicz-style non-maximum suppression.

    *boxes* is an ndarray of (x1, y1, x2, y2) rows.  Returns the surviving
    boxes as an int ndarray, or an empty list when no boxes were given.
    (Based on the PyImageSearch NMS article.)
    """
    if len(boxes) == 0:
        return []

    # Work in float: the overlap ratio below needs true division.
    if boxes.dtype.kind == "i":
        boxes = boxes.astype("float")

    left, top, right, bottom = (boxes[:, c] for c in range(4))
    # +1 so degenerate one-pixel boxes still have positive area
    areas = (right - left + 1) * (bottom - top + 1)
    # Process boxes in order of their bottom-right y-coordinate.
    order = np.argsort(bottom)

    keep = []
    while len(order) > 0:
        tail = len(order) - 1
        current = order[tail]
        keep.append(current)
        rest = order[:tail]

        # Intersection of the current box with all remaining boxes.
        inter_l = np.maximum(left[current], left[rest])
        inter_t = np.maximum(top[current], top[rest])
        inter_r = np.minimum(right[current], right[rest])
        inter_b = np.minimum(bottom[current], bottom[rest])
        inter_w = np.maximum(0, inter_r - inter_l + 1)
        inter_h = np.maximum(0, inter_b - inter_t + 1)

        # Overlap ratio relative to each remaining box's own area.
        ratio = (inter_w * inter_h) / areas[rest]

        # Drop the current box plus everything it suppresses.
        suppressed = np.where(ratio > overlapThresh)[0]
        order = np.delete(order, np.concatenate(([tail], suppressed)))

    return boxes[keep].astype("int")
# Create an OpenCV window (only needed when displaying live).
if not RECORD_VIDEO:
    cv2.namedWindow("Video", cv2.WINDOW_AUTOSIZE)

# Background subtractor shared with backgroundDetection().
fgbg = cv2.createBackgroundSubtractorMOG2()

# measure the frame by frame calculation performance
frame_times = []

while True:
    # Capture frame-by-frame.
    retval, frame = capture.read()
    time_diff = time.time()
    # Check if there is a valid frame.
    if not retval:
        break
    # Resize the frame to half size (matches the recorder dimensions).
    scaleX, scaleY = (0.5,0.5)
    frame = cv2.resize(frame, (0, 0), fx=scaleX, fy=scaleY)
    # Motion-based region proposals.
    bgRectangles = backgroundDetection(frame)
    # remove too small / too large rectangles
    bgRectangles = [(x,y,w,h) for (x,y,w,h) in bgRectangles if (w*h > 700 and w*h < 65000)]
    # remove rectangles that aren't square enough (aspect ratio <= 0.2)
    bgRectangles = [(x,y,w,h) for (x,y,w,h) in bgRectangles if ((w/h > 0.2) if h > w else (h/w > 0.2))]
    bg_rect_overlap_free = []
    # remove rectangles enclosed (within a 20 px margin) by another rectangle
    for (x1,y1,w1,h1) in bgRectangles:
        isOverlapping = False
        for (x2,y2,w2,h2) in bgRectangles:
            if (
                (x1,y1,w1,h1) != (x2,y2,w2,h2) and
                rectangleOverlap((x2,y2,x2+w2,y2+h2), (x1,y1,x1+w1,y1+h1), margin=20)
            ):
                isOverlapping = True
                break
        if not isOverlapping:
            bg_rect_overlap_free.append((x1,y1,w1,h1))
    detectedRect = []
    # Run the classifier only inside each proposal (padded by up to 20 px),
    # then shift the local detections back into frame coordinates. (x,y,w,h)
    for (x,y,w,h) in bg_rect_overlap_free:
        PIXEL_BOUND = 20
        # clamp the padding so the slice never leaves the frame
        bound_x = PIXEL_BOUND if x >= PIXEL_BOUND else x
        bound_y = PIXEL_BOUND if y >= PIXEL_BOUND else y
        # cv2.rectangle(frame, (x-bound_x, y-bound_y), (x+w+bound_x, y+h+bound_y), (0,0,255), 2 )
        detections = detectCars(frame[y-bound_y:y+h+bound_y,x-bound_x:x+w+bound_x,:], model=Classifier.SVM)
        detections = [(x1+(x-bound_x),y1+(y-bound_y),w1,h1) for (x1,y1,w1,h1) in detections]
        detectedRect += detections
    # print("Detections before overlap {0}".format(len(detectedRect)))
    # convert from (x,y,w,h) to (x1,y1,x2,y2) for non-maximum suppression
    detectedRect = np.array([(x,y,x+w,y+h) for (x,y,w,h) in detectedRect])
    detectedRect = non_max_suppression_fast(detectedRect, 0.1)
    # print("Detections after overlap {0}".format(len(detectedRect)))
    for (x1,y1,x2,y2) in detectedRect:
        cv2.rectangle(frame,(x1,y1),(x2,y2), (0,255,0), 2)
    # (older duplicate-suppression pass, kept for reference)
    # detect_count = 0
    # for (x1,y1,w1,h1) in detectedRect:
    #     isOverlapping = False
    #     for (x2,y2,w2,h2) in detectedRect:
    #         if (
    #             (x1,y1,w1,h1) != (x2,y2,w2,h2) and
    #             rectangleOverlap((x2,y2,x2+w2,y2+h2), (x1,y1,x1+w1,y1+h1), margin=10)
    #         ):
    #             isOverlapping = True
    #             break
    #     if not isOverlapping:
    #         detect_count += 1
    #         cv2.rectangle(frame,(x1,y1),(x1+w1,y1+h1),(0,255,0),2)
    # print("Detections after overlap {0}".format(detect_count))
    time_diff = time.time() - time_diff
    frame_times.append(time_diff)
    # Display the resulting frame (or append it to the output video).
    if RECORD_VIDEO == True:
        record.write(frame)
        if frame_count % 30 == 0:
            print("Processed {0} frames \t({1} seconds of video)".format(frame_count, frame_count//30))
            print("Average processing time for one frame {0}".format(str(np.mean(np.array(frame_times)))))
    else:
        cv2.imshow("Video", frame)
        if cv2.waitKey(fps) & 0xFF == ord("q"):
            break
    frame_count += 1
#<!--------------------------------------------------------------------------->
#<!--                                                                       -->
#<!--------------------------------------------------------------------------->
# When everything is done, release the capture and record objects.
if RECORD_VIDEO:
    record.release()
capture.release()
cv2.destroyAllWindows()
|
{"/As301_pca_visualization.py": ["/As301_classifier.py"], "/As301_pca_viz_compare.py": ["/As301_classifier.py"], "/As301_detector.py": ["/As301_classifier.py"]}
|
17,109
|
EmanuelAlogna/IAML-Ass03
|
refs/heads/master
|
/Ex10TemplateCode/Ex1001_svm_training.py
|
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhage -->
#<!-- Computer Science Department -->
#<!-- Eye Information Research Group -->
#<!-- Introduction to Image Analysis and Machine Learning Course -->
#<!-- File : Ex1001_svm_training.py -->
#<!-- Description: Script to train a pedestrian detector based on SVM -->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D25 - DK-2300 - Kobenhavn S. -->
#<!-- : narcizo[at]itu[dot]dk -->
#<!-- Responsable: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: No additional information -->
#<!-- Date : 29/03/2018 -->
#<!-- Change : 29/03/2018 - Creation of this script -->
#<!-- Review : 29/03/2018 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2018032901 $"
########################################################################
import cv2
import numpy as np
import random
import sys
import os
cwd = os.getcwd() + "\\"
########################################################################
def loadDataset(dataset):
    """
    This function load all images from a dataset and return a list of Numpy images.

    *dataset* is the path of a .lst file with one image path per line;
    unreadable entries are reported and skipped.
    """
    # List of images.
    images = []
    # Read the dataset file (closed automatically — the previous revision
    # leaked the file handle).
    with open(dataset) as file:
        filename = file.readline()
        # Read all filenames from the dataset.
        while (filename != ""):
            # Resolve the listed path relative to the dataset's folder.
            filename = (dataset.rsplit("/", 1)[0] + "/" +
                        filename.split("/", 1)[1].strip("\n"))
            # Read the input image.
            # FIX: this previously referenced the undefined variable
            # `full_path`, raising a NameError on the first iteration.
            image = cv2.imread(filename)
            if image is None:
                print("Could not read the file: {}".format(filename))
            # Read the next image filename.
            filename = file.readline()
            # Add the current image on the list.
            if image is not None:
                images.append(image)
    # Return the images list.
    return images
def getSVMDetector(svm):
    """
    Build the HOG detector vector from a trained OpenCV SVM: the transposed
    support vectors with -rho appended as the final element.
    """
    # Transposed support vectors form the weight column.
    support_vectors = np.transpose(svm.getSupportVectors())
    # First element of the decision function is the offset rho.
    rho = svm.getDecisionFunction(0)[0]
    # Append -rho so the vector plugs into hog.setSVMDetector().
    return np.append(support_vectors, [[-rho]], 0)
def sampleNegativeImages(images, negativeSample, size=(64, 128), N=10):
    """
    Randomly crop N sub-images of *size* (width, height) out of every
    negative image and append them to *negativeSample* (also returned).

    The RNG is reseeded with a fixed seed so the sampling is reproducible
    across runs (INRIA negative images contain no pedestrians).
    """
    # Initialize internal state of the random number generator.
    random.seed(1)
    crop_w, crop_h = size
    for source in images:
        for _ in range(N):
            # random.random() is in [0, 1), so the crop stays inside
            # the image bounds.
            top = int(random.random() * (len(source) - crop_h))
            left = int(random.random() * (len(source[0]) - crop_w))
            negativeSample.append(source[top:top + crop_h, left:left + crop_w].copy())
    return negativeSample
def computeHOG(images, hogList, size=(64, 128)):
    """
    Append the HOG descriptor of the centered (width, height) crop of each
    image to *hogList*; images smaller than *size* are skipped.
    Returns *hogList*.
    """
    # Creates the HOG descriptor and detector with default parameters.
    hog = cv2.HOGDescriptor()
    target_w, target_h = size
    for img in images:
        rows, cols = img.shape[:2]
        # Skip images too small to hold one descriptor window.
        if cols < target_w or rows < target_h:
            continue
        # Centered region of interest.
        top = (rows - target_h) // 2
        left = (cols - target_w) // 2
        roi = img[top:top + target_h, left:left + target_w].copy()
        # HOG is computed on the grayscale crop.
        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        hogList.append(hog.compute(gray))
    return hogList
# Dataset filenames.
positiveFile = "./inputs/train/pos.lst"
negativeFile = "./inputs/train/neg.lst"

# Vectors used to train the dataset.
hogList = []
positiveList = []
negativeList = []
negativeSample = []
hardNegativeList = []
labels = []

# Load the INRIA dataset.
positiveList = loadDataset(positiveFile)
negativeList = loadDataset(negativeFile)
print(str(len(positiveList)))
print(str(len(negativeList)))

# Get a sample of negative images.
sampleNegativeImages(negativeList, negativeSample)

# Compute the Histogram of Oriented Gradients (HOG).
computeHOG(positiveList, hogList)
computeHOG(negativeSample, hogList)
if len(hogList) == 0:
    # Nothing to train on — abort cleanly.
    sys.exit(0)

# Create the class labels, i.e. (+1) positive and (-1) negative.
# (extend() replaces the earlier side-effecting list comprehensions)
labels.extend([+1] * len(positiveList))
labels.extend([-1] * len(negativeSample))

# Create an empty SVM model.
svm = cv2.ml.SVM_create()

# Define the SVM parameters.
# By default, Dalal and Triggs (2005) use a soft (C=0.01) linear SVM trained with SVMLight.
svm.setDegree(3)
criteria = (cv2.TERM_CRITERIA_MAX_ITER + cv2.TERM_CRITERIA_EPS, 1000, 1e-3)
svm.setTermCriteria(criteria)
svm.setGamma(0)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setNu(0.5)
svm.setP(0.1)
svm.setC(0.01)
svm.setType(cv2.ml.SVM_EPS_SVR)

# First training round on the positive and sampled negative images.
svm.train(np.array(hogList), cv2.ml.ROW_SAMPLE, np.array(labels))

# Create the HOG descriptor and detector with default params.
hog = cv2.HOGDescriptor()
hog.setSVMDetector(getSVMDetector(svm))

# Add the wrong identification sample for the second round of training
# (hard examples)
for image in negativeList:
    # Detects objects of different sizes in the input image
    rectangles, _ = hog.detectMultiScale(image)
    # Every detection on a negative image is a false positive; keep its ROI.
    for (x, y, w, h) in rectangles:
        roi = image[y:y + h, x:x + w]
        hardNegativeList.append(cv2.resize(roi, (64, 128)))

# Compute the Histogram of Oriented Gradients (HOG) of the hard negatives.
# FIX: this previously called the undefined name `computeHog` (wrong case),
# which raised a NameError before the second training round could run.
computeHOG(hardNegativeList, hogList)

# Update the class labels, i.e. (-1) hard negative.
# (these are added to the list with pos. and neg. images)
labels.extend([-1] * len(hardNegativeList))

# Train the SVM based on HOG Features (second round, with hard negatives).
svm.train(np.array(hogList), cv2.ml.ROW_SAMPLE, np.array(labels))

# Save the HOG feature.
feature = getSVMDetector(svm)
np.save("./outputs/feature.npy", feature)
print("stored SVM weights")
|
{"/As301_pca_visualization.py": ["/As301_classifier.py"], "/As301_pca_viz_compare.py": ["/As301_classifier.py"], "/As301_detector.py": ["/As301_classifier.py"]}
|
17,110
|
EmanuelAlogna/IAML-Ass03
|
refs/heads/master
|
/Ex10TemplateCode/Ex1002_pedestrian_detector.py
|
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhage -->
#<!-- Computer Science Department -->
#<!-- Eye Information Research Group -->
#<!-- Introduction to Image Analysis and Machine Learning Course -->
#<!-- File : Ex1002_pedestrian_detector.py -->
#<!-- Description: Script to detect pedestrians using HOG and SVM -->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D25 - DK-2300 - Kobenhavn S. -->
#<!-- : narcizo[at]itu[dot]dk -->
#<!-- Responsable: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: No additional information -->
#<!-- Date : 29/03/2018 -->
#<!-- Change : 29/03/2018 - Creation of this script -->
#<!-- Review : 29/03/2018 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2018032901 $"
########################################################################
import cv2
import numpy as np
########################################################################
def loadDataset(dataset):
    """
    This function load all images from a dataset and return a list of Numpy images.

    *dataset* is a .lst file with one image path per line; unreadable
    images are skipped silently.
    """
    # List of images.
    images = []
    # Read the dataset file (closed automatically — the previous revision
    # never closed the file handle).
    with open(dataset) as file:
        filename = file.readline()
        # Read all filenames from the dataset.
        while (filename != ""):
            # Resolve the listed path relative to the dataset's folder.
            filename = (dataset.rsplit("/", 1)[0] + "/" +
                        filename.split("/", 1)[1].strip("\n"))
            # Read the input image.
            image = cv2.imread(filename)
            # Read the next image filename.
            filename = file.readline()
            # Add the current image on the list.
            if image is not None:
                images.append(image)
    # Return the images list.
    return images
# Create the HOG descriptor and import the HOG feature.
feature = np.load("./outputs/feature.npy")
hog = cv2.HOGDescriptor()
hog.setSVMDetector(feature)

# Dataset filename.
positiveFile = "./inputs/Test/pos.lst"

# Load the INRIA dataset.
positiveList = loadDataset(positiveFile)

# --- ADD CODE HERE TO READ A VIDEO INTO IMAGES ---
# NOTE(review): `image` below is undefined until the template section above
# is filled in — this script raises a NameError as-is.

# Detects objects of different sizes in the input image
rectangles, _ = hog.detectMultiScale(image)

# Draw the detected pedestrians
for (x, y, w, h) in rectangles:
    cv2.rectangle(image, (x,y), (x + w, y + h), (0,0,255))

# --- ADD CODE TO DISPLAY THE RESULT IMAGE ---
|
{"/As301_pca_visualization.py": ["/As301_classifier.py"], "/As301_pca_viz_compare.py": ["/As301_classifier.py"], "/As301_detector.py": ["/As301_classifier.py"]}
|
17,111
|
EmanuelAlogna/IAML-Ass03
|
refs/heads/master
|
/warmUpAnnotations.py
|
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhage -->
#<!-- Computer Science Department -->
#<!-- Eye Information Research Group -->
#<!-- Introduction to Image Analysis and Machine Learning Course -->
#<!-- File : warmUpAnnotations.py -->
#<!-- Description: Example of code for select regions of interest in images -->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D25 - DK-2300 - Kobenhavn S. -->
#<!-- : narcizo[at]itu[dot]dk -->
#<!-- Responsable: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: You DO NOT need to change this file -->
#<!-- Date : 24/04/2018 -->
#<!-- Change : 24/04/2018 - Creation of this script -->
#<!-- Review : 24/04/2018 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = "$Revision: 2018042401 $"
###############################################################################
import cv2
import time
import os
###############################################################################
# Global variables.
points = []
cropping = False
def saveImage(image):
    """Save a new positive image in the dataset.

    Probes Cars_001.png, Cars_002.png, ... until an unused filename is
    found, so existing samples are never overwritten.
    """
    # Folder where the image will be saved.
    folder = "./inputs/cars/"
    index = 0
    while True:
        candidate = "%sCars_%03d.png" % (folder, index + 1)
        if os.path.isfile(candidate):
            # Name taken — try the next image ID.
            index += 1
            continue
        cv2.imwrite(candidate, image)
        print("Saved: %s" % candidate)
        break
def selectROI(event, x, y, flags, param):
    """Select a region of interest using the mouse.

    OpenCV mouse callback: a left-button drag defines a square ROI on the
    current `frame`; pressing "s" afterwards saves the 64x64 crop via
    saveImage().  Relies on the module-level `points`, `cropping` and
    `frame`.
    """
    # Global variables.
    global points, cropping
    # Event for the left mouse button click: start a new selection.
    if event == cv2.EVENT_LBUTTONDOWN:
        points = [(x, y)]
        cropping = True
    # Event for the left mouse button release: finish the selection.
    elif event == cv2.EVENT_LBUTTONUP:
        # Check the upper left corner.
        if len(points) == 0:
            return
        # Calculate the width and height of the bounding box.
        width = int(x) - points[0][0]
        height = int(y) - points[0][1]
        # Force a square aspect ratio by growing the smaller side.
        if width < height:
            width = int(height)
        else:
            height = int(width)
        # Record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished.
        points.append((points[0][0] + width, points[0][1] + height))
        cropping = False
        # Draw a rectangle around the region of interest.
        image = frame.copy()
        cv2.rectangle(image, points[0], points[1], (0, 255, 0), 2)
        cv2.imshow("Video", image)
        # Press the key "S" on your keyboard to save the selected ROI.
        key = cv2.waitKey(0)
        if key == ord("s"):
            roi = frame[points[0][1]:points[1][1], points[0][0]:points[1][0]]
            roi = cv2.resize(roi, (64, 64))
            saveImage(roi)
# Defines the filepath.
filepath = "./inputs/videos/Cars_05.mov"

# Create a capture video object.
capture = cv2.VideoCapture(filepath)

# Get the video frame rate.
fps = int(round(capture.get(cv2.CAP_PROP_FPS)))

# Check if the fps variable has a correct value (fall back to 30 fps).
fps = fps if fps > 0 else 30

# Create an OpenCV window.
cv2.namedWindow("Video", cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback("Video", selectROI)

# Press the space bar on your keyboard to pause the video
# (the original comment said "P", but the code checks key code 32 = space).
isPaused = False

# This repetition will run while there is a new frame in the video file or
# while the user does not press the "q" (quit) keyboard button.
while True:
    # Capture frame-by-frame.
    if not isPaused:
        retval, frame = capture.read()
    # Check if there is a valid frame.
    if not retval:
        break
    # Display the resulting frame.
    cv2.imshow("Video", frame)
    # Check the keyboard events.
    key = cv2.waitKey(fps)
    if key == ord("q"):
        break
    elif key == 32:
        # Space bar toggles pause.
        isPaused = not isPaused

# When everything done, release the capture and record objects.
capture.release()
cv2.destroyAllWindows()
|
{"/As301_pca_visualization.py": ["/As301_classifier.py"], "/As301_pca_viz_compare.py": ["/As301_classifier.py"], "/As301_detector.py": ["/As301_classifier.py"]}
|
17,120
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/resolvers.py
|
import logging
from app.logic.helpers import *
from app.logic.active_learning import *
from app.logic.classify import *
from app.logic.feature_selection import *
from app.logic.model_selection import *
from app.logic.train import *
logger = logging.getLogger(__name__)

# GraphQL resolver map.  Every entry is a thin lambda that forwards the
# query arguments to the matching function star-imported from app.logic.*.
# The long tail of constant resolvers simply returns enum-name strings.
resolvers = {
    'Query': {
        #'train': lambda value, info, **args: train(args['trainingTask']),
        'train_batch': lambda value, info, **args: train_batch(args['candidates'], args['training_data'],
            args['params'], args['model_id']),
        'train_from_local_data': lambda value, info, **args: train_from_local_data(args['candidates'],
            args['schema'], args['training_data_file_name'], args['params'], args['model_id']),
        'get_training_results': lambda value, info, **args: get_training_results(args['model_id']),
        'all_training_results': lambda value, info, **args: all_training_results(),
        'delete_training_results': lambda value, info, **args: delete_training_results(args['model_id']),
        #'modelSelection': lambda value, info, **args: model_selection(args['models'], args['modelSel']),
        'classify': lambda value, info, **args: classify(args['cachedModelID'], args['data']),
        'loadModelSelectionResults': lambda value, info, **args: loadModelSelectionResults(args['obj']),
        'modelSelectionResultsToObject': lambda value, info, **args: modelSelectionResultsToObject(args['savedId'], args['msr']),
        'defaultModelConfiguration': lambda value, info, **args: defaultModelConfiguration(),
        'defaultModelSelection': lambda value, info, **args: defaultModelSelection(),
        'defaultFeatures': lambda value, info, **args: defaultFeatures(args['dataset']),
        'defaultFeaturizers': lambda value, info, **args: defaultFeaturizers(args['features']),
        'defaultCandidate': lambda value, info, **args: defaultCandidate(args['dataset']),
        'createModelConfiguration': lambda value, info, **args: createModelConfiguration(
            args['type'], args['weighting'], args['tokenizer'], args['ngrams'], args['tf'],
            args['df'], args['penalty'], args['multiclass'], args['solver'], args['primal_dual'],
            args['fitIntercept'], args['max_df'], args['min_df'], args['stopwords'], args['C'], args['max_iter']
        ),
        'createModelSelection': lambda value, info, **args: createModelSelection(
            args['metric'], args['method'], args['evalMode'], args['numFolds']
        ),
        'createCandidate': lambda value, info, **args: createCandidate(args['features'], args['featurizers'], args['config']),
        'addCandidate': lambda value, info, **args: addToList(args['addThis'], args.get('toThis', None)),
        #'createTrainingTasks': lambda value, info, **args: createTrainingTasks(args['candidates'],
        #    args['training_data'], args['params']),
        'mergeDatasets': lambda value, info, **args: mergeDatasets(args['datasets']),
        'mergeLabeledDatasets': lambda value, info, **args: mergeLabeledDatasets(args['datasets']),
        'addFeaturizer': lambda value, info, **args: addToList(args['addThis'], args.get('toThis', None)),
        'subsetFeatures': lambda value, info, **args: subsetFeatures(args['dataset'], args['selectedFeatures']),
        'topNCorrelatedFeatures': lambda value, info, **args: top_correlated_features(args['dataset'], args['config'], args['topN']),
        'topNPctCorrelatedFeatures': lambda value, info, **args: top_pct_correlated_features(args['dataset'], args['config'], args['pct']),
        'topNRFEFeatures': lambda value, info, **args: top_rfe_features(args['dataset'], args['config'], args['topN']),
        'topNPctRFEFeatures': lambda value, info, **args: top_pct_rfe_features(args['dataset'], args['config'], args['pct']),
        # Constant resolvers: feature types.
        'numericalFeatureType': lambda value, info, **args: 'NUMERICAL',
        'categoricalFeatureType': lambda value, info, **args: 'CATEGORICAL',
        'textFeatureType': lambda value, info, **args: 'TEXT',
        'setFeatureType': lambda value, info, **args: 'SET',
        'booleanFeatureType': lambda value, info, **args: 'BOOLEAN',
        'labelFeatureType': lambda value, info, **args: 'LABEL',
        # Constant resolvers: featurizer types.
        'noopFeaturizerType': lambda value, info, **args: 'NOOP',
        'minMaxScalerFeaturizerType': lambda value, info, **args: 'MIN_MAX_SCALER',
        'labelBinarizerFeaturizerType': lambda value, info, **args: 'LABEL_BINARIZER',
        'tfidfVectorizerFeaturizerType': lambda value, info, **args: 'TFIDF_VECTORIZER',
        'multilabelBinarizerFeaturizerType': lambda value, info, **args: 'MULTILABEL_BINARIZER',
        'labelEncoderFeaturizerType': lambda value, info, **args: 'LABEL',
        'textToVectorFeaturizerType': lambda value, info, **args: 'TEXT_TO_VECTOR',
        'correlationFeatureSelectionMode': lambda value, info, **args: 'CORRELATION',
        'rfeFeatureSelectionMode': lambda value, info, **args: 'RFE',
        # Constant resolvers: model/training options.
        'logisticRegressionModelType': lambda value, info, **args: 'LOGISTIC_REGRESSION',
        'linearSVCModelType': lambda value, info, **args: 'LINEAR_SVC',
        'noneClassWeightingType': lambda value, info, **args: 'NONE',
        'balancedClassWeightingType': lambda value, info, **args: 'BALANCED',
        'unigramNGramType': lambda value, info, **args: 'UNIGRAM',
        'bigramNGramType': lambda value, info, **args: 'BIGRAM',
        'bothNGramType': lambda value, info, **args: 'BOTH',
        'linearTermFreqType': lambda value, info, **args: 'LINEAR',
        'sublinearTermFreqType': lambda value, info, **args: 'SUBLINEAR',
        'defaultDocumentFreqType': lambda value, info, **args: 'DEFAULT',
        'smoothDocumentFreqType': lambda value, info, **args: 'SMOOTH',
        'l1PenaltyType': lambda value, info, **args: 'L1',
        'l2PenaltyType': lambda value, info, **args: 'L2',
        'ovrMultiClassType': lambda value, info, **args: 'OVR',
        'multinomialMultiClassType': lambda value, info, **args: 'MULTINOMIAL',
        'autoMultiClassType': lambda value, info, **args: 'AUTO',
        'liblinearSolverType': lambda value, info, **args: 'LIBLINEAR',
        'newtonCGSolverType': lambda value, info, **args: 'NEWTON_CG',
        'lbfgsSolverType': lambda value, info, **args: 'LBFGS',
        'sagSolverType': lambda value, info, **args: 'SAG',
        'sagaSolverType': lambda value, info, **args: 'SAGA',
        'primalMode': lambda value, info, **args: 'PRIMAL',
        'dualMode': lambda value, info, **args: 'DUAL',
        'wordTokenizerType': lambda value, info, **args: 'WORD_TOKENIZER',
        'stemmerTokenizerType': lambda value, info, **args: 'STEMMER',
        'lemmatizerTokenizerType': lambda value, info, **args: 'LEMMATIZER',
        # Constant resolvers: model selection options.
        'precisionModelSelectionMetric': lambda value, info, **args: 'PRECISION',
        'recallModelSelectionMetric': lambda value, info, **args: 'RECALL',
        'f1ModelSelectionMetric': lambda value, info, **args: 'F1',
        'bestModelModelSelectionMethod': lambda value, info, **args: 'BEST',
        'kneePointModelSelectionMethod': lambda value, info, **args: 'KNEE_POINT',
        'oneStdevModelSelectionMethod': lambda value, info, **args: 'ONE_STDEV',
        'twoStdevModelSelectionMethod': lambda value, info, **args: 'TWO_STDEV',
        'looModelEvaluationType': lambda value, info, **args: 'LEAVE_ONE_OUT',
        'kFoldsModelEvaluationType': lambda value, info, **args: 'K_FOLDS',
        'noneStopWordType': lambda value, info, **args: 'NONE',
        'englishStopWordType': lambda value, info, **args: 'ENGLISH',
    },
    'Object': {
    },
    'Mutation': {
    },
    'Scalar': {
    },
}
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,121
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/logic/helpers.py
|
import pandas as pd
from app.core.main.Classifier import Classifier
from app.core.main.tokenizer.BaseTokenizer import BaseTokenizer
from app.core.main.tokenizer.PorterTokenizer import PorterTokenizer
from app.core.main.tokenizer.LemmaTokenizer import LemmaTokenizer
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
import uuid
def id():
    """Return a fresh random UUID4 as a string.

    NOTE(review): this shadows the builtin ``id`` within this module; the
    name is kept because it is the public interface used by callers.
    """
    return "{}".format(uuid.uuid4())
def extract(datasource, fields):
    """Merge the sub-mappings of *datasource* selected by *fields* into one dict.

    On key collisions, later fields overwrite earlier ones.
    """
    merged = {}
    for field_name in fields:
        merged.update(datasource[field_name])
    return merged
def getDataFieldName(feature_type):
    """Map a feature type name to the data-row field that stores its value.

    NUMERICAL/SET use their lowercased name, BOOLEAN values live in the
    'numerical' field, and everything else (TEXT, CATEGORICAL, ...) falls
    back to the default 'text' field.
    """
    if feature_type in ('NUMERICAL', 'SET'):
        return feature_type.lower()
    # FIX: `feature_type in ('BOOLEAN')` tested substring membership in the
    # *string* 'BOOLEAN' (parentheses without a comma are not a tuple), so
    # inputs like 'LEA' or 'BOOL' wrongly mapped to 'numerical'.  Compare
    # for exact equality instead.
    if feature_type == 'BOOLEAN':
        return 'numerical'
    return 'text'  # default field name
def featureDataToSeries(fd):
    """Convert one feature-data record into a pandas Series named after the feature."""
    feature = fd['feature']
    value_field = getDataFieldName(feature['type'])
    values = [record[value_field] for record in fd['data']]
    return pd.Series(values, name=feature['name'])
def datasetToDataframe(ds):
    """Build a DataFrame from a dataset dict, one column per feature.

    For labeled datasets the features live under ds['data'] and the label
    column is appended last.
    """
    feature_list = ds['features'] if 'features' in ds else ds['data']['features']
    frame = pd.DataFrame()
    for feature_record in feature_list:
        frame = pd.concat([frame, featureDataToSeries(feature_record)], axis=1)
    if 'label' in ds:
        frame = pd.concat([frame, featureDataToSeries(ds['label'])], axis=1)
    return frame
def inferFeatureType(series):
    """Infer the feature type of a pandas Series.

    Returns one of 'NUMERICAL', 'SET', 'BOOLEAN' or 'TEXT'.  Numeric dtypes
    are NUMERICAL; otherwise the first value is inspected (assumes the series
    is non-empty and homogeneous).
    """
    if series.dtype in ('int64', 'float64'):
        return 'NUMERICAL'
    val = series.values[0]
    if type(val) in (list, set, tuple):
        return 'SET'
    val = val.lower()
    # BUG FIX: the original tested `'isdigit' in val` (substring membership),
    # which is False for any digit string, so numeric-looking text was never
    # detected as NUMERICAL. Call str.isdigit() directly.
    if val.isdigit():
        return 'NUMERICAL'
    if val in ('true', 'false'):
        return 'BOOLEAN'
    return 'TEXT'
def dataframeToDataset(df):
    """Serialize a DataFrame into the dataset dict structure.

    Each column becomes a feature record whose data rows are stored under the
    field appropriate for the inferred feature type.
    """
    feature_records = []
    for position, column in enumerate(df.columns):
        ftype = inferFeatureType(df[column])
        value_field = getDataFieldName(ftype)
        rows = [{'id': id(), value_field: value} for value in df[column].values]
        feature_records.append({
            'id': id(),
            'feature': {
                'id': id(),
                'index': position,
                'name': column,
                'type': ftype
            },
            'data': rows
        })
    return {
        'id': id(),
        'features': feature_records
    }
def schema(dataset):
    """Return the list of feature type names; labeled datasets end with 'LABEL'."""
    if 'label' not in dataset:
        return [f['feature']['type'] for f in dataset['features']]
    types = [f['feature']['type'] for f in dataset['data']['features']]
    types.append('LABEL')
    return types
def defaultModelConfiguration():
    """Default model configuration: balanced logistic regression over
    unigram sublinear-TF / smooth-IDF features."""
    config = {
        'id': id(),
        'type': 'LOGISTIC_REGRESSION',
        'weighting': 'BALANCED',
        'tokenizer': 'WORD_TOKENIZER',
        'ngrams': 'UNIGRAM',
        'tf': 'SUBLINEAR',
        'df': 'SMOOTH',
        'penalty': 'L2',
        'multiclass': 'OVR',
        'solver': 'LIBLINEAR',
        'primal_dual': 'PRIMAL',
        'fitIntercept': True,
        'max_df': 1.,
        'min_df': 0.0,
        'stopwords': 'ENGLISH',
        'C': 1.,
        'max_iter': 2,
    }
    return config
def defaultModelSelection():
    """Default model-selection parameters: pick the best model by precision
    under 3-fold cross validation."""
    selection = {
        'id': id(),
        'metric': 'PRECISION',
        'method': 'BEST',
        'evalMode': 'K_FOLDS',
        'numFolds': 3,
    }
    return selection
def defaultFeaturizers(features):
    """Pick the default featurizer type for each feature's type.

    Feature types with no known featurizer are skipped, so the result may be
    shorter than the input list.
    """
    default_by_type = {
        'NUMERICAL': 'NOOP',
        'BOOLEAN': 'NOOP',
        'CATEGORICAL': 'LABEL_BINARIZER',
        'TEXT': 'TFIDF_VECTORIZER',
        'SET': 'MULTILABEL_BINARIZER',
        'LABEL': 'LABEL',
        'LABEL_ENCODER': 'LABEL_ENCODER',
        'TEXT2VEC': 'TEXT_TO_VECTOR',
        'TEXT_TO_VECTOR': 'TEXT_TO_VECTOR',
    }
    featurizer_types = []
    for feature in features:
        featurizer = default_by_type.get(feature['type'])
        if featurizer is not None:
            featurizer_types.append(featurizer)
    return featurizer_types
def defaultCandidate(labeled_dataset):
    """Build the single default training candidate for a labeled dataset."""
    feats = defaultFeatures(labeled_dataset)
    candidate = {
        'id': id(),
        'features': feats,
        'featurizers': defaultFeaturizers(features=feats),
        'config': defaultModelConfiguration(),
    }
    return [candidate]
def createCandidate(features, featurizers, config):
    """Assemble a training candidate from its parts, tagged with a fresh id."""
    candidate = {'id': id()}
    candidate['features'] = features
    candidate['featurizers'] = featurizers
    candidate['config'] = config
    return candidate
def createTrainingTasks(candidates, training_data, params):
    """Create one training task per candidate, sharing the same data and
    model-selection parameters."""
    tasks = []
    for cand in candidates:
        tasks.append({
            'id': id(),
            'candidate': cand,
            'data': training_data,
            'modelSelectionParams': params,
        })
    return tasks
def createFeaturizer(featureType, featurizerType):
    """Pair a feature type with its featurizer type under a fresh id."""
    record = {'id': id()}
    record['featureType'] = featureType
    record['featurizer'] = featurizerType
    return record
def createModelConfiguration(
    type, weighting, tokenizer, ngrams, tf,
    df, penalty, multiclass, solver, primal_dual,
    fitIntercept, max_df, min_df, stopwords, C=1, max_iter=2
):
    """Build a model-configuration dict from the given hyperparameters.

    NOTE: the parameter names `type`, `tf` and `df` shadow common names but
    are kept for interface compatibility with existing callers.
    """
    config = {'id': id()}
    config['type'] = type
    config['weighting'] = weighting
    config['tokenizer'] = tokenizer
    config['ngrams'] = ngrams
    config['tf'] = tf
    config['df'] = df
    config['penalty'] = penalty
    config['multiclass'] = multiclass
    config['solver'] = solver
    config['primal_dual'] = primal_dual
    config['fitIntercept'] = fitIntercept
    config['max_df'] = max_df
    config['min_df'] = min_df
    config['stopwords'] = stopwords
    config['C'] = C
    config['max_iter'] = max_iter
    return config
def createModelSelection(metric, method, evalMode, numFolds):
    """Build a model-selection parameter dict, tagged with a fresh id."""
    selection = {'id': id()}
    selection['metric'] = metric
    selection['method'] = method
    selection['evalMode'] = evalMode
    selection['numFolds'] = numFolds
    return selection
def defaultFeatures(dataset):
    """List a dataset's feature descriptors; the label descriptor comes last
    when the dataset is labeled."""
    if 'label' in dataset:
        records = list(dataset['data']['features'])
        records.append(dataset['label'])
    else:
        records = dataset['features']
    return [record['feature'] for record in records]
def subsetFeatures(dataset, selectedFeatures):
    """Return the dataset's feature descriptors at the selected indices."""
    all_feats = defaultFeatures(dataset)
    return [all_feats[idx] for idx in selectedFeatures]
def create_classifier(config):
    """Translate an API-level model-config dict into a Classifier instance.

    Enum-style string values are mapped to the concrete objects/values the
    Classifier expects (tokenizer instances, ngram ranges, stopword lists).
    """
    weighting = config['weighting'].lower()
    class_weight = None if weighting == 'none' else weighting

    tokenizer_kind = config['tokenizer']
    if tokenizer_kind == 'WORD_TOKENIZER':
        tokenizer = BaseTokenizer()
    elif tokenizer_kind == 'STEMMER':
        tokenizer = PorterTokenizer()
    elif tokenizer_kind == 'LEMMATIZER':
        tokenizer = LemmaTokenizer()
    else:
        tokenizer = None

    ngrams = config['ngrams']
    if ngrams == 'UNIGRAM':
        ngram_range = (1, 1)
    elif ngrams == 'BIGRAM':
        ngram_range = (2, 2)
    elif ngrams == 'BOTH':
        ngram_range = (1, 2)
    else:
        ngram_range = None

    stopwords = ENGLISH_STOP_WORDS if config['stopwords'] == 'ENGLISH' else []

    return Classifier(model_configuration={
        'id': id(),
        'type': config['type'],
        'class_weight': class_weight,
        'tokenizer': tokenizer,
        'ngram_range': ngram_range,
        'sublinear_tf': config['tf'] == 'SUBLINEAR',
        'smooth_idf': config['df'] == 'SMOOTH',
        'penalty': config['penalty'].lower(),
        'multi_class': config['multiclass'].lower(),
        'solver': config['solver'].lower(),
        'dual': config['primal_dual'] == 'DUAL',
        'fit_intercept': config['fitIntercept'],
        'max_df': config['max_df'],
        'min_df': config['min_df'],
        'stopwords': stopwords,
        'C': config['C'],
        'max_iter': config['max_iter']
    })
def addToList(addThis, toThis):
    """Append *addThis* to *toThis* in place and return it, creating a new
    single-element list when *toThis* is None."""
    if toThis is not None:
        toThis.append(addThis)
        return toThis
    return [addThis]
def mergeDatasets(datasets):
    """Merge several datasets into one by concatenating feature data.

    Features are matched by name because feature ids are randomized.  The
    merged dataset carries the id of the last dataset whose first feature has
    data (or the first dataset when all are empty).

    Returns a placeholder dataset (id -1, no features) when *datasets* is
    empty.
    """
    if not datasets:
        return {
            'id': -1,
            'features': []
        }
    featureData = {}
    nonEmptyDSIdx = 0
    # merge by feature name, as feature IDs are randomized
    for dsidx, ds in enumerate(datasets):
        if len(ds['features']) > 0 and len(ds['features'][0]['data']) > 0:
            nonEmptyDSIdx = dsidx
        for featData in ds['features']:
            name = featData['feature']['name']
            if name not in featureData:
                # BUG FIX: copy the record and its data list so merging never
                # mutates the caller's input datasets (the original `+=`
                # extended the first dataset's data list in place).
                featureData[name] = dict(featData, data=list(featData['data']))
            else:
                featureData[name]['data'] += featData['data']
    return {
        'id': datasets[nonEmptyDSIdx]['id'],
        # BUG FIX: materialize as a list — dict_values is not indexable or
        # JSON-serializable.
        'features': list(featureData.values())
    }
def mergeLabeledDatasets(labeledDatasets):
    """Merge labeled datasets: merge the data parts and concatenate labels.

    The label id/feature descriptor are taken from the last input whose label
    list is non-empty (or the first input when all are empty).  Returns a
    placeholder structure with ids of -1 when the input list is empty.
    """
    if not labeledDatasets:
        return {
            'id': -1,
            'data': {'id': -1, 'features': []},
            'label': { 'id': -1, 'feature': {'id': -1, 'index': -1, 'name': '', 'type': ''}, 'data': []}
        }
    merged_data = mergeDatasets([lds['data'] for lds in labeledDatasets])
    all_labels = []
    non_empty_idx = 0
    for idx, lds in enumerate(labeledDatasets):
        label_rows = lds['label']['data']
        if label_rows:
            all_labels += label_rows
            non_empty_idx = idx
    source_label = labeledDatasets[non_empty_idx]['label']
    return {
        'id': merged_data['id'],
        'data': merged_data,
        'label': {
            'id': source_label['id'],
            'feature': source_label['feature'],
            'data': all_labels
        }
    }
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,122
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/tokenizer/BaseTokenizer.py
|
'''
Base Tokenizer
'''
from nltk import word_tokenize
class BaseTokenizer(object):
    """Thin callable wrapper around nltk.word_tokenize."""

    def __init__(self):
        pass

    def __call__(self, doc):
        # Make instances usable directly as tokenizer callables
        # (e.g. as the `tokenizer=` argument of a vectorizer).
        return self.tokenize(doc)

    def tokenize(self, doc):
        """Split *doc* into a list of word tokens."""
        return list(word_tokenize(doc))

    def __str__(self):
        return 'Base tokenizer.'
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,123
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/tokenizer/LemmaTokenizer.py
|
'''
WordNet Lemmatizer
'''
from nltk.stem import WordNetLemmatizer
from app.core.main.tokenizer.BaseTokenizer import BaseTokenizer
class LemmaTokenizer(object):
    """Tokenize with BaseTokenizer, then lemmatize each token with WordNet."""

    def __init__(self):
        self.__wnl = WordNetLemmatizer()
        self.__basetokenizer = BaseTokenizer()

    def __call__(self, doc):
        # Callable interface so instances can be passed where a tokenizer
        # function is expected.
        return self.tokenize(doc)

    def tokenize(self, doc):
        """Return the lemmatized tokens of *doc*."""
        tokens = self.__basetokenizer.tokenize(doc)
        return [self.__wnl.lemmatize(tok) for tok in tokens]

    def __str__(self):
        return '''
        WordNet Lemmatizer based on
        %s
        ''' % self.__basetokenizer
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,124
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/classifier/Ensemble.py
|
'''
Ensemble model
'''
import numpy as np
import copy
class Ensemble(object):
    """Leave-one-group-out ensemble over copies of a base model.

    One column of X (at ``group_index``) is treated as a group id.  For each
    distinct group value, a deep copy of ``base_model`` is trained on the rows
    NOT belonging to that group, with the group column dropped.  Predictions
    are majority votes of the sub-models; probabilities are averaged.
    """
    def __init__(self, base_model, group_index):
        # Untrained prototype, deep-copied once per group during fit().
        self.__base_model = base_model
        # Column index of the group feature within X.
        self.__group_index = group_index
        self.__models = []
        self.__labels = []
        self.__num_features = 0
        #required by backward feature selection
        self.coef_ = None

    def fit(self, _X, _Y):
        """Train one sub-model per group value on the rows outside that group.

        Groups whose complementary rows contain a single label are skipped.
        Requires at least two distinct group values.
        """
        X = np.asarray(_X)
        Y = np.asarray(_Y)
        self.__num_features = len(X[0]) - 1 #excl group
        assert self.__group_index < self.__num_features + 1
        self.__labels = sorted(set(Y))
        featureIndices = list(range(self.__num_features + 1))
        # All column indices except the group column.
        featureIdxWoGrp = featureIndices[:self.__group_index] + featureIndices[self.__group_index+1:]
        self.__models = []
        groups = set(X[:, self.__group_index].tolist())
        assert len(groups) > 1
        for grp in groups:
            # Rows NOT in this group, with the group column dropped.
            Xp = X[X[:, self.__group_index] != grp]
            Xpp = Xp[:, featureIdxWoGrp]
            Yp = Y[X[:, self.__group_index] != grp]
            numUniqueLabels = len(set(list(Yp)))
            if numUniqueLabels > 1:
                print('Train a model with ', Xpp.shape, ' data and ', len(Yp), ' labels.')
                m = copy.deepcopy(self.__base_model)
                m.fit(Xpp, Yp)
                self.__models.append(m)
            else:
                print('Skip a model of single label: ', set(list(Yp)), ', e.g ignoring ', len(Yp), ' data points.')
        print('Total ', len(self.__models), ' models trained and saved.')
        #required by backward feature selection
        self.coef_ = self.get_all_weights()

    def predict(self, _X):
        """Predict by majority vote over all sub-models.

        Expects X to still contain the group column; it is dropped before
        prediction.  NOTE(review): np.bincount implies sub-model predictions
        are non-negative integers — confirm labels are encoded that way.
        """
        X = np.asarray(_X)
        featureIndices = list(range(self.__num_features + 1))
        featureIdxWoGrp = featureIndices[:self.__group_index] + featureIndices[self.__group_index+1:]
        Xp = X[:, featureIdxWoGrp]
        all_predictions = []
        assert(len(self.__models) > 0)
        for m in self.__models:
            all_predictions.append(m.predict(Xp))
        # One row per sample, one column per sub-model.
        all_predictions = np.array(all_predictions).transpose()
        #count votes by rows, tie-break arbitrarily
        return np.apply_along_axis(lambda votes: np.argmax(np.bincount(votes)), 1, all_predictions)

    #
    #
    # Probability = number of votes (of sub-models)/ number of sub-models.
    # (a vote of 51% has same contribution as a vote of 99%)
    #
    #
    # def predict_proba(self, _X):
    #     X = np.asarray(_X)
    #
    #     featureIndices = list(range(self.__num_features + 1))
    #     featureIdxWoGrp = featureIndices[:self.__group_index] + featureIndices[self.__group_index+1:]
    #     Xp = X[:, featureIdxWoGrp]
    #
    #     all_predictions = []
    #     assert(len(self.__models) > 0)
    #
    #     for m in self.__models:
    #         all_predictions.append(m.predict(Xp))
    #     all_predictions = np.array(all_predictions).transpose()
    #
    #     num_classes = self.num_classes()
    #     probs = np.apply_along_axis(lambda votes: np.bincount(votes, minlength=num_classes)/sum(np.bincount(votes)),
    #                                 1, all_predictions)
    #
    #     return probs
    #
    #
    # Returns average probabilities of sub-models
    #
    #
    def predict_proba(self, _X):
        """Return class probabilities averaged over all sub-models.

        Each sub-model's probabilities are expanded into the ensemble's full
        label space (labels unseen by a sub-model keep probability 0) before
        averaging.
        """
        X = np.asarray(_X)
        featureIndices = list(range(self.__num_features + 1))
        featureIdxWoGrp = featureIndices[:self.__group_index] + featureIndices[self.__group_index+1:]
        Xp = X[:, featureIdxWoGrp]
        all_labels = self.labels()
        all_predictions = []
        assert(len(self.__models) > 0)
        for m in self.__models:
            probs = m.predict_proba(Xp)
            # Map this sub-model's class columns onto the ensemble label set.
            full_probs = np.zeros((len(Xp), len(all_labels)))
            m_labels = m.labels()
            for clsidx, clsprob in enumerate(probs.transpose()):
                full_probs[:, all_labels.index(m_labels[clsidx])] = clsprob
            all_predictions.append(full_probs)
        # Element-wise mean of the per-model probability matrices.
        preds = np.array(sum(all_predictions)/len(all_predictions))
        return preds

    def get_weights(self, class_no):
        # Averaged weight vector for a single class.
        return self.get_all_weights()[class_no]

    def get_intercepts(self):
        """Average sub-model intercepts, aligned to the full label set."""
        assert len(self.__models) > 0
        num_classes = self.num_classes()
        all_models_intercepts = []
        for m in self.__models:
            m_labels = m.labels()
            m_intercepts = np.asarray(m.get_intercepts())
            # Expand into the ensemble label space; missing classes stay 0.
            full_intercepts = np.zeros((num_classes, ))
            for idx, intercept in enumerate(m_intercepts):
                full_intercepts[self.__labels.index(m_labels[idx])] = intercept
            all_models_intercepts.append(full_intercepts)
        assert len(all_models_intercepts) > 0
        return sum(np.array(all_models_intercepts))/len(all_models_intercepts)

    #OVR should be used
    def get_all_weights(self):
        """Average sub-model weight matrices, aligned to the full label set."""
        assert len(self.__models) > 0
        num_classes = self.num_classes()
        all_models_weights = []
        for m in self.__models:
            m_labels = m.labels()
            m_weights = np.asarray(m.get_all_weights())
            # Expand into (num_classes, num_features); missing classes stay 0.
            full_weights = np.zeros((num_classes, self.__num_features))
            for idx, weights in enumerate(m_weights):
                full_weights[self.__labels.index(m_labels[idx])] = weights
            all_models_weights.append(full_weights)
        assert len(all_models_weights) > 0
        return sum(np.array(all_models_weights))/len(all_models_weights)

    def get_params(self, deep = True):
        """Serialize constructor args and (if trained) the fitted state to
        plain Python types."""
        params = {
            'base_model': self.__base_model,
            'group_index': int(self.__group_index),
        }
        #available only after trained
        if len(self.__models) > 0:
            all_models_params = []
            for m in self.__models:
                all_models_params.append(m.get_params())
            params.update({'all_models_params': all_models_params})
            params.update({'labels': [int(lbl) for lbl in self.__labels]})
            params.update({'num_features': int(self.__num_features)})
            params.update({'coef_': self.coef_.tolist()})
            # Overwrites the object reference above with serializable params.
            params.update({'base_model': self.__base_model.get_params()})
        return params

    def set_params(self, **params):
        """Restore state produced by get_params(); rebuilds every sub-model
        from the base-model prototype."""
        if 'group_index' in params:
            self.__group_index = int(params['group_index'])
        self.__models = []
        for m_params in params['all_models_params']:
            m = copy.deepcopy(self.__base_model)
            m.set_params(**m_params)
            self.__models.append(m)
        self.__labels = params['labels']
        self.__num_features = int(params['num_features'])
        self.coef_ = np.asarray(params['coef_'], dtype=np.float64)
        return

    def labels(self):
        # Sorted distinct labels seen during fit().
        return self.__labels

    def num_classes(self):
        return len(self.labels())

    def num_weights(self):
        # Number of (non-group) features in the averaged weight matrix.
        all_weights = self.get_all_weights()
        return len(all_weights[0])

    def __str__(self):
        return 'Ensemble Classification based on ' + str(self.__base_model)
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,125
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/classifier/LSVC.py
|
'''
SVM
'''
from sklearn.svm import SVC
import numpy as np
class LSVC(object):
    """Linear-kernel SVC wrapper with a uniform classifier interface.

    Wraps sklearn.svm.SVC (kernel='linear', probability=True, OVR decision
    function) and exposes fit/predict/predict_proba plus weight, intercept
    and parameter (de)serialization helpers.
    """
    def __init__(self, class_weighting='balanced', C=1.0, max_iter=2):
        self.__class_weight = class_weighting
        self.__C = C
        self.__kernel = 'linear'
        self.__probability = True
        self.__decision_func_shape = 'ovr'
        self.__max_iter=max_iter
        #required by backward feature selection
        self.coef_ = None
        #probability must set to True in order to call predict_proba
        self.__model = SVC(class_weight=self.__class_weight, kernel=self.__kernel, C=self.__C,
                           probability=self.__probability, decision_function_shape = self.__decision_func_shape,
                           max_iter=self.__max_iter, random_state=1078)

    def fit(self, X, Y):
        """Fit the underlying SVC and cache its weights in coef_."""
        self.__model.fit(X, Y)
        #required by backward feature selection, only available for linear kernel
        self.coef_ = self.get_all_weights()

    def predict(self, X):
        return self.__model.predict(X)

    def predict_proba(self, X):
        # Available because the SVC was constructed with probability=True.
        return self.__model.predict_proba(X)

    def get_weights(self, class_no):
        # Averaged OVR-style weight vector for a single class.
        return self.get_all_weights()[class_no]

    #even if OVR is used the intercept_ are still results of OVO
    #return avg OVO intercepts as OVR intercepts - for information only, e.g. intercepts should not be used to make prediction
    def get_intercepts(self):
        """Approximate per-class intercepts by averaging the pairwise (OVO)
        intercepts involving each class; binary problems duplicate the single
        intercept for both classes."""
        num_classes = len(self.__model.classes_)
        if num_classes > 2:
            all_classes_avg_intercept_ = []
            # running_seq walks the flattened upper triangle of OVO pairs.
            running_seq = 0
            for clsidx in range(num_classes):
                if clsidx == 0:
                    this_class_avg_intercept_ = []
                else:
                    # Pairs (i, clsidx) for i < clsidx.
                    # NOTE(review): this slice takes the first clsidx OVO
                    # intercepts, which assumes a particular pair ordering —
                    # confirm against sklearn's OVO layout.
                    this_class_avg_intercept_ = [intercepts for intercepts in self.__model.intercept_[0:clsidx]]
                for other_clsidx in range(clsidx+1, num_classes):
                    # Pairs (clsidx, j) for j > clsidx.
                    this_class_avg_intercept_.append(self.__model.intercept_[running_seq])
                    running_seq += 1
                # Each class appears in (num_classes - 1) pairs.
                this_class_avg_intercept_ = sum(np.array(this_class_avg_intercept_))/(num_classes-1)
                all_classes_avg_intercept_.append(this_class_avg_intercept_)
            return np.array(all_classes_avg_intercept_)
        else:
            # Binary case: one OVO intercept, reported for both classes.
            return np.array([self.__model.intercept_[0], self.__model.intercept_[0]])

    #even if OVR is used the coef_ are still results of OVO
    #return avg OVO weights as OVR weights - for information only, e.g. weights should not be used to make prediction
    def get_all_weights(self):
        """Approximate per-class weight vectors by averaging the pairwise
        (OVO) coefficient rows involving each class; binary problems duplicate
        the single row for both classes."""
        num_classes = len(self.__model.classes_)
        if num_classes > 2:
            all_classes_avg_coef_ = []
            # running_seq walks the flattened upper triangle of OVO pairs.
            running_seq = 0
            for clsidx in range(num_classes):
                if clsidx == 0:
                    this_class_avg_coef_ = []
                else:
                    # Pairs (i, clsidx) for i < clsidx — same ordering
                    # assumption as get_intercepts (see note there).
                    this_class_avg_coef_ = [weights for weights in self.__model.coef_[0:clsidx]]
                for other_clsidx in range(clsidx+1, num_classes):
                    # Pairs (clsidx, j) for j > clsidx.
                    this_class_avg_coef_.append(self.__model.coef_[running_seq])
                    running_seq += 1
                this_class_avg_coef_ = sum(np.array(this_class_avg_coef_))/(num_classes-1)
                all_classes_avg_coef_.append(this_class_avg_coef_)
            return np.array(all_classes_avg_coef_)
        else:
            # Binary case: one OVO row, reported for both classes.
            return np.array([self.__model.coef_[0], self.__model.coef_[0]])

    def get_params(self, deep = True):
        """Serialize hyperparameters and (if trained) the SVC's fitted state
        to plain Python types."""
        params = {
            'C': self.__C,
            'class_weighting': self.__class_weight,
            'max_iter': self.__max_iter,
        }
        #only available after trained
        if hasattr(self.__model, "support_"):
            params.update({
                'support': self.__model.support_.tolist(),
                'support_vectors': self.__model.support_vectors_.tolist(),
                'n_support': self.__model.n_support_.tolist(),
                'dual_coef': self.__model.dual_coef_.tolist(),
                '_dual_coef': self.__model._dual_coef_.tolist(),
                #'coef': self.__model.coef_.tolist(),
                'intercept': self.__model.intercept_.tolist(),
                '_intercept': self.__model._intercept_.tolist(),
                'fit_status': self.__model.fit_status_,
                'classes': self.__model.classes_.tolist(),
                'probA': self.__model.probA_.tolist(),
                'probB': self.__model.probB_.tolist(),
                'class_weight': self.__model.class_weight_.tolist(),
                'shape_fit': self.__model.shape_fit_,
                'sparse': self.__model._sparse,
                'gamma': self.__model._gamma,
                'degree': self.__model.degree,
                'coef0': self.__model.coef0,
                'kernel': self.__model.kernel,
                'impl': self.__model._impl
            })
        return params

    def set_params(self, **params):
        """Restore state produced by get_params() by writing the fitted
        attributes (including private ones) back onto the wrapped SVC."""
        if 'C' in params:
            self.__C = float(params['C'])
        if 'max_iter' in params:
            self.__max_iter = int(params['max_iter'])
        if 'support' in params:
            self.__model.support_ = np.asarray(params['support'], dtype=np.int32)
        if 'support_vectors' in params:
            self.__model.support_vectors_ = np.asarray(params['support_vectors'], dtype=np.float64)
        if 'n_support' in params:
            self.__model.n_support_ = np.asarray(params['n_support'], dtype=np.int32)
        if 'dual_coef' in params:
            self.__model.dual_coef_ = np.asarray(params['dual_coef'], dtype=np.float64)
        if '_dual_coef' in params:
            self.__model._dual_coef_ = np.asarray(params['_dual_coef'], dtype=np.float64)
        # if 'coef' in params: #coef is readonly
        #     self.__model.coef_ = params['coef']
        if 'intercept' in params:
            self.__model.intercept_ = np.asarray(params['intercept'], dtype=np.float64)
        if '_intercept' in params:
            self.__model._intercept_ = np.asarray(params['_intercept'], dtype=np.float64)
        if 'fit_status' in params:
            self.__model.fit_status_ = int(params['fit_status'])
        if 'classes' in params:
            self.__model.classes_ = np.asarray(params['classes'], dtype=np.int32)
        if 'probA' in params:
            self.__model.probA_ = np.asarray(params['probA'], dtype=np.float64)
        if 'probB' in params:
            self.__model.probB_ = np.asarray(params['probB'], dtype=np.float64)
        if 'class_weight' in params:
            self.__model.class_weight_ = np.asarray(params['class_weight'], dtype=np.float64)
        if 'class_weighting' in params:
            self.__class_weight = params['class_weighting']
        if 'shape_fit' in params:
            self.__model.shape_fit_ = np.asarray(params['shape_fit'], dtype=np.int32)
        if 'sparse' in params:
            self.__model._sparse = bool(params['sparse'])
        if 'gamma' in params:
            self.__model._gamma = float(params['gamma'])
        if 'degree' in params:
            self.__model.degree = float(params['degree'])
        if 'coef0' in params:
            self.__model.coef0 = float(params['coef0'])
        if 'kernel' in params:
            self.__model.kernel = params['kernel']
        if 'impl' in params:
            self.__model._impl = params['impl']
        return

    def labels(self):
        # Classes seen during fit, in sklearn's sorted order.
        return self.__model.classes_

    def num_classes(self):
        return len(self.__model.classes_)

    def num_weights(self):
        # Feature count per class row of the averaged weight matrix.
        all_weights = self.get_all_weights()
        return len(all_weights[0])

    def __str__(self):
        return 'Linear Support Vector Classification.'
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,126
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/test/Classifier.py
|
# coding: utf-8
import sys
import unittest
import pandas as pd
import random as rd
from app.core.main.classifier.LR import LR
from app.core.main.classifier.LSVC import LSVC
from app.core.main.classifier.Ensemble import Ensemble
from app.core.main.featurizer.Featurizer import Featurizer
from app.core.main.tokenizer.BaseTokenizer import BaseTokenizer
from sklearn.feature_extraction import stop_words
from app.core.main.Classifier import Classifier
def _model_configuration(model_type, C):
    """Build one test model configuration.

    All four configurations used by this module share the same featurization
    and solver settings; only the model ``type`` tag and the regularization
    strength ``C`` differ, so the common keys live here instead of being
    duplicated four times.  A fresh ``BaseTokenizer`` instance is created per
    call, matching the original literal dicts.

    :param model_type: classifier type tag, e.g. "LOGISTIC_REGRESSION".
    :param C: inverse regularization strength for the underlying model.
    :return: dict suitable for ``Classifier(model_configuration=...)``.
    """
    return {
        "type": model_type,
        "class_weight": "balanced",
        "tokenizer": BaseTokenizer(),
        "ngram_range": (1, 1),
        "sublinear_tf": True,
        "smooth_idf": True,
        "penalty": "l2",
        "multi_class": "ovr",
        "solver": "liblinear",
        "dual": True,
        "fit_intercept": True,
        'max_df': 1.,
        'min_df': 0.,
        'stopwords': stop_words.ENGLISH_STOP_WORDS,
        'C': C,
        'max_iter': 1000,
    }

lrModelConfiguration = _model_configuration("LOGISTIC_REGRESSION", 1.)
lsvcModelConfiguration = _model_configuration("LINEAR_SVC", 10.)
ensembleSvcModelConfiguration = _model_configuration("ENSEMBLE_LINEAR_SVC", 10.)
ensembleLRModelConfiguration = _model_configuration("ENSEMBLE_LOGISTIC_REGRESSION", 1.)
# Index order matters: __main__ selects a configuration via sys.argv[1].
modelConfigurations = [lrModelConfiguration, lsvcModelConfiguration, ensembleLRModelConfiguration, ensembleSvcModelConfiguration]
# Chosen in __main__ before the tests run.
testModelConfiguration = None
class FunctionalityTest(unittest.TestCase):
    """Functional tests for the Classifier facade.

    The model under test is selected through the module-level
    ``testModelConfiguration`` (set in ``__main__``), so every test here runs
    unchanged against LR, LinearSVC and the two ensemble variants.
    """
    @classmethod
    def setUpClass(cls):
        """Synthesize one random, mixed-type dataset shared by all tests.

        Builds three pandas DataFrames: a multi-class labeled set, a binary
        labeled set and an unlabeled set, each with numeric, categorical,
        boolean, two text and a set-valued field, plus a TRAIN_GROUP column
        (presumably used to split data for the ensemble models — confirm).
        """
        cls.test_data_fn_prefix = "test_data"
        cls.inp_avro_file_with_label = cls.test_data_fn_prefix + ".avro"
        cls.binary_inp_avro_file_with_label = cls.test_data_fn_prefix + ".two_classes.avro"
        cls.inp_avro_file_without_label = cls.test_data_fn_prefix + ".nolabel.avro"
        cls.out_avro_file = cls.test_data_fn_prefix + ".out.avro"
        cls.model_avro_file = cls.test_data_fn_prefix + ".model.out.avro"
        # Field-type schema, one entry per column of fields_with_label.
        cls.schema_with_label = ["NUMERICAL", "NUMERICAL", "CATEGORICAL", "BOOLEAN", "TEXT", "TEXT2VEC", "SET", "LABEL"]
        #for correlation feature ranking test, remove text2vec as it produces negative values, which is prohibited
        #by chisquare ranking.
        cls.schema_with_label_nonnegative = ["NUMERICAL", "NUMERICAL", "CATEGORICAL", "BOOLEAN", "TEXT", "TEXT", "SET", "LABEL"]
        cls.schema_without_label = ["NUMERICAL", "NUMERICAL", "CATEGORICAL", "BOOLEAN", "TEXT", "TEXT2VEC", "SET"]
        cls.fields_with_label = ["TRAIN_GROUP", "num_field", "cat_field", "bool_field", "text_field", "txt2vec_field", "set_field",
                                 "label_field"]
        cls.fields_without_label = cls.fields_with_label[:-1]
        cls.num_recs = 150
        cls.num_models_for_ensemble = 8
        cls.categories = ["Oil and Gas", "Automobile", "Software", "Retail", "Health Care", "Finance", "Construction",
                          "Agriculture"]
        cls.labels = ["a posteriori", "acta non verba", "alea iacta est", "amor patriae", "alma mater"]
        cls.binary_labels = ["verum", "falsus"]
        # Word pool for generating random text fields; split into tokens below.
        words = '''
        The distribution of oil and gas reserves among the world's 50 largest oil companies. The reserves of the privately
        owned companies are grouped together. The oil produced by the "supermajor" companies accounts for less than 15% of
        the total world supply. Over 80% of the world's reserves of oil and natural gas are controlled by national oil companies.
        Of the world's 20 largest oil companies, 15 are state-owned oil companies.
        The petroleum industry, also known as the oil industry or the oil patch, includes the global processes of exploration,
        extraction, refining, transporting (often by oil tankers and pipelines), and marketing of petroleum products.
        The largest volume products of the industry are fuel oil and gasoline (petrol). Petroleum (oil) is also the raw material
        for many chemical products, including pharmaceuticals, solvents, fertilizers, pesticides, synthetic fragrances, and plastics.
        The industry is usually divided into three major components: upstream, midstream and downstream. Midstream operations are
        often included in the downstream category.
        Petroleum is vital to many industries, and is of importance to the maintenance of industrial civilization in its
        current configuration, and thus is a critical concern for many nations. Oil accounts for a large percentage of the
        world’s energy consumption, ranging from a low of 32% for Europe and Asia, to a high of 53% for the Middle East.
        Governments such as the United States government provide a heavy public subsidy to petroleum companies, with major
        tax breaks at virtually every stage of oil exploration and extraction, including the costs of oil field leases and
        drilling equipment.[2]
        Principle is a term defined current-day by Merriam-Webster[5] as: "a comprehensive and fundamental law, doctrine,
        or assumption", "a primary source", "the laws or facts of nature underlying the working of an artificial device",
        "an ingredient (such as a chemical) that exhibits or imparts a characteristic quality".[6]
        Process is a term defined current-day by the United States Patent Laws (United States Code Title 34 - Patents)[7]
        published by the United States Patent and Trade Office (USPTO)[8] as follows: "The term 'process' means process,
        art, or method, and includes a new use of a known process, machine, manufacture, composition of matter, or material."[9]
        Application of Science is a term defined current-day by the United States' National Academies of Sciences, Engineering,
        and Medicine[12] as: "...any use of scientific knowledge for a specific purpose, whether to do more science; to design
        a product, process, or medical treatment; to develop a new technology; or to predict the impacts of human actions."[13]
        The simplest form of technology is the development and use of basic tools. The prehistoric discovery of how to control
        fire and the later Neolithic Revolution increased the available sources of food, and the invention of the wheel
        helped humans to travel in and control their environment. Developments in historic times, including the printing
        press, the telephone, and the Internet, have lessened physical barriers to communication and allowed humans to
        interact freely on a global scale.
        Technology has many effects. It has helped develop more advanced economies (including today's global economy)
        and has allowed the rise of a leisure class. Many technological processes produce unwanted by-products known as
        pollution and deplete natural resources to the detriment of Earth's environment. Innovations have always influenced
        the values of a society and raised new questions of the ethics of technology. Examples include the rise of the
        notion of efficiency in terms of human productivity, and the challenges of bioethics.
        Philosophical debates have arisen over the use of technology, with disagreements over whether technology improves
        the human condition or worsens it. Neo-Luddism, anarcho-primitivism, and similar reactionary movements criticize
        the pervasiveness of technology, arguing that it harms the environment and alienates people; proponents of ideologies
        such as transhumanism and techno-progressivism view continued technological progress as beneficial to society and
        the human condition.
        Health care or healthcare is the maintenance or improvement of health via the prevention, diagnosis, and treatment
        of disease, illness, injury, and other physical and mental impairments in human beings. Healthcare is delivered by
        health professionals (providers or practitioners) in allied health fields. Physicians and physician associates are
        a part of these health professionals. Dentistry, midwifery, nursing, medicine, optometry, audiology, pharmacy,
        psychology, occupational therapy, physical therapy and other health professions are all part of healthcare. It
        includes work done in providing primary care, secondary care, and tertiary care, as well as in public health.
        Access to health care may vary across countries, communities, and individuals, largely influenced by social and
        economic conditions as well as the health policies in place. Countries and jurisdictions have different policies
        and plans in relation to the personal and population-based health care goals within their societies. Healthcare
        systems are organizations established to meet the health needs of targeted populations. Their exact configuration
        varies between national and subnational entities. In some countries and jurisdictions, health care planning is
        distributed among market participants, whereas in others, planning occurs more centrally among governments or
        other coordinating bodies. In all cases, according to the World Health Organization (WHO), a well-functioning
        healthcare system requires a robust financing mechanism; a well-trained and adequately paid workforce; reliable
        information on which to base decisions and policies; and well maintained health facilities and logistics to deliver
        quality medicines and technologies.[1]
        Health care is conventionally regarded as an important determinant in promoting the general physical and mental
        health and well-being of people around the world. An example of this was the worldwide eradication of smallpox
        in 1980, declared by the WHO as the first disease in human history to be completely eliminated by deliberate health
        care interventions.[4]
        '''.split()
        truth = ["true", "false"]
        set_base = ["bona fide", "bono malum superate", "carpe diem", "caveat emptor", "circa", "citius altius fortius",
                    "corpus christi", "curriculum vitae", "de facto", "discendo discimus", "emeritus", "ex animo",
                    "fortis in arduis", "labor omnia vincit", "magnum opus", "persona non grata", "vivere militare est"]
        # Random set sizes between 10% and 50% of the base pool, one per record.
        set_size = list(map(lambda _: rd.randint(int(len(set_base)/10), int(len(set_base)/2)), range(cls.num_recs)))
        set_field = list(map(lambda n: set(map(lambda _: set_base[rd.randint(0, len(set_base)-1)], range(n))), set_size))
        #chisquare feature ranking requires non-negative values
        train_group_field = list(map(lambda _ : str(rd.randint(1, cls.num_models_for_ensemble)), range(cls.num_recs)))
        numeric_field = list(map(lambda _ : rd.random() * rd.randint(0, 100), range(cls.num_recs)))
        categorical_field = list(map(lambda _: cls.categories[rd.randint(0, len(cls.categories)-1)], range(cls.num_recs)))
        boolean_field = list(map(lambda _: truth[rd.randint(0, 1)], range(cls.num_recs)))
        label_field = list(map(lambda _: cls.labels[rd.randint(0, len(cls.labels)-1)], range(cls.num_recs)))
        binary_label_field = list(map(lambda _: cls.binary_labels[rd.randint(0, len(cls.binary_labels)-1)], range(cls.num_recs)))
        # Two independent random text fields built from the word pool above.
        text_size = list(map(lambda _: rd.randint(int(len(words)/10), int(len(words)/2)), range(cls.num_recs)))
        text_field1 = list(map(lambda n: ' '.join(map(lambda _: words[rd.randint(0, len(words)-1)], range(n))), text_size))
        text_size = list(map(lambda _: rd.randint(int(len(words)/10), int(len(words)/2)), range(cls.num_recs)))
        text_field2 = list(map(lambda n: ' '.join(map(lambda _: words[rd.randint(0, len(words)-1)], range(n))), text_size))
        cls.labeled_inp_df = pd.DataFrame(zip(train_group_field, numeric_field, categorical_field, boolean_field, text_field1, text_field2,
                                              set_field, label_field),
                                          columns=cls.fields_with_label)
        cls.nonlabeled_inp_df = pd.DataFrame(zip(train_group_field, numeric_field, categorical_field, boolean_field, text_field1, text_field2,
                                                 set_field),
                                             columns=cls.fields_without_label)
        cls.labeled_binary_inp_df = pd.DataFrame(zip(train_group_field, numeric_field, categorical_field, boolean_field, text_field1, text_field2,
                                                     set_field, binary_label_field),
                                                 columns=cls.fields_with_label)
    def test_correlation_feature_selection(self):
        """Correlation (chi-square) ranking returns a two-column (name, score) frame."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __schema_with_label_nonnegative = self.schema_with_label_nonnegative.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        res_df = ac.feature_ranking(input_df=__labeled_inp_df, schema=__schema_with_label_nonnegative,
                                    mode=Classifier.CC_fs_correlation)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        self.assertEqual(len(res_df.columns), 2)
        self.assertEqual(res_df.dtypes[0], "object")
        self.assertIn(res_df.dtypes[1], ["int64", "float64" ])
    def test_backward_feature_selection(self):
        """Backward-stepwise ranking returns a (name, score) frame; skipped for ensembles."""
        if testModelConfiguration['type'] in [Classifier.ENSEMBLE_SVC_MODEL_TYPE, Classifier.ENSEMBLE_LR_MODEL_TYPE]:
            return
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        res_df = ac.feature_ranking(input_df=__labeled_inp_df, schema=__schema_with_label,
                                    mode=Classifier.CC_fs_backward)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        self.assertEqual(len(res_df.columns), 2)
        self.assertEqual(res_df.dtypes[0], "object")
        self.assertIn(res_df.dtypes[1], ["int64", "float64" ])
    def test_training(self):
        """train() yields a learner plus two Featurizer instances (features, labels)."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.train(input_df=__labeled_inp_df, schema=__schema_with_label)
        lr, fm, lm = ac.get_models()
        self.assertTrue(isinstance(lr, LR) or isinstance(lr, LSVC) or isinstance(lr, Ensemble))
        self.assertTrue(isinstance(fm, Featurizer))
        self.assertTrue(isinstance(lm, Featurizer))
    def test_predict_proba(self):
        """predict_proba appends label/probabilities/score columns and probabilities ~sum to 1."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __nonlabeled_inp_df = self.nonlabeled_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.train(input_df=__labeled_inp_df, schema=__schema_with_label)
        lr, fm, lm = ac.get_models()
        # Round-trip the trained models through a fresh Classifier instance.
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.load_models(lr, fm, lm)
        for multilbl_pred in [True, False]:
            res_df = ac.predict_proba(input_df=__nonlabeled_inp_df, multilabel_pred=multilbl_pred)
            self.assertTrue(isinstance(res_df, pd.DataFrame))
            self.assertEqual(len(res_df.columns), len(self.fields_without_label) + 3)
            self.assertEqual(res_df.dtypes[-1], "float64")
            self.assertEqual(res_df.dtypes[-2], "object")
            self.assertEqual(res_df.dtypes[-3], "object")
            self.assertEqual(len(res_df), self.num_recs)
            if not multilbl_pred:
                # Single-label mode: every predicted label must be a known class.
                self.assertFalse(any(list(map(lambda x: x[0] not in self.labels, res_df.filter([res_df.columns[-3]]).values))))
            else:
                # Multi-label mode: the label cell is a comma-separated list.
                list_lbls = list(map(lambda lbls: lbls[0].split(","), res_df.filter([res_df.columns[-3]]).values))
                list_valid_lbls = list(map(lambda lbls: map(lambda lbl: lbl not in self.labels, lbls), list_lbls))
                self.assertFalse(any(list(map(any, list_valid_lbls))))
            #Test if probabilities sum-up to 1
            prob_str = list(map(lambda p_str: p_str.split(','), res_df["Probabilities"].values))
            prob_float = list(map(lambda prob_with_label: [float(p.split(':')[1]) for p in prob_with_label], prob_str))
            self.assertFalse(any(list(map(lambda probs: sum(probs) >= 1.0 + 0.005 * len(self.fields_without_label) \
                                                        or sum(probs) <= 1.0 - 0.005 * len(self.fields_without_label), prob_float))))
    def test_learn(self):
        """learn() on unlabeled data returns a frame with three extra columns."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __nonlabeled_inp_df = self.nonlabeled_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.train(input_df=__labeled_inp_df, schema=__schema_with_label)
        lr, fm, lm = ac.get_models()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.load_models(lr, fm, lm)
        res_df = ac.learn(input_df=__nonlabeled_inp_df)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        self.assertEqual(len(res_df.columns), len(self.fields_without_label) + 3)
        self.assertEqual(res_df.dtypes[-1], "float64")
    def test_input_qlty(self, binary_problem = False):
        """input_qlty() rates rows Good/OK/Bad and suggests only absent features."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __labeled_binary_inp_df = self.labeled_binary_inp_df.copy(deep=True)
        __nonlabeled_inp_df = self.nonlabeled_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        if binary_problem:
            ac.train(input_df=__labeled_binary_inp_df, schema=__schema_with_label)
        else:
            ac.train(input_df=__labeled_inp_df, schema=__schema_with_label)
        lr, fm, lm = ac.get_models()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.load_models(lr, fm, lm)
        res_df = ac.input_qlty(input_df=__nonlabeled_inp_df)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        self.assertEqual(len(res_df.columns), len(self.fields_without_label) + 2)
        self.assertEqual(res_df.dtypes[-1], "object")
        self.assertEqual(res_df.dtypes[-2], "object")
        self.assertFalse(any(list(map(lambda x: x not in ["Good", "Bad", "OK"], res_df.filter([res_df.columns[-2]]).values))))
        #Test if all suggested features are not present
        def chk_feature_nonexistance(row):
            # Suggested features are "field::value" pairs (or bare field names);
            # each suggested value must NOT already occur in that row's field.
            suggested_features = row["SuggestedFeatures"].split(',')
            for feat in suggested_features:
                if '::' in feat:
                    field_name, field_value = feat.split('::')
                    self.assertIn(field_name, self.fields_without_label)
                    fld_no = list(res_df.columns).index(field_name)
                    # NOTE(review): schema entries are upper-case ("TEXT", "SET", ...)
                    # but these comparisons use lower-case strings — confirm whether
                    # these branches are ever taken.
                    if self.schema_without_label[fld_no] in ["text", "text2vec"]:
                        self.assertNotIn(' ' + field_value + ' ', row[field_name].lower())
                        #self.assertTrue(True)
                    elif self.schema_without_label[fld_no] == "set":
                        if len(field_value) > 0:
                            self.assertNotIn(field_value, row[field_name])
                    elif self.schema_without_label[fld_no] in ["string", "numeric", "boolean"]:
                        self.assertNotEqual(field_value, row[field_name])
                else:
                    field_name = feat
                    if len(field_name) > 0:
                        self.assertIn(field_name, self.fields_without_label)
        res_df.apply(chk_feature_nonexistance, axis=1)
    def test_input_qlty_binary_prob(self):
        """Binary-label variant of test_input_qlty."""
        self.test_input_qlty(binary_problem=True)
    def test_predict_explain(self, binary_problem = False):
        """predict_explain() adds four columns and lists only present contributors."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __nonlabeled_inp_df = self.nonlabeled_inp_df.copy(deep=True)
        __labeled_binary_inp_df = self.labeled_binary_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        if binary_problem:
            ac.train(input_df=__labeled_binary_inp_df, schema=__schema_with_label)
        else:
            ac.train(input_df=__labeled_inp_df, schema=__schema_with_label)
        lr, fm, lm = ac.get_models()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.load_models(lr, fm, lm)
        res_df = ac.predict_explain(input_df=__nonlabeled_inp_df)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        self.assertEqual(len(res_df.columns), len(self.fields_without_label) + 4)
        self.assertEqual(res_df.dtypes[-1], "object")
        self.assertEqual(res_df.dtypes[-2], "float64")
        self.assertEqual(res_df.dtypes[-3], "object")
        self.assertEqual(res_df.dtypes[-4], "object")
        #Test if all top-contributed features are present
        def chk_contributor_existance(row):
            # Contributors come as "feature=weight" entries separated by ';'.
            contributors = row["TopContributors"].split(';')
            features = [ contrib.split('=')[0] for contrib in contributors]
            for feat in features:
                if '::' in feat:
                    field_name, field_value = feat.split('::')
                    self.assertIn(field_name, self.fields_without_label)
                    fld_no = list(res_df.columns).index(field_name)
                    if self.schema_without_label[fld_no] in ["text", "set"]:
                        #self.assertIn(field_value, row[field_name].lower())
                        self.assertTrue(True)
                    elif self.schema_without_label[fld_no] in ["string", "numeric", "boolean"]:
                        self.assertEqual(field_value, row[field_name])
                else:
                    field_name = feat
                    if len(field_name) > 0:
                        self.assertIn(field_name, self.fields_without_label)
        res_df.apply(chk_contributor_existance, axis=1)
    def test_predict_explain_binary_prob(self):
        """Binary-label variant of test_predict_explain."""
        self.test_predict_explain(binary_problem = True)
    def test_kfolds_eval(self, binary_problem = False):
        """K-fold eval() returns a summary frame with one column per class + metrics."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __labeled_binary_inp_df = self.labeled_binary_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        if binary_problem:
            res_df = ac.eval(input_df=__labeled_binary_inp_df, schema=__schema_with_label,
                             mode = "K_FOLDS", nfolds=3)
        else:
            res_df = ac.eval(input_df=__labeled_inp_df, schema=__schema_with_label,
                             mode = "K_FOLDS", nfolds=3)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        self.assertEqual(res_df.dtypes[0], "object")
        if binary_problem:
            self.assertEqual(len(res_df.columns), max(1 + len(self.binary_labels), 5))
        else:
            self.assertEqual(len(res_df.columns), max(1 + len(self.labels), 5))
    def test_kfolds_eval_binary_prob(self):
        """Binary-label variant of test_kfolds_eval."""
        self.test_kfolds_eval(binary_problem = True)
    def test_LOO_eval(self, binary_problem = False):
        """Leave-one-out eval() returns the same summary shape as K-fold."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __labeled_binary_inp_df = self.labeled_binary_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        if binary_problem:
            res_df = ac.eval(input_df=__labeled_binary_inp_df, schema=__schema_with_label,
                             mode = "LEAVE_ONE_OUT", nfolds=3)
        else:
            res_df = ac.eval(input_df=__labeled_inp_df, schema=__schema_with_label,
                             mode = "LEAVE_ONE_OUT", nfolds=3)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        # NOTE(review): this uses len(self.labels) even on the binary branch,
        # unlike test_kfolds_eval — latent bug if ever called with
        # binary_problem=True (no wrapper currently does).
        self.assertEqual(len(res_df.columns), max(1 + len(self.labels), 5))
        self.assertEqual(res_df.dtypes[0], "object")
    def test_kfolds_eval_topN(self, binary_problem = False):
        """K-fold eval() with topN=2 keeps the same summary shape."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __labeled_binary_inp_df = self.labeled_binary_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        if binary_problem:
            res_df = ac.eval(input_df=__labeled_binary_inp_df, schema=__schema_with_label,
                             mode = "K_FOLDS", nfolds=3, topN=2)
        else:
            res_df = ac.eval(input_df=__labeled_inp_df, schema=__schema_with_label,
                             mode = "K_FOLDS", nfolds=3, topN=2)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        # NOTE(review): same latent binary-branch issue as test_LOO_eval —
        # len(self.labels) is used regardless of binary_problem.
        self.assertEqual(len(res_df.columns), max(1 + len(self.labels), 5))
        self.assertEqual(res_df.dtypes[0], "object")
    def test_LOO_eval_table_format(self):
        """Leave-one-out eval() result is a well-formed summary table."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        res_df = ac.eval(input_df=__labeled_inp_df, schema=__schema_with_label,
                         mode = "LEAVE_ONE_OUT", nfolds=3)
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        self.assertEqual(len(res_df.columns), max(1 + len(self.labels), 5))
        self.assertEqual(res_df.dtypes[0], "object")
    def test_eval_data(self, binary_problem = False):
        """eval_data() returns labels, truths, predictions, confusion matrix and report."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __labeled_binary_inp_df = self.labeled_binary_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        if binary_problem:
            labels, true_lbls, pred_lbls, conf_mat, cls_report = ac.eval_data(input_df=__labeled_binary_inp_df, schema=__schema_with_label,
                                                                              mode = "LEAVE_ONE_OUT", nfolds=3)
        else:
            labels, true_lbls, pred_lbls, conf_mat, cls_report = ac.eval_data(input_df=__labeled_inp_df, schema=__schema_with_label,
                                                                              mode = "LEAVE_ONE_OUT", nfolds=3)
        if binary_problem:
            self.assertTrue(len(labels)==2)
        else:
            self.assertTrue(len(labels)==len(self.labels))
        self.assertTrue(len(true_lbls)==self.num_recs)
        self.assertTrue(len(true_lbls)==len(pred_lbls))
        self.assertTrue(len(conf_mat)==len(labels))
        self.assertTrue(len(conf_mat[0])==len(labels))
        # The classification report must carry the standard sklearn-style
        # per-label and aggregate entries.
        ext_labels = list(labels) + ['macro avg', 'weighted avg']
        for lbl in ext_labels:
            self.assertTrue(lbl in cls_report.keys())
            self.assertTrue('precision' in cls_report[lbl])
            self.assertTrue('recall' in cls_report[lbl])
            self.assertTrue('f1-score' in cls_report[lbl])
            self.assertTrue('support' in cls_report[lbl])
        self.assertTrue('accuracy' in cls_report.keys())
    def test_binary_eval_data(self):
        """Binary-label variant of test_eval_data."""
        self.test_eval_data(binary_problem=True)
    def test_model_visualization(self, binary_problem = False):
        """model_visualization() yields a three-column (obj, obj, float) frame."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __labeled_binary_inp_df = self.labeled_binary_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        if binary_problem:
            ac.train(input_df=__labeled_binary_inp_df, schema=__schema_with_label)
        else:
            ac.train(input_df=__labeled_inp_df, schema=__schema_with_label)
        lr, fm, lm = ac.get_models()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.load_models(lr, fm, lm)
        res_df = ac.model_visualization()
        self.assertTrue(isinstance(res_df, pd.DataFrame))
        self.assertEqual(len(res_df.columns), 3)
        self.assertEqual(res_df.dtypes[-1], "float64")
        self.assertEqual(res_df.dtypes[-2], "object")
        self.assertEqual(res_df.dtypes[-3], "object")
    def test_model_viz_binary_prob(self):
        """Binary-label variant of test_model_visualization."""
        self.test_model_visualization(binary_problem=True)
    def test_labels(self):
        """labels() returns exactly the label set seen during training."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.train(input_df=__labeled_inp_df, schema=__schema_with_label)
        labels = ac.labels()
        diff1 = [elem for elem in labels if elem not in self.labels]
        diff2 = [elem for elem in self.labels if elem not in labels]
        self.assertTrue(len(diff1)==0)
        self.assertTrue(len(diff2)==0)
        self.assertTrue(len(labels)==len(self.labels))
    def test_numclasses(self):
        """num_classes() matches the number of distinct training labels."""
        __labeled_inp_df = self.labeled_inp_df.copy(deep=True)
        __schema_with_label = self.schema_with_label.copy()
        ac = Classifier(model_configuration = testModelConfiguration)
        ac.train(input_df=__labeled_inp_df, schema=__schema_with_label)
        nclasses = ac.num_classes()
        self.assertTrue(nclasses == len(self.labels))
if __name__ == "__main__":
    # Select the model configuration to exercise from the command line.
    # Default to the first configuration when no index is supplied instead of
    # crashing with IndexError; reject out-of-range or non-integer indices.
    try:
        _cfg_idx = int(sys.argv[1]) if len(sys.argv) > 1 else 0
    except ValueError:
        sys.exit("usage: Classifier.py [configuration index 0-%d]" % (len(modelConfigurations) - 1))
    if not 0 <= _cfg_idx < len(modelConfigurations):
        sys.exit("configuration index must be in [0, %d]" % (len(modelConfigurations) - 1))
    testModelConfiguration = modelConfigurations[_cfg_idx]
    print("Testing ", testModelConfiguration['type'])
    # Empty argv so unittest does not try to parse our own index argument.
    unittest.main(argv=[''])
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,127
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/test/classify.py
|
# coding: utf-8
import unittest
import sys
from app.logic.helpers import *
from app.test.setup import *
from app.logic.classify import classify
from app.logic.train import *
# All classifier types this suite can exercise; the type under test defaults
# to the first (LR) and may be overridden via argv in the __main__ block below.
modelTypes = [Classifier.LR_MODEL_TYPE, Classifier.SVC_MODEL_TYPE, Classifier.ENSEMBLE_LR_MODEL_TYPE, Classifier.ENSEMBLE_SVC_MODEL_TYPE]
testModelType = modelTypes[0]
class ClassifyTest(unittest.TestCase):
    """End-to-end check: train a model on a random labeled dataset, classify
    the same features, and verify the shape of the classification summary."""

    def test_classify(self):
        # Build a random labeled dataset and a default candidate configuration.
        dataset = random_labeled_dataset()
        dataset_id = dataset['id']
        cand = defaultCandidate(dataset)[0]
        cand['config']['type'] = testModelType
        cand['config']['C'] = 10
        cand['config']['max_iter'] = 2

        # Train a single model from the candidate spec.
        trained_model = train(training_task={
            'data': dataset,
            'candidate': cand,
            'modelSelectionParams': defaultModelSelection()
        })

        # Classify the training features themselves.
        batch_result = classify(model=trained_model, data={
            'id': id(),
            'features': dataset['data']['features']
        })

        # The result must be a dict whose classSummaries list holds dicts
        # carrying the expected per-class fields.
        self.assertTrue(isinstance(batch_result, dict))
        self.assertIn('classSummaries', batch_result)
        summaries = batch_result['classSummaries']
        self.assertTrue(isinstance(summaries, list))
        first_summary = summaries[0]
        self.assertTrue(isinstance(first_summary, dict))
        for key in ('label', 'numInstances', 'probabilities', 'entropies', 'results'):
            self.assertIn(key, first_summary)
if __name__ == '__main__':
    # Allow choosing the model type under test from the command line,
    # e.g. `python classify.py 2`. The original unconditionally indexed
    # sys.argv[1] and crashed with IndexError when run without arguments;
    # fall back to the module default in that case.
    if len(sys.argv) > 1:
        testModelType = modelTypes[int(sys.argv[1])]
    print("Testing ", testModelType)
    # Strip our own argv so unittest does not try to parse it.
    unittest.main(argv=[''])
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,128
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/logic/model_selection.py
|
import sys
import numpy as np
import pandas as pd
import datetime
import logging
from pathlib import Path
import pickle
import os
from app.logic.train import *
from app.logic.helpers import *
from app.settings import *
# Module-level logger; LOG_LEVEL comes from app.settings.
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=LOG_LEVEL)
# In-memory cache of ModelSelectionResults keyed by model id
# (re-populated from disk by load_models() at the bottom of this module).
cachedMSR = {}
def model_selection(models, model_sel):
    """Select one model out of *models* according to the selection spec.

    Args:
        models: list of trained Model dicts; each carries 'degreeOfFreedom'
            and a 'performance' dict with avgRecall / avgPrecision / avgF1.
        model_sel: dict with 'metric' (RECALL | PRECISION | F1) and
            'method' (BEST | KNEE_POINT | ONE_STDEV | TWO_STDEV).

    Returns:
        A ModelSelectionResults dict with the candidates and the chosen model.

    Raises:
        ValueError: on an unsupported metric name. (The original code left
            `metrics` unbound in that case and later failed with a NameError.)
    """
    degree_of_freedom = np.array([model['degreeOfFreedom'] for model in models])
    metric = model_sel['metric']
    if metric == 'RECALL':
        metrics = np.array([model['performance']['avgRecall'] for model in models])
    elif metric == 'PRECISION':
        metrics = np.array([model['performance']['avgPrecision'] for model in models])
    elif metric == 'F1':
        metrics = np.array([model['performance']['avgF1'] for model in models])
    else:
        raise ValueError('Unsupported model selection metric: ' + str(metric))
    method = model_sel['method']
    if method == 'BEST':
        selected_midx = np.argmax(metrics)
    elif method == 'KNEE_POINT':
        selected_midx = knee_point(metrics, degree_of_freedom)
    elif method == 'ONE_STDEV':
        selected_midx = one_stdev(metrics, degree_of_freedom)
    elif method == 'TWO_STDEV':
        selected_midx = two_stdev(metrics, degree_of_freedom)
    else:
        # Unknown method: fall back to the first candidate (original behavior).
        selected_midx = 0
    selected_model = models[selected_midx]
    #type ModelSelectionResults
    res = {
        'id': id(),
        'modelSelection': model_sel,
        'learnedModels': models,
        'selectedModel': selected_model
    }
    return res
def knee_point(metrics, degree_of_freedom):
    """Pick the split index at the 'knee' of the metric-vs-complexity curve.

    Models are ordered by decreasing degree of freedom; every split of the
    ordered list into a left and a right segment is scored by the total
    absolute deviation of each segment from its own mean, and the split
    with the lowest score wins.

    Args:
        metrics: array-like of per-model performance values.
        degree_of_freedom: array-like of per-model complexity values.

    Returns:
        Index of the optimal split position (0 when only one model is given).
    """
    num_models = len(metrics)
    if num_models == 1:
        return 0
    # Carry the original index so callers can map the result back.
    ranked = sorted(zip(metrics, degree_of_freedom, range(num_models)),
                    key=lambda mdi: -mdi[1])
    # BUG FIX: np.float was removed in NumPy 1.20+; the builtin float is the
    # equivalent dtype (was dtype=np.float).
    err = np.zeros(num_models - 1, dtype=float)
    for split_idx in range(num_models - 1):
        left_ = np.array([m for (m, _, _) in ranked[:split_idx + 1]])
        right_ = np.array([m for (m, _, _) in ranked[split_idx + 1:]])
        # A segment with fewer than two points has no internal deviation.
        err1 = 0 if len(left_) < 2 else sum(abs(left_ - np.average(left_)))
        err2 = 0 if len(right_) < 2 else sum(abs(right_ - np.average(right_)))
        err[split_idx] = err1 + err2
    return np.argmin(err)
def one_stdev(metrics, degree_of_freedom):
    """Return the index of the simplest model (lowest degree of freedom)
    whose metric lies within one standard deviation of the mean metric."""
    mean_metric = np.average(metrics)
    spread = np.std(metrics)
    lo = mean_metric - spread
    hi = mean_metric + spread
    # Keep the original position of every eligible model.
    candidates = [
        (metric, dof, idx)
        for idx, (metric, dof) in enumerate(zip(metrics, degree_of_freedom))
        if lo <= metric <= hi
    ]
    # Prefer the eligible model with the fewest degrees of freedom.
    simplest = np.argmin([dof for (_, dof, _) in candidates])
    return candidates[simplest][2]
def two_stdev(metrics, degree_of_freedom):
    """Return the index of the simplest model (lowest degree of freedom)
    whose metric lies within two standard deviations of the mean metric."""
    mean_metric = np.average(metrics)
    spread = np.std(metrics)
    lo = mean_metric - 2 * spread
    hi = mean_metric + 2 * spread
    # Keep the original position of every eligible model.
    candidates = [
        (metric, dof, idx)
        for idx, (metric, dof) in enumerate(zip(metrics, degree_of_freedom))
        if lo <= metric <= hi
    ]
    # Prefer the eligible model with the fewest degrees of freedom.
    simplest = np.argmin([dof for (_, dof, _) in candidates])
    return candidates[simplest][2]
def train_batch(candidates, training_data, model_selection_params, model_id):
    """Train every candidate model, run model selection, then cache and
    persist the results under *model_id*.

    Returns:
        The ModelSelectionResults dict (also stored in cachedMSR).
    """
    startedTime = datetime.datetime.now()
    global cachedMSR
    training_tasks = createTrainingTasks(candidates, training_data, model_selection_params)
    trained_models = list(map(train, training_tasks))
    msr = model_selection(trained_models, model_selection_params)
    msr['id'] = model_id
    cachedMSR[model_id] = msr
    save_training_results(model_id)
    seconds = (datetime.datetime.now() - startedTime).total_seconds()
    # total_seconds() returns a float: cast so the message reads
    # "2 minutes 13 seconds" rather than "2.0 minutes 13.52 seconds".
    print('Trained ' + str(len(training_tasks)) + ' models in '
          + str(int(seconds // 60)) + ' minutes ' + str(int(seconds % 60)) + ' seconds.')
    print('Model ' + str(model_id) + ' cached.')
    return msr
def train_from_local_data(candidates, schema, training_data_file_name, model_selection_params, model_id):
    """Load a local CSV into *schema* and train a batch of candidates on it.

    Returns the ModelSelectionResults, or None when the file cannot be read.
    """
    training_data = loadLocalData(schema, training_data_file_name)
    if training_data is None:
        print("Can't read data from file " + training_data_file_name)
        return None
    return train_batch(candidates, training_data, model_selection_params, model_id)
def loadLocalData(schema, file_name):
    """Populate the per-feature 'data' lists of *schema* from a CSV file
    located in the classification data directory.

    Returns the schema with data filled in, or None when the file is missing.
    """
    dataFile = Path(CLASSIFICATION_DATA_DIR + "/" + file_name)
    if not dataFile.is_file():
        return None
    df = pd.read_csv(dataFile)
    # The label column is loaded the same way as the feature columns.
    for feat in schema['data']['features'] + [schema['label']]:
        col_name = feat['feature']['name']
        col_type = feat['feature']['type']
        column = df[col_name].values
        if col_type in ['NUMERICAL']:
            feat['data'] = [{'id': id(), 'numerical': float(v)} for v in column]
        elif col_type in ['SET']:
            feat['data'] = [{'id': id(), 'set': str(v)} for v in column]
        else:
            feat['data'] = [{'id': id(), 'text': str(v)} for v in column]
    return schema
def delete_training_results(model_id):
    """Drop the cached training results for *model_id* and delete its
    on-disk model file. Returns the model id."""
    assert(model_id in cachedMSR), 'Model ID ' + str(model_id) + ' not found.'
    del cachedMSR[model_id]
    remove_model(model_id)
    return model_id
def get_training_results(model_id):
    """Return the cached ModelSelectionResults for *model_id*.

    Raises:
        AssertionError: when the id is not in the cache.
    """
    global cachedMSR
    assert(model_id in cachedMSR), 'Training results with given ID not found.'
    return cachedMSR[model_id]
def save_training_results(model_id):
    """Pickle the cached training results for *model_id* into the model
    directory as <model_id>.pkl, creating the directory if needed.

    Raises:
        AssertionError: when the id is not in the cache.
    """
    global cachedMSR
    assert(model_id in cachedMSR), 'Training results with given ID not found.'
    path = Path(CLASSIFICATION_MODEL_DIR)
    path.mkdir(parents=True, exist_ok=True)
    # `with` guarantees the handle is closed even if pickling raises
    # (the original leaked the handle on error).
    with open(CLASSIFICATION_MODEL_DIR + "/" + model_id + ".pkl", 'wb') as output:
        pickle.dump({model_id: cachedMSR[model_id]}, output)
    print("Model " + str(model_id) + " saved.")
def load_models():
    """Load every pickled model file from the model directory into the
    module-level cache and return it.

    Creates the directory when missing, so a fresh deployment starts empty.
    """
    global cachedMSR
    path = Path(CLASSIFICATION_MODEL_DIR)
    path.mkdir(parents=True, exist_ok=True)
    fileNames = os.listdir(CLASSIFICATION_MODEL_DIR)
    print("Loading models from " + CLASSIFICATION_MODEL_DIR)
    for fname in fileNames:
        modelDataFile = Path(CLASSIFICATION_MODEL_DIR + "/" + fname)
        if modelDataFile.is_file():
            # `with` closes the handle even if unpickling raises
            # (the original leaked the handle on error).
            # NOTE: pickle.load is only safe on trusted model files —
            # never point CLASSIFICATION_MODEL_DIR at untrusted data.
            with open(CLASSIFICATION_MODEL_DIR + "/" + fname, 'rb') as datafile:
                modelData = pickle.load(datafile)
            cachedMSR.update(modelData)
            print("Model " + fname + " loaded.")
    print(str(len(cachedMSR)) + " models loaded from " + CLASSIFICATION_MODEL_DIR)
    return cachedMSR
def remove_model(model_id):
    """Delete the persisted .pkl file for *model_id*, if one exists."""
    filename = CLASSIFICATION_MODEL_DIR + "/" + model_id + ".pkl"
    if not Path(filename).is_file():
        print("Model " + str(model_id) + " not found.")
        return
    os.remove(filename)
    print("Model " + str(model_id) + " deleted.")
def all_training_results():
    """Return every cached ModelSelectionResults as a list."""
    return [*cachedMSR.values()]
# Warm the in-memory cache from the on-disk model directory at import time.
cachedMSR = load_models()
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,129
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/logic/train.py
|
import json
import numpy as np
#import pprint
from app.core.main.tokenizer.BaseTokenizer import BaseTokenizer
from app.core.main.tokenizer.PorterTokenizer import PorterTokenizer
from app.core.main.tokenizer.LemmaTokenizer import LemmaTokenizer
from app.core.main.featurizer.Doc2Vector import Doc2Vector
from app.core.main.Classifier import Classifier
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import *
from app.core.main.classifier.LR import LR
from app.core.main.classifier.LSVC import LSVC
from app.core.main.classifier.Ensemble import Ensemble
from app.core.main.featurizer.Featurizer import Featurizer
from app.logic.helpers import *
def evaluate(training_task):
    """Cross-validate a candidate model and report per-class performance.

    Args:
        training_task: dict with 'data' (labeled dataset), 'candidate'
            (features / featurizers / config) and 'modelSelectionParams'
            (carrying 'evalMode' and 'numFolds').

    Returns:
        A ModelPerformance dict with per-class figures (confusion-matrix
        buckets, precision/recall/F1) and weighted averages.
    """
    labeled_data = training_task['data']
    candidate = training_task['candidate']
    model_sel = training_task['modelSelectionParams']
    input_df = datasetToDataframe(labeled_data)
    features = candidate["features"]
    featurizers = candidate["featurizers"]
    config = candidate["config"]
    ac = create_classifier(config)
    # Restrict evaluation to the candidate's selected feature columns.
    filtered_df = input_df.filter([f['name'] for f in features])
    # BUG FIX: the original computed filtered_df but then evaluated on the
    # unfiltered input_df, silently ignoring the candidate's feature
    # selection (train() in this module trains on the filtered frame).
    labels, _, _, conf_mat, cls_report = ac.eval_data(input_df=filtered_df, schema=featurizers,
                                                      mode = model_sel['evalMode'], nfolds=model_sel['numFolds'])
    class_performances = []
    for (lbl_idx, lbl) in enumerate(labels):
        # One bucket per predicted label: a row of the confusion matrix.
        cls_buckets = []
        conf_mat_data = conf_mat[lbl_idx]
        for (idx, nval) in enumerate(conf_mat_data):
            cls_buckets.append({'id': id(), 'trueLabel': lbl, 'predictedLabel': labels[idx], 'numInstances': int(nval), 'weight': 1 })
        perf = {
            'id': id(),
            'label': lbl,
            'weight': 1,
            'numInstances': int(cls_report[lbl]['support']),
            'classifiedAs': cls_buckets,
            'recall': float(cls_report[lbl]['recall']),
            'precision': float(cls_report[lbl]['precision']),
            'f1': float(cls_report[lbl]['f1-score']),
        }
        class_performances.append(perf)
    #type ModelPerformance
    pres = {
        'id': id(),
        'classPerformances': class_performances,
        'numInstances': int(cls_report['weighted avg']['support']),
        'avgRecall': float(cls_report['weighted avg']['recall']),
        'avgPrecision': float(cls_report['weighted avg']['precision']),
        'avgF1': float(cls_report['weighted avg']['f1-score']),
    }
    return pres
def train(training_task):
    """Train a candidate classifier and serialize it into a Model dict.

    Args:
        training_task: dict with 'data' (labeled dataset), 'candidate'
            (features / featurizers / config) and 'modelSelectionParams'.

    Returns:
        A Model dict carrying the labels, per-class learned weights, the
        fitted featurizer states, cross-validated performance (via
        evaluate()) and the raw model parameters as a JSON string.
    """
    #debug
    print("train (train.py) starts ...")
    labeled_data = training_task['data']
    candidate = training_task['candidate']
    input_df = datasetToDataframe(labeled_data)
    features = candidate["features"]
    featurizers = candidate["featurizers"]
    config = candidate["config"]
    # Keep only the candidate's selected feature columns for training.
    filtered_df = input_df.filter([f['name'] for f in features])
    ac = create_classifier(config)
    ac.train(input_df=filtered_df, schema=featurizers)
    mainModel, featurizer, labelEncoder = ac.get_models()
    # featurizer_end_offset[i] marks where feature i's columns end in the
    # flat weight vector, so weights can be sliced back per feature below.
    featurizer_models, _, featurizer_end_offset, _ = featurizer.get_params()
    weights = mainModel.get_all_weights()
    num_weights = mainModel.num_weights()
    labels = list(ac.labels())
    intercepts = mainModel.get_intercepts()
    assert (labels is not None)
    assert (labels[0] is not None)
    # Slice each class's flat weight vector into per-feature chunks.
    class_weights = []
    for cls_idx, lbl in enumerate(labels):
        feature_weights = []
        # NOTE(review): iterates len(features)-1 entries — presumably the
        # last feature is the label column and carries no weights; confirm.
        for fidx in range(len(features)-1):
            feat = features[fidx]
            _start_widx = featurizer_end_offset[fidx - 1] if fidx > 0 else 0
            _end_widx = featurizer_end_offset[fidx]
            feature_weights.append({'id': id(), 'feature': feat, 'weights': list(weights[cls_idx][_start_widx: _end_widx]) })
        class_weights.append({'id': id(), 'class': lbl, 'weights': feature_weights, 'intercept': float(intercepts[cls_idx])})
    # Serialize every fitted featurizer into its wrapper object; the key
    # ('noop', 'min_max_scaler', ...) identifies the featurizer type for
    # loadTrainedModel to reconstruct it later.
    featurizers = []
    for fidx, fmodel in enumerate(featurizer_models):
        if fmodel is None:
            # No transformation applied for this feature.
            featurizers.append(
                {
                    'id': id(),
                    'noop': { 'id': id() }
                }
            )
        elif isinstance(fmodel, MinMaxScaler):
            featurizers.append(
                {
                    'id': id(),
                    'min_max_scaler': {
                        'id': id(),
                        'minValue': float(fmodel.min_[0]),
                        # NOTE(review): maxValue is never populated and
                        # loadTrainedModel does not read it back.
                        'maxValue': None,
                        'scale': float(fmodel.scale_[0]),
                        'dataMin': float(fmodel.data_min_[0]),
                        'dataMax': float(fmodel.data_max_[0])
                    }
                }
            )
        elif isinstance(fmodel, LabelBinarizer):
            featurizers.append(
                {
                    'id': id(),
                    'label_binarizer': { 'id': id(),
                                         'labels': list(fmodel.classes_) }
                }
            )
        elif isinstance(fmodel, TfidfVectorizer):
            # Persist the vocabulary as explicit (term -> column index) pairs.
            term_feature_map = []
            # NOTE(review): the inner fidx shadows the outer loop variable;
            # harmless because the outer enumerate reassigns it each pass.
            for (term, fidx) in fmodel.vocabulary_.items():
                term_feature_map.append({'id': id(), 'term': term, 'featureIdx': int(fidx)})
            featurizers.append(
                {
                    'id': id(),
                    'tfidf_vectorizer': {
                        'id': id(),
                        'vocab': term_feature_map,
                        'idf': [float(idf_val) for idf_val in fmodel.idf_],
                        'stopwords': list(fmodel.stop_words_)
                    }
                }
            )
        elif isinstance(fmodel, MultiLabelBinarizer):
            featurizers.append(
                {
                    'id': id(),
                    'multilabel_binarizer': { 'id': id(),
                                              'labels': list(fmodel.classes_) }
                }
            )
        elif isinstance(fmodel, LabelEncoder):
            assert (fmodel.classes_ is not None)
            assert (fmodel.classes_[0] is not None)
            featurizers.append(
                {
                    'id': id(),
                    'label_encoder': { 'id': id(),
                                       'labels': list(fmodel.classes_) }
                }
            )
        elif isinstance(fmodel, Doc2Vector):
            featurizers.append(
                {
                    'id': id(),
                    'doc_to_vector': {
                        'id': id(),
                        'modelFile': fmodel.model_file_,
                        'maxNumWords': int(fmodel.max_num_words_)
                    }
                }
            )
    # Cross-validated performance of this candidate (see evaluate()).
    perf = evaluate(training_task)
    model_data = mainModel.get_params()
    rmodl = {
        'id': id(),
        'type': config["type"],
        'candidate': candidate,
        'labels': labels,
        'learnedWeights': class_weights,
        'learnedFeaturizers': featurizers,
        'labelEncoder':{ 'id': id(), 'labels': labels },
        'degreeOfFreedom': num_weights,
        'performance': perf,
        'json': json.dumps(model_data)
    }
    #debug
    print("train (train.py) finished ...")
    #type Model
    return rmodl
def loadTrainedModel(model):
    """Reconstruct the classifier, featurizer and label encoder from a
    serialized Model dict (the inverse of train()).

    Args:
        model: Model dict produced by train(), carrying the candidate config,
            learned featurizers, learned weights, labels and the raw model
            parameters as JSON.

    Returns:
        (clsModel, featurizer, labelEncoder); all three are None when the
        model type is not recognized.
    """
    #debug
    print("loadTrainedModel starts ...")
    config = model["candidate"]["config"]
    featurizers = model["learnedFeaturizers"]
    labelEncoder = model["labelEncoder"]
    learnedWeights = model["learnedWeights"]
    labels = model["labels"]
    modelType = model["type"]
    if modelType in [Classifier.LR_MODEL_TYPE, Classifier.SVC_MODEL_TYPE, Classifier.ENSEMBLE_LR_MODEL_TYPE, Classifier.ENSEMBLE_SVC_MODEL_TYPE]:
        #extract labels, coefs, intercepts, feature names and types from learned weights
        num_classes = len(labels)
        num_features = len(learnedWeights[0]["weights"])
        # Binary problems use a single submodel; multiclass uses one per class.
        if num_classes > 2:
            num_submodels = num_classes
        else:
            num_submodels = 1
        featureNames = []
        for clsidx in range(num_submodels):
            for featidx in range(num_features):
                # Feature names are identical across submodels; collect once.
                if clsidx == 0:
                    featureNames.append(learnedWeights[clsidx]["weights"][featidx]["feature"]["name"])
        if modelType == Classifier.LR_MODEL_TYPE:
            #initialize LR model
            clsModel = LR(penalty=config["penalty"].lower(), dual=config["primal_dual"]=="DUAL", solver=config["solver"].lower(),
                          multi_class=config["multiclass"].lower(), class_weight=config["weighting"].lower(),
                          fit_intercept=config["fitIntercept"])
            #clsModel.set_params(classes=labels, coef=coefficients, intercept=intercepts)
            model_data = json.loads(model['json'])
            clsModel.set_params(**model_data)
        elif modelType == Classifier.SVC_MODEL_TYPE:
            clsModel = LSVC(class_weighting=config["weighting"].lower(), C=config["C"], max_iter=config["max_iter"])
            model_data = json.loads(model['json'])
            clsModel.set_params(**model_data)
        elif modelType == Classifier.ENSEMBLE_LR_MODEL_TYPE:
            baseModel = LR(penalty=config["penalty"].lower(), dual=config["primal_dual"]=="DUAL", solver=config["solver"].lower(),
                           multi_class=config["multiclass"].lower(), class_weight=config["weighting"].lower(),
                           fit_intercept=config["fitIntercept"])
            model_data = json.loads(model['json'])
            clsModel = Ensemble(baseModel, model_data['group_index'])
            clsModel.set_params(**model_data)
        elif modelType == Classifier.ENSEMBLE_SVC_MODEL_TYPE:
            baseModel = LSVC(class_weighting=config["weighting"].lower(), C=config["C"], max_iter=config["max_iter"])
            model_data = json.loads(model['json'])
            clsModel = Ensemble(baseModel, model_data['group_index'])
            clsModel.set_params(**model_data)
        stop_words = ENGLISH_STOP_WORDS if config["stopwords"] == "ENGLISH" else []
        tokenizer = BaseTokenizer() if config["tokenizer"] == "WORD_TOKENIZER" \
            else PorterTokenizer() if config["tokenizer"] == "STEMMER" \
            else LemmaTokenizer() if config["tokenizer"] == "LEMMATIZER" \
            else None
        ngram_range = (1, 1) if config["ngrams"] == "UNIGRAM" \
            else (2, 2) if config["ngrams"] == "BIGRAM" \
            else (1, 2) if config["ngrams"] == "BOTH" \
            else None
        #initialize featurizers
        featurizer_models = []
        featurizer_data = []
        featurizer_offsets = []
        feat_offset = 0
        featureTypes = []
        for f_rizer in featurizers:
            if 'noop' in f_rizer and f_rizer["noop"] is not None:
                m = None
                feat_offset += 1
                featurizer_data.append(None)
                featureTypes.append("NOOP")
            elif 'min_max_scaler' in f_rizer and f_rizer["min_max_scaler"] is not None:
                m = MinMaxScaler()
                # BUG FIX: np.float was removed in NumPy 1.20+; the builtin
                # float is the equivalent dtype (was dtype=np.float, x4).
                m.min_ = np.ndarray((1,), dtype=float)
                m.min_[0] = f_rizer["min_max_scaler"]["minValue"]
                m.scale_ = np.ndarray((1,), dtype=float)
                m.scale_[0] = f_rizer["min_max_scaler"]["scale"]
                m.data_min_ = np.ndarray((1,), dtype=float)
                m.data_min_[0] = f_rizer["min_max_scaler"]["dataMin"]
                m.data_max_ = np.ndarray((1,), dtype=float)
                m.data_max_[0] = f_rizer["min_max_scaler"]["dataMax"]
                feat_offset += 1
                featurizer_data.append(None)
                featureTypes.append("MIN_MAX_SCALER")
            elif 'label_binarizer' in f_rizer and f_rizer["label_binarizer"] is not None:
                m = LabelBinarizer()
                m.classes_ = np.array(f_rizer["label_binarizer"]["labels"])
                feat_offset += len(m.classes_)
                featurizer_data.append(None)
                featureTypes.append("LABEL_BINARIZER")
            elif 'tfidf_vectorizer' in f_rizer and f_rizer["tfidf_vectorizer"] is not None:
                m = TfidfVectorizer(input='content', max_df=config["max_df"], min_df=config["min_df"],
                                    stop_words=stop_words,
                                    decode_error='ignore',
                                    sublinear_tf=config["tf"] == "SUBLINEAR",
                                    smooth_idf=config["df"] == "SMOOTH",
                                    ngram_range = ngram_range, tokenizer = tokenizer)
                m.vocabulary_ = dict()
                for fmap in f_rizer["tfidf_vectorizer"]["vocab"]:
                    m.vocabulary_.update({fmap["term"]: fmap["featureIdx"]})
                # NOTE(review): direct assignment to idf_ / stop_words_ relies
                # on scikit-learn internals; verify against the pinned version.
                m.idf_ = f_rizer["tfidf_vectorizer"]["idf"]
                m.stop_words_ = f_rizer["tfidf_vectorizer"]["stopwords"]
                feat_offset += len(m.vocabulary_)
                # Reverse map (column index -> term) kept for introspection.
                featurizer_data.append(dict((fidx, w) for (w, fidx) in m.vocabulary_.items()))
                featureTypes.append("TFIDF_VECTORIZER")
            elif 'multilabel_binarizer' in f_rizer and f_rizer["multilabel_binarizer"] is not None:
                m = MultiLabelBinarizer()
                #must re-fit
                m.fit([f_rizer["multilabel_binarizer"]["labels"]])
                feat_offset += len(m.classes_)
                featurizer_data.append(m.classes_)
                featureTypes.append("MULTILABEL_BINARIZER")
            elif 'label_encoder' in f_rizer and f_rizer["label_encoder"] is not None:
                m = LabelEncoder()
                m.classes_ = np.array(f_rizer["label_encoder"]["labels"])
                feat_offset += 1
                featurizer_data.append(None)
                featureTypes.append("LABEL_ENCODER")
            elif 'doc_to_vector' in f_rizer and f_rizer["doc_to_vector"] is not None:
                m = Doc2Vector(model_file=f_rizer["doc_to_vector"]["modelFile"],
                               max_num_words=f_rizer["doc_to_vector"]["maxNumWords"])
                m.fit()
                feat_offset += m.vector_size()
                featurizer_data.append(None)
                featureTypes.append("TEXT_TO_VECTOR")
            else:
                # NOTE(review): unlike the 'noop' branch this neither bumps
                # feat_offset nor appends to featurizer_data, leaving the
                # parallel lists misaligned — possibly an oversight; confirm.
                m = None
                featureTypes.append("NOOP")
            featurizer_models.append(m)
            featurizer_offsets.append(feat_offset)
        featurizer = Featurizer(featureNames, featureTypes,
                                max_df=config["max_df"], min_df=config["min_df"], stop_words=stop_words,
                                sublinear_tf=config["tf"] == "SUBLINEAR", smooth_idf=config["df"] == "SMOOTH",
                                ngram_range=ngram_range, tokenizer=tokenizer)
        featurizer.set_params(models=featurizer_models, model_data=featurizer_data, featurizer_offsets=featurizer_offsets,
                              tokenizer=tokenizer)
        #initialize label encoder model
        m = LabelEncoder()
        m.classes_ = np.array(labelEncoder["labels"])
        labelEncoder = Featurizer(["Label"], ["LABEL"],
                                  max_df=config["max_df"], min_df=config["min_df"], stop_words=stop_words,
                                  sublinear_tf=config["tf"] == "SUBLINEAR", smooth_idf=config["df"] == "SMOOTH",
                                  ngram_range=ngram_range, tokenizer=tokenizer)
        labelEncoder.set_params(models=[m], model_data=[None], featurizer_offsets=[1],
                                tokenizer=tokenizer)
    else:
        clsModel = featurizer = labelEncoder = None
    #debug
    print("loadTrainedModel finished ...")
    return clsModel, featurizer, labelEncoder
def loadModelSelectionResults(json_obj):
    """Deserialize a ModelSelectionResults payload from its JSON wrapper."""
    serialized = json_obj['json']
    return json.loads(serialized)
def modelSelectionResultsToObject(objId, msr):
    """Wrap a ModelSelectionResults dict as an id + JSON-payload object."""
    payload = json.dumps(msr)
    return {'id': objId, 'json': payload}
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,130
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/evaluator/ModelEvaluator.py
|
'''
Leave one out model evaluator
'''
import numpy as np
import pandas as pd
from sklearn.model_selection import LeaveOneOut, StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
class ModelEvaluator(object):
ME_LeaveOneOut = "LEAVE_ONE_OUT"
ME_KFoldXVal = "K_FOLDS"
supported_modes = [ME_LeaveOneOut, ME_KFoldXVal]
def __init__(self, lr, fm, lm, feature_data, label_data, topN = 1):
self.__lr = lr
self.__fm = fm
self.__lm = lm
self.__X = self.__fm.fit_transform(feature_data)
self.__Y = self.__lm.fit_transform(label_data)
self.__topN = topN
def eval(self, mode, nfolds):
labels, _, _, conf_mat, cls_report = self.eval_data(mode, nfolds)
cls_report_text = ("%s" % cls_report).split("\n")
nCols = max(5, len(labels) + 1)
table_data = []
table_data.append(["."] + [""] * (nCols - 1))
table_data.append(["Confusion Matrix".upper()] + [""] * (nCols - 1))
table_data.append(["."] + [""] * (nCols - 1))
table_data.append(["Actual\\Predicted"] + list(labels) + [""] * (nCols - len(labels) - 1))
for (row_no, row) in enumerate(conf_mat):
table_data.append([labels[row_no]] + [str(val) for val in row] + [""] * (nCols - len(labels) - 1))
table_data.append(["."] + [""] * (nCols - 1))
table_data.append(["Classification Report".upper()] + [""] * (nCols - 1))
table_data.append(["."] + [""] * (nCols - 1))
for (lin_no, txt_line) in enumerate(cls_report_text):
if lin_no == 0:
table_data.append(["Class", "Precision", "Recall", "F1-score", "Support"] + [""] * (nCols - 5))
else:
lin_dat = txt_line.split()
if len(lin_dat) < 1:
table_data.append([""] * nCols)
else:
table_data.append([' '.join(lin_dat[:-4])] + lin_dat[-4:] + [""] * (nCols - 5))
return pd.DataFrame(table_data, columns=list(map(lambda n: "column " + str(n+1), range(nCols))))
def eval_data(self, mode, nfolds, output_dict = False):
assert mode in ModelEvaluator.supported_modes, "Invalid splitting mode %s. Supported modes are %s" % \
(mode, ModelEvaluator.supported_modes)
if mode == ModelEvaluator.ME_KFoldXVal:
assert nfolds > 1 and nfolds <= len(self.__Y), "Invalid num-folds %d" % nfolds
spliter = None
if mode == ModelEvaluator.ME_LeaveOneOut:
spliter = LeaveOneOut()
elif mode == ModelEvaluator.ME_KFoldXVal:
spliter = StratifiedKFold(n_splits=nfolds, shuffle=True)
pred_classes = np.zeros(len(self.__Y))
for train_idx, test_idx in spliter.split(self.__X, self.__Y):
X_train = self.__X[train_idx]
X_test = self.__X[test_idx]
Y_train = self.__Y[train_idx]
Y_test = self.__Y[test_idx]
self.__lr.fit(X_train, Y_train)
if self.__topN > 1:
list_of_probs = self.__lr.predict_proba(X_test)
list_of_prob_with_index = list(map(lambda probs: zip(probs, range(len(probs))), list_of_probs))
list_of_sorted_prob_with_index = list(map(
lambda prob_with_index: sorted(prob_with_index, key = lambda prob_idx: -prob_idx[0]),
list_of_prob_with_index))
topN_preds = list(map(lambda sorted_prob_with_index: [idx for (prob, idx) in sorted_prob_with_index[:self.__topN]],
list_of_sorted_prob_with_index))
topN_preds_and_true_lbl = zip(topN_preds, Y_test)
pred_classes[test_idx] = list(map(lambda list_preds__true_lbl:
list_preds__true_lbl[1] if list_preds__true_lbl[1] in list_preds__true_lbl[0]
else list_preds__true_lbl[0][0], topN_preds_and_true_lbl))
else:
pred_classes[test_idx] = self.__lr.predict(X_test)
labels = self.__lm.inverse_transform(range(max(self.__Y)+1))
lbl_true = self.__lm.inverse_transform(self.__Y)
lbl_pred = self.__lm.inverse_transform(pred_classes.astype(int))
conf_mat = confusion_matrix(lbl_true, lbl_pred, labels)
cls_report = classification_report(lbl_true, lbl_pred, target_names=labels, output_dict=output_dict)
return labels, lbl_true, lbl_pred, conf_mat, cls_report
def __str__(self):
    """Short human-readable identifier for this evaluator."""
    return 'Model evaluator.'
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,131
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/featurizer/Doc2Vector.py
|
import os
import numpy as np
from numpy.linalg import norm
from app.settings import W2VEC_MODEL_DIR
class Doc2Vector(object):
    """Document featurizer backed by pre-trained word embeddings.

    Each document is vectorized as the L2-normalized sum of the vectors of
    its known words (note: not a true average, despite the name).

    Pre-trained word2vec models of various languages with Creative Commons
    Attribution-Share-Alike License 3.0:
    https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md
    https://fasttext.cc/docs/en/crawl-vectors.html
    """

    # Class-level defaults; overridable per instance via the constructor.
    __W2V_file = W2VEC_MODEL_DIR + "/wiki-news-300d-100K.vec"
    __MAX_NUM_WORDS = 20000

    def __init__(self, model_file = None, max_num_words = -1):
        """model_file: path to a fastText-format .vec file (default: class default).
        max_num_words: cap on loaded vocabulary; negative means unlimited."""
        self.model_file_ = model_file if model_file is not None else self.__W2V_file
        self.max_num_words_ = max_num_words if max_num_words is not None else self.__MAX_NUM_WORDS
        assert os.access(self.model_file_, os.R_OK), "Failed to read from w2v model file %s" % self.model_file_
        self.__vectors = {}
        self.__vector_size = 0
        self.__vocab_size = 0

    def fit(self, data=None):
        """Load pre-trained vectors; no actual fitting happens.

        When `data` (an iterable of texts) is given, only words occurring in
        it are loaded, which keeps memory bounded by the corpus vocabulary.
        """
        if data is not None:
            inp_words = set('\n'.join(data).split())
        else:
            inp_words = None
        # fastText .vec format: first line is "<vocab_size> <vector_size>".
        word_count = 0
        headerline = True
        # Bug fix: read from the configured model_file_ — previously this
        # always read the class-level default file, silently ignoring the
        # constructor's model_file argument.
        with open(self.model_file_, 'rt') as txtFile:
            for line in txtFile:
                if headerline:
                    headerline = False
                    self.__vector_size = int(line.split()[1])
                    continue
                tokens = line.split()
                if inp_words is None or tokens[0] in inp_words:
                    self.__vectors[tokens[0]] = np.array([float(val) for val in tokens[1:]])
                    word_count += 1
                    # A negative cap means "load everything".
                    if 0 <= self.max_num_words_ <= word_count:
                        break
        self.__vocab_size = word_count

    def transform(self, data):
        """Vectorize each text as the L2-normalized sum of its word vectors.

        Unknown words are ignored; a text with no known words maps to the
        all-zero vector (left unnormalized to avoid division by zero).
        """
        feat_vectors = []
        for text in data:
            doc_vec = np.zeros(self.__vector_size, dtype=float)
            for w in text.split():
                if w in self.__vectors:
                    doc_vec += self.__vectors[w]
            if np.count_nonzero(doc_vec) > 0:
                doc_vec = doc_vec / norm(doc_vec)
            feat_vectors.append(doc_vec)
        return feat_vectors

    def vector_size(self):
        """Dimensionality of the loaded embeddings (0 before fit())."""
        return self.__vector_size

    def vocab_size(self):
        """Number of word vectors actually loaded (0 before fit())."""
        return self.__vocab_size

    def __str__(self):
        # Bug fix: report the file actually configured for this instance,
        # not the class-level default.
        return '''
Pre-trained word embedding model: %s
Vector size: %d
Vocabulary size: %d
''' % (self.model_file_, self.vector_size(), self.vocab_size())
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,132
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/settings.py
|
import os
from dotenv import load_dotenv
import logging
# Project root = directory containing this settings module.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# Load environment overrides from a .env file next to this module (must run
# before the os.getenv() calls below).
load_dotenv(verbose=True, dotenv_path=os.path.join(PROJECT_ROOT, '.env'))
LOG_LEVEL = logging.DEBUG
# Directory paths supplied via environment variables; each is None if unset.
NLTK_DATA_DIR = os.getenv('NLTK_DATA_DIR')
W2VEC_MODEL_DIR = os.getenv('W2VEC_MODEL_DIR')
CLASSIFICATION_DATA_DIR = os.getenv('CLASSIFICATION_DATA_DIR')
CLASSIFICATION_MODEL_DIR = os.getenv('CLASSIFICATION_MODEL_DIR')
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,133
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/test/train.py
|
import unittest
import sys
from app.logic.helpers import *
from app.test.setup import *
from app.logic.train import evaluate, train, loadTrainedModel
from app.core.main.Classifier import Classifier
# All supported model types; the type under test defaults to logistic
# regression and can be overridden via sys.argv in the __main__ block below.
modelTypes = [Classifier.LR_MODEL_TYPE, Classifier.SVC_MODEL_TYPE, Classifier.ENSEMBLE_LR_MODEL_TYPE, Classifier.ENSEMBLE_SVC_MODEL_TYPE]
testModelType = modelTypes[0]
class TrainingTest(unittest.TestCase):
    """Smoke tests for train() and evaluate() on a random labeled dataset."""

    def test_train(self):
        dataset = random_labeled_dataset()
        cand = defaultCandidate(dataset)[0]
        # Tweak the candidate for fast training during the test run.
        cand['config']['type'] = testModelType
        cand['config']['C'] = 10
        cand['config']['max_iter'] = 2
        task = {
            'data': dataset,
            'candidate': cand,
            'modelSelectionParams': defaultModelSelection(),
        }
        model = train(training_task=task)
        # The trained-model record must expose all of these fields.
        for key in ('type', 'candidate', 'labels', 'learnedWeights',
                    'learnedFeaturizers', 'labelEncoder', 'degreeOfFreedom',
                    'performance'):
            self.assertIn(key, model)

    def test_evaluate(self):
        dataset = random_labeled_dataset()
        task = {
            'data': dataset,
            'candidate': defaultCandidate(dataset)[0],
            'modelSelectionParams': defaultModelSelection(),
        }
        perf = evaluate(task)
        # The performance record must expose all of these fields.
        for key in ('classPerformances', 'numInstances', 'avgRecall',
                    'avgPrecision', 'avgF1'):
            self.assertIn(key, perf)
if __name__ == "__main__":
    # Optionally select the model type under test from the first CLI
    # argument; without one, keep the module default instead of crashing
    # with IndexError (previous behavior).
    if len(sys.argv) > 1:
        testModelType = modelTypes[int(sys.argv[1])]
    print("Testing ", testModelType)
    unittest.main(argv=[''])
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,134
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/tokenizer/PorterTokenizer.py
|
'''
Porter Tokenizer
'''
from nltk.stem.porter import PorterStemmer
from app.core.main.tokenizer.BaseTokenizer import BaseTokenizer
class PorterTokenizer(object):
    """Tokenizer that Porter-stems every token produced by BaseTokenizer."""

    def __init__(self):
        self.__stemmer = PorterStemmer()
        self.__base = BaseTokenizer()

    def __call__(self, doc):
        # Callable interface so the instance can be passed as a tokenizer
        # function directly.
        return self.tokenize(doc)

    def tokenize(self, doc):
        """Tokenize `doc` with the base tokenizer, then stem each token."""
        stem = self.__stemmer.stem
        return [stem(token) for token in self.__base.tokenize(doc)]

    def __str__(self):
        return '''
Porter tokenizer based on
%s
''' % self.__base
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,135
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/test/feature_selection.py
|
import unittest
import sys
from app.test.setup import *
from app.logic.feature_selection import *
# All supported model types; the type under test defaults to logistic
# regression and can be overridden via sys.argv in the __main__ block below.
modelTypes = [Classifier.LR_MODEL_TYPE, Classifier.SVC_MODEL_TYPE, Classifier.ENSEMBLE_LR_MODEL_TYPE, Classifier.ENSEMBLE_SVC_MODEL_TYPE]
testModelType = modelTypes[0]
class FeatureSelectionTest(unittest.TestCase):
    """Smoke tests for the feature-selection strategies.

    Each test builds a random labeled dataset, runs one selection strategy,
    and checks the shape of the returned ranked-feature records.  The
    previously duplicated config setup and field assertions are factored
    into the two private helpers below.
    """

    def _fast_config(self):
        """Model configuration tweaked for quick training during tests."""
        config = defaultModelConfiguration()
        config['type'] = testModelType
        config['C'] = 10.
        config['max_iter'] = 2
        return config

    def _assert_ranked_feature_shape(self, ranked_features):
        """Every ranked-feature record must carry id/index/name/type."""
        self.assertIn('id', ranked_features[0])
        self.assertIn('index', ranked_features[0])
        self.assertIn('name', ranked_features[0])
        self.assertIn('type', ranked_features[0])

    def test_topN_correlation(self):
        labeled_dataset = random_labeled_dataset()
        num_top_features = rd.randint(10, 100)
        num_top_features = min(num_top_features, len(labeled_dataset['data']['features']))
        ranked_features = top_correlated_features(labeled_dataset, self._fast_config(), topN=num_top_features)
        # +1 because the label feature is always included.
        self.assertLessEqual(len(ranked_features), num_top_features + 1)
        self._assert_ranked_feature_shape(ranked_features)

    def test_topN_pct_correlation(self):
        labeled_dataset = random_labeled_dataset()
        pct_top_features = rd.random()
        ranked_features = top_pct_correlated_features(labeled_dataset, self._fast_config(), pct=pct_top_features)
        self._assert_ranked_feature_shape(ranked_features)

    def test_topN_backward(self):
        # RFE tests are skipped for ensemble model types.
        if testModelType in [Classifier.ENSEMBLE_SVC_MODEL_TYPE, Classifier.ENSEMBLE_LR_MODEL_TYPE]:
            return
        labeled_dataset = random_labeled_dataset()
        num_top_features = rd.randint(10, 100)
        ranked_features = top_rfe_features(labeled_dataset, self._fast_config(), topN=num_top_features)
        self.assertLessEqual(len(ranked_features), num_top_features + 1)  # label is always included
        self._assert_ranked_feature_shape(ranked_features)

    def test_topN_pct_backward(self):
        # RFE tests are skipped for ensemble model types.
        if testModelType in [Classifier.ENSEMBLE_SVC_MODEL_TYPE, Classifier.ENSEMBLE_LR_MODEL_TYPE]:
            return
        labeled_dataset = random_labeled_dataset()
        pct_top_features = rd.random()
        ranked_features = top_pct_rfe_features(labeled_dataset, self._fast_config(), pct=pct_top_features)
        self._assert_ranked_feature_shape(ranked_features)
if __name__ == '__main__':
    # Optionally select the model type under test from the first CLI
    # argument; without one, keep the module default instead of crashing
    # with IndexError (previous behavior).
    if len(sys.argv) > 1:
        testModelType = modelTypes[int(sys.argv[1])]
    print("Testing ", testModelType)
    unittest.main(argv=[''])
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,136
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/tokenizer/__init__.py
|
import nltk
from app.settings import NLTK_DATA_DIR
# Restrict NLTK's data search path to the project-configured directory only
# (replaces, not appends to, the default search locations).
nltk.data.path = [NLTK_DATA_DIR]
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,137
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/Classifier.py
|
'''
Main process for Assisted Categorization
'''
import pandas as pd
import numpy as np
from scipy.special import entr
from app.core.main.classifier.LR import LR
from app.core.main.classifier.LSVC import LSVC
from app.core.main.classifier.Ensemble import Ensemble
from app.core.main.featurizer.Featurizer import Featurizer
from app.core.main.feature_selection.LabelCorrelation import LabelCorrelation
from app.core.main.feature_selection.BackwardStepwise import BackwardStepwise
from app.core.main.evaluator.ModelEvaluator import ModelEvaluator
from sklearn.feature_extraction import stop_words
from app.core.main.tokenizer.BaseTokenizer import BaseTokenizer
# Default hyper-parameters shared by all model types; entries are consumed by
# Classifier.train() when constructing the model and the Featurizers.
defaultModelConfiguration = {
    "type": "LOGISTIC_REGRESSION",  # or LINEAR_SVC
    "class_weight": "balanced",
    # NOTE(review): a single shared tokenizer instance is used as the
    # default across all configurations.
    "tokenizer": BaseTokenizer(),
    "ngram_range": (1, 1),
    "sublinear_tf": True,
    "smooth_idf": True,
    "penalty": "l2",
    "multi_class": "ovr",
    "solver": "liblinear",
    "dual": True,
    "fit_intercept": True,
    'max_df': 1.,
    'min_df': 0.,
    'stopwords': stop_words.ENGLISH_STOP_WORDS,
    'C': 1.,
    'max_iter': 1000,
}
class Classifier(object):
    """End-to-end classifier: featurization + model fitting + label encoding."""
    # Supported model types (values of model_configuration["type"]).
    LR_MODEL_TYPE = "LOGISTIC_REGRESSION"
    SVC_MODEL_TYPE = "LINEAR_SVC"
    ENSEMBLE_SVC_MODEL_TYPE = "ENSEMBLE_LINEAR_SVC"
    ENSEMBLE_LR_MODEL_TYPE = "ENSEMBLE_LOGISTIC_REGRESSION"
    # Supported feature-selection modes.
    CC_fs_correlation = "CORRELATION"
    CC_fs_backward = "RFE"
    supported_feature_selection_modes = [CC_fs_backward, CC_fs_correlation]
def __init__(self, model_configuration = defaultModelConfiguration):
    """Create an untrained classifier bound to the given configuration."""
    # Fitted artifacts remain None until train() or load_models() runs.
    self.__model, self.__featurizer, self.__labeler = None, None, None
    self.__model_configuration = model_configuration
def train(self, input_df, schema):
    """Train the configured model on a labeled DataFrame.

    input_df : pd.DataFrame holding feature columns plus the label column.
    schema   : per-column transformation spec, consumed by
               __read_training_data.

    Side effects: stores the fitted model, featurizer and labeler on self.
    Raises ValueError for an unsupported model type (previously this set
    m = None and crashed later with AttributeError on m.fit).
    """
    # debug
    print("train starts ...")
    assert isinstance(input_df, pd.DataFrame)
    field_names, labelFieldName, field_types, train_data, labelData = self.__read_training_data(input_df, schema)
    model_type = self.__model_configuration["type"]
    if model_type == Classifier.LR_MODEL_TYPE:
        m = self._new_lr()
    elif model_type == Classifier.SVC_MODEL_TYPE:
        m = self._new_svc()
    elif model_type in (Classifier.ENSEMBLE_SVC_MODEL_TYPE, Classifier.ENSEMBLE_LR_MODEL_TYPE):
        # Ensemble models require a TRAIN_GROUP column as the first column.
        field_names = list(input_df.columns)
        assert 'TRAIN_GROUP' == field_names[0]
        assert schema[0] in ['NOOP', 'MIN_MAX_SCALER', 'LABEL_ENCODER', 'NUMERICAL', 'BOOLEAN']
        base_model = self._new_svc() if model_type == Classifier.ENSEMBLE_SVC_MODEL_TYPE else self._new_lr()
        m = Ensemble(base_model, group_index=field_names.index('TRAIN_GROUP'))
    else:
        raise ValueError("Unsupported model type: %s" % model_type)
    fm = self._new_featurizer(field_names, field_types)
    lm = self._new_featurizer([labelFieldName], [Featurizer.FT_Label])
    # debug
    print("train: feature transforming ...")
    features = fm.fit_transform(train_data)
    print("train: label transforming ...")
    labels = lm.fit_transform(labelData)
    print("train: model fitting ...")
    m.fit(features, labels)
    self.__model = m
    self.__featurizer = fm
    self.__labeler = lm
    # debug
    print("train finished ...")

def _new_lr(self):
    """Build a logistic-regression model from the stored configuration."""
    cfg = self.__model_configuration
    return LR(penalty=cfg["penalty"], dual=cfg["dual"],
              solver=cfg["solver"], multi_class=cfg["multi_class"],
              class_weight=cfg["class_weight"], fit_intercept=cfg["fit_intercept"])

def _new_svc(self):
    """Build a linear-SVC model from the stored configuration."""
    cfg = self.__model_configuration
    return LSVC(class_weighting=cfg["class_weight"], C=cfg["C"],
                max_iter=cfg["max_iter"])

def _new_featurizer(self, names, types):
    """Build a Featurizer over the given fields with the shared text options."""
    cfg = self.__model_configuration
    return Featurizer(names, types,
                      max_df=cfg["max_df"],
                      min_df=cfg["min_df"],
                      stop_words=cfg["stopwords"],
                      sublinear_tf=cfg["sublinear_tf"],
                      smooth_idf=cfg["smooth_idf"],
                      ngram_range=cfg["ngram_range"],
                      tokenizer=cfg["tokenizer"])
def get_models(self):
    """Return the trained components as a (model, featurizer, labeler) triple."""
    return self.__model, self.__featurizer, self.__labeler
def load_models(self, m, fm, lm):
    """Install pre-trained components on this classifier.

    m is the predictive model (LR, LSVC or Ensemble), fm the feature-side
    Featurizer and lm the label-side Featurizer; each is type-checked
    before being stored.
    """
    assert isinstance(m, LR) or isinstance(m, LSVC) or isinstance(m, Ensemble), "Expect a LR model or SVM model or Ensemble model."
    assert isinstance(fm, Featurizer), "Expect a Featurizer model."
    assert isinstance(lm, Featurizer), "Expect a Featurizer model."
    self.__model = m
    self.__featurizer = fm
    self.__labeler = lm
def predict_proba(self, input_df, multilabel_pred = False):
    """Predict class probabilities for every row of input_df.

    Returns the input frame with three appended columns:
      PredictedLabel - argmax class, or comma-joined classes when
                       multilabel_pred is True;
      Probabilities  - per-class probabilities as "name:0.xx" pairs;
      Entropy        - entropy of the probability vector (uncertainty).
    Requires trained components (see train/load_models).
    """
    #debug
    print("predict_proba starts ...")
    assert isinstance(self.__model, LR) or isinstance(self.__model, LSVC) or isinstance(self.__model, Ensemble)
    assert isinstance(self.__featurizer, Featurizer)
    assert isinstance(self.__labeler, Featurizer)
    assert isinstance(input_df, pd.DataFrame)
    field_names = list(input_df.columns)
    assert len(field_names) == len(self.__featurizer.get_schema()), "Input data has different schema than input schema."
    predictors = self.__featurizer.transform(input_df)
    probs = self.__model.predict_proba(predictors)
    if multilabel_pred:
        # Variable-size label set per row, chosen by __multi_labels__.
        list_lbl_indices = list(map(self.__multi_labels__, probs))
        assert type(list_lbl_indices) == list
        assert type(list_lbl_indices[0]) == list
        list_labels = list(map(self.__labeler.inverse_transform, list_lbl_indices))
        predicted_labels = list(map(lambda lbls: ','.join(lbls), list_labels))
    else:
        # Single-label mode: argmax class per row, decoded to its name.
        predicted_labels = self.__labeler.inverse_transform(list(map(np.argmax, probs)))
    class_names = self.__labeler.inverse_transform(range(len(probs[0])))
    # Render each row's probabilities as comma-joined "class:0.xx" pairs.
    probs_to_str = list(map(lambda ps: list(map(lambda fval: "%.2f" % fval, ps)), probs))
    probs_with_name = list(map(lambda ps: zip(class_names, ps), probs_to_str))
    cls_probs = list(map(lambda pn: ','.join(list(map(lambda n_p: n_p[0] + ':' + n_p[1], pn))), probs_with_name))
    labels = pd.DataFrame(predicted_labels, columns=["PredictedLabel"])
    probs_df = pd.DataFrame(cls_probs, columns=["Probabilities"])
    entropies_df = pd.DataFrame(list(map(lambda fval: round(fval, 2), entr(probs).sum(axis=1))), columns=["Entropy"])
    #debug
    print("predict_proba finished ...")
    return pd.concat([input_df, labels, probs_df, entropies_df], axis=1)
def predict_explain(self, input_df, multilabel_pred = False, topN_features = 10):
    """Predict class probabilities and explain each prediction.

    Same output as predict_proba plus a TopContributors column listing
    up to topN_features "feature=score" pairs, where score is the model
    weight for the predicted class times the feature's input value,
    ranked by absolute magnitude.
    """
    assert isinstance(self.__model, LR) or isinstance(self.__model, LSVC) or isinstance(self.__model, Ensemble)
    assert isinstance(self.__featurizer, Featurizer)
    assert isinstance(self.__labeler, Featurizer)
    assert isinstance(input_df, pd.DataFrame)
    field_names = list(input_df.columns)
    # #debug
    # if len(field_names) != len(self.__featurizer.get_schema()):
    #     print('Input data fields:', len(field_names), sorted(field_names))
    #     print('Input schema:', len(self.__featurizer.get_schema()), self.__featurizer.get_schema())
    assert len(field_names) == len(self.__featurizer.get_schema()), "Input data has different schema than input schema."
    predictors = self.__featurizer.transform(input_df)
    probs = self.__model.predict_proba(predictors)
    pred_classes = list(map(np.argmax, probs))
    if multilabel_pred:
        list_lbl_indices = list(map(self.__multi_labels__, probs))
        assert type(list_lbl_indices) == list
        assert type(list_lbl_indices[0]) == list
        list_labels = list(map(self.__labeler.inverse_transform, list_lbl_indices))
        predicted_labels = list(map(lambda lbls: ','.join(lbls), list_labels))
    else:
        predicted_labels = self.__labeler.inverse_transform(pred_classes)
    class_names = self.__labeler.inverse_transform(range(len(probs[0])))
    probs_to_str = list(map(lambda ps: list(map(lambda fval: "%.2f" % fval, ps)), probs))
    probs_with_name = list(map(lambda ps: zip(class_names, ps), probs_to_str))
    cls_probs = list(map(lambda pn: ','.join(list(map(lambda n_p: n_p[0] + ':' + n_p[1], pn))), probs_with_name))
    labels = pd.DataFrame(predicted_labels, columns=["PredictedLabel"])
    probs_df = pd.DataFrame(cls_probs, columns=["Probabilities"])
    entropies_df = pd.DataFrame(list(map(lambda fval: round(fval, 2), entr(probs).sum(axis=1))), columns=["Entropy"])
    weights = self.__model.get_all_weights()
    feat_names = self.__featurizer.get_all_features()
    # For each row: (feature name, predicted-class weight, input value).
    raw_contributors = list(map(lambda didx_clsno: zip(feat_names, weights[didx_clsno[1]].tolist(),
        np.asarray(predictors)[didx_clsno[0]]),
        enumerate(pred_classes)))
    # Drop features contributing nothing (zero weight or zero input).
    eligible_contributors = list(map(lambda contrib: [(feat, w * inp_s) for (feat, w, inp_s) in contrib if w * inp_s != 0.0 ],
        raw_contributors))
    top_contributors = list(map(lambda feats: ';'.join([fname + '=' + str(round(w, 2)) \
        for (fname, w) in sorted(feats, key = lambda n_w: abs(n_w[1]), reverse=True)[:topN_features] ]),
        eligible_contributors))
    contributors = pd.DataFrame(top_contributors, columns=["TopContributors"])
    return pd.concat([input_df, labels, probs_df, entropies_df, contributors], axis=1)
def learn(self, input_df):
    """Rank rows for active learning: most uncertain (highest entropy) first."""
    scored = self.predict_proba(input_df)
    return scored.sort_values(by=["Entropy"], ascending=False)
def input_qlty(self, input_df, threshold1 = 0.3, threshold2 = 0.5, topN = 10):
    """Assess prediction quality per row and suggest missing features.

    Each row is rated Good/OK/Bad by the entropy of its predicted class
    distribution (Good <= threshold1 < OK <= threshold2 < Bad).  For
    every row, the topN features that are absent from the input but best
    separate its two most likely classes are returned as suggestions.

    Returns input_df with InputQlty and SuggestedFeatures columns.
    """
    assert isinstance(self.__model, LR) or isinstance(self.__model, LSVC) or isinstance(self.__model, Ensemble)
    assert isinstance(self.__featurizer, Featurizer)
    assert isinstance(self.__labeler, Featurizer)
    assert isinstance(input_df, pd.DataFrame)
    assert threshold1 <= threshold2, "Entropy threshold values are invalid: %f > %f ." % (threshold1, threshold2)
    assert topN > 0, "Number of top contributors %d is invalid." % topN
    weights = self.__model.get_all_weights()
    feat_names = self.__featurizer.get_all_features()
    assert topN <= len(feat_names), "Number of top contributors %d cannot exceed number of features %d" % (topN, len(feat_names))
    field_names = list(input_df.columns)
    # Simplified from an enumerate-based comprehension that only copied the list.
    feat_field_names = list(field_names)
    assert len(feat_field_names) == len(self.__featurizer.get_schema()), "Input data has different schema than input schema."
    X = self.__featurizer.transform(input_df)
    # Per-row boolean mask: True where a feature is absent (value == 0).
    zero_feats = list(map(lambda xrow: xrow == 0.0, np.array(X)))
    probs = self.__model.predict_proba(X)
    # Indices of the two most probable classes for every row.
    top2Indices = np.argsort(probs, axis=1)[np.ix_(range(probs.shape[0]), range(probs.shape[1])[-2:])]
    entropies = entr(probs).sum(axis=1)
    input_qlty_df = pd.DataFrame(list(map(lambda e: "Good" if e <= threshold1 \
        else "OK" if e <= threshold2 else "Bad", entropies)), columns=["InputQlty"])
    #classes features & weights
    feat_weights = list(map(lambda cls1_cls2: zip(feat_names, weights[cls1_cls2[0]], weights[cls1_cls2[1]],
        range(len(feat_names))), top2Indices))
    # Discriminative power of a feature = |w_class1 - w_class2|.
    contributors = list(map(lambda fweights: list(map( lambda feat_coeff1_coeff2_fidx: \
        (feat_coeff1_coeff2_fidx[0], abs(feat_coeff1_coeff2_fidx[1] - feat_coeff1_coeff2_fidx[2]), feat_coeff1_coeff2_fidx[3]),
        fweights)), feat_weights))
    # Keep only features the row does NOT already provide.
    not_existed_contributors = list(map(lambda fweights_zero_fs: [(feat, coeff) for (feat, coeff, fidx) in fweights_zero_fs[0] \
        if fweights_zero_fs[1][fidx]], zip(contributors, zero_feats)))
    top_contributors = list(map(lambda fweights: sorted(fweights,
        key = lambda feat_coeff: -feat_coeff[1])[:topN], not_existed_contributors))
    top_contributors_str = list(map(lambda fweights: ','.join([feat for (feat, w) in fweights]), top_contributors))
    top_contributors_df = pd.DataFrame(top_contributors_str, columns=["SuggestedFeatures"])
    return pd.concat([input_df, input_qlty_df, top_contributors_df], axis=1)
def eval(self, input_df, schema, mode, nfolds, topN = 1):
    """Evaluate the configured model on labeled data.

    Builds a fresh, untrained model plus feature/label Featurizers from
    the current configuration (mirrors train) and delegates scoring to
    ModelEvaluator.eval(mode, nfolds).  NOTE: __read_training_data
    removes the label column from input_df in place.
    """
    field_names, label_field_name, field_types, train_data, label_data = self.__read_training_data(input_df, schema)
    if self.__model_configuration["type"] == Classifier.LR_MODEL_TYPE:
        m = LR(penalty=self.__model_configuration["penalty"], dual=self.__model_configuration["dual"],
            solver=self.__model_configuration["solver"], multi_class=self.__model_configuration["multi_class"],
            class_weight=self.__model_configuration["class_weight"], fit_intercept=self.__model_configuration["fit_intercept"])
    elif self.__model_configuration["type"] == Classifier.SVC_MODEL_TYPE:
        m = LSVC(class_weighting=self.__model_configuration["class_weight"], C=self.__model_configuration["C"],
            max_iter=self.__model_configuration["max_iter"])
    elif self.__model_configuration["type"] == Classifier.ENSEMBLE_SVC_MODEL_TYPE:
        # Ensemble variants require the first column to be the group key.
        field_names = list(input_df.columns)
        assert 'TRAIN_GROUP' == field_names[0]
        assert schema[0] in ['NOOP', 'MIN_MAX_SCALER', 'LABEL_ENCODER', 'NUMERICAL', 'BOOLEAN']
        base_model = LSVC(class_weighting=self.__model_configuration["class_weight"], C=self.__model_configuration["C"],
            max_iter=self.__model_configuration["max_iter"])
        m = Ensemble(base_model, group_index=field_names.index('TRAIN_GROUP'))
    elif self.__model_configuration["type"] == Classifier.ENSEMBLE_LR_MODEL_TYPE:
        field_names = list(input_df.columns)
        assert 'TRAIN_GROUP' == field_names[0]
        assert schema[0] in ['NOOP', 'MIN_MAX_SCALER', 'LABEL_ENCODER', 'NUMERICAL', 'BOOLEAN']
        base_model = LR(penalty=self.__model_configuration["penalty"], dual=self.__model_configuration["dual"],
            solver=self.__model_configuration["solver"], multi_class=self.__model_configuration["multi_class"],
            class_weight=self.__model_configuration["class_weight"], fit_intercept=self.__model_configuration["fit_intercept"])
        m = Ensemble(base_model, group_index=field_names.index('TRAIN_GROUP'))
    else:
        m = None
    fm = Featurizer(field_names, field_types,
        max_df=self.__model_configuration["max_df"],
        min_df=self.__model_configuration["min_df"],
        stop_words=self.__model_configuration["stopwords"],
        sublinear_tf=self.__model_configuration["sublinear_tf"],
        smooth_idf=self.__model_configuration["smooth_idf"],
        ngram_range=self.__model_configuration["ngram_range"],
        tokenizer=self.__model_configuration["tokenizer"]
        )
    lm = Featurizer([label_field_name], [Featurizer.FT_Label],
        max_df=self.__model_configuration["max_df"],
        min_df=self.__model_configuration["min_df"],
        stop_words=self.__model_configuration["stopwords"],
        sublinear_tf=self.__model_configuration["sublinear_tf"],
        smooth_idf=self.__model_configuration["smooth_idf"],
        ngram_range=self.__model_configuration["ngram_range"],
        tokenizer=self.__model_configuration["tokenizer"]
        )
    evaluator = ModelEvaluator(m, fm, lm, train_data, label_data, topN)
    eval_res = evaluator.eval(mode, nfolds)
    return eval_res
def eval_data(self, input_df, schema, mode, nfolds, topN = 1):
    """Like eval, but returns detailed per-fold results as a dict.

    Builds the same untrained model + featurizers as eval and calls
    ModelEvaluator.eval_data(mode, nfolds, output_dict=True).  NOTE:
    __read_training_data removes the label column from input_df in place.
    """
    field_names, label_field_name, field_types, train_data, label_data = self.__read_training_data(input_df, schema)
    if self.__model_configuration["type"] == Classifier.LR_MODEL_TYPE:
        m = LR(penalty=self.__model_configuration["penalty"], dual=self.__model_configuration["dual"],
            solver=self.__model_configuration["solver"], multi_class=self.__model_configuration["multi_class"],
            class_weight=self.__model_configuration["class_weight"], fit_intercept=self.__model_configuration["fit_intercept"])
    elif self.__model_configuration["type"] == Classifier.SVC_MODEL_TYPE:
        m = LSVC(class_weighting=self.__model_configuration["class_weight"], C=self.__model_configuration["C"],
            max_iter=self.__model_configuration["max_iter"])
    elif self.__model_configuration["type"] == Classifier.ENSEMBLE_SVC_MODEL_TYPE:
        # Ensemble variants require the first column to be the group key.
        field_names = list(input_df.columns)
        assert 'TRAIN_GROUP' == field_names[0]
        assert schema[0] in ['NOOP', 'MIN_MAX_SCALER', 'LABEL_ENCODER', 'NUMERICAL', 'BOOLEAN']
        base_model = LSVC(class_weighting=self.__model_configuration["class_weight"], C=self.__model_configuration["C"],
            max_iter=self.__model_configuration["max_iter"])
        m = Ensemble(base_model, group_index=field_names.index('TRAIN_GROUP'))
    elif self.__model_configuration["type"] == Classifier.ENSEMBLE_LR_MODEL_TYPE:
        field_names = list(input_df.columns)
        assert 'TRAIN_GROUP' == field_names[0]
        assert schema[0] in ['NOOP', 'MIN_MAX_SCALER', 'LABEL_ENCODER', 'NUMERICAL', 'BOOLEAN']
        base_model = LR(penalty=self.__model_configuration["penalty"], dual=self.__model_configuration["dual"],
            solver=self.__model_configuration["solver"], multi_class=self.__model_configuration["multi_class"],
            class_weight=self.__model_configuration["class_weight"], fit_intercept=self.__model_configuration["fit_intercept"])
        m = Ensemble(base_model, group_index=field_names.index('TRAIN_GROUP'))
    else:
        m = None
    fm = Featurizer(field_names, field_types,
        max_df=self.__model_configuration["max_df"],
        min_df=self.__model_configuration["min_df"],
        stop_words=self.__model_configuration["stopwords"],
        sublinear_tf=self.__model_configuration["sublinear_tf"],
        smooth_idf=self.__model_configuration["smooth_idf"],
        ngram_range=self.__model_configuration["ngram_range"],
        tokenizer=self.__model_configuration["tokenizer"]
        )
    lm = Featurizer([label_field_name], [Featurizer.FT_Label],
        max_df=self.__model_configuration["max_df"],
        min_df=self.__model_configuration["min_df"],
        stop_words=self.__model_configuration["stopwords"],
        sublinear_tf=self.__model_configuration["sublinear_tf"],
        smooth_idf=self.__model_configuration["smooth_idf"],
        ngram_range=self.__model_configuration["ngram_range"],
        tokenizer=self.__model_configuration["tokenizer"]
        )
    evaluator = ModelEvaluator(m, fm, lm, train_data, label_data, topN)
    return evaluator.eval_data(mode, nfolds, output_dict=True)
def feature_ranking(self, input_df, schema, mode):
    """Score all featurized columns by a feature-selection strategy.

    mode selects label correlation (CC_fs_correlation) or backward
    stepwise elimination (estimator-based).  Returns a DataFrame of
    (Feature, Score) sorted by descending score.  NOTE:
    __read_training_data removes the label column from input_df in place.
    """
    field_names, labelFieldName, field_types, train_data, labelData = self.__read_training_data(input_df, schema)
    assert mode in Classifier.supported_feature_selection_modes, \
        "Invalid feature selection mode %s. Supported modes are %s" % \
        (mode, Classifier.supported_feature_selection_modes)
    fm = Featurizer(field_names, field_types,
        max_df=self.__model_configuration["max_df"],
        min_df=self.__model_configuration["min_df"],
        stop_words=self.__model_configuration["stopwords"],
        sublinear_tf=self.__model_configuration["sublinear_tf"],
        smooth_idf=self.__model_configuration["smooth_idf"],
        ngram_range=self.__model_configuration["ngram_range"],
        tokenizer=self.__model_configuration["tokenizer"]
        )
    lm = Featurizer([labelFieldName], [Featurizer.FT_Label],
        max_df=self.__model_configuration["max_df"],
        min_df=self.__model_configuration["min_df"],
        stop_words=self.__model_configuration["stopwords"],
        sublinear_tf=self.__model_configuration["sublinear_tf"],
        smooth_idf=self.__model_configuration["smooth_idf"],
        ngram_range=self.__model_configuration["ngram_range"],
        tokenizer=self.__model_configuration["tokenizer"]
        )
    X = fm.fit_transform(train_data)
    Y = lm.fit_transform(labelData)
    if self.__model_configuration["type"] == Classifier.LR_MODEL_TYPE:
        m = LR(penalty=self.__model_configuration["penalty"], dual=self.__model_configuration["dual"],
            solver=self.__model_configuration["solver"], multi_class=self.__model_configuration["multi_class"],
            class_weight=self.__model_configuration["class_weight"], fit_intercept=self.__model_configuration["fit_intercept"])
    elif self.__model_configuration["type"] == Classifier.SVC_MODEL_TYPE:
        m = LSVC(class_weighting=self.__model_configuration["class_weight"], C=self.__model_configuration["C"],
            max_iter=self.__model_configuration["max_iter"])
    elif self.__model_configuration["type"] == Classifier.ENSEMBLE_SVC_MODEL_TYPE:
        # Ensemble variants require the first column to be the group key.
        field_names = list(input_df.columns)
        assert 'TRAIN_GROUP' == field_names[0]
        assert schema[0] in ['NOOP', 'MIN_MAX_SCALER', 'LABEL_ENCODER', 'NUMERICAL', 'BOOLEAN']
        base_model = LSVC(class_weighting=self.__model_configuration["class_weight"], C=self.__model_configuration["C"],
            max_iter=self.__model_configuration["max_iter"])
        m = Ensemble(base_model, group_index=field_names.index('TRAIN_GROUP'))
    elif self.__model_configuration["type"] == Classifier.ENSEMBLE_LR_MODEL_TYPE:
        field_names = list(input_df.columns)
        assert 'TRAIN_GROUP' == field_names[0]
        assert schema[0] in ['NOOP', 'MIN_MAX_SCALER', 'LABEL_ENCODER', 'NUMERICAL', 'BOOLEAN']
        base_model = LR(penalty=self.__model_configuration["penalty"], dual=self.__model_configuration["dual"],
            solver=self.__model_configuration["solver"], multi_class=self.__model_configuration["multi_class"],
            class_weight=self.__model_configuration["class_weight"], fit_intercept=self.__model_configuration["fit_intercept"])
        m = Ensemble(base_model, group_index=field_names.index('TRAIN_GROUP'))
    else:
        m = None
    fs = LabelCorrelation(n = 1.0) if mode == Classifier.CC_fs_correlation \
        else BackwardStepwise(n = 1, step = 1, estimator=m) #(n = 1, step = max(X.shape[1]/300, 1))
    feature_scores = fs.score_features(X, Y)
    feature_scores_rounded = list(map(lambda val: round(val, 2), feature_scores))
    feature_names = fm.get_all_features()
    # Fix: pd.DataFrame cannot reliably consume a bare zip iterator on
    # Python 3 (a Python 2 leftover); materialize it first.
    features_df = pd.DataFrame(list(zip(feature_names, feature_scores_rounded)),
        columns=["Feature", "Score"])
    sorted_features_df = features_df.sort_values(by = ["Score"], ascending = False)
    return sorted_features_df
def model_visualization(self, topN = 10):
    """Summarize the trained model as a table of its most influential features.

    For every class, keeps the topN features ranked by absolute weight.
    Returns a DataFrame with columns Class, Feature, Weight.
    """
    assert topN > 0, "Number of top contributors %d is invalid." % topN
    assert isinstance(self.__model, LR) or isinstance(self.__model, LSVC) or isinstance(self.__model, Ensemble)
    assert isinstance(self.__featurizer, Featurizer)
    assert isinstance(self.__labeler, Featurizer)
    feat_names = self.__featurizer.get_all_features()
    assert topN <= len(feat_names), "Number of top contributors %d cannot exceed number of features %d" \
        % (topN, len(feat_names))
    weights = self.__model.get_all_weights()
    class_labels = self.__labeler.inverse_transform(range(self.num_classes()))
    rows = []
    # Pair each class with its weight vector; rank features by |weight|.
    for lbl, ws in zip(class_labels, weights):
        ranked = sorted(zip(feat_names, ws), key=lambda fw: -abs(fw[1]))[:topN]
        for fname, w in ranked:
            rows.append((lbl, fname, w))
    return pd.DataFrame(rows, columns=["Class", "Feature", "Weight"])
def labels(self):
    """Return the model's class labels decoded back to their original names."""
    encoded = self.__model.labels()
    return self.__labeler.inverse_transform(encoded)
def num_classes(self):
    """Number of distinct classes the underlying model knows about."""
    return self.__model.num_classes()
def __read_training_data(self, input_df, schema):
    """Split a labeled frame into predictor and label parts.

    schema is a list of featurizer type names, one per column; exactly
    one must be Featurizer.FT_Label.  NOTE: mutates input_df in place -
    the label column is deleted from the caller's DataFrame.

    Returns (field_names, label_field_name, field_types, input_df,
    label_data) where field_names/field_types exclude the label column.
    """
    assert isinstance(input_df, pd.DataFrame)
    field_names = list(input_df.columns)
    assert len(field_names) == len(schema), "Input data has different schema than input schema, e.g. length of %d vs %d" \
        % (len(field_names), len(schema))
    #ignore data frame field types, and make a copy
    field_types = [fld for fld in schema]
    assert len([type for type in field_types if type == Featurizer.FT_Label]) == 1, \
        "There must be exactly one field with %s type in training data.(%s)" % (Featurizer.FT_Label, field_types)
    label_field_no = field_types.index(Featurizer.FT_Label)
    label_field_name = field_names[label_field_no]
    # Keep the label column as its own single-column frame ...
    label_data = input_df.filter(items=[input_df.columns[label_field_no]])
    # ... then drop it from the predictors (in-place mutation).
    del input_df[input_df.columns[label_field_no]]
    field_names.remove(field_names[label_field_no])
    field_types.remove(field_types[label_field_no])
    return (field_names, label_field_name, field_types, input_df, label_data)
def __multi_labels__(self, probabilities):
prob_with_index = zip(probabilities, range(len(probabilities)))
sorted_prob_with_index = sorted(prob_with_index, key = lambda prob_idx: -prob_idx[0])
err = np.zeros(len(probabilities), dtype=np.float)
for split_idx in range(len(probabilities)):
left_ = np.array([p for (p, _) in sorted_prob_with_index[:split_idx]])
right_ = np.array([p for (p, _) in sorted_prob_with_index[split_idx:]])
err1 = 0 if len(left_) < 2 else sum(abs(left_ - np.average(left_)))
err2 = 0 if len(right_) < 2 else sum(abs(right_ - np.average(right_)))
err[split_idx] = err1 + err2
opt_split_idx = max(np.argmin(err), 1)
res = [cls_idx for (_, cls_idx) in sorted_prob_with_index[:opt_split_idx]]
assert len(res) > 0
return res
def __str__(self):
return 'Classification Service.'
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,138
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/logic/feature_selection.py
|
import pandas as pd
import math
from app.logic.helpers import defaultFeatures, datasetToDataframe, defaultFeaturizers, defaultModelConfiguration
from app.core.main.Classifier import Classifier
from app.core.main.tokenizer.BaseTokenizer import BaseTokenizer
from app.core.main.tokenizer.PorterTokenizer import PorterTokenizer
from app.core.main.tokenizer.LemmaTokenizer import LemmaTokenizer
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
def top_correlated_features(labeled_dataset, config, topN = None):
    """Rank dataset features by label correlation and keep the topN.

    config carries textual model/featurization options (tokenizer,
    ngrams, stopwords, LR/SVC hyper-parameters) which are translated
    into a Classifier model configuration.  Column-level scores are
    folded back to per-source-feature totals before ranking.  The label
    feature (last element of defaultFeatures) is always appended so the
    result remains a labeled feature set.  topN=None keeps everything.
    """
    labeled_inp_df = datasetToDataframe(labeled_dataset)
    features = defaultFeatures(dataset=labeled_dataset)
    featurizers = defaultFeaturizers(features)
    stop_words = ENGLISH_STOP_WORDS if config["stopwords"] == "ENGLISH" else []
    tokenizer = BaseTokenizer() if config["tokenizer"] == "WORD_TOKENIZER" \
        else PorterTokenizer() if config["tokenizer"] == "STEMMER" \
        else LemmaTokenizer() if config["tokenizer"] == "LEMMATIZER" \
        else None
    ngram_range = (1, 1) if config["ngrams"] == "UNIGRAM" \
        else (2, 2) if config["ngrams"] == "BIGRAM" \
        else (1, 2) if config["ngrams"] == "BOTH" \
        else None
    ac = Classifier(model_configuration={
        "type": config['type'],
        "class_weight": config['weighting'].lower(),
        "tokenizer": tokenizer,
        "ngram_range": ngram_range,
        "sublinear_tf": config['tf']=="SUBLINEAR",
        "smooth_idf": config['df']=="SMOOTH",
        "penalty": config['penalty'].lower(),
        "multi_class": config['multiclass'].lower(),
        "solver": config['solver'].lower(),
        "dual": config['primal_dual']=="DUAL",
        "fit_intercept": config['fitIntercept'],
        'max_df': config['max_df'],
        'min_df': config['min_df'],
        'stopwords': stop_words,
        'C': config['C'],
        'max_iter': config['max_iter']
        })
    res_df = ac.feature_ranking(input_df=labeled_inp_df, schema=featurizers, mode=Classifier.CC_fs_correlation)
    # Scores are per featurized column (named "field::token"); sum them
    # per source feature.
    feature_names = pd.Series(map(lambda fname: fname.split('::')[0], res_df['Feature']))
    feature_scores = pd.concat([feature_names, res_df['Score']], axis=1)
    feature_scores.columns = ['Feature', 'Score']
    feature_sum_scores = feature_scores.groupby('Feature').sum()
    sorted_features = feature_sum_scores.sort_values(by = ["Score"], ascending = False)
    selected_feature_names = list(sorted_features.index)[:topN]
    selected_features = []
    for fname in selected_feature_names:
        selected_features += [feat for feat in features if feat['name'] == fname]
    return selected_features + [features[-1]]
def top_pct_correlated_features(labeled_dataset, config, pct = 1.):
    """Keep the top pct fraction (rounded up) of correlation-ranked features.

    The label feature is always the last element of the ranked list and
    is re-appended to the result.
    """
    ranked = top_correlated_features(labeled_dataset, config)
    ranked_features, label_feature = ranked[:-1], ranked[-1]
    keep = math.ceil(len(ranked_features) * pct)
    return ranked_features[:keep] + [label_feature]
def top_rfe_features(labeled_dataset, config, topN = None):
    """Rank dataset features by backward stepwise elimination (RFE-style).

    Identical flow to top_correlated_features except the ranking mode is
    Classifier.CC_fs_backward, which scores columns with an estimator
    built from config.  The label feature is always appended so the
    result remains a labeled feature set.  topN=None keeps everything.
    """
    labeled_inp_df = datasetToDataframe(labeled_dataset)
    features = defaultFeatures(dataset=labeled_dataset)
    featurizers = defaultFeaturizers(features)
    stop_words = ENGLISH_STOP_WORDS if config["stopwords"] == "ENGLISH" else []
    tokenizer = BaseTokenizer() if config["tokenizer"] == "WORD_TOKENIZER" \
        else PorterTokenizer() if config["tokenizer"] == "STEMMER" \
        else LemmaTokenizer() if config["tokenizer"] == "LEMMATIZER" \
        else None
    ngram_range = (1, 1) if config["ngrams"] == "UNIGRAM" \
        else (2, 2) if config["ngrams"] == "BIGRAM" \
        else (1, 2) if config["ngrams"] == "BOTH" \
        else None
    ac = Classifier(model_configuration={
        "type": config['type'],
        "class_weight": config['weighting'].lower(),
        "tokenizer": tokenizer,
        "ngram_range": ngram_range,
        "sublinear_tf": config['tf']=="SUBLINEAR",
        "smooth_idf": config['df']=="SMOOTH",
        "penalty": config['penalty'].lower(),
        "multi_class": config['multiclass'].lower(),
        "solver": config['solver'].lower(),
        "dual": config['primal_dual']=="DUAL",
        "fit_intercept": config['fitIntercept'],
        'max_df': config['max_df'],
        'min_df': config['min_df'],
        'stopwords': stop_words,
        'C': config['C'],
        'max_iter': config['max_iter']
        })
    res_df = ac.feature_ranking(input_df=labeled_inp_df, schema=featurizers, mode=Classifier.CC_fs_backward)
    # Scores are per featurized column (named "field::token"); sum them
    # per source feature.
    feature_names = pd.Series(map(lambda fname: fname.split('::')[0], res_df['Feature']))
    feature_scores = pd.concat([feature_names, res_df['Score']], axis=1)
    feature_scores.columns = ['Feature', 'Score']
    feature_sum_scores = feature_scores.groupby('Feature').sum()
    sorted_features = feature_sum_scores.sort_values(by = ["Score"], ascending = False)
    selected_feature_names = list(sorted_features.index)[:topN]
    selected_features = []
    for fname in selected_feature_names:
        selected_features += [feat for feat in features if feat['name'] == fname]
    return selected_features+ [features[-1]]
def top_pct_rfe_features(labeled_dataset, config, pct = 1.):
    """Keep the top pct fraction (rounded up) of RFE-ranked features.

    The label feature is always the last element of the ranked list and
    is re-appended to the result.
    """
    ranked = top_rfe_features(labeled_dataset, config)
    ranked_features, label_feature = ranked[:-1], ranked[-1]
    keep = math.ceil(len(ranked_features) * pct)
    return ranked_features[:keep] + [label_feature]
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,139
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/test/helpers.py
|
# coding: utf-8
import unittest
import pandas as pd
from pandas.util.testing import assert_frame_equal
from app.logic.helpers import (
dataframeToDataset,
datasetToDataframe,
getDataFieldName,
inferFeatureType,
id
)
class TestHelpers(unittest.TestCase):
    """Unit tests for the dataset <-> DataFrame helper functions."""
    @classmethod
    def setUpClass(cls):
        # Shared fixtures: a feature-oriented dataset, its labeled
        # variant, and the equivalent DataFrame dictionaries.
        cls.features = [
            {
                "feature": {"index": 0, "name": "feature 0", "type": "TEXT"},
                "data": [
                    {"text": "Hello" },
                    {"text": "Hello" }
                ]
            },
            {
                "feature": {"index": 1, "name": "feature 1", "type": "NUMERICAL"},
                "data": [
                    {"numerical": 1.2 },
                    {"numerical": 2.5 }
                ]
            },
            {
                "feature": {"index": 2, "name": "feature 2", "type": "SET"},
                "data": [
                    {"set": ["a", "b"]},
                    {"set": ["d", "e"]}
                ]
            }
        ]
        cls.ds = {
            "features": cls.features
        }
        cls.labeled_ds = {
            'id': id(),
            "data": {
                'id': id(),
                "features": cls.features
            },
            "label": {
                'id': id(),
                "feature": {'id': id(), "index": 0, "name": "label feature", "type": "LABEL"},
                "data": [
                    {'id': id(), "text": "value 1"},
                    {'id': id(), "text": "value 2"}
                ]
            }
        }
        cls.df_dict = {
            "feature 0": ["Hello", "Hello"],
            "feature 1": [1.2, 2.5],
            "feature 2": [
                ["a", "b"],
                ["d", "e"]
            ]
        }
        cls.labeled_df_dict = {
            **cls.df_dict,
            "label feature": [
                "value 1",
                "value 2"
            ],
        }
    def test_get_data_field_name(self):
        """Feature types map to the expected payload field names."""
        self.assertEqual('numerical', getDataFieldName('NUMERICAL'))
        self.assertEqual('text', getDataFieldName('CATEGORICAL'))
        self.assertEqual('text', getDataFieldName('TEXT'))
        self.assertEqual('set', getDataFieldName('SET'))
        self.assertEqual('numerical', getDataFieldName('BOOLEAN'))
    def test_infer_feature_type(self):
        """Feature type is inferred correctly from pandas Series contents."""
        numerical_series_1 = pd.Series([1,2,3])
        numerical_series_2 = pd.Series([0.57, 0.61])
        boolean_series_1 = pd.Series(['true','true'])
        boolean_series_2 = pd.Series(['false', 'false'])
        set_series_1 = pd.Series([[1,2], [3,4]])
        set_series_2 = pd.Series([(1,2), (3,4)])
        set_series_3 = pd.Series([{1,2}, {3,4}])
        text_series_1 = pd.Series(['Lorem ipsum dolor sit amet','consectetur adipiscing elit'])
        text_series_2 = pd.Series(['set_field::m=0.73;set_field::t=-0.28','cat_field::Health Care=0.68;set_field::i=-0.3'])
        self.assertEqual('NUMERICAL', inferFeatureType(numerical_series_1))
        self.assertEqual('NUMERICAL', inferFeatureType(numerical_series_2))
        self.assertEqual('BOOLEAN', inferFeatureType(boolean_series_1))
        self.assertEqual('BOOLEAN', inferFeatureType(boolean_series_2))
        self.assertEqual('SET', inferFeatureType(set_series_1))
        self.assertEqual('SET', inferFeatureType(set_series_2))
        self.assertEqual('SET', inferFeatureType(set_series_3))
        self.assertEqual('TEXT', inferFeatureType(text_series_1))
        self.assertEqual('TEXT', inferFeatureType(text_series_2))
    def test_dataframe_to_dataset(self):
        """Round trip: DataFrame -> dataset matches the fixture (ids stripped)."""
        df = pd.DataFrame(self.df_dict)
        result = dataframeToDataset(df)
        expected = self.ds
        #generated IDs can't be the same
        result.pop('id')
        for f in result['features']:
            f.pop('id')
            f['feature'].pop('id')
            for ditem in f['data']:
                ditem.pop('id')
        self.assertEqual(expected, result)
    def test_dataset_to_dataframe(self):
        """Unlabeled dataset converts to the expected DataFrame."""
        expected = pd.DataFrame(self.df_dict)
        result = datasetToDataframe(self.ds)
        assert_frame_equal(expected.reset_index(drop=True), result.reset_index(drop=True))
    def test_labeled_dataset_to_dataframe(self):
        """Labeled dataset converts with the label as an extra column."""
        expected = pd.DataFrame(self.labeled_df_dict)
        result = datasetToDataframe(self.labeled_ds)
        assert_frame_equal(expected.reset_index(drop=True), result.reset_index(drop=True))
# Allow running this test module directly (in addition to via a test runner).
if __name__ == '__main__':
    unittest.main()
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,140
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/logic/classify.py
|
import pandas as pd
import datetime
from app.logic.helpers import datasetToDataframe, dataframeToDataset, id
from app.core.main.Classifier import Classifier
from app.logic.train import loadTrainedModel
from app.logic.model_selection import cachedMSR
def unpackProbs(prob):
    """Parse a serialized "label:prob,label:prob,..." string.

    Returns a tuple (res_dict, all_labels_list):
      res_dict        -- {label: probability} for every entry
      all_labels_list -- [{'id', 'label', 'probability'}, ...] in input order

    Fix: an empty input string now yields ({}, []) instead of raising
    (''.split(',') is [''] and the ':' unpack would fail), matching the
    empty-input guards already used by unpackContribs and
    unpackSuggestedFeatures below.
    """
    res_dict = {}
    all_labels_list = []
    if len(prob) > 0:
        for kv in prob.split(','):
            k, v = kv.split(':')
            v = float(v)
            res_dict[k] = v
            predictedLabel = {
                'id': id(),
                'label': k,
                'probability': v
            }
            all_labels_list.append(predictedLabel)
    return res_dict, all_labels_list
def unpackContribs(contrib):
    """Parse a ';'-separated list of 'feature=weight' contributor entries.

    The feature part may be 'fieldName::fieldValue' or a bare field name
    (in which case the value is empty). Returns a list of
    {'id', 'featureName', 'featureValue', 'weight'} records; an empty
    input string yields an empty list.
    """
    if len(contrib) == 0:
        return []
    records = []
    for contributor in contrib.split(';'):
        assert '=' in contributor, "bad contributor:" + '-->' + contributor + '<--' + ' in ' + '"' + contrib + '"'
        feat, weight = contributor.split('=')
        if '::' in feat:
            field_name, field_value = feat.split('::')
        else:
            field_name, field_value = feat, ''
        records.append({
            'id': id(),
            'featureName': field_name,
            'featureValue': field_value,
            'weight': float(weight)
        })
    return records
def unpackSuggestedFeatures(suggestions):
    """Parse a ','-separated list of suggested features.

    Each entry may be 'fieldName::fieldValue' or a bare field name; every
    suggestion gets a fixed weight of 1.0. Returns a list of
    {'id', 'featureName', 'featureValue', 'weight'} records; an empty
    input string yields an empty list.
    """
    if len(suggestions) == 0:
        return []
    records = []
    for feat in suggestions.split(','):
        if '::' in feat:
            field_name, field_value = feat.split('::')
        else:
            field_name, field_value = feat, ''
        records.append({
            'id': id(),
            'featureName': field_name,
            'featureValue': field_value,
            'weight': 1.
        })
    return records
def classify(cachedModelID, data):
    """Classify an unlabeled dataset with a previously selected/trained model.

    cachedModelID -- key into the module-level cachedMSR cache populated by
                     model selection; an unknown key raises AssertionError.
    data          -- dataset dict in the wire format: {'features': [...]}.

    Returns a BatchClassificationResult dict whose classSummaries group the
    per-row results by predicted label, or an empty result set (id -1) when
    the input has no features or no rows.
    """
    startedTime = datetime.datetime.now()
    # NOTE(review): assert statements are stripped under `python -O`;
    # presumably a missing model should still fail loudly there — confirm.
    assert(cachedModelID in cachedMSR), "Model not found."
    model = cachedMSR[cachedModelID]['selectedModel']
    emptyResults = {
        'id': -1,
        'classSummaries': []
    }
    #debug
    print('Received a dataset with ', len(data['features']), ' features to classify.')
    if (len(data['features']) ==0):
        print('There is no feature, empty result set is returned.')
        return emptyResults
    print('Received a dataset with ', len(data['features'][0]['data']), ' rows to classify.')
    if (len(data['features'][0]['data']) ==0):
        print('There is no data, empty result set is returned.')
        return emptyResults
    candidate = model["candidate"]
    features = candidate["features"]
    config = candidate["config"]
    # Keep only the columns the selected model was trained on.
    unlabeled_df = datasetToDataframe(data)
    filtered_input_df = unlabeled_df.filter([f['name'] for f in features])
    # Rebuild the classifier from its persisted parts (learner, featurizer, label model).
    lr, fm, lm = loadTrainedModel(model)
    ac = Classifier(model_configuration=config)
    ac.load_models(lr, fm, lm)
    # Predictions with per-row explanations, joined with input-quality suggestions.
    res_df = ac.predict_explain(input_df=filtered_input_df, topN_features=10)
    reccom_df = ac.input_qlty(input_df=filtered_input_df, topN=10)
    res_df = pd.concat([res_df, reccom_df.filter(["SuggestedFeatures"])], axis=1)
    plCountSeries = res_df.groupby('PredictedLabel').PredictedLabel.count()
    labels = list(plCountSeries.keys())
    classSummaries = []
    for label in labels:
        filtered_res_df = res_df[res_df.PredictedLabel == label]
        entropies = []
        probabilities = []
        results = []
        # NOTE(review): data_index is the dataframe index label; it is used
        # below to index feat['data'] positionally, which assumes the
        # dataframe keeps a default RangeIndex — confirm against
        # datasetToDataframe.
        for data_index, row in filtered_res_df.iterrows():
            entropies.append(float(row.Entropy))
            probsDict, allLabels = unpackProbs(row.Probabilities)
            probabilities.append(float(probsDict[label]))
            contributors = unpackContribs(row.TopContributors)
            recommends = unpackSuggestedFeatures(row.SuggestedFeatures)
            # Echo the classified input row back as a one-row dataset.
            input_data = []
            for feat in data['features']:
                input_data.append({'id': id(), 'feature': feat['feature'], 'data': [feat['data'][data_index]]})
            data_instance = {
                'id': id(),
                'dataset': { 'id': id(),
                    'features': input_data},
                'index': data_index
            }
            classificationResult = {
                'id': id(),
                'allLabels': allLabels,
                'entropy': float(row.Entropy),
                'contributors': contributors,
                'dataInstance': data_instance,
                'predictedLabel': {
                    'id': id(),
                    'label': label,
                    'probability': float(probsDict[label])
                },
                'recommends': recommends
            }
            results.append(classificationResult)
        classSumary = {
            'id': id(),
            'label': label,
            'numInstances': int(plCountSeries[label]),
            'probabilities': probabilities,
            'entropies': entropies,
            'results': results
        }
        classSummaries.append(classSumary)
    batchClassificationResult = {
        'id': id(),
        "classSummaries": classSummaries
    }
    print('Classification time:' + str((datetime.datetime.now() - startedTime).total_seconds()) + ' seconds ')
    return batchClassificationResult
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,141
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/feature_selection/LabelCorrelation.py
|
'''
Rank features by its correlation with label
scikit learn methods:
SelectKBest
SelectPercentile
SelectFdr
SelectFpr
SelectFwe
GenericUnivariateSelect
'''
from sklearn.feature_selection import GenericUnivariateSelect
from sklearn.feature_selection import chi2
import numpy as np
class LabelCorrelation(object):
    """Rank features by their chi-square correlation with the label.

    n -- int > 0: keep the n best features (mode='k_best');
         float in (0, 1]: keep by percentile (mode='percentile').
    """
    def __init__(self, n):
        assert type(n) is int and n > 0 or \
            type(n) is float and n > 0 and n <= 1, "Invalid parameter value %s (number of features or percentile)" % n
        self.__n = n
        self.__model = None
        if type(n) is int and n > 0:
            self.__model = GenericUnivariateSelect(chi2, mode='k_best', param = self.__n)
        elif type(n) is float and n > 0 and n <= 1:
            self.__model = GenericUnivariateSelect(chi2, mode='percentile', param = self.__n)

    def score_features(self, X, Y):
        """Fit the selector on (X, Y) and return per-feature chi2 scores.

        Chi-square requires non-negative feature values, hence the assert.
        """
        someNegative = np.any(X < 0.0)
        assert not someNegative, "Chisquare correlation requires non-negative feature values."
        self.__model.fit(X, Y)
        return self.__model.scores_

    def select_features(self, X):
        """Return X reduced to the selected feature columns.

        Bug fix: the transform result is now returned; previously it was
        computed and discarded so callers always received None (the sibling
        BackwardStepwise.select_features already returned it).
        """
        return self.__model.transform(X)

    def __str__(self):
        return '''
Correlation feature selection using chi2 (either k-best or percentile):
Top features selected: %s
''' % self.__n
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,142
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/test/setup.py
|
import random as rd
from app.logic.helpers import id
# Corpus of sample words used by random_dataentry to synthesize TEXT values.
# The paragraphs below (oil industry, technology, health care excerpts) only
# provide a realistic word distribution; the text is split into a flat word
# list once at import time.
words = '''
The distribution of oil and gas reserves among the world's 50 largest oil companies. The reserves of the privately
owned companies are grouped together. The oil produced by the "supermajor" companies accounts for less than 15% of
the total world supply. Over 80% of the world's reserves of oil and natural gas are controlled by national oil companies.
Of the world's 20 largest oil companies, 15 are state-owned oil companies.
The petroleum industry, also known as the oil industry or the oil patch, includes the global processes of exploration,
extraction, refining, transporting (often by oil tankers and pipelines), and marketing of petroleum products.
The largest volume products of the industry are fuel oil and gasoline (petrol). Petroleum (oil) is also the raw material
for many chemical products, including pharmaceuticals, solvents, fertilizers, pesticides, synthetic fragrances, and plastics.
The industry is usually divided into three major components: upstream, midstream and downstream. Midstream operations are
often included in the downstream category.
Petroleum is vital to many industries, and is of importance to the maintenance of industrial civilization in its
current configuration, and thus is a critical concern for many nations. Oil accounts for a large percentage of the
world’s energy consumption, ranging from a low of 32% for Europe and Asia, to a high of 53% for the Middle East.
Governments such as the United States government provide a heavy public subsidy to petroleum companies, with major
tax breaks at virtually every stage of oil exploration and extraction, including the costs of oil field leases and
drilling equipment.[2]
Principle is a term defined current-day by Merriam-Webster[5] as: "a comprehensive and fundamental law, doctrine,
or assumption", "a primary source", "the laws or facts of nature underlying the working of an artificial device",
"an ingredient (such as a chemical) that exhibits or imparts a characteristic quality".[6]
Process is a term defined current-day by the United States Patent Laws (United States Code Title 34 - Patents)[7]
published by the United States Patent and Trade Office (USPTO)[8] as follows: "The term 'process' means process,
art, or method, and includes a new use of a known process, machine, manufacture, composition of matter, or material."[9]
Application of Science is a term defined current-day by the United States' National Academies of Sciences, Engineering,
and Medicine[12] as: "...any use of scientific knowledge for a specific purpose, whether to do more science; to design
a product, process, or medical treatment; to develop a new technology; or to predict the impacts of human actions."[13]
The simplest form of technology is the development and use of basic tools. The prehistoric discovery of how to control
fire and the later Neolithic Revolution increased the available sources of food, and the invention of the wheel
helped humans to travel in and control their environment. Developments in historic times, including the printing
press, the telephone, and the Internet, have lessened physical barriers to communication and allowed humans to
interact freely on a global scale.
Technology has many effects. It has helped develop more advanced economies (including today's global economy)
and has allowed the rise of a leisure class. Many technological processes produce unwanted by-products known as
pollution and deplete natural resources to the detriment of Earth's environment. Innovations have always influenced
the values of a society and raised new questions of the ethics of technology. Examples include the rise of the
notion of efficiency in terms of human productivity, and the challenges of bioethics.
Philosophical debates have arisen over the use of technology, with disagreements over whether technology improves
the human condition or worsens it. Neo-Luddism, anarcho-primitivism, and similar reactionary movements criticize
the pervasiveness of technology, arguing that it harms the environment and alienates people; proponents of ideologies
such as transhumanism and techno-progressivism view continued technological progress as beneficial to society and
the human condition.
Health care or healthcare is the maintenance or improvement of health via the prevention, diagnosis, and treatment
of disease, illness, injury, and other physical and mental impairments in human beings. Healthcare is delivered by
health professionals (providers or practitioners) in allied health fields. Physicians and physician associates are
a part of these health professionals. Dentistry, midwifery, nursing, medicine, optometry, audiology, pharmacy,
psychology, occupational therapy, physical therapy and other health professions are all part of healthcare. It
includes work done in providing primary care, secondary care, and tertiary care, as well as in public health.
Access to health care may vary across countries, communities, and individuals, largely influenced by social and
economic conditions as well as the health policies in place. Countries and jurisdictions have different policies
and plans in relation to the personal and population-based health care goals within their societies. Healthcare
systems are organizations established to meet the health needs of targeted populations. Their exact configuration
varies between national and subnational entities. In some countries and jurisdictions, health care planning is
distributed among market participants, whereas in others, planning occurs more centrally among governments or
other coordinating bodies. In all cases, according to the World Health Organization (WHO), a well-functioning
healthcare system requires a robust financing mechanism; a well-trained and adequately paid workforce; reliable
information on which to base decisions and policies; and well maintained health facilities and logistics to deliver
quality medicines and technologies.[1]
Health care is conventionally regarded as an important determinant in promoting the general physical and mental
health and well-being of people around the world. An example of this was the worldwide eradication of smallpox
in 1980, declared by the WHO as the first disease in human history to be completely eliminated by deliberate health
care interventions.[4]
'''.split()
def random_feature():
    """Build a feature descriptor with a random index, name and type."""
    feature_types = ['NUMERICAL', 'CATEGORICAL', 'TEXT', 'SET', 'BOOLEAN']
    idx = rd.randint(0, 100)
    # Suffix a slice of a fresh id so names are unique even for equal indices.
    name = 'feature' + str(idx) + id()[:8]
    chosen_type = feature_types[rd.randint(0, len(feature_types) - 1)]
    return {
        'id': id(),
        'index': idx,
        'name': name,
        'type': chosen_type
    }
def random_train_group_feature():
    """Build the fixed TRAIN_GROUP numerical feature with a random index."""
    index = rd.randint(0, 100)
    return {
        'id': id(),
        'index': index,
        'name': 'TRAIN_GROUP',
        'type': 'NUMERICAL'
    }
def random_dataentry(ftype):
    """Generate one random data cell appropriate for the given feature type.

    CATEGORICAL -> {'text': 'string K'}; TEXT -> {'text': <50-300 words>};
    NUMERICAL -> {'numerical': float in [0, 1]}; BOOLEAN -> {'numerical': 0|1};
    anything else is treated as SET -> {'set': [strings]}.
    """
    if ftype == "CATEGORICAL":
        category_count = rd.randint(6, 22)
        value = "string " + str(rd.randint(1, category_count))
        return {'id': id(), 'text': value}
    if ftype == "TEXT":
        picked = [words[rd.randint(0, len(words)-1)] for _ in range(rd.randint(50, 300))]
        value = " ".join(picked)
        return {'id': id(), 'text': value}
    if ftype == "NUMERICAL":
        value = rd.randint(0, 100)*1.0/100
        return {'id': id(), 'numerical': value}
    if ftype == "BOOLEAN":
        value = rd.randint(0, 1)
        return {'id': id(), 'numerical': value}
    # SET (default branch, mirroring the original else)
    category_count = rd.randint(3, 10)
    value = ["string " + str(rd.randint(1, category_count)) for _ in range(rd.randint(1, 3))]
    return {'id': id(), 'set': value}
def random_labeldata(num_classes):
    """Generate a random label cell, one of 'class 1' .. 'class num_classes'."""
    label_text = "class " + str(rd.randint(1, num_classes))
    return {'id': id(), 'text': label_text}
def dataentry_value(data, ftype):
    """Extract the raw value from a data cell according to its feature type.

    CATEGORICAL/TEXT read 'text'; NUMERICAL/BOOLEAN read 'numerical';
    everything else (SET) reads 'set'.
    """
    if ftype in ("CATEGORICAL", "TEXT"):
        return data['text']
    if ftype in ("NUMERICAL", "BOOLEAN"):
        return data['numerical']
    return data['set']
def random_dataset():
    """Assemble a random unlabeled dataset.

    Produces one TRAIN_GROUP column (values 1..5) followed by 10-20 random
    feature columns, each holding 100-200 rows.
    """
    column_count = rd.randint(10, 20)
    row_count = rd.randint(100, 200)
    columns = []
    # The TRAIN_GROUP column always comes first.
    group_feature = random_train_group_feature()
    group_values = []
    for _ in range(row_count):
        group_values.append({'id': id(), 'numerical': rd.randint(1,5)})
    columns.append({
        'id': id(),
        'feature': group_feature,
        'data': group_values
    })
    # Then the randomly typed feature columns.
    for _ in range(column_count):
        feat = random_feature()
        cells = []
        for _ in range(row_count):
            cells.append(random_dataentry(feat['type']))
        columns.append({
            'id': id(),
            'feature': feat,
            'data': cells
        })
    return {'id': id(), 'features': columns}
def random_labeled_dataset(num_classes=None):
    """Build a random dataset together with a label column.

    num_classes -- number of distinct label classes; when omitted, a fresh
    random value in [6, 8] is drawn on every call.

    Fix: the default used to be `num_classes=rd.randint(6, 8)` in the
    signature, which Python evaluates exactly once at import time — every
    default call in a process shared the same fixed class count. Drawing
    inside the body restores per-call randomness while keeping the
    parameter optional and explicit values unchanged.
    """
    if num_classes is None:
        num_classes = rd.randint(6, 8)
    dataset = random_dataset()
    num_features = len(dataset['features'])
    label = {
        'id': id(),
        'index': num_features,
        'name': "Label",
        'type': "LABEL"
    }
    data = []
    num_entries = len(dataset['features'][0]['data'])
    for _ in range(num_entries):
        data.append(random_labeldata(num_classes=num_classes))
    feature_data = {'id': id(), 'feature': label, 'data': data}
    #Must be given unique ID.
    return {'id': id(),
        'data': dataset,
        'label': feature_data}
def random_class_weights(num_features, labels):
    """Build random per-class weight records, one per label.

    Each record carries an intercept and, for every feature, a list of
    1-10 identical random weights.
    """
    per_class = []
    for lbl in labels:
        bias = rd.randint(0, 100)*1.0/100
        per_feature = []
        for _ in range(num_features):
            weight_count = rd.randint(1, 10)
            per_feature.append({'id': id(), 'feature': random_feature(), 'weights': [rd.randint(0, 100)*1.0/100] * weight_count })
        per_class.append({'id': id(), 'weights': per_feature, 'class': lbl, 'intercept': bias})
    return per_class
def random_noop():
    """Return a bare record containing only a freshly generated id."""
    record = {'id': id()}
    return record
def random_min_max_scaler():
    """Build a random MinMaxScaler state record.

    maxValue/dataMax are offset by a random integer so they exceed their
    corresponding minimums.
    """
    record_id = id()
    min_value = rd.random()
    max_value = rd.randint(1, 10) + rd.random()
    scale = rd.random()
    data_min = rd.random()
    data_max = rd.randint(1, 10) + rd.random()
    return {
        'id': record_id,
        'minValue': min_value,
        'maxValue': max_value,
        'scale': scale,
        'dataMin': data_min,
        'dataMax': data_max
    }
def random_label_binarizer(num_classes):
    """Build a label-binarizer state with labels 'Label 1'..'Label num_classes'."""
    record_id = id()
    names = ["Label " + str(n+1) for n in range(num_classes)]
    return {'id': record_id, 'labels': names}
def random_multilabel_binarizer(num_classes):
    """Build a multilabel-binarizer state with labels 'Label 1'..'Label num_classes'."""
    record_id = id()
    names = ["Label " + str(n+1) for n in range(num_classes)]
    return {'id': record_id, 'labels': names}
def random_label_encoder(num_classes):
    """Build a label-encoder state with labels 'Label 1'..'Label num_classes'."""
    record_id = id()
    names = ["Label " + str(n+1) for n in range(num_classes)]
    return {'id': record_id, 'labels': names}
def random_tfidf_vectorizer():
    """Build a random TF-IDF vectorizer state.

    Produces 100-600 vocabulary entries ('termN' mapped to feature index
    100+N), matching idf weights, and a fixed stopword list.
    """
    vocab_size = rd.randint(100, 600)
    vocab = []
    idf_weights = []
    for term_index in range(vocab_size):
        entry_term = "term" + str(term_index)
        entry_fidx = 100 + term_index
        vocab.append({'id': id(), 'term': entry_term, 'featureIdx': entry_fidx})
        idf_weights.append(rd.random())
    return {
        'id': id(),
        'vocab': vocab,
        'idf': idf_weights,
        'stopwords': ['the', 'this', 'a', 'an', 'those', 'these', 'at', 'on']
    }
def random_doc_to_vector():
    """Build a random Doc2Vector model descriptor (placeholder model path)."""
    record_id = id()
    word_limit = rd.randint(1000, 10000)
    return {
        'id': record_id,
        'modelFile': 'fullpathText2VecBinaryFileName',
        'maxNumWords': word_limit
    }
def random_model_performance(num_classes):
    """Build a random model-performance record.

    For each of num_classes labels, generates a full row of confusion
    buckets (one per predicted label) plus random precision/recall/f1,
    then tops it off with random aggregate metrics.
    """
    per_class = []
    for label_index in range(num_classes):
        true_label = "Label " + str(label_index+1)
        confusion = []
        instance_total = 0
        for predicted_index in range(num_classes):
            bucket_size = rd.randint(20, 80)
            instance_total += bucket_size
            confusion.append({
                'id': id(),
                'trueLabel': true_label,
                'predictedLabel': "Label " + str(predicted_index+1),
                'numInstances': bucket_size,
                'weight': rd.random(),
            })
        per_class.append({
            'id': id(),
            'label': true_label,
            'weight': rd.random(),
            'numInstances': instance_total,
            'classifiedAs': confusion,
            'recall': rd.random(),
            'precision': rd.random(),
            'f1': rd.random()
        })
    return {
        'id': id(),
        'classPerformances': per_class,
        'numInstances': rd.randint(50, 100),
        'avgRecall': rd.random(),
        'avgPrecision': rd.random(),
        'avgF1': rd.random()
    }
def random_batch_classification_results():
    """Build a random BatchClassificationResult over a fresh random dataset.

    Uses 2-5 classes with a uniform probability for every predicted-label
    record, and 3-10 result instances per class, each echoing one random
    input row as a one-row dataset.
    """
    dataset = random_dataset()
    num_classes = rd.randint(2, 5)
    # Uniform probability shared by every predicted-label record.
    probabilities = [1.0/num_classes] * num_classes
    classes = ["Class " + str(idx+1) for idx in range(num_classes)]
    allPredictedLabels = [{
        'id': id(),
        'label': lbl,
        'probability': prob
    } for (lbl, prob) in zip(classes, probabilities)]
    class_summaries = []
    for clsidx in range(num_classes):
        num_instances = rd.randint(3, 10)
        results = []
        for instidx in range(num_instances):
            # Pick a random row of the dataset and repackage it as a one-row dataset.
            data_idx = rd.randint(0, len(dataset['features'][0]['data'])-1)
            input_data = []
            for feat in dataset['features']:
                input_data.append({'id': id(), 'feature': feat['feature'], 'data': [feat['data'][data_idx]]})
            data_instance = {'id': id(), 'features': input_data}
            results.append({
                'id': id(),
                'dataInstance': {'id': id(), 'dataset': data_instance, 'index': instidx},
                'allLabels': allPredictedLabels,
                'predictedLabel': allPredictedLabels[clsidx],
                'entropy': rd.random(),
                'contributors': [{'id': id(), 'featureName': 'topology', 'featureValue': 'topsides', 'weight': .68}],
                'recommends': [{'id': id(), 'featureName': 'topology', 'featureValue': 'subsea', 'weight': .86}]
            })
        class_summaries.append({
            'id': id(),
            'label': classes[clsidx],
            'numInstances': num_instances,
            'probabilities': [1.0/num_classes] * num_instances,
            'entropies': [rd.random()] * num_instances,
            'results': results
        })
    return {
        'id': id(),
        'classSummaries': class_summaries
    }
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,143
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/feature_selection/BackwardStepwise.py
|
'''
Backward stepwise feature selection
'''
from sklearn.feature_selection import RFE
class BackwardStepwise(object):
    """Backward stepwise feature selection via recursive feature elimination (RFE).

    Wraps sklearn's RFE: repeatedly fits *estimator* and drops the *step*
    lowest-ranked features until only *n* remain.
    """
    def __init__(self, n, estimator, step = 100):
        # n: number of features to keep; step: features eliminated per RFE round.
        assert type(n) is int and n > 0, "Invalid parameter type or value %s (number of features)" % n
        # BUG FIX: the message previously interpolated `n` instead of `step`,
        # so an invalid step reported the (valid) feature count.
        assert type(step) is int and step > 0 , "Invalid parameter type or value %s (step)" % step
        self.__estimator = estimator
        self.__n = n
        self.__step = step
        self.__model = RFE(self.__estimator, n_features_to_select = self.__n, step = self.__step)
    def score_features(self, X, Y):
        """Fit the RFE model and return the ranking_ array (1 = selected)."""
        self.__model.fit(X, Y)
        return self.__model.ranking_
    def select_features(self, X):
        """Project X onto the selected feature subset (requires a prior fit)."""
        return self.__model.transform(X)
    def __str__(self):
        return '''
        Backward stepwise feature selection:
            Top features selected: %s
            Step size: %s
            Estimator: %s
        ''' % (self.__n, self.__step, self.__estimator)
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,144
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/test/model_selection.py
|
import unittest
from app.logic.helpers import *
from app.test.setup import *
from app.logic.model_selection import model_selection
from app.logic.train import train
# Candidate model families exercised by the test (logistic regression and linear SVC).
modelTypes = [Classifier.LR_MODEL_TYPE, Classifier.SVC_MODEL_TYPE]
class ModelSelectionTest(unittest.TestCase):
    """End-to-end test: train several randomized candidates, then verify every
    model-selection method returns a fully-populated selection result."""
    def test_model_selection(self):
        labeled_dataset = random_labeled_dataset()
        model_sel_params = defaultModelSelection()
        trained_models = []
        # Train 3-8 candidates with randomized type, features, and config.
        for _ in range(rd.randint(3, 8)):
            candidate = defaultCandidate(labeled_dataset)[0]
            candidate['config']['type'] = modelTypes[rd.randint(0, len(modelTypes)-1)]
            candidate['config']['C'] = int(rd.random()*100)
            # Keep training fast: two iterations are enough for this smoke test.
            candidate['config']['max_iter'] = 2
            if rd.random() > 0.5:
                #remove a random feature (must not be the label, assumed to be the last entry)
                rand_fidx = rd.randint(0, len(candidate['features']) - 2)
                candidate['features'] = candidate['features'][:rand_fidx] + candidate['features'][rand_fidx+1:]
                candidate['featurizers'] = candidate['featurizers'][:rand_fidx] + candidate['featurizers'][rand_fidx+1:]
            #randomize configuration
            candidate['config']['penalty'] = "L" + str(rd.randint(1,2))
            # min_df capped at 0.20 and max_df floored at 0.80 so the tfidf
            # vectorizer never drops all terms on a tiny random dataset.
            candidate['config']['min_df'] = min(rd.random(), .20)
            candidate['config']['max_df'] = max(rd.random(), .80)
            candidate['config']['fitIntercept'] = rd.random() < 0.5
            candidate['config']['weighting'] = 'NONE' if rd.random() < 0.5 else 'BALANCED'
            candidate['config']['tf'] = 'LINEAR' if rd.random() < 0.5 else 'SUBLINEAR'
            candidate['config']['df'] = 'SMOOTH' if rd.random() < 0.5 else 'DEFAULT'
            task = {
                'data': labeled_dataset,
                'candidate': candidate,
                'modelSelectionParams': model_sel_params
            }
            model = train(training_task=task)
            trained_models.append(model)
        # Every supported selection strategy must yield a complete result structure.
        for method in ['BEST', 'KNEE_POINT', 'ONE_STDEV', 'TWO_STDEV']:
            model_sel_params['method'] = method
            msr = model_selection(models=trained_models, model_sel=model_sel_params)
            self.assertIn('modelSelection', msr)
            self.assertIn('learnedModels', msr)
            self.assertIn('selectedModel', msr)
            self.assertIn('type', msr['selectedModel'])
            self.assertIn('candidate', msr['selectedModel'])
            self.assertIn('labels', msr['selectedModel'])
            self.assertIn('learnedWeights', msr['selectedModel'])
            self.assertIn('learnedFeaturizers', msr['selectedModel'])
            self.assertIn('labelEncoder', msr['selectedModel'])
            self.assertIn('degreeOfFreedom', msr['selectedModel'])
            self.assertIn('performance', msr['selectedModel'])
if __name__ == "__main__":
    unittest.main()
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,145
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/classifier/LR.py
|
'''
Logistic Regression
'''
from sklearn.linear_model import LogisticRegression
import numpy as np
class LR(object):
    """Thin wrapper around sklearn's LogisticRegression.

    Adds: (a) a `coef_` attribute exposed for RFE-based backward feature
    selection, (b) get_params/set_params that serialize the *learned* state
    (classes, coefficients, intercepts) to plain lists, and (c) helpers that
    present binary models as if they had one weight row per class.
    """
    def __init__(self, penalty=None, dual=None, solver=None, multi_class=None, class_weight=None, fit_intercept=None):
        self.__penalty = penalty
        self.__dual = dual
        self.__solver = solver
        self.__multi_class = multi_class
        self.__class_weight = class_weight
        self.__fit_intercept = fit_intercept
        #required by backward feature selection
        self.coef_ = None
        # With any core hyperparameter missing the model is left unbuilt; the
        # caller is then expected to restore state via set_params.
        if penalty is None or dual is None or solver is None or multi_class is None or fit_intercept is None:
            self.__model = None
        else:
            self.__model = LogisticRegression(penalty=self.__penalty, dual=self.__dual, solver=self.__solver, multi_class=self.__multi_class,
                                              class_weight=self.__class_weight, fit_intercept=self.__fit_intercept, verbose=0)
    def fit(self, X, Y):
        """Fit the underlying model and mirror its coef_ for RFE."""
        self.__model.fit(X, Y)
        #required by backward feature selection
        self.coef_ = self.__model.coef_
    def predict(self, X):
        """Predict class labels for X."""
        return self.__model.predict(X)
    def predict_proba(self, X):
        """Predict per-class probabilities for X."""
        return self.__model.predict_proba(X)
    def get_weights(self, class_no):
        """Return the weight vector for one class (binary models are expanded
        to two identical rows by get_all_weights)."""
        return self.get_all_weights()[class_no]
    def get_intercepts(self):
        """Return one intercept per class; zeros when no intercept was fit.

        NOTE(review): in the binary branch the single intercept is duplicated
        so callers can index by class number uniformly.
        """
        if self.__fit_intercept:
            if len(self.__model.classes_) > 2:
                return self.__model.intercept_
            else:
                return np.array([self.__model.intercept_, self.__model.intercept_])
        else:
            return np.array([0.0] * len(self.__model.classes_))
    def get_all_weights(self):
        """Return coefficient rows, one per class (binary row duplicated)."""
        if len(self.__model.classes_) > 2:
            return self.__model.coef_
        else:
            return np.array([self.__model.coef_[0], self.__model.coef_[0]])
    def get_params(self, deep = True):
        """Serialize hyperparameters plus (if trained) learned state to plain
        Python lists, suitable for JSON transport and later set_params."""
        params = {
            'penalty': self.__penalty,
            'dual': self.__dual,
            'solver': self.__solver,
            'multi_class': self.__multi_class,
            'class_weight': self.__class_weight,
            'fit_intercept': self.__fit_intercept,
        }
        #only available after trained
        if hasattr(self.__model, "coef_"):
            params.update({
                'classes': self.__model.classes_.tolist(),
            })
            if self.__fit_intercept:
                if len(self.__model.classes_) > 2:
                    params.update({
                        'intercept': self.__model.intercept_.tolist(),
                    })
                else:
                    # binary: duplicate the single intercept, one entry per class
                    params.update({
                        'intercept': [self.__model.intercept_[0], self.__model.intercept_[0]],
                    })
            else:
                params.update({
                    'intercept': [0.0] * len(self.__model.classes_),
                })
            if len(self.__model.classes_) > 2:
                params.update({
                    'coef': self.__model.coef_.tolist(),
                })
            else:
                # binary: coef_ is shape (1, n); it is stored twice so that
                # set_params can take params['coef'][0] to round-trip exactly.
                params.update({
                    'coef': [self.__model.coef_.tolist(), self.__model.coef_.tolist()],
                })
        return params
    def set_params(self, **params):
        """Restore hyperparameters and learned state produced by get_params.

        NOTE(review): assumes self.__model is already constructed; 'classes'
        must be applied before 'coef'/'intercept' since the binary/multiclass
        branches below read self.__model.classes_.
        """
        if 'classes' in params:
            self.__model.classes_ = np.asarray(params['classes'], dtype=np.int32)
        if 'coef' in params:
            if len(self.__model.classes_) > 2:
                self.__model.coef_ = np.asarray(params['coef'], dtype=np.float64)
            else:
                # undo the binary duplication performed in get_params
                self.__model.coef_ = np.asarray(params['coef'][0], dtype=np.float64)
        if 'intercept' in params:
            if len(self.__model.classes_) > 2:
                self.__model.intercept_ = np.asarray(params['intercept'], dtype=np.float64)
            else:
                # keep only the first (they are duplicates in the binary case)
                self.__model.intercept_ = np.asarray(params['intercept'][:1], dtype=np.float64)
        if 'penalty' in params:
            self.__penalty = params['penalty']
        if 'dual' in params:
            self.__dual = params['dual']
        if 'solver' in params:
            self.__solver = params['solver']
        if 'multi_class' in params:
            self.__multi_class = params['multi_class']
        if 'class_weight' in params:
            self.__class_weight = params['class_weight']
        if 'fit_intercept' in params:
            self.__fit_intercept = params['fit_intercept']
        return
    def labels(self):
        """Class labels known to the trained model."""
        return self.__model.classes_
    def num_classes(self):
        """Number of classes."""
        return len(self.__model.classes_)
    def num_weights(self):
        """Number of features (length of one weight row)."""
        all_weights = self.get_all_weights()
        return len(all_weights[0])
    def __str__(self):
        return 'Logistic Regression.'
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,146
|
maana-io/IRIS-Classification
|
refs/heads/master
|
/app/core/main/featurizer/Featurizer.py
|
'''
Featurizer
'''
import numpy as np
import pandas as pd
from scipy.sparse import issparse
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import *
from app.core.main.featurizer.Doc2Vector import Doc2Vector
class Featurizer(object):
    """Per-column featurization of a pandas DataFrame.

    Each column gets its own sklearn-style transformer chosen by the column's
    declared type (numeric -> MinMaxScaler, text -> TfidfVectorizer, etc.).
    The transformed columns are concatenated horizontally into one dense
    feature matrix, and feature offsets are tracked so that a global feature
    index can be mapped back to a human-readable "column::value" name.
    """
    # Semantic column types...
    FT_Numeric = "NUMERICAL"
    FT_String = "CATEGORICAL"
    FT_Text = "TEXT"
    FT_Set = "SET"
    FT_Boolean = "BOOLEAN"
    FT_Label = "LABEL"
    FT_TXT2V = "TEXT2VEC"
    FT_UNDEFINED = "UNDEFINED"
    # ...and their transformer-name aliases (both spellings are accepted).
    FT_NOOP = "NOOP"
    FT_MIN_MAX_SCALER = "MIN_MAX_SCALER"
    FT_LABEL_BINARIZER = "LABEL_BINARIZER"
    FT_TFIDF_VECTORIZER = "TFIDF_VECTORIZER"
    FT_MULTILABEL_BINARIZER = "MULTILABEL_BINARIZER"
    FT_LABEL_ENCODER = "LABEL_ENCODER"
    FT_TEXT_TO_VECTOR = "TEXT_TO_VECTOR"
    supported_types = [FT_Numeric, FT_String, FT_Text, FT_TXT2V, FT_Set, FT_Boolean, FT_Label, FT_UNDEFINED,
                       FT_NOOP, FT_MIN_MAX_SCALER, FT_LABEL_BINARIZER, FT_TFIDF_VECTORIZER, FT_MULTILABEL_BINARIZER,
                       FT_LABEL_ENCODER, FT_TEXT_TO_VECTOR]
    def __init__(self, field_names, field_types, max_df, min_df, stop_words,
                 sublinear_tf, smooth_idf,
                 ngram_range, tokenizer):
        # field_names/field_types are parallel lists, one entry per DataFrame column.
        self.__field_names = field_names
        self.__field_types = field_types
        self.__models = []                  # one fitted transformer per column
        self.__models_data = []             # per-column metadata (e.g. tfidf index->word)
        self.__featurizer_end_offset = []   # cumulative end column of each transformer's output
        # Remaining parameters configure the TfidfVectorizer only.
        self.__tokenizer = tokenizer
        self.__max_df = max_df
        self.__min_df = min_df
        self.__stop_words = stop_words
        self.__sublinear_tf = sublinear_tf
        self.__smooth_idf = smooth_idf
        self.__ngram_range = ngram_range
        self.__feats = None
    def __type_featurizer_map(self, type):
        """Return a fresh, unfitted transformer for the given column type
        (None for NOOP/UNDEFINED columns, which pass through untouched)."""
        assert type in Featurizer.supported_types, "Invalid type %s, supported types are %s" % (type, ','.join(Featurizer.supported_types))
        if type in [Featurizer.FT_Numeric, Featurizer.FT_MIN_MAX_SCALER]:
            return MinMaxScaler()
        elif type in [Featurizer.FT_Boolean, Featurizer.FT_String, Featurizer.FT_LABEL_BINARIZER]:
            return LabelBinarizer()
        elif type in [Featurizer.FT_Text, Featurizer.FT_TFIDF_VECTORIZER]:
            return TfidfVectorizer(input='content', max_df=self.__max_df, min_df=self.__min_df, stop_words=self.__stop_words,
                                   decode_error='ignore', sublinear_tf=self.__sublinear_tf, smooth_idf=self.__smooth_idf,
                                   ngram_range = self.__ngram_range, tokenizer = self.__tokenizer)
        elif type in [Featurizer.FT_Set, Featurizer.FT_MULTILABEL_BINARIZER]:
            return MultiLabelBinarizer()
        elif type in [Featurizer.FT_Label, Featurizer.FT_LABEL_ENCODER]:
            return LabelEncoder()
        elif type in [Featurizer.FT_TXT2V, Featurizer.FT_TEXT_TO_VECTOR]:
            return Doc2Vector()
        elif type in [Featurizer.FT_UNDEFINED, Featurizer.FT_NOOP]:
            return None
    def fit(self, data):
        '''
        Fit one transformer per column and record each transformer's end
        offset in the concatenated feature space.
        :param data: must be a pandas DataFrame
        :return: None
        '''
        assert isinstance(data, pd.DataFrame), "Expect a DataFrame object"
        self.__models = []
        self.__models_data = []
        self.__featurizer_end_offset = []
        feat_offset = 0
        # NOTE(review): DataFrame.iteritems() was removed in pandas 2.0; this
        # code assumes an older pandas — switch to .items() when upgrading.
        for (fieldNo, (fieldName, fieldData)) in enumerate(data.iteritems()):
            #debug
            print('Featurizer: fitting ' + fieldName)
            m = self.__type_featurizer_map(self.__field_types[fieldNo])
            if self.__field_types[fieldNo] in [Featurizer.FT_Numeric, Featurizer.FT_MIN_MAX_SCALER]:
                # scalers expect a 2-D column, hence the reshape; emits 1 feature
                m.fit(fieldData.values.reshape(-1, 1))
                self.__models.append(m)
                feat_offset += 1
                self.__models_data.append(None)
            elif self.__field_types[fieldNo] in [Featurizer.FT_String, Featurizer.FT_LABEL_BINARIZER]:
                # one-hot: one feature per observed class
                m.fit(fieldData.values)
                self.__models.append(m)
                feat_offset += len(self.__models[-1].classes_)
                self.__models_data.append(None)
            elif self.__field_types[fieldNo] in [Featurizer.FT_Text, Featurizer.FT_TFIDF_VECTORIZER]:
                # one feature per vocabulary term; keep an index->word map for naming
                m.fit(fieldData.values)
                self.__models.append(m)
                feat_offset += len(self.__models[-1].vocabulary_)
                self.__models_data.append(dict((fidx, w) for (w, fidx) in self.__models[-1].vocabulary_.items()))
            elif self.__field_types[fieldNo] in [Featurizer.FT_Set, Featurizer.FT_MULTILABEL_BINARIZER]:
                # multi-hot: one feature per class; remember classes for naming
                m.fit(fieldData.values)
                self.__models.append(m)
                feat_offset += len(self.__models[-1].classes_)
                self.__models_data.append(self.__models[-1].classes_)
            elif self.__field_types[fieldNo] in [Featurizer.FT_Boolean]:
                # binarized to a single 0/1 column
                m.fit(fieldData.values)
                self.__models.append(m)
                feat_offset += 1
                self.__models_data.append(None)
            elif self.__field_types[fieldNo] in [Featurizer.FT_Label, Featurizer.FT_LABEL_ENCODER]:
                # target column: encoded to a single integer feature
                m.fit(fieldData.values)
                self.__models.append(m)
                feat_offset += 1
                self.__models_data.append(None)
            elif self.__field_types[fieldNo] in [Featurizer.FT_TXT2V, Featurizer.FT_TEXT_TO_VECTOR]:
                # dense document embedding of fixed width
                m.fit(fieldData.values)
                self.__models.append(m)
                feat_offset += m.vector_size()
                self.__models_data.append(None)
            elif self.__field_types[fieldNo] in [Featurizer.FT_UNDEFINED, Featurizer.FT_NOOP]:
                # pass-through column (m is None); occupies one feature slot
                self.__models.append(m)
                feat_offset += 1
                self.__models_data.append(None)
            self.__featurizer_end_offset.append(feat_offset)
    def transform(self, data):
        '''
        Transform each column with its fitted model and horizontally stack the
        results into a single dense matrix.
        :param data: must be a pandas DataFrame
        :return: numpy ndarray
        '''
        assert isinstance(data, pd.DataFrame), "Expect a DataFrame object"
        self.__feats = None
        this_col_feats = None
        for (fieldNo, (fieldName, fieldData)) in enumerate(data.iteritems()):
            #debug
            print('Featurizer: transforming ' + fieldName)
            m = self.__models[fieldNo]
            if self.__field_types[fieldNo] in [Featurizer.FT_Numeric, Featurizer.FT_MIN_MAX_SCALER]:
                this_col_feats = m.transform(fieldData.values.reshape(-1, 1))
            elif self.__field_types[fieldNo] in [Featurizer.FT_String, Featurizer.FT_LABEL_BINARIZER]:
                this_col_feats = m.transform(fieldData.values)
                #transform binary encoder to one-hot encoder in case there are 2 classes
                if len(m.classes_) == 2 and this_col_feats.shape[1] == 1:
                    this_col_feats = np.c_[1 - this_col_feats, this_col_feats]
            elif self.__field_types[fieldNo] in [Featurizer.FT_Text, Featurizer.FT_TFIDF_VECTORIZER]:
                this_col_feats = m.transform(fieldData.values)
            elif self.__field_types[fieldNo] in [Featurizer.FT_Set, Featurizer.FT_MULTILABEL_BINARIZER]:
                this_col_feats = m.transform(fieldData.values)
            elif self.__field_types[fieldNo] in [Featurizer.FT_Boolean]:
                this_col_feats = m.transform(fieldData.values)
            elif self.__field_types[fieldNo] in [Featurizer.FT_Label, Featurizer.FT_LABEL_ENCODER]:
                this_col_feats = m.transform(fieldData.values)
            elif self.__field_types[fieldNo] in [Featurizer.FT_TXT2V, Featurizer.FT_TEXT_TO_VECTOR]:
                this_col_feats = m.transform(fieldData.values)
            elif self.__field_types[fieldNo] in [Featurizer.FT_UNDEFINED, Featurizer.FT_NOOP]:
                # pass-through: raw column values become the feature
                this_col_feats = fieldData.values
            # densify sparse tfidf output so np.c_ stacking works uniformly
            if issparse(this_col_feats):
                this_col_feats = this_col_feats.todense()
            if self.__feats is None:
                self.__feats = this_col_feats
            else:
                self.__feats = np.c_[self.__feats, this_col_feats]
        return self.__feats
    def inverse_transform(self, data):
        '''
        Use only for LABEL data.
        :param data: array like
        :return: array like
        '''
        assert len(self.__models)==1, "Expect to have exactly one model."
        lbls = self.__models[0].inverse_transform(data)
        return lbls
    def fit_transform(self, data):
        """Convenience: fit then transform the same DataFrame."""
        self.fit(data)
        return self.transform(data)
    def set_params(self, models, model_data, featurizer_offsets, tokenizer):
        """Restore fitted state previously captured by get_params."""
        self.__models = models
        self.__models_data = model_data
        self.__featurizer_end_offset = featurizer_offsets
        self.__tokenizer = tokenizer
    def get_params(self):
        """Capture fitted state for serialization (mirror of set_params)."""
        return self.__models, self.__models_data, self.__featurizer_end_offset, self.__tokenizer
    def __remove_special_chars(self, inp_str):
        # feature names use ':'/'='/','/';' as separators downstream, so strip them here
        return inp_str.replace(':', ' ').replace('=', ' ').replace(',', ' ').replace(';', ' ')
    def get_feature(self, featNo):
        """Map a global feature index to a readable name ("field" or "field::value")."""
        assert featNo < self.__featurizer_end_offset[-1], "Feature number %d is out of range!" % featNo
        begin_offset = 0
        # walk the per-column end offsets until the owning column is found
        for(fieldNo, m) in enumerate(self.__models):
            if self.__featurizer_end_offset[fieldNo] > featNo:
                feat_offset = featNo - begin_offset
                if self.__field_types[fieldNo] in [Featurizer.FT_Numeric, Featurizer.FT_MIN_MAX_SCALER]:
                    return self.__remove_special_chars(self.__field_names[fieldNo])
                elif self.__field_types[fieldNo] in [Featurizer.FT_String, Featurizer.FT_LABEL_BINARIZER]:
                    return self.__remove_special_chars(self.__field_names[fieldNo]) + "::" + \
                           self.__remove_special_chars(self.__models[fieldNo].classes_[feat_offset])
                elif self.__field_types[fieldNo] in [Featurizer.FT_Text, Featurizer.FT_TFIDF_VECTORIZER]:
                    return self.__remove_special_chars(self.__field_names[fieldNo]) + "::" + \
                           self.__remove_special_chars(self.__models_data[fieldNo][feat_offset])
                elif self.__field_types[fieldNo] in [Featurizer.FT_Set, Featurizer.FT_MULTILABEL_BINARIZER]:
                    return self.__remove_special_chars(self.__field_names[fieldNo]) + "::" + \
                           self.__remove_special_chars(self.__models_data[fieldNo][feat_offset])
                elif self.__field_types[fieldNo] in [Featurizer.FT_Boolean]:
                    return self.__remove_special_chars(self.__field_names[fieldNo])
                elif self.__field_types[fieldNo] in [Featurizer.FT_Label, Featurizer.FT_LABEL_ENCODER]:
                    return self.__remove_special_chars(self.__field_names[fieldNo])
                elif self.__field_types[fieldNo] in [Featurizer.FT_TXT2V, Featurizer.FT_TEXT_TO_VECTOR]:
                    return self.__remove_special_chars(self.__field_names[fieldNo]) + "::" + \
                           self.__remove_special_chars(str(feat_offset))
                elif self.__field_types[fieldNo] in [Featurizer.FT_UNDEFINED, Featurizer.FT_NOOP]:
                    return self.__remove_special_chars(self.__field_names[fieldNo])
            else:
                begin_offset = self.__featurizer_end_offset[fieldNo]
    def get_all_features(self):
        """Return the readable names of all output features, in matrix order."""
        f_names = []
        for(fieldNo, m) in enumerate(self.__models):
            if self.__field_types[fieldNo] in [Featurizer.FT_Numeric, Featurizer.FT_MIN_MAX_SCALER]:
                f_names.append(self.__remove_special_chars(self.__field_names[fieldNo]))
            elif self.__field_types[fieldNo] in [Featurizer.FT_String, Featurizer.FT_LABEL_BINARIZER]:
                f_names += [self.__remove_special_chars(self.__field_names[fieldNo]) + "::" +
                            self.__remove_special_chars(cls) for cls in self.__models[fieldNo].classes_]
            elif self.__field_types[fieldNo] in [Featurizer.FT_Text, Featurizer.FT_TFIDF_VECTORIZER]:
                # sort the index->word map by feature index to match column order
                f_names += [self.__remove_special_chars(self.__field_names[fieldNo]) + "::" +
                            self.__remove_special_chars(word) for (widx, word) in \
                            sorted(self.__models_data[fieldNo].items(), key = lambda widx_w: widx_w[0])]
            elif self.__field_types[fieldNo] in [Featurizer.FT_Set, Featurizer.FT_MULTILABEL_BINARIZER]:
                f_names += [self.__remove_special_chars(self.__field_names[fieldNo]) + "::" +
                            self.__remove_special_chars(cls) for cls in self.__models_data[fieldNo]]
            elif self.__field_types[fieldNo] in [Featurizer.FT_Boolean]:
                f_names.append(self.__remove_special_chars(self.__field_names[fieldNo]))
            elif self.__field_types[fieldNo] in [Featurizer.FT_Label, Featurizer.FT_LABEL_ENCODER]:
                f_names.append(self.__remove_special_chars(self.__field_names[fieldNo]))
            elif self.__field_types[fieldNo] in [Featurizer.FT_TXT2V, Featurizer.FT_TEXT_TO_VECTOR]:
                f_names += [self.__remove_special_chars(self.__field_names[fieldNo]) + "::" +
                            self.__remove_special_chars(str(fidx)) for fidx in range(self.__models[fieldNo].vector_size())]
            elif self.__field_types[fieldNo] in [Featurizer.FT_UNDEFINED, Featurizer.FT_NOOP]:
                f_names.append(self.__remove_special_chars(self.__field_names[fieldNo]))
        return f_names
    def get_schema(self):
        """Return the declared column types."""
        return self.__field_types
    def __str__(self):
        return '''
        Featurizer
            Supported data types: %s
            For Tfidf vectorizer:
                Min DF: %s
                Max DF: %s
                Sublinear TF: %s
                Smooth IDF: %s
                Stop words: %s
                Tokenizer: %s
                ngrams range: %s
        ''' % (Featurizer.supported_types, self.__min_df, self.__max_df, self.__sublinear_tf, self.__smooth_idf,
               self.__stop_words, self.__tokenizer, self.__ngram_range)
|
{"/app/resolvers.py": ["/app/logic/helpers.py", "/app/logic/classify.py", "/app/logic/feature_selection.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/logic/helpers.py": ["/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/core/main/tokenizer/LemmaTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/core/test/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/Classifier.py"], "/app/test/classify.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/classify.py", "/app/logic/train.py"], "/app/logic/model_selection.py": ["/app/logic/train.py", "/app/logic/helpers.py", "/app/settings.py"], "/app/logic/train.py": ["/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py", "/app/core/main/featurizer/Doc2Vector.py", "/app/core/main/Classifier.py", "/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", "/app/logic/helpers.py"], "/app/core/main/featurizer/Doc2Vector.py": ["/app/settings.py"], "/app/test/train.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/train.py", "/app/core/main/Classifier.py"], "/app/core/main/tokenizer/PorterTokenizer.py": ["/app/core/main/tokenizer/BaseTokenizer.py"], "/app/test/feature_selection.py": ["/app/test/setup.py", "/app/logic/feature_selection.py"], "/app/core/main/tokenizer/__init__.py": ["/app/settings.py"], "/app/core/main/Classifier.py": ["/app/core/main/classifier/LR.py", "/app/core/main/classifier/LSVC.py", "/app/core/main/classifier/Ensemble.py", "/app/core/main/featurizer/Featurizer.py", 
"/app/core/main/feature_selection/LabelCorrelation.py", "/app/core/main/feature_selection/BackwardStepwise.py", "/app/core/main/evaluator/ModelEvaluator.py", "/app/core/main/tokenizer/BaseTokenizer.py"], "/app/logic/feature_selection.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/core/main/tokenizer/BaseTokenizer.py", "/app/core/main/tokenizer/PorterTokenizer.py", "/app/core/main/tokenizer/LemmaTokenizer.py"], "/app/test/helpers.py": ["/app/logic/helpers.py"], "/app/logic/classify.py": ["/app/logic/helpers.py", "/app/core/main/Classifier.py", "/app/logic/train.py", "/app/logic/model_selection.py"], "/app/test/setup.py": ["/app/logic/helpers.py"], "/app/test/model_selection.py": ["/app/logic/helpers.py", "/app/test/setup.py", "/app/logic/model_selection.py", "/app/logic/train.py"], "/app/core/main/featurizer/Featurizer.py": ["/app/core/main/featurizer/Doc2Vector.py"]}
|
17,148
|
Affirm/promise
|
refs/heads/master
|
/tests/test_issues.py
|
# This tests reported issues in the Promise package
from promise import Promise
def test_issue_11():
    # Regression test for https://github.com/syrusakbary/promise/issues/11
    def make_promise(value):
        """Resolve with *value* when positive, otherwise reject with it."""
        def executor(resolve, reject):
            if value > 0:
                resolve(value)
            else:
                reject(Exception(value))
        return Promise(executor)

    assert make_promise(42).then(lambda v: v).value == 42
    rejected = make_promise(-42).then(lambda v: v, lambda err: str(err))
    assert rejected.value == "-42"
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,149
|
Affirm/promise
|
refs/heads/master
|
/tests/test_complex_threads.py
|
import time
import concurrent.futures
from promise import Promise
# Shared thread pool. The very large cap exists because promise_factorial
# blocks one worker per recursion level; workers are created lazily, so the
# large max_workers value costs nothing up front.
executor = concurrent.futures.ThreadPoolExecutor(max_workers=40000)
def combine(r, n):
    """Fold step for the promise-chained factorial: multiply the
    sub-result *r* by the current term *n*."""
    product = n * r
    return product
def promise_factorial(n):
    """Compute n! by recursively submitting the sub-problem to the thread
    pool and chaining the multiplication through a promise callback."""
    # base case: 0! == 1! == 1 (returned as a plain int, not a promise)
    if n < 2:
        return 1
    # small delay so many levels are genuinely in flight at once
    time.sleep(.02)
    a = executor.submit(promise_factorial, n - 1)
    # promisify wraps the Future; then() multiplies the sub-result by n
    return Promise.promisify(a).then(lambda r: combine(r, n))
def test_factorial():
    """10! computed through the promise/thread chain matches the known value."""
    result = promise_factorial(10)
    assert result.get() == 3628800
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,150
|
Affirm/promise
|
refs/heads/master
|
/tests/test_extra.py
|
# This exercises some capabilities above and beyond
# the Promises/A+ test suite
import time
import pytest
from promise import (
Promise,
is_thenable,
promisify as free_promisify,
promise_for_dict as free_promise_for_dict,
)
from concurrent.futures import Future
from threading import Thread
from .utils import assert_exception
class DelayedFulfill(Thread):
    """Background thread that fulfills promise *p* with value *v*
    after *d* seconds."""
    def __init__(self, d, p, v):
        Thread.__init__(self)
        self.delay = d
        self.promise = p
        self.value = v

    def run(self):
        time.sleep(self.delay)
        self.promise.fulfill(self.value)
class DelayedRejection(Thread):
    """Worker thread that rejects *p* with reason *r* after *d* seconds."""

    def __init__(self, d, p, r):
        Thread.__init__(self)
        self.delay = d
        self.promise = p
        self.reason = r

    def run(self):
        # Sleep first, then reject the promise from this worker thread.
        time.sleep(self.delay)
        self.promise.reject(self.reason)
class FakeThenPromise():
    """Duck-typed 'thenable' whose ``then`` can be configured to raise.

    Used to exercise promisify's handling of misbehaving then-objects.
    """

    def __init__(self, raises=True):
        self.raises = raises

    def then(self, s=None, f=None):
        if not self.raises:
            return None
        raise Exception("FakeThenPromise raises in 'then'")
class FakeDonePromise():
    """Duck-typed 'done'-style promise whose ``done`` can be configured to raise."""

    def __init__(self, raises=True):
        self.raises = raises

    def done(self, s=None, f=None):
        if not self.raises:
            return None
        raise Exception("FakeDonePromise raises in 'done'")
def df(value, dtime):
    """Delayed-fulfill helper: return a pending Promise resolved with
    *value* after *dtime* seconds (on a background thread)."""
    p = Promise()
    t = DelayedFulfill(dtime, p, value)
    t.start()
    return p
def dr(reason, dtime):
    """Delayed-reject helper: return a pending Promise rejected with
    *reason* after *dtime* seconds (on a background thread)."""
    p = Promise()
    t = DelayedRejection(dtime, p, reason)
    t.start()
    return p
# Static methods
def test_fulfilled():
p = Promise.fulfilled(4)
assert p.is_fulfilled
assert p.value == 4
def test_rejected():
p = Promise.rejected(Exception("Static rejected"))
assert p.is_rejected
assert_exception(p.reason, Exception, "Static rejected")
# Fulfill
def test_fulfill_self():
p = Promise()
with pytest.raises(TypeError) as excinfo:
p.fulfill(p)
# Exceptions
def test_exceptions():
def throws(v):
assert False
p1 = Promise()
p1.add_callback(throws)
p1.fulfill(5)
p2 = Promise()
p2.add_errback(throws)
p2.reject(Exception())
with pytest.raises(Exception) as excinfo:
p2.get()
def test_fake_promise():
p = Promise()
p.fulfill(FakeThenPromise())
assert p.is_rejected
assert_exception(p.reason, Exception, "FakeThenPromise raises in 'then'")
# WAIT
def test_wait_when():
p1 = df(5, 0.01)
assert p1.is_pending
p1.wait()
assert p1.is_fulfilled
def test_wait_if():
p1 = Promise()
p1.fulfill(5)
p1.wait()
assert p1.is_fulfilled
def test_wait_timeout():
p1 = df(5, 0.1)
assert p1.is_pending
p1.wait(timeout=0.05)
assert p1.is_pending
p1.wait()
assert p1.is_fulfilled
# GET
def test_get_when():
p1 = df(5, 0.01)
assert p1.is_pending
v = p1.get()
assert p1.is_fulfilled
assert 5 == v
def test_get_if():
p1 = Promise()
p1.fulfill(5)
v = p1.get()
assert p1.is_fulfilled
assert 5 == v
def test_get_timeout():
p1 = df(5, 0.1)
assert p1.is_pending
try:
v = p1.get(timeout=0.05)
assert False
except ValueError:
pass # We expect this
assert p1.is_pending
v = p1.get()
assert p1.is_fulfilled
assert 5 == v
# Promise.all
def test_promise_all_when():
p1 = Promise()
p2 = Promise()
pl = Promise.all([p1, p2])
assert p1.is_pending
assert p2.is_pending
assert pl.is_pending
p1.fulfill(5)
assert p1.is_fulfilled
assert p2.is_pending
assert pl.is_pending
p2.fulfill(10)
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pl.is_fulfilled
assert 5 == p1.value
assert 10 == p2.value
assert 5 == pl.value[0]
assert 10 == pl.value[1]
def test_promise_all_when_mixed_promises():
p1 = Promise()
p2 = Promise()
pl = Promise.all([p1, 32, p2, False, True])
assert p1.is_pending
assert p2.is_pending
assert pl.is_pending
p1.fulfill(5)
assert p1.is_fulfilled
assert p2.is_pending
assert pl.is_pending
p2.fulfill(10)
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pl.is_fulfilled
assert 5 == p1.value
assert 10 == p2.value
assert pl.value == [5, 32, 10, False, True]
def test_promise_all_when_if_no_promises():
pl = Promise.all([10, 32, False, True])
assert pl.is_fulfilled
assert pl.value == [10, 32, False, True]
def test_promise_all_if():
p1 = Promise()
p2 = Promise()
pd1 = Promise.all([p1, p2])
pd2 = Promise.all([p1])
pd3 = Promise.all([])
assert p1.is_pending
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_pending
assert pd3.is_fulfilled
p1.fulfill(5)
assert p1.is_fulfilled
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_fulfilled
p2.fulfill(10)
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pd1.is_fulfilled
assert pd2.is_fulfilled
assert 5 == p1.value
assert 10 == p2.value
assert 5 == pd1.value[0]
assert 5 == pd2.value[0]
assert 10 == pd1.value[1]
assert [] == pd3.value
# promise_for_dict
@pytest.fixture(params=[
Promise.for_dict,
free_promise_for_dict,
])
def promise_for_dict(request):
return request.param
def test_dict_promise_when(promise_for_dict):
p1 = Promise()
p2 = Promise()
d = {"a": p1, "b": p2}
pd1 = promise_for_dict(d)
pd2 = promise_for_dict({"a": p1})
pd3 = promise_for_dict({})
assert p1.is_pending
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_pending
assert pd3.is_fulfilled
p1.fulfill(5)
assert p1.is_fulfilled
assert p2.is_pending
assert pd1.is_pending
assert pd2.is_fulfilled
p2.fulfill(10)
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pd1.is_fulfilled
assert pd2.is_fulfilled
assert 5 == p1.value
assert 10 == p2.value
assert 5 == pd1.value["a"]
assert 5 == pd2.value["a"]
assert 10 == pd1.value["b"]
assert {} == pd3.value
def test_dict_promise_if(promise_for_dict):
p1 = Promise()
p2 = Promise()
d = {"a": p1, "b": p2}
pd = promise_for_dict(d)
assert p1.is_pending
assert p2.is_pending
assert pd.is_pending
p1.fulfill(5)
assert p1.is_fulfilled
assert p2.is_pending
assert pd.is_pending
p2.fulfill(10)
assert p1.is_fulfilled
assert p2.is_fulfilled
assert pd.is_fulfilled
assert 5 == p1.value
assert 10 == p2.value
assert 5 == pd.value["a"]
assert 10 == pd.value["b"]
def test_done():
counter = [0]
def inc(_):
counter[0] += 1
def dec(_):
counter[0] -= 1
p = Promise()
p.done(inc, dec)
p.fulfill(4)
assert counter[0] == 1
p = Promise()
p.done(inc, dec)
p.done(inc, dec)
p.reject(Exception())
assert counter[0] == -1
def test_done_all():
counter = [0]
def inc(_):
counter[0] += 1
def dec(_):
counter[0] -= 1
p = Promise()
p.done_all()
p.done_all([(inc, dec)])
p.done_all([
(inc, dec),
(inc, dec),
{'success': inc, 'failure': dec},
])
p.fulfill(4)
assert counter[0] == 4
p = Promise()
p.done_all()
p.done_all([inc])
p.done_all([(inc, dec)])
p.done_all([
(inc, dec),
{'success': inc, 'failure': dec},
])
p.reject(Exception())
assert counter[0] == 1
def test_then_all():
p = Promise()
handlers = [
((lambda x: x * x), (lambda r: 1)),
{'success': (lambda x: x + x), 'failure': (lambda r: 2)},
]
results = p.then_all() + p.then_all([lambda x: x]) + p.then_all([(lambda x: x * x, lambda r: 1)]) + p.then_all(handlers)
p.fulfill(4)
assert [r.value for r in results] == [4, 16, 16, 8]
p = Promise()
handlers = [
((lambda x: x * x), (lambda r: 1)),
{'success': (lambda x: x + x), 'failure': (lambda r: 2)},
]
results = p.then_all() + p.then_all([(lambda x: x * x, lambda r: 1)]) + p.then_all(handlers)
p.reject(Exception())
assert [r.value for r in results] == [1, 1, 2]
def test_do_resolve():
p1 = Promise(lambda resolve, reject: resolve(0))
assert p1.is_fulfilled
assert p1.value == 0
def test_do_resolve_fail_on_call():
def raises(resolve, reject):
raise Exception('Fails')
p1 = Promise(raises)
assert not p1.is_fulfilled
assert str(p1.reason) == 'Fails'
def test_catch():
p1 = Promise(lambda resolve, reject: resolve(0))
p2 = p1.then(lambda value: 1 / value) \
.catch(lambda e: e) \
.then(lambda e: type(e))
assert p2.is_fulfilled
assert p2.value == ZeroDivisionError
def test_is_thenable_promise():
promise = Promise()
assert is_thenable(promise)
def test_is_thenable_then_object():
promise = FakeThenPromise()
assert is_thenable(promise)
def test_is_thenable_done_object():
promise = FakeDonePromise()
assert is_thenable(promise)
def test_is_thenable_future():
promise = Future()
assert is_thenable(promise)
def test_is_thenable_simple_object():
assert not is_thenable(object())
@pytest.fixture(params=[free_promisify, Promise.promisify])
def promisify(request):
return request.param
def test_promisify_promise(promisify):
promise = Promise()
assert promisify(promise) == promise
def test_promisify_then_object(promisify):
promise = FakeThenPromise(raises=False)
p = promisify(promise)
assert isinstance(p, Promise)
def test_promisify_then_object_exception(promisify):
promise = FakeThenPromise()
with pytest.raises(Exception) as excinfo:
promisify(promise)
assert str(excinfo.value) == "FakeThenPromise raises in 'then'"
def test_promisify_done_object(promisify):
promise = FakeDonePromise(raises=False)
p = promisify(promise)
assert isinstance(p, Promise)
def test_promisify_done_object_exception(promisify):
promise = FakeDonePromise()
with pytest.raises(Exception) as excinfo:
promisify(promise)
assert str(excinfo.value) == "FakeDonePromise raises in 'done'"
def test_promisify_future(promisify):
future = Future()
promise = promisify(future)
assert promise.is_pending
future.set_result(1)
assert promise.is_fulfilled
assert promise.value == 1
def test_promisify_future_rejected(promisify):
future = Future()
promise = promisify(future)
assert promise.is_pending
future.set_exception(Exception('Future rejected'))
assert promise.is_rejected
assert_exception(promise.reason, Exception, 'Future rejected')
def test_promisify_object(promisify):
with pytest.raises(TypeError) as excinfo:
promisify(object())
assert str(excinfo.value) == "Object is not a Promise like object."
def test_promisify_promise_subclass():
class MyPromise(Promise):
pass
p = Promise()
p.fulfill(10)
m_p = MyPromise.promisify(p)
assert isinstance(m_p, MyPromise)
assert m_p.get() == p.get()
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,151
|
Affirm/promise
|
refs/heads/master
|
/promise/__init__.py
|
from .promise import Promise, promise_for_dict, promisify, is_thenable
__all__ = ['Promise', 'promise_for_dict', 'promisify', 'is_thenable']
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,152
|
Affirm/promise
|
refs/heads/master
|
/tests/conftest.py
|
import sys

# Tell pytest to skip async-test modules on interpreters too old to parse them.
collect_ignore = []
_version = sys.version_info[:2]
if _version < (3, 4):
    collect_ignore.append('test_awaitable.py')
if _version < (3, 5):
    collect_ignore.append('test_awaitable_35.py')
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,153
|
Affirm/promise
|
refs/heads/master
|
/tests/utils.py
|
def assert_exception(exception, expected_exception_cls, expected_message):
    """Assert *exception* is an *expected_exception_cls* whose str() equals
    *expected_message*."""
    is_expected_type = isinstance(exception, expected_exception_cls)
    assert is_expected_type
    assert expected_message == str(exception)
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,154
|
Affirm/promise
|
refs/heads/master
|
/tests/test_awaitable_35.py
|
import pytest
import asyncio
from promise import Promise, promisify, is_thenable
@pytest.mark.asyncio
async def test_await():
assert await Promise.resolve(True)
@pytest.mark.asyncio
async def test_promisify_coroutine():
async def my_coroutine():
await asyncio.sleep(.01)
return True
assert await promisify(my_coroutine())
@pytest.mark.asyncio
async def test_coroutine_is_thenable():
async def my_coroutine():
await asyncio.sleep(.01)
return True
assert is_thenable(my_coroutine())
@pytest.mark.asyncio
async def test_promisify_future():
future = asyncio.Future()
future.set_result(True)
assert await promisify(future)
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,155
|
Affirm/promise
|
refs/heads/master
|
/promise/promise.py
|
import functools
from threading import Event, RLock
from .compat import Future, iscoroutine, ensure_future, iterate_promise # type: ignore
from typing import Callable, Optional, Iterator, Any, Dict, Tuple, Union # flake8: noqa
class CountdownLatch(object):
    """Thread-safe counter that is decremented toward zero.

    ``dec`` is atomic: the post-decrement value is captured while holding
    the lock, so concurrent callers each observe a distinct remaining count.
    """
    __slots__ = ('_lock', 'count')

    def __init__(self, count):
        # type: (CountdownLatch, int) -> None
        assert count >= 0, "count needs to be greater or equals to 0. Got: %s" % count
        self._lock = RLock()
        self.count = count

    def dec(self):
        # type: (CountdownLatch) -> int
        """Decrement by one and return the remaining count."""
        with self._lock:
            assert self.count > 0, "count needs to be greater or equals to 0. Got: %s" % self.count
            self.count -= 1
            # Capture inside the lock: another thread could decrement again
            # before we return, and we must report *our* resulting value.
            remaining = self.count
        return remaining
class Promise(object):
"""
This is the Promise class that complies
Promises/A+ specification and test suite:
http://promises-aplus.github.io/promises-spec/
"""
__slots__ = ('state', 'value', 'reason', '_cb_lock', '_callbacks', '_errbacks', '_event', '_future')
# These are the potential states of a promise
PENDING = -1
REJECTED = 0
FULFILLED = 1
def __init__(self, fn=None):
# type: (Promise, Callable) -> None
"""
Initialize the Promise into a pending state.
"""
self.state = self.PENDING # type: int
self.value = None # type: Any
self.reason = None # type: Optional[Exception]
self._cb_lock = RLock()
self._callbacks = [] # type: List[Callable]
self._errbacks = [] # type: List[Callable]
self._event = Event()
self._future = None # type: Optional[Future]
if fn:
self.do_resolve(fn)
def __iter__(self):
# type: (Promise) -> Iterator
return iterate_promise(self)
__await__ = __iter__
@property
def future(self):
# type: (Promise) -> Future
if not self._future:
self._future = Future()
self.add_callback(self._future.set_result)
self.add_errback(self._future.set_exception)
return self._future
def do_resolve(self, fn):
try:
fn(self.fulfill, self.reject)
except Exception as e:
self.reject(e)
@classmethod
def fulfilled(cls, x):
# type: (Any) -> Promise
p = cls()
p.fulfill(x)
return p
@classmethod
def rejected(cls, reason):
# type: (Any) -> Promise
p = cls()
p.reject(reason)
return p
def fulfill(self, x):
# type: (Promise, Any) -> None
"""
Fulfill the promise with a given value.
"""
if self is x:
raise TypeError("Cannot resolve promise with itself.")
elif is_thenable(x):
try:
self.promisify(x).done(self.fulfill, self.reject)
except Exception as e:
self.reject(e)
else:
self._fulfill(x)
resolve = fulfilled
def _fulfill(self, value):
# type: (Promise, Any) -> None
with self._cb_lock:
if self.state != self.PENDING:
return
self.value = value
self.state = self.FULFILLED
callbacks = self._callbacks
# We will never call these callbacks again, so allow
# them to be garbage collected. This is important since
# they probably include closures which are binding variables
# that might otherwise be garbage collected.
#
# Prevent future appending
self._callbacks = None
# Notify all waiting
self._event.set()
for callback in callbacks:
try:
callback(value)
except Exception:
# Ignore errors in callbacks
pass
def reject(self, reason):
# type: (Promise, Exception) -> None
"""
Reject this promise for a given reason.
"""
assert isinstance(reason, Exception), ("The reject function needs to be called with an Exception. "
"Got %s" % reason)
with self._cb_lock:
if self.state != self.PENDING:
return
self.reason = reason
self.state = self.REJECTED
errbacks = self._errbacks
# We will never call these errbacks again, so allow
# them to be garbage collected. This is important since
# they probably include closures which are binding variables
# that might otherwise be garbage collected.
#
# Prevent future appending
self._errbacks = None
# Notify all waiting
self._event.set()
for errback in errbacks:
try:
errback(reason)
except Exception:
# Ignore errors in errback
pass
@property
def is_pending(self):
# type: (Promise) -> bool
"""Indicate whether the Promise is still pending. Could be wrong the moment the function returns."""
return self.state == self.PENDING
@property
def is_fulfilled(self):
# type: (Promise) -> bool
"""Indicate whether the Promise has been fulfilled. Could be wrong the moment the function returns."""
return self.state == self.FULFILLED
@property
def is_rejected(self):
# type: (Promise) -> bool
"""Indicate whether the Promise has been rejected. Could be wrong the moment the function returns."""
return self.state == self.REJECTED
def get(self, timeout=None):
# type: (Promise, int) -> Any
"""Get the value of the promise, waiting if necessary."""
self.wait(timeout)
if self.state == self.PENDING:
raise ValueError("Value not available, promise is still pending")
elif self.state == self.FULFILLED:
return self.value
raise self.reason
def wait(self, timeout=None):
# type: (Promise, int) -> None
"""
An implementation of the wait method which doesn't involve
polling but instead utilizes a "real" synchronization
scheme.
"""
self._event.wait(timeout)
def add_callback(self, f):
# type: (Promise, Callable) -> None
"""
Add a callback for when this promis is fulfilled. Note that
if you intend to use the value of the promise somehow in
the callback, it is more convenient to use the 'then' method.
"""
assert callable(f), "A function needs to be passed into add_callback. Got: %s" % f
with self._cb_lock:
if self.state == self.PENDING:
self._callbacks.append(f)
return
# This is a correct performance optimization in case of concurrency.
# State can never change once it is not PENDING anymore and is thus safe to read
# without acquiring the lock.
if self.state == self.FULFILLED:
f(self.value)
def add_errback(self, f):
# type: (Promise, Callable) -> None
"""
Add a callback for when this promis is rejected. Note that
if you intend to use the rejection reason of the promise
somehow in the callback, it is more convenient to use
the 'then' method.
"""
assert callable(f), "A function needs to be passed into add_errback. Got: %s" % f
with self._cb_lock:
if self.state == self.PENDING:
self._errbacks.append(f)
return
# This is a correct performance optimization in case of concurrency.
# State can never change once it is not PENDING anymore and is thus safe to read
# without acquiring the lock.
if self.state == self.REJECTED:
f(self.reason)
def catch(self, on_rejection):
# type: (Promise, Callable) -> Promise
"""
This method returns a Promise and deals with rejected cases only.
It behaves the same as calling Promise.then(None, on_rejection).
"""
return self.then(None, on_rejection)
def done(self, success=None, failure=None):
# type: (Promise, Callable, Callable) -> None
"""
This method takes two optional arguments. The first argument
is used if the "self promise" is fulfilled and the other is
used if the "self promise" is rejected. In contrast to then,
the return value of these callback is ignored and nothing is
returned.
"""
with self._cb_lock:
if success is not None:
self.add_callback(success)
if failure is not None:
self.add_errback(failure)
def done_all(self, handlers=None):
# type: (Promise, List[Callable]) -> List[Promise]
"""
:type handlers: list[(Any) -> object] | list[((Any) -> object, (Any) -> object)]
"""
if not handlers:
return []
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
self.done(s, f)
elif isinstance(handler, dict):
s = handler.get('success')
f = handler.get('failure')
self.done(s, f)
else:
self.done(success=handler)
def then(self, success=None, failure=None):
# type: (Promise, Callable, Callable) -> Promise
"""
This method takes two optional arguments. The first argument
is used if the "self promise" is fulfilled and the other is
used if the "self promise" is rejected. In either case, this
method returns another promise that effectively represents
the result of either the first of the second argument (in the
case that the "self promise" is fulfilled or rejected,
respectively).
Each argument can be either:
* None - Meaning no action is taken
* A function - which will be called with either the value
of the "self promise" or the reason for rejection of
the "self promise". The function may return:
* A value - which will be used to fulfill the promise
returned by this method.
* A promise - which, when fulfilled or rejected, will
cascade its value or reason to the promise returned
by this method.
* A value - which will be assigned as either the value
or the reason for the promise returned by this method
when the "self promise" is either fulfilled or rejected,
respectively.
:type success: (Any) -> object
:type failure: (Any) -> object
:rtype : Promise
"""
ret = self.__class__()
def call_and_fulfill(v):
"""
A callback to be invoked if the "self promise"
is fulfilled.
"""
try:
if callable(success):
ret.fulfill(success(v))
else:
ret.fulfill(v)
except Exception as e:
ret.reject(e)
def call_and_reject(r):
"""
A callback to be invoked if the "self promise"
is rejected.
"""
try:
if callable(failure):
ret.fulfill(failure(r))
else:
ret.reject(r)
except Exception as e:
ret.reject(e)
self.done(call_and_fulfill, call_and_reject)
return ret
def then_all(self, handlers=None):
# type: (Promise, List[Callable]) -> List[Promise]
"""
Utility function which calls 'then' for each handler provided. Handler can either
be a function in which case it is used as success handler, or a tuple containing
the success and the failure handler, where each of them could be None.
:type handlers: list[(Any) -> object] | list[((Any) -> object, (Any) -> object)]
:param handlers
:rtype : list[Promise]
"""
if not handlers:
return []
promises = [] # type: List[Promise]
for handler in handlers:
if isinstance(handler, tuple):
s, f = handler
promises.append(self.then(s, f))
elif isinstance(handler, dict):
s = handler.get('success')
f = handler.get('failure')
promises.append(self.then(s, f))
else:
promises.append(self.then(success=handler))
return promises
@classmethod
def all(cls, values_or_promises):
# Type: (Iterable[Promise, Any]) -> Promise
"""
A special function that takes a bunch of promises
and turns them into a promise for a vector of values.
In other words, this turns an list of promises for values
into a promise for a list of values.
"""
_len = len(values_or_promises)
if _len == 0:
return cls.fulfilled(values_or_promises)
promises = (cls.promisify(v_or_p) if is_thenable(v_or_p) else cls.resolve(v_or_p) for
v_or_p in values_or_promises) # type: Iterator[Promise]
all_promise = cls() # type: Promise
counter = CountdownLatch(_len)
values = [None] * _len # type: List[Any]
def handle_success(original_position, value):
# type: (int, Any) -> None
values[original_position] = value
if counter.dec() == 0:
all_promise.fulfill(values)
for i, p in enumerate(promises):
p.done(functools.partial(handle_success, i), all_promise.reject) # type: ignore
return all_promise
@classmethod
def promisify(cls, obj):
# type: (Any) -> Promise
if isinstance(obj, cls):
return obj
add_done_callback = get_done_callback(obj) # type: Optional[Callable]
if callable(add_done_callback):
promise = cls()
add_done_callback(_process_future_result(promise))
return promise
done = getattr(obj, "done", None) # type: Optional[Callable]
if callable(done):
p = cls()
done(p.fulfill, p.reject)
return p
then = getattr(obj, "then", None) # type: Optional[Callable]
if callable(then):
p = cls()
then(p.fulfill, p.reject)
return p
if iscoroutine(obj):
return cls.promisify(ensure_future(obj))
raise TypeError("Object is not a Promise like object.")
@classmethod
def for_dict(cls, m):
# type: (Dict[Any, Promise]) -> Promise
"""
A special function that takes a dictionary of promises
and turns them into a promise for a dictionary of values.
In other words, this turns an dictionary of promises for values
into a promise for a dictionary of values.
"""
if not m:
return cls.fulfilled({})
keys, values = zip(*m.items())
dict_type = type(m)
def handle_success(resolved_values):
return dict_type(zip(keys, resolved_values))
return cls.all(values).then(handle_success)
promisify = Promise.promisify
promise_for_dict = Promise.for_dict
def _process_future_result(promise):
def handle_future_result(future):
exception = future.exception()
if exception:
promise.reject(exception)
else:
promise.fulfill(future.result())
return handle_future_result
def is_future(obj):
    # type: (Any) -> bool
    """True when *obj* exposes a callable ``add_done_callback`` (Future duck-typing)."""
    callback_hook = get_done_callback(obj)
    return callable(callback_hook)
def get_done_callback(obj):
    # type: (Any) -> Callable
    """Fetch the Future-style ``add_done_callback`` hook from *obj*, or ``None``."""
    hook = getattr(obj, "add_done_callback", None)
    return hook
def is_thenable(obj):
    # type: (Any) -> bool
    """
    A utility function to determine if the specified
    object is a promise using "duck typing".

    Accepts real Promise instances, Future-likes (``add_done_callback``),
    objects with a callable ``done`` or ``then`` attribute, and coroutines.
    """
    # Ordered from most specific (a real Promise) to most generic (coroutine).
    return isinstance(obj, Promise) or is_future(obj) or (
        hasattr(obj, "done") and callable(getattr(obj, "done"))) or (
        hasattr(obj, "then") and callable(getattr(obj, "then"))) or (
        iscoroutine(obj))
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,156
|
Affirm/promise
|
refs/heads/master
|
/tests/test_awaitable.py
|
import asyncio
import pytest
import time
from promise import Promise
@pytest.mark.asyncio
@asyncio.coroutine
def test_await():
yield from Promise.resolve(True)
@pytest.mark.asyncio
@asyncio.coroutine
def test_await_time():
def resolve_or_reject(resolve, reject):
time.sleep(.1)
resolve(True)
p = Promise(resolve_or_reject)
assert p.get() is True
|
{"/tests/test_issues.py": ["/promise/__init__.py"], "/tests/test_complex_threads.py": ["/promise/__init__.py"], "/tests/test_extra.py": ["/promise/__init__.py", "/tests/utils.py"], "/promise/__init__.py": ["/promise/promise.py"], "/tests/test_awaitable_35.py": ["/promise/__init__.py"], "/tests/test_awaitable.py": ["/promise/__init__.py"]}
|
17,157
|
PEtab-dev/petab_web_validator
|
refs/heads/main
|
/petab_web_validator.py
|
from typing import Optional
import libsbml
import pandas as pd
import petab
from app import app
def get_petab_problem(sbml_str: str = None,
                      condition_df: Optional[pd.DataFrame] = None,
                      measurement_df: Optional[pd.DataFrame] = None,
                      parameter_df: Optional[pd.DataFrame] = None,
                      observable_df: Optional[pd.DataFrame] = None
                      ) -> 'petab.Problem':
    """
    load petab problem.

    Arguments:
        sbml_str: PEtab SBML model, as an XML string
        condition_df: PEtab condition table
        measurement_df: PEtab measurement table
        parameter_df: PEtab parameter table
        observable_df: PEtab observables tables

    Returns:
        A ``petab.Problem`` assembled from whichever inputs were provided;
        omitted pieces stay ``None``.
    """
    sbml_model = sbml_document = sbml_reader = None
    # BUGFIX: the DataFrame checks must be explicit ``is not None`` tests.
    # ``if condition_df:`` raises ValueError for any real pandas DataFrame,
    # because the truth value of a DataFrame is ambiguous.
    if condition_df is not None:
        condition_df = petab.conditions.get_condition_df(condition_df)
    if measurement_df is not None:
        # TODO: If there are multiple tables, we will merge them
        measurement_df = petab.measurements.get_measurement_df(measurement_df)
    if parameter_df is not None:
        parameter_df = petab.parameters.get_parameter_df(parameter_df)
    if sbml_str:
        # Parse the SBML string; keep reader and document alive alongside the
        # model so the model object stays valid.
        sbml_reader = libsbml.SBMLReader()
        sbml_document = sbml_reader.readSBMLFromString(sbml_str)
        sbml_model = sbml_document.getModel()
    if observable_df is not None:
        # TODO: If there are multiple tables, we will merge them
        observable_df = petab.observables.get_observable_df(observable_df)
    return petab.Problem(condition_df=condition_df,
                         measurement_df=measurement_df,
                         parameter_df=parameter_df,
                         observable_df=observable_df,
                         sbml_model=sbml_model,
                         sbml_document=sbml_document,
                         sbml_reader=sbml_reader)
|
{"/petab_web_validator.py": ["/app/__init__.py"], "/app/routes.py": ["/app/__init__.py", "/app/forms.py"]}
|
17,158
|
PEtab-dev/petab_web_validator
|
refs/heads/main
|
/app/forms.py
|
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed
from wtforms.fields import SubmitField
class PEtabForm(FlaskForm):
    """Upload form for the five PEtab inputs: one SBML model (.xml) and four
    TSV tables (conditions, measurements, parameters, observables).

    Each field only validates the file extension; content validation happens
    after upload.
    """
    # NOTE(review): several labels below start with a stray leading space
    # (' sbml file', ' condition file', ...) -- presumably unintentional, but
    # changing them would alter the rendered UI text, so they are left as-is.
    sbml_file = FileField(
        ' sbml file',
        validators=[
            FileAllowed(['xml'],
                        'Only files with the *.xml extension are allowed')])
    condition_file = FileField(
        ' condition file',
        validators=[
            FileAllowed(['tsv'],
                        'Only files with the *.tsv extension are allowed')])
    measurement_file = FileField(
        ' measurement file',
        validators=[
            FileAllowed(['tsv'],
                        'Only files with the *.tsv extension are allowed')])
    parameters_file = FileField(
        ' parameters file',
        validators=[
            FileAllowed(['tsv'],
                        'Only files with the *.tsv extension are allowed')])
    observables_file = FileField(
        'observables file',
        validators=[
            FileAllowed(['tsv'],
                        'Only files with the *.tsv extension are allowed')])
    submit = SubmitField('Upload')
|
{"/petab_web_validator.py": ["/app/__init__.py"], "/app/routes.py": ["/app/__init__.py", "/app/forms.py"]}
|
17,159
|
PEtab-dev/petab_web_validator
|
refs/heads/main
|
/app/__init__.py
|
from flask import Flask
from flask_bootstrap import Bootstrap
app = Flask(__name__)
bootstrap = Bootstrap(app)
app.secret_key = 'secret password'
from app import routes, errors
|
{"/petab_web_validator.py": ["/app/__init__.py"], "/app/routes.py": ["/app/__init__.py", "/app/forms.py"]}
|
17,160
|
PEtab-dev/petab_web_validator
|
refs/heads/main
|
/app/routes.py
|
import logging
import os
import re
import tempfile
from petab.C import *
from petab.lint import lint_problem
import petab
from flask import render_template, flash
from markupsafe import Markup
import libsbml
import pandas as pd
from app import app
from app.forms import PEtabForm
@app.route('/', methods=['GET', 'POST'])
def index():
form = PEtabForm()
if form.validate_on_submit():
with tempfile.TemporaryDirectory(dir=f"{app.root_path}") as tmpdirname:
fn = tempfile.mktemp(".log", dir=f"{tmpdirname}")
file_handler = logging.FileHandler(fn, mode='w')
file_handler.setLevel('DEBUG')
petab.lint.logger.addHandler(file_handler)
try:
petab_problem = get_problem(form.sbml_file.data,
form.condition_file.data,
form.measurement_file.data,
form.parameters_file.data,
form.observables_file.data)
except Exception as e:
flash(Markup(f'<p> Not valid: </p> {e} '), category='error')
return render_template('index.html', form=form)
try:
res = lint_problem(petab_problem)
if res:
with open(fn) as f:
error_log = f.read()
p = re.compile('\n')
error_log = p.sub('<br>', error_log)
flash(Markup(f'<p> Not valid: </p> <p> {error_log} </p>'), category='error')
else:
flash(Markup(f'<p> Great! Your model is valid. </p>'), category='success')
except Exception as e:
flash(Markup(f'<p> Error: </p> {e} '), category='error')
return render_template('index.html', form=form)
def get_problem(sbml_file, condition_file, measurement_file, parameters_file,
observables_file):
"""
will be removed
:return:
"""
if sbml_file:
sbml_reader = libsbml.SBMLReader()
sbml_str = str(sbml_file.stream.read(), "utf-8")
sbml_document = sbml_reader.readSBMLFromString(sbml_str)
sbml_model = sbml_document.getModel()
else:
sbml_reader = None
sbml_document = None
sbml_model = None
if condition_file:
condition_df = pd.read_csv(condition_file, sep='\t')
try:
condition_df.set_index([CONDITION_ID], inplace=True)
except KeyError:
raise KeyError(
f'Condition table missing mandatory field {CONDITION_ID}.')
else:
condition_df = None
if measurement_file:
measurement_df = petab.measurements.get_measurement_df(pd.read_csv(measurement_file, sep='\t'))
else:
measurement_df = None
if parameters_file:
parameters_df = pd.read_csv(parameters_file, sep='\t')
try:
parameters_df.set_index([PARAMETER_ID], inplace=True)
except KeyError:
raise KeyError(
f"Parameter table missing mandatory field {PARAMETER_ID}.")
else:
parameters_df = None
if observables_file:
observables_df = pd.read_csv(observables_file, sep='\t')
try:
observables_df.set_index([OBSERVABLE_ID], inplace=True)
except KeyError:
raise KeyError(
f"Observable table missing mandatory field {OBSERVABLE_ID}.")
else:
observables_df = None
petab_problem = petab.Problem(sbml_reader=sbml_reader,
sbml_document=sbml_document,
sbml_model=sbml_model,
condition_df=condition_df,
measurement_df=measurement_df,
parameter_df=parameters_df,
observable_df=observables_df)
return petab_problem
|
{"/petab_web_validator.py": ["/app/__init__.py"], "/app/routes.py": ["/app/__init__.py", "/app/forms.py"]}
|
17,161
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/tests/test_model_methods.py
|
import unittest
from create_app import create_app
from fill_db import UpdateDb
from databases.sql_db import db
from models.category import Category
from models.vacancy import Vacancy
class ModelMethodsTestCase(unittest.TestCase):
    """Exercise the query helpers on the Category and Vacancy models
    against a database freshly rebuilt from the test XML feed."""
    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app("testing")
        self.client = self.app.test_client()
        self.app_context = self.app.app_context()
        # Rebuild the test database from the fixture feed before every test.
        with self.app_context:
            update = UpdateDb(xml_file_path='data/test_feed.xml')
            update.recreate_db()
    def test_get_category_by_name(self):
        # find_by_name is an exact-name lookup.
        with self.app_context:
            category = Category.find_by_name('Medizin')
            self.assertEqual('Medizin', category.name)
    def test_get_jobs_by_category(self):
        with self.app_context:
            category = Category.find_by_name('Medizin')
            # get_vacancies(n) limits the result to n rows.
            jobs = category.get_vacancies(3)
            self.assertEqual(3, len(list(jobs)))
            # Every returned job must actually carry the requested category.
            for job in jobs:
                category_names = [category.name for category in job.categories]
                self.assertIn('Medizin', category_names)
    def test_get_job_by_id(self):
        with self.app_context:
            job = Vacancy.find_by_id(3)
            self.assertEqual(3, job.id)
    def test_get_job_data(self):
        # Vacancy.dict() is the compact API representation.
        with self.app_context:
            job = Vacancy.find_by_id(3).dict()
            self.assertEqual(dict, type(job))
            self.assertIn('id', job)
            self.assertIn('title', job)
            self.assertIn('location', job)
    def tearDown(self):
        """teardown all initialized variables."""
        with self.app_context:
            db.session.remove()
            db.drop_all()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,162
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/fill_db.py
|
from xml.dom import minidom
from databases.sql_db import db
from models.category import Category
from models.vacancy import Vacancy
from create_app import create_app
import datetime
class UpdateDb:
    """Populate the database from an XML job feed.

    The feed is parsed lazily and cached, so a full ``recreate_db`` run
    reads and parses the XML file only once (the original implementation
    re-parsed it for both the category and the vacancy pass).
    """

    def __init__(self, xml_file_path):
        self.xml_file_path = xml_file_path
        self._records = None  # cached parsed <record> elements

    @staticmethod
    def clear_db():
        """Drop and recreate all tables, wiping existing data."""
        db.drop_all()
        db.create_all()

    @staticmethod
    def _text(record, tag):
        """Return the text content of the first ``tag`` child of ``record``."""
        return record.getElementsByTagName(tag)[0].firstChild.data

    def _parse_xml(self):
        """Parse the feed file and return its <record> elements (cached)."""
        if self._records is None:
            data = minidom.parse(self.xml_file_path)
            self._records = data.getElementsByTagName('record')
        return self._records

    def fill_categories(self):
        """Create one Category row per distinct category name in the feed."""
        seen = set()
        for record in self._parse_xml():
            category_names = self._text(record, 'category')
            # category names are comma-separated, e.g. "Medizin, Pflege"
            for name in category_names.split(', '):
                if name not in seen:
                    seen.add(name)
                    Category(name=name).save_to_db()

    def fill_vacancies(self):
        """Create Vacancy rows and link each to its categories."""
        for record in self._parse_xml():
            # feed dates are formatted 'DD-MM-YYYY'
            day, month, year = self._text(record, 'vondatum').split('-')
            vacancy = Vacancy(
                title=self._text(record, 'titel'),
                firm=self._text(record, 'firma'),
                description=self._text(record, 'volltext'),
                location_postcode=self._text(record, 'plz_arbeitsort'),
                location=self._text(record, 'arbeitsort'),
                from_date=datetime.date(int(year), int(month), int(day)),
                job_link=self._text(record, 'stellenlink'),
                job_type=self._text(record, 'jobtype'))
            vacancy.save_to_db()
            # Link the vacancy to each of its (already created) categories.
            for name in self._text(record, 'category').split(', '):
                category = Category.find_by_name(name=name)
                category.vacancies.append(vacancy)
                category.save_to_db()

    def recreate_db(self):
        """Wipe the database and refill it from the feed."""
        self.clear_db()
        self.fill_categories()
        self.fill_vacancies()
# Manual entry point: rebuild the development database from the full feed.
if __name__ == '__main__':
    app = create_app('development')
    with app.app_context():
        update = UpdateDb(xml_file_path='data/jobs_feed.xml')
        update.recreate_db()
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,163
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/job_app/views.py
|
from flask import Blueprint, render_template, redirect, url_for
from models.vacancy import Vacancy
# Blueprint serving the vacancy detail pages under /jobs/<id>.
job_page = Blueprint('job_page', __name__,
                     url_prefix='/jobs',
                     static_folder='static',
                     static_url_path='/',
                     template_folder='templates')
@job_page.route('/<int:job_id>')
def job(job_id):
    """Render the detail page for one vacancy, or fall back to search."""
    vacancy = Vacancy.find_by_id(job_id)
    if not vacancy:
        # Unknown id: send the visitor back to the search landing page.
        return redirect(url_for('search_page.index'))
    return render_template('job.html', job=vacancy)
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,164
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/create_app.py
|
from flask import Flask
from flask_cors import CORS
from databases.sql_db import db
from configs.app_configs.config import app_config
from search_app.views import search_page
from job_app.views import job_page
def create_app(config_name):
    """Application factory: build and configure the Flask app.

    ``config_name`` selects an entry of ``app_config``
    ('development', 'testing', 'production').
    """
    app = Flask(__name__)
    # Layer configuration: class-based config first, then the pyfile on top.
    app.config.from_object(app_config[config_name])
    app.config.from_pyfile('configs/app_configs/config.py')
    db.init_app(app)
    app.secret_key = app.config['SECRET']
    CORS(app)
    # Mount the two UI blueprints.
    for blueprint in (search_page, job_page):
        app.register_blueprint(blueprint)
    return app
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,165
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/tests/tests.py
|
import unittest
from tests.test_search_page import SearchPageTestCase
from tests.test_model_methods import ModelMethodsTestCase
if __name__ == "__main__":
    # BUGFIX: loadTestsFromModule was being passed TestCase *classes*; it
    # scans the given object's attributes for TestCase subclasses and thus
    # produced empty suites. loadTestsFromTestCase is the correct loader.
    loader = unittest.TestLoader()
    search = loader.loadTestsFromTestCase(SearchPageTestCase)
    models = loader.loadTestsFromTestCase(ModelMethodsTestCase)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(search)
    runner.run(models)
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,166
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/models/category.py
|
from databases.sql_db import db
# Association table implementing the many-to-many relation between
# vacancies and categories; both foreign keys form the composite PK.
vacancy_categories = db.Table('vacancy_categories',
                              db.Column('vacancy_id',
                                        db.Integer,
                                        db.ForeignKey('vacancies.id'),
                                        primary_key=True),
                              db.Column('category_id',
                                        db.Integer,
                                        db.ForeignKey('categories.id'),
                                        primary_key=True)
                              )
class Category(db.Model):
    """A job category; linked many-to-many with Vacancy via vacancy_categories."""
    __tablename__ = "categories"
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(128), nullable=False, unique=True)
    vacancies = db.relationship('Vacancy', secondary=vacancy_categories, lazy='dynamic',
                                backref=db.backref('categories', lazy=True))

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return f'{self.name}'

    @classmethod
    def find_by_name(cls, name):
        """Exact-name lookup; returns None when no such category exists."""
        return cls.query.filter(cls.name == name).first()

    def get_vacancies(self, quantity):
        """Return at most ``quantity`` vacancies of this category."""
        return self.vacancies.limit(quantity).all()

    def save_to_db(self):
        """Persist this row and commit the session."""
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        """Delete this row and commit the session."""
        db.session.delete(self)
        db.session.commit()
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,167
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/migrations/versions/c539d02f890a_rename_workplace_to_location.py
|
"""Rename workplace to location.
Revision ID: c539d02f890a
Revises:
Create Date: 2021-03-12 09:33:21.267981
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c539d02f890a'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Rename both workplace columns to their location equivalents in place,
    # preserving the existing data.
    op.alter_column('vacancies', 'workplace', nullable=False, new_column_name='location')
    op.alter_column('vacancies', 'workplace_postcode', nullable=False, new_column_name='location_postcode')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): unlike upgrade(), this re-adds the old columns and drops
    # the new ones instead of renaming back, so column data is lost on
    # downgrade.
    op.add_column('vacancies', sa.Column('workplace_postcode', sa.INTEGER(), autoincrement=False, nullable=True))
    op.add_column('vacancies', sa.Column('workplace', sa.VARCHAR(length=128), autoincrement=False, nullable=True))
    op.drop_column('vacancies', 'location_postcode')
    op.drop_column('vacancies', 'location')
    # ### end Alembic commands ###
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,168
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/app.py
|
from flask import render_template, jsonify, request, redirect, url_for
from create_app import create_app
from models.category import Category
from models.vacancy import Vacancy
app = create_app('development')
@app.errorhandler(404)
def page_not_found(error):
    # Any unknown URL falls back to the search landing page.
    return redirect(url_for('search_page.index'))
if __name__ == "__main__":
    # Bind on all interfaces so the dev server is reachable from outside
    # (e.g. when running inside a container).
    app.run(host='0.0.0.0')
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,169
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/search_app/views.py
|
from flask import Blueprint, render_template, request, jsonify
from models.vacancy import Vacancy
search_page = Blueprint('search_page', __name__,
static_folder='static/search/dist/',
static_url_path='/')
@search_page.route('/')
def index():
    # Serve the pre-built search SPA entry point from the blueprint's statics.
    return search_page.send_static_file('index.html')
@search_page.route('/search')
def search():
    """Paginated vacancy search filtered by optional keywords and locations.

    Query params: ``page`` (int, default 1), repeatable ``keyword`` and
    ``where``. Returns JSON with ``jobs`` and ``totalPages``.
    """
    page_number = request.args.get('page', 1, type=int)
    keywords = request.args.getlist('keyword')
    locations = request.args.getlist('where')
    pagination = Vacancy.search_vacancies(page_number, 10, keywords, locations)
    # Condensed page list (with None gaps) for the pager widget.
    pages = list(pagination.iter_pages(left_edge=2, left_current=2,
                                       right_current=3, right_edge=2))
    jobs = [vacancy.dict() for vacancy in pagination.items]
    return jsonify(jobs=jobs, totalPages=pages)
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,170
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/tests/test_search_page.py
|
import unittest
import json
from create_app import create_app
from fill_db import UpdateDb
from databases.sql_db import db
from models.category import Category
class SearchPageTestCase(unittest.TestCase):
    """Integration tests for the /search endpoint against a database
    rebuilt from the test XML feed (5 records) before every test."""
    def setUp(self):
        """Define test variables and initialize app."""
        self.app = create_app("testing")
        self.client = self.app.test_client()
        # Number of records in data/test_feed.xml.
        self.test_data = 5
        self.app_context = self.app.app_context()
        with self.app_context:
            update = UpdateDb(xml_file_path='data/test_feed.xml')
            update.recreate_db()
    def test_get_jobs(self):
        # No filters: all fixture records on one page.
        with self.app_context:
            res = self.client.get('/search',
                                  headers={"Content-Type": "application/json"})
            data = json.loads(res.data.decode())
            self.assertEqual(res.status_code, 200)
            self.assertEqual(list, type(data['jobs']))
            self.assertEqual(list, type(data['totalPages']))
            self.assertEqual(self.test_data, len(data['jobs']))
            self.assertEqual(1, len(data['totalPages']))
    def test_get_jobs_with_location(self):
        with self.app_context:
            res = self.client.get('/search',
                                  headers={"Content-Type": "application/json"},
                                  query_string={'where': 'Schwerin'})
            data = json.loads(res.data.decode())
            self.assertEqual(res.status_code, 200)
            self.assertEqual(list, type(data['jobs']))
            self.assertEqual(1, len(data['jobs']))
            self.assertEqual(1, len(data['totalPages']))
    def test_get_jobs_with_keyword(self):
        with self.app_context:
            res = self.client.get('/search',
                                  headers={"Content-Type": "application/json"},
                                  query_string={'keyword': 'Oberarzt'})
            data = json.loads(res.data.decode())
            self.assertEqual(res.status_code, 200)
            self.assertEqual(list, type(data['jobs']))
            self.assertEqual(4, len(data['jobs']))
            self.assertEqual(1, len(data['totalPages']))
    def test_get_jobs_with_keyword_and_location(self):
        # Keyword and location filters combine (AND semantics).
        with self.app_context:
            res = self.client.get('/search',
                                  headers={"Content-Type": "application/json"},
                                  query_string={'where': 'Schwerin',
                                                'keyword': 'Oberarzt',
                                                'page': 1})
            data = json.loads(res.data.decode())
            self.assertEqual(res.status_code, 200)
            self.assertEqual(list, type(data['jobs']))
            self.assertEqual(1, len(data['jobs']))
            self.assertEqual(1, len(data['totalPages']))
    def test_get_jobs_with_wrong_page(self):
        # Out-of-range page: empty job list but pagination metadata intact
        # (search uses error_out=False).
        with self.app_context:
            res = self.client.get('/search',
                                  headers={"Content-Type": "application/json"},
                                  query_string={'page': 2})
            data = json.loads(res.data.decode())
            self.assertEqual(res.status_code, 200)
            self.assertEqual(list, type(data['jobs']))
            self.assertEqual(0, len(data['jobs']))
            self.assertEqual(1, len(data['totalPages']))
    def test_get_jobs_with_wrong_location(self):
        with self.app_context:
            res = self.client.get('/search',
                                  headers={"Content-Type": "application/json"},
                                  query_string={'where': 'dddddddd'})
            data = json.loads(res.data.decode())
            self.assertEqual(res.status_code, 200)
            self.assertEqual(list, type(data['jobs']))
            self.assertEqual(0, len(data['jobs']))
            self.assertEqual(0, len(data['totalPages']))
    def test_get_jobs_with_wrong_keyword(self):
        with self.app_context:
            res = self.client.get('/search',
                                  headers={"Content-Type": "application/json"},
                                  query_string={'keyword': 'aaaaaaa'})
            data = json.loads(res.data.decode())
            self.assertEqual(res.status_code, 200)
            self.assertEqual(list, type(data['jobs']))
            self.assertEqual(0, len(data['jobs']))
            self.assertEqual(0, len(data['totalPages']))
    def tearDown(self):
        """teardown all initialized variables."""
        with self.app_context:
            db.session.remove()
            db.drop_all()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,171
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/configs/app_configs/config.py
|
import os
from dotenv import load_dotenv
load_dotenv('configs/env_configs/.env')
class Config(object):
    """Parent configuration class."""
    DEBUG = False
    # Flask session-signing secret, injected via the environment (.env file).
    SECRET = os.getenv('SECRET')
    # Postgres connection parameters, also read from the environment.
    user = os.getenv("POSTGRES_USER")
    password = os.getenv("POSTGRES_PASSWORD")
    hostname = os.getenv("POSTGRES_HOSTNAME")
    port = os.getenv("POSTGRES_PORT")
    database = os.getenv("APPLICATION_DB")
    SQLALCHEMY_DATABASE_URI = (
        f"postgresql+psycopg2://{user}:{password}@{hostname}:{port}/{database}"
    )
    # Disable the SQLAlchemy modification-tracking event system (overhead).
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class DevelopmentConfig(Config):
    """Configurations for Development."""
    DEBUG = True
    # Surface exceptions even when DEBUG is toggled off at runtime.
    PROPAGATE_EXCEPTIONS = True
class TestingConfig(Config):
    """Configurations for Testing, with a separate test database."""
    TESTING = True
    DEBUG = True
    # The env reads are repeated here because the f-string below is evaluated
    # inside this class body, where the parent class's attributes are not in
    # scope; only the database name differs (TEST_DB).
    user = os.getenv("POSTGRES_USER")
    password = os.getenv("POSTGRES_PASSWORD")
    hostname = os.getenv("POSTGRES_HOSTNAME")
    port = os.getenv("POSTGRES_PORT")
    database = os.getenv("TEST_DB")
    SQLALCHEMY_DATABASE_URI = (
        f"postgresql+psycopg2://{user}:{password}@{hostname}:{port}/{database}"
    )
class ProductionConfig(Config):
    """Configurations for Production."""
    DEBUG = False
    TESTING = False
# Registry mapping config names (as passed to create_app) to config classes.
app_config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
}
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,172
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/manage.py
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from app import app
from databases.sql_db import db
"""
To create migrations folder: python manage.py db init
To make migrations: python manage.py db migrate
To update database: python manage.py db upgrade
"""
# Wire Alembic migrations and the CLI manager onto the Flask app.
# NOTE(review): Flask-Script is unmaintained; Flask-Migrate now exposes the
# same commands via the built-in `flask db ...` CLI — consider migrating.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,173
|
MariaAfanaseva/mini_jobninja
|
refs/heads/main
|
/models/vacancy.py
|
from databases.sql_db import db
class Vacancy(db.Model):
    """A single job posting imported from the XML feed."""
    __tablename__ = "vacancies"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255), nullable=False)
    firm = db.Column(db.String(128), nullable=False)
    description = db.Column(db.Text(), nullable=False)
    location_postcode = db.Column(db.Integer())
    location = db.Column(db.String(128))
    from_date = db.Column(db.Date())
    job_link = db.Column(db.String(128))
    job_type = db.Column(db.String(80))

    def __init__(self, title, firm, description,
                 location_postcode, location,
                 from_date, job_link, job_type):
        self.title = title
        self.firm = firm
        self.description = description
        self.location_postcode = location_postcode
        self.location = location
        self.from_date = from_date
        self.job_link = job_link
        self.job_type = job_type

    def __repr__(self):
        return f'{self.title}'

    def dict(self):
        """Compact JSON-friendly representation used by the search API."""
        return {'id': self.id, 'title': self.title, 'location': self.location}

    @classmethod
    def find_by_id(cls, vacancy_id):
        """Primary-key lookup; returns None when no row matches."""
        return cls.query.filter_by(id=vacancy_id).first()

    @classmethod
    def _create_keywords_conditions(cls, keywords):
        # One OR-able ilike condition per searchable column per keyword,
        # in column order title, description, firm.
        searchable = (cls.title, cls.description, cls.firm)
        return [column.ilike(f'%{keyword}%')
                for keyword in keywords
                for column in searchable]

    @classmethod
    def _create_location_conditions(cls, locations):
        # Substring match on the location column for each requested location.
        return [cls.location.ilike(f'%{location}%') for location in locations]

    @classmethod
    def search_vacancies(cls, page, quantity, keywords, locations):
        """Newest-first paginated search.

        Location filters are AND-ed together; keyword filters are OR-ed
        across title, description and firm.
        """
        query = cls.query
        if locations:
            query = query.filter(db.and_(*cls._create_location_conditions(locations)))
        if keywords:
            query = query.filter(db.or_(*cls._create_keywords_conditions(keywords)))
        return query.order_by(cls.from_date.desc()).paginate(page, quantity, error_out=False)

    def save_to_db(self):
        """Persist this row and commit the session."""
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        """Delete this row and commit the session."""
        db.session.delete(self)
        db.session.commit()
|
{"/tests/test_model_methods.py": ["/create_app.py", "/fill_db.py", "/models/category.py", "/models/vacancy.py"], "/fill_db.py": ["/models/category.py", "/models/vacancy.py", "/create_app.py"], "/job_app/views.py": ["/models/vacancy.py"], "/create_app.py": ["/configs/app_configs/config.py", "/search_app/views.py", "/job_app/views.py"], "/tests/tests.py": ["/tests/test_search_page.py", "/tests/test_model_methods.py"], "/app.py": ["/create_app.py", "/models/category.py", "/models/vacancy.py"], "/search_app/views.py": ["/models/vacancy.py"], "/tests/test_search_page.py": ["/create_app.py", "/fill_db.py", "/models/category.py"], "/manage.py": ["/app.py"]}
|
17,186
|
allenyang79/member-system
|
refs/heads/master
|
/app/auth.py
|
# -*- coding: utf-8 -*-
import functools
import json
import os
import sys
import time
import traceback
import binascii
import flask
#from flask_jwt import JWT, jwt_required, current_identity
from werkzeug.security import safe_str_cmp
import jwt
import jwt.exceptions
from app.error import InvalidError
#from app.auth import AuthManager
from app.models.models import Admin
class LoginFailError(InvalidError):
    """Raised when credential verification fails (HTTP 403)."""
    def __init__(self, message='Login fail.', status_code=403):
        super(LoginFailError, self).__init__(message, status_code)
class UnauthorizedError(InvalidError):
    """Raised when a request carries no valid authentication (HTTP 403)."""
    def __init__(self, message='Unauthorized.', status_code=403):
        super(UnauthorizedError, self).__init__(message, status_code)
class AuthManager(object):
    """A auth plugin for flask.

    AuthManager authenticates users via JWT encode/decode, carrying the token
    in a `jwt` cookie. On failure it raises UnauthorizedError or
    LoginFailError; register flask error handlers to render those.
    """

    def __init__(self, app=None):
        self.app = None
        self.mode = 'deny_first'
        self.whitelist = []
        if app is not None:
            self.app = app
            self.init_app(self.app)

    def init_app(self, app):
        """Bind this manager to a flask app (flask extension protocol)."""
        self.app = app
        self.app.extensions['jwt'] = self
        self.app.before_request(self.before_request)

    def before_request(self):
        # Resolves the request endpoint; whitelist-based gating was started
        # here but is currently disabled.
        if hasattr(flask.request, 'url_rule') and hasattr(flask.request.url_rule, 'endpoint'):
            endpoint = flask.request.url_rule.endpoint
        else:
            endpoint = None

    def login_user(self, payload):
        """Issue a JWT for `payload` and set it as a cookie on the response.

        If `payload` has no 'exp' claim one is added from the app's
        JWT_EXPIRE_TIME config (default 86400 seconds).

        .. code-block:: python

            payload = {
                ...
                'exp': '(Expiration Time) Claim',
            }
        """
        if 'exp' not in payload:
            payload['exp'] = int(time.time()) + self.app.config.get('JWT_EXPIRE_TIME', 86400)
        # BUGFIX: hexlify() returns bytes on Python 3; JWT header values must
        # be JSON-serializable, so decode the salt to a str.
        salt = binascii.hexlify(os.urandom(16)).decode('ascii')
        token = jwt.encode(payload, self.app.config['JWT_SECRET'],
                           algorithm='HS256', headers={'salt': salt})
        resp = flask.make_response()
        resp.headers['content-type'] = 'application/json; charset=utf-8'
        resp.set_cookie('jwt', token, expires=payload['exp'])
        # BUGFIX: getattr without a default raises AttributeError when no
        # loader was registered via load_user().
        loader = getattr(self, '_load_user', None)
        if callable(loader):
            flask.g.me = loader(payload)
        return resp

    def logout_user(self):
        """Clear the jwt cookie on a fresh response."""
        resp = flask.make_response()
        resp.headers['content-type'] = 'application/json; charset=utf-8'
        resp.set_cookie('jwt', '', expires=0)
        return resp

    def auth(self):
        """Validate the request's jwt cookie.

        :return: ``(is_auth, message)``; ``message`` is None on success.
        """
        try:
            encoded = flask.request.cookies.get('jwt')
            if not encoded:
                return False, 'No JWT token.'
            payload = jwt.decode(encoded, self.app.config['JWT_SECRET'], algorithms=['HS256'])
            if not payload:
                return False, 'Payload is empty.'
            # BUGFIX: was `now > payload['exp'] > now`, a chained comparison
            # that can never be true, so expiry was never detected here.
            if payload['exp'] < int(time.time()):
                return False, 'JWT token expired.'
            loader = getattr(self, '_load_user', None)
            if callable(loader):
                flask.g.me = loader(payload)
            else:
                raise Exception('please implement load_user to mixin.')
            return True, None
        except jwt.exceptions.InvalidTokenError:
            # InvalidTokenError covers DecodeError as well as
            # ExpiredSignatureError raised by jwt.decode itself (the old
            # DecodeError-only catch let expiry errors escape).
            return False, 'Jwt deocode fail.'

    def load_user(self, func):
        """Register `func(payload) -> user` as the user loader (decorator)."""
        self._load_user = func

    def login_required(self, func):
        """Wrap a endpoint function to validate before __call__.

        .. code-block:: python

            @app.route('/hello')
            @am.login_required
            def hello():
                return {}
        """
        @functools.wraps(func)
        def decorated_view(*args, **kwargs):
            # BUGFIX: ('OPTIONS') is a plain string, so `in` did a substring
            # test; use a real tuple. CORS preflights must pass unauthenticated.
            if flask.request.method in ('OPTIONS',):
                return func(*args, **kwargs)
            is_auth, message = self.auth()
            if is_auth:
                return func(*args, **kwargs)
            raise UnauthorizedError(message)
        return decorated_view

    def me(self, silence=False):
        """Return the current user, raise UnauthorizedError, or None if `silence`."""
        if flask.has_request_context():
            if hasattr(flask.g, 'me'):
                return flask.g.me
        if not silence:
            raise UnauthorizedError('current user has not login.')
        return None
def init_auth(app):
    """Create an AuthManager bound to `app` with Admin-backed user loading."""
    auth_manager = AuthManager(app)

    @auth_manager.load_user
    def load_user(payload):
        """Payload from jwt decode."""
        return Admin.get_one(payload['username'])

    return auth_manager
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,187
|
allenyang79/member-system
|
refs/heads/master
|
/app/models/orm.py
|
# -*- coding: utf-8 -*-
import copy
import functools
import os
import sys
import weakref
import datetime
import bson
import logging
from collections import namedtuple
from app.error import InvalidError
from app.db import db
logger = logging.getLogger()
class ModelError(InvalidError):
    """Base model operator error."""
    pass
# NOTE(review): ModelDeclareError derives from ModelError while the siblings
# below derive directly from InvalidError — confirm whether that asymmetry
# is intentional.
class ModelDeclareError(ModelError):
    """Error on declare a new Model"""
    pass
# NOTE(review): name is misspelled ("Invald"); kept because callers
# reference it by this name.
class ModelInvaldError(InvalidError):
    """Invalid model operator."""
    pass
class ModelParserError(InvalidError):
    """Parse from dict fail."""
    pass
class ModelSaveError(InvalidError):
    """Error while saving a model."""
    pass
class Field(object):
    """Declare a property (data descriptor) for a Model.

    Values live in the owning instance's ``_attrs`` dict under
    ``raw_field_key`` and are converted on the way in/out through
    ``value_in``/``value_out``.
    """
    field_key = None        # attribute name on the model class (set by register())
    raw_field_key = None    # storage key inside instance._attrs
    def __init__(self, raw_field_key=None, **kw):
        """
        :param str raw_field_key: storage key; defaults to the attribute name
        :param default: value or function
        """
        self.raw_field_key = raw_field_key
        # `default` is only set when provided, so hasattr(self, 'default')
        # distinguishes "no default" from "default is None".
        if 'default' in kw:
            self.default = kw['default']
    def __get__(self, instance, cls):
        # Class-level access returns the descriptor itself.
        if not instance:
            return self
        else:
            if self.raw_field_key not in instance._attrs:
                if hasattr(self, 'default'):
                    # if has `default`, then use this `default` to generate value
                    if hasattr(self.default, '__call__'):
                        instance._attrs[self.raw_field_key] = self.value_in(instance, self.default())
                    else:
                        instance._attrs[self.raw_field_key] = self.value_in(instance, self.default)
                else:
                    # No stored value and no default: missing fields read as None.
                    return None
            return self.value_out(instance, instance._attrs[self.raw_field_key])
    def __set__(self, instance, value):
        """ set value to instance's field.
        TODO: how to handle none value???
        """
        # None bypasses value_in so converters never see a null.
        if value is None:
            instance._attrs[self.raw_field_key] = None
        else:
            instance._attrs[self.raw_field_key] = self.value_in(instance, value)
    def register(self, cls, field_key):
        """ Bind the property name with model cls.
        When declare complete. this function will call by Model's Meta. and bind by the property name.
        """
        # NOTE(review): assumes `cls._config` dict is declared by the model
        # base/metaclass — confirm against the Model definition.
        self.field_key = field_key
        if self.raw_field_key is None:
            self.raw_field_key = field_key
        cls._config[field_key] = self
    def value_in(self, instance, value):
        """The value from external to instance._attrs"""
        return value
    def value_out(self, instance, value):
        """ The value from instance._attrs to external"""
        return value
    def encode(self, instance, target):
        """ Encode external value to another data type that json.dumps can process. """
        if self.raw_field_key in instance._attrs:
            target[self.field_key] = getattr(instance, self.field_key)
    def decode(self, instance, payload):
        """ decode external value from another data type that json.loads can process. """
        if self.field_key in payload:
            value = payload[self.field_key]
            setattr(instance, self.field_key, value)
class IDField(Field):
    """Primary-key field; defaults to a fresh BSON ObjectId rendered as str,
    stored under Mongo's '_id' key unless told otherwise."""
    def __init__(self, raw_field_key='_id', **kw):
        kw.setdefault('default', lambda: str(bson.ObjectId()))
        kw['raw_field_key'] = raw_field_key
        super(IDField, self).__init__(**kw)
class StringField(Field):
    """Text field; non-string inputs are coerced via %-formatting."""
    def __init__(self, **kw):
        kw.setdefault('default', '')
        super(StringField, self).__init__(**kw)
    def value_in(self, instance, value):
        # Strings pass through untouched; everything else is stringified.
        return value if isinstance(value, basestring) else "%s" % (value)
    def value_out(self, instance, value):
        return value
class BoolField(Field):
    """Boolean field; incoming values are coerced through truthiness."""
    def __init__(self, **kw):
        kw.setdefault('default', False)
        super(BoolField, self).__init__(**kw)
    def value_in(self, instance, value):
        return True if value else False
    def value_out(self, instance, value):
        return value
class IntField(Field):
    """Integer field; incoming values are coerced with int()."""
    def __init__(self, **kw):
        kw.setdefault('default', 0)
        super(IntField, self).__init__(**kw)
    def value_in(self, instance, value):
        return int(value)
    def value_out(self, instance, value):
        return value
class DateField(Field):
    """Date field: stores a datetime truncated to midnight, exposes a date.

    JSON round-trips use the '%Y-%m-%d' string form.
    """
    def __init__(self, **kw):
        """ DateField
        :param datetime default: default can be like ex: lamda: datetime.date.today()
        """
        super(DateField, self).__init__(**kw)
    def value_in(self, instance, value):
        # datetime.datetime is a subclass of datetime.date, so it must be
        # tested FIRST; in the original order the datetime branch was
        # unreachable (every datetime matched the date check).
        if isinstance(value, datetime.datetime):
            return value.replace(minute=0, hour=0, second=0, microsecond=0)
        if isinstance(value, datetime.date):
            return datetime.datetime.combine(value, datetime.datetime.min.time())
        raise ModelInvaldError('`DateField` only accept `date` value, not `%s`' % repr(value))
    def value_out(self, instance, value):
        return value.date()
    def encode(self, instance, target):
        if self.raw_field_key in instance._attrs:
            target[self.field_key] = getattr(instance, self.field_key).strftime('%Y-%m-%d')
    def decode(self, instance, payload):
        if self.field_key in payload:
            try:
                value = datetime.datetime.strptime(payload[self.field_key], '%Y-%m-%d').date()
                setattr(instance, self.field_key, value)
            except Exception as e:
                # Best-effort decode: log and leave the field untouched.
                logger.warning(e)
                logger.warning('can not decode `%s` `%s`', self.field_key, payload[self.field_key])
class ListField(Field):
    """List field; incoming iterables are copied into a fresh list."""
    def __init__(self, **kw):
        # `list` is callable, so it serves as the per-instance [] factory.
        kw.setdefault('default', list)
        super(ListField, self).__init__(**kw)
    def value_in(self, instance, value):
        return list(value)
class TupleField(Field):
    """A field backed by a namedtuple type.

    Stored internally as a plain dict; exposed externally as an instance of
    the namedtuple class ``np``.
    """
    def __init__(self, np, **kw):
        """ TupleField.
        :param namedtuple np: ex: namedtuple('Point', ['x', 'y'], verbose=True)
        """
        if not np:
            raise ModelDeclareError('Declare a tuple field without namedtuple `np`.')
        super(TupleField, self).__init__(**kw)
        self.np = np
    def value_in(self, instance, value):
        # NOTE(review): relies on the namedtuple exposing __dict__ (Python 2
        # behaviour); newer Python 3 namedtuples dropped __dict__ and would
        # need _asdict() — confirm the target runtime.
        return value.__dict__
    def value_out(self, instance, value):
        if isinstance(value, dict):
            return self.np(**value)
        # Anything not stored as a dict is treated as absent.
        return None
    def encode(self, instance, target):
        # Same __dict__ caveat as value_in applies here.
        if self.raw_field_key in instance._attrs:
            target[self.field_key] = getattr(instance, self.field_key).__dict__
    def decode(self, instance, payload):
        if self.field_key in payload:
            try:
                value = self.np(**payload[self.field_key])
                setattr(instance, self.field_key, value)
            except Exception as e:
                # Best-effort decode: log and leave the field untouched.
                logger.warning(e)
                logger.warning('can not decode `%s` `%s`', self.field_key, payload[self.field_key])
class ClassReadonlyProperty(object):
    """A read-only value shared per-class by all instances of that class.

    Handy for declaring class-level storage such as `_table` or `_config`.
    """
    def __init__(self, default_value=lambda: None):
        self.default_value = default_value
        # Keyed weakly by the owning class so dropped classes free their slot.
        self.values = weakref.WeakKeyDictionary()
    def __get__(self, instance, cls):
        try:
            return self.values[cls]
        except KeyError:
            pass
        # First access for this class: materialize (and cache) the default.
        produced = self.default_value
        if hasattr(produced, '__call__'):
            produced = produced()
        self.values[cls] = produced
        return produced
    def __set__(self, instance, value):
        raise ModelInvaldError('`ClassReadonlyProperty` is readonly.')
class InstanceReadonlyProperty(object):
    """A read-only, per-instance value (e.g. a model's `_attrs` dict).

    Fixes over the original:
    - ``__set__`` now has the correct descriptor signature
      ``(self, instance, value)``; the old ``__set__(self)`` made any
      assignment raise TypeError instead of the intended ModelInvaldError.
    - class-level access is detected with ``instance is None`` rather than
      truthiness, so falsy-but-real instances still resolve their value.
    """
    def __init__(self, default_value=lambda: None):
        self.default_value = default_value
        # Weak keys: an instance's slot disappears with the instance.
        self.values = weakref.WeakKeyDictionary()
    def __get__(self, instance, cls):
        if instance is not None:
            if instance not in self.values:
                if hasattr(self.default_value, '__call__'):
                    self.values[instance] = self.default_value()
                else:
                    self.values[instance] = self.default_value
            return self.values[instance]
        raise ModelInvaldError('`InstanceReadonlyProperty` can not work on class level.')
    def __set__(self, instance, value):
        raise ModelInvaldError('`InstanceReadonlyProperty` is readonly.')
class Meta(type):
    """Metaclass that wires Field descriptors into a Model class.

    On class creation it calls register() on every Field found in the class
    body and validates that the model declares a _table and a primary key.
    The abstract root class 'Base' is exempt from these checks.
    """
    def __new__(meta_cls, cls_name, cls_bases, cls_dict):
        cls = type.__new__(meta_cls, cls_name, cls_bases, cls_dict)
        if cls_name == 'Base':
            return cls
        primary_key_exists = False
        for field_key, field in cls_dict.items():
            if isinstance(field, Field):
                # Bind each descriptor to its attribute name / cls._config.
                field.register(cls, field_key)
                if isinstance(field, IDField):
                    # At most one IDField per model.
                    if primary_key_exists:
                        raise ModelDeclareError('model %s can not set primary_key `%s` twice.' % (cls_name, field_key))
                    primary_key_exists = True
        if cls._table is None:
            raise ModelDeclareError('declare Moedl without _table.')
        # NOTE(review): this checks the `_primary_key` class attribute, not
        # the `primary_key_exists` flag gathered above — a model with an
        # IDField but no `_primary_key` string would still fail here.
        if cls._primary_key is None:
            raise ModelDeclareError('declare Moedl without IDField.')
        return cls
class FetchResult(object):
    """Lazy wrapper over a pymongo cursor that yields Model instances.

    The original cursor is kept untouched (``root_cursor``) so the result
    can be reset; the chainable modifiers (sort/limit/skip) act on a clone.
    """
    def __init__(self, cursor, cls, cursor_):
        pass
    def __init__(self, cls, cursor):
        self.cls = cls
        self.root_cursor = cursor
        self.cursor = self.root_cursor.clone()
    def __iter__(self):
        return self
    def __getitem__(self, key):
        return self.cls.get_one(raw=self.cursor[key])
    def next(self):
        # Python 2 iterator protocol; Python 3 would need __next__.
        return self.cls.get_one(raw=next(self.cursor))
    def sort(self, key, sort):
        self.cursor = self.cursor.sort(key, sort)
        return self
    def limit(self, limit):
        self.cursor = self.cursor.limit(limit)
        return self
    def skip(self, skip):
        self.cursor = self.cursor.skip(skip)
        return self
    def rewind(self):
        self.cursor.rewind()
        return self
    def reset(self):
        # Discard all chained modifiers and start from the root query again.
        self.cursor = self.root_cursor.clone()
        return self
    @property
    def total(self):
        # Count ignoring limit/skip applied to the working cursor.
        # NOTE(review): cursor.count() was removed in pymongo 4.x; this
        # targets the pymongo 2/3 series.
        return self.root_cursor.count()
class Base(object):
    """Root class for all models; wired up by the `Meta` metaclass.

    Subclasses declare `_table`, `_primary_key` and a set of Field
    descriptors; field values live in the per-instance `_attrs` dict under
    each field's `raw_field_key`.
    """
    __metaclass__ = Meta
    # field_key -> Field descriptor, filled in by Field.register().
    _config = ClassReadonlyProperty(lambda: {})
    # Raw document storage for this instance.
    _attrs = InstanceReadonlyProperty(lambda: {})
    # Collection name; must be set by each subclass.
    _table = ClassReadonlyProperty()
    # Attribute name of the IDField; must be set by each subclass.
    _primary_key = ClassReadonlyProperty()
    @classmethod
    def _find(cls, query={}):
        """Proxy to db.collection.find."""
        return db[cls._table].find(query)
    @classmethod
    def _insert_one(cls, payload):
        """Proxy to db.collection.insert_one."""
        result = db[cls._table].insert_one(payload)
        if not result.inserted_id:
            raise ModelInvaldError('create instance fail.')
        return result.inserted_id
    @classmethod
    def _update_one(cls, query={}, payload={}):
        """Proxy to db.collection.update_one."""
        if not query:
            raise ModelInvaldError('can update by empty query.')
        if not payload:
            raise ModelInvaldError('can update by empty payload.')
        result = db[cls._table].update_one(query, {
            '$set': payload
        })
        if result.matched_count == 1:
            return True
        else:
            return False
    @classmethod
    def get_one(cls, _id=None, raw=None):
        """Load one instance, by `_id` or from an already-fetched raw
        document. Exactly one of the two must be given."""
        if _id and raw is None:
            # Project only declared fields so _attrs stays schema-clean.
            raw = db[cls._table].find_one({'_id': _id}, projection={field.raw_field_key: True for field in cls._config.values()})
            if not raw:
                return None
        elif raw and _id is None:
            pass
        else:
            raise ModelInvaldError('get_one arguemtn errors.')
        instance = cls({})
        instance._attrs.update(raw)
        return instance
    @classmethod
    def fetch(cls, query={}, sort=None, offset=None, limit=None):
        """Return a lazy FetchResult for `query`.
        NOTE(review): `sort`, `offset` and `limit` are accepted but
        currently ignored; chain them on the FetchResult instead.
        """
        cursor = cls._find(query)
        return FetchResult(cls, cursor)
    @classmethod
    def create(cls, payload={}):
        """Create, save and return a new instance, filling in field defaults.
        NOTE(review): the mutable default `payload={}` is shared across
        calls and mutated below — callers should always pass their own dict.
        """
        for field_key, field in cls._config.iteritems():
            if field_key not in payload:
                if hasattr(field, 'default'):
                    if hasattr(field.default, '__call__'):
                        payload[field_key] = field.default()
                    else:
                        payload[field_key] = field.default
        instance = cls(payload)
        instance.save()
        return instance
    def __init__(self, payload={}):
        """ Do not use Foo() to create new instance.
        instate cls.create or cls.get_one() is better.
        """
        for field_key, value in payload.items():
            if field_key in self._config:
                setattr(self, field_key, value)
            else:
                raise ModelError('create a `%s` instance with unfield key, value (%s, %s).' % (type(self).__name__, field_key, value))
        # self._attrs.update(_attrs)
        # for k, v in values.items():
        #     if k not in self._config:
        #         raise ModelError('create a `%s` instance with unfield key,value (%s, %s).' % (type(self), k, v))
        #     setattr(self, k, v)
    def is_new(self):
        """True if no document with this instance's id exists in the table."""
        #primary_field = self._config[self._primary_key]
        # if primary_field.raw_field_key not in self._attrs:
        #     return True
        # NOTE(review): ('_id') is the plain string '_id', not a 1-tuple —
        # probably meant ('_id',); confirm what projection pymongo receives.
        if db[self._table].find_one({'_id': self.get_id()}, ('_id')):
            return False
        return True
    def get_id(self):
        """Return the primary-key value of this instance."""
        return getattr(self, self._primary_key)
    def save(self, allow_fields=None):
        """Save _attrs in to database.
        :param list allow_fields: it will only save allow_fields.
        """
        cls = type(self)
        payload = {}
        fields = set(self._config.keys())
        if allow_fields:
            # Restrict the write to the intersection with declared fields.
            fields = set(allow_fields) & fields
        for k in fields:
            # if self.is_new() and isinstance(self._config[k], IDField):
            #     pass # pass if primary_key
            # NOTE(review): `k` (the field_key) is used both to read _attrs
            # and as the Mongo key; a non-primary field whose raw_field_key
            # differs would mis-save — none exist today besides IDField.
            if k == self._primary_key:
                continue
            elif k in self._attrs:
                payload[k] = self._attrs[k]
        if self.is_new():
            # Inserts carry the primary key explicitly as Mongo's _id.
            primary_field = self._config[self._primary_key]
            payload['_id'] = self._attrs[primary_field.raw_field_key]
            if cls._insert_one(payload):
                return True
        else:
            if cls._update_one({'_id': self.get_id()}, payload):
                return True
        raise ModelSaveError('can not save instance of `%s`' % type(self))
    def to_jsonify(self):
        """ return a dict, that can be dump to json.
        """
        result = {
            '__class__': type(self).__name__
        }
        for field_key, field in self._config.iteritems():
            field.encode(self, result)
        return result
    def update_from_jsonify(self, payload, allow_fields=None):
        """update a value from external dict by json.loads()."""
        # NOTE(review): `allow_fields` is accepted but not honoured here.
        for field_key, field in self._config.iteritems():
            field.decode(self, payload)
        return self
    @classmethod
    def from_jsonify(cls, payload):
        """Build an instance from a to_jsonify()-style dict; the payload's
        __class__ marker must match this class's name."""
        if '__class__' in payload and payload['__class__'] == cls.__name__:
            instance = cls({})
            for field_key, field in cls._config.iteritems():
                field.decode(instance, payload)
            return instance
        raise ModelParserError('can not parse `%s` to `%s` instance.' % (payload, cls.__name__))
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,188
|
allenyang79/member-system
|
refs/heads/master
|
/app/models/models.py
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import app.error as error
from passlib.hash import pbkdf2_sha256
from app.db import db
from . import Base
from . import IDField, StringField, DateField, BoolField, ListField, TupleField
class Admin(Base):
    """Administrator account.

    The pbkdf2 password hash is stored in the raw document under
    'password' but is deliberately not declared as a Field, so it never
    leaks through to_jsonify().
    """
    _table = 'admins'
    _primary_key = 'admin_id'
    admin_id = IDField(raw_field_key='_id')
    enabled = BoolField()
    @classmethod
    def login(cls, _id, password):
        """Return True when the account exists, is enabled and the password
        matches; False otherwise."""
        raw = db[cls._table].find_one({'_id': _id}, {'_id': 1, 'password': 1, 'enabled': 1})
        if raw and raw.get('enabled', False):
            return cls.valid_password(password, raw['password'])
        return False
    @classmethod
    def hash_password(cls, password):
        """Return a pbkdf2_sha256 hash of `password` (100k rounds)."""
        # passlib wants bytes; encode unicode input first (Python 2).
        if isinstance(password, unicode):
            password = password.encode('utf-8')
        return pbkdf2_sha256.encrypt(password, rounds=10 ** 5, salt_size=16)
    @classmethod
    def valid_password(cls, password, encoded):
        """Return True if `password` matches the stored `encoded` hash."""
        if isinstance(password, unicode):
            password = password.encode('utf-8')
        return pbkdf2_sha256.verify(password, encoded)
    def update_password(self, password):
        """Hash and persist a new password; raises InvalidError when the
        admin document is not found."""
        result = db[self._table].update_one({'_id': self.get_id()}, {
            '$set': {'password': self.hash_password(password)}
        })
        if result.matched_count:
            return True
        raise error.InvalidError('update password fail')
class Person(Base):
    """A member record with contact info, group membership and
    person-to-person relations."""
    _table = 'persons'
    _primary_key = 'person_id'
    person_id = IDField(raw_field_key='_id')
    social_id = StringField()
    name = StringField()
    birthday = DateField()
    gender = StringField()
    phone_0 = StringField()
    phone_1 = StringField()
    phone_2 = StringField()
    address_0 = StringField()
    address_1 = StringField()
    email_0 = StringField()
    email_1 = StringField()
    education = StringField()
    job = StringField()
    register_date = DateField()
    unregister_date = DateField()
    baptize_date = DateField()
    baptize_priest = StringField()
    gifts = ListField()  # ['aa', 'bb', 'cc']
    groups = ListField()  # [group_id, group_id, group_id]
    events = ListField()  # {date:'', 'title': 'bala...'}
    relations = ListField()  # {rel: 'parent', person_id: '1231212'}
    #TupleField(namedtuple('Relation', ('rel', 'person_id')), {'rel':None, 'person_id':None})
    note = StringField()
    def get_relations(self):
        """Return the relation rows, each annotated with its related Person
        under the 'person' key.

        Fixes over the original: relation rows are keyed by 'person_id'
        (the key build_relation() actually stores — there is no '_id' key),
        Person.fetch() returns a single FetchResult (it was wrongly
        unpacked into two values), and the related id is read via get_id()
        (Person has no `_id` attribute; the field is `person_id`).
        """
        rows = {row['person_id']: row for row in self.relations}
        persons = Person.fetch({'_id': {'$in': list(rows.keys())}})
        for p in persons:
            if p.get_id() in rows:
                rows[p.get_id()]['person'] = p
        return list(rows.values())
    def build_relation(self, rel, other_person_id, due=False):
        """Append a relation {'rel': rel, 'person_id': other_person_id}.

        :param bool due: when True, also create the reciprocal relation on
            the other person (one level deep only).
        :raises error.InvalidError: if a relation to that person exists.
        """
        item = {'rel': rel, 'person_id': other_person_id}
        other_person_ids = [item['person_id'] for item in self.relations]
        if other_person_id in other_person_ids:
            raise error.InvalidError('relation is existed.')
        else:
            self.relations.append(item)
            self.save(allow_fields=('relations',))
        if due:
            other_person = type(self).get_one(other_person_id)
            other_person.build_relation(rel, self.get_id())
        return True
class Group(Base):
    """A named group that persons can belong to (see Person.groups)."""
    _table = 'groups'
    _primary_key = 'group_id'
    # IDField defaults its raw storage key to Mongo's '_id'.
    group_id = IDField()
    name = StringField()
    note = StringField()
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,189
|
allenyang79/member-system
|
refs/heads/master
|
/app/logger.py
|
# -*- coding: utf-8 -*-
import os
import sys
import argparse
import logging
import logging.handlers # import RotatingFileHandler
# Root logger: capture everything at DEBUG and above.
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s %(filename)s:%(lineno)d\t[%(thread)8.8s][%(levelname)5.5s] - %(message)s',
    datefmt="%Y-%m-%d %H:%M:%S"
)
# Console handler.
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
# Rotating file handler: 1 MB per file, 10 backups, under <repo>/logs/.
log_path = os.path.join(os.path.dirname(__file__), "../logs/log.log")
file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=1000 * 1000, backupCount=10)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
#loggers[logID] = logger
"""
access_logger = logging.getLogger('werkzeug')
log_path = os.path.join(os.path.dirname(__file__),"../logs/access.log")
access_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=1000 * 1000, backupCount=10)
logger.addHandler(access_handler)
"""
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,190
|
allenyang79/member-system
|
refs/heads/master
|
/tests/test_db.py
|
# -*- coding: utf-8 -*-
import os
import sys
from app.config import config
#from app.db import db
from app.db import db
import unittest
class TestDB(unittest.TestCase):
    """Smoke test: the LocalProxy-wrapped db can insert, read and drop."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_db(self):
        db.tests.insert_one({'_id': '1234'})
        one = db.tests.find_one()
        self.assertTrue(one)
        # Drop the scratch collection so runs stay independent.
        db.tests.drop()
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,191
|
allenyang79/member-system
|
refs/heads/master
|
/app/db.py
|
import os
import sys
import functools
import pymongo
from werkzeug.local import LocalProxy
from app.config import config
# Lazily-created singletons; populated by find_db() on first access.
_db = None
_client = None
def _init_db():
    """Create the MongoClient and Database from app config.

    Kept as a separate hook so tests can monkey-patch it (see tests/__init__.py).
    """
    client = pymongo.MongoClient(config['DB_HOST'], config['DB_PORT'])
    db = client[config['DB_NAME']]
    return client, db
def find_db():
    """Return the process-wide Database handle, creating it on first use.

    Fix: pymongo Database objects do not implement truth-value testing
    (bool(database) raises NotImplementedError), so the cached handle must
    be compared against None explicitly — the original `if not _db:` blew
    up on every call after the first.
    """
    global _client, _db
    if _db is None:
        _client, _db = _init_db()
    return _db
db = LocalProxy(find_db)
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,192
|
allenyang79/member-system
|
refs/heads/master
|
/app/config.py
|
# -*- coding: utf-8 -*-
import os
import sys
import argparse
# Global configuration dict, filled in by load_config().
config = {}
# CLI: --config/-f selects a module under configs/ layered over the defaults.
parser = argparse.ArgumentParser(
    description='custom config'
)
parser.add_argument(
    '--config', '-f',
    help='load custom config in configs',
    default='default'
)
parser.add_argument(
    '--debug',
    action='store_true',
    help='debug mode',
    default=False
)
def _parse_args():
    """Parse the CLI arguments.

    Kept as its own hook so unit tests can monkey-patch it.
    """
    return parser.parse_args()
def load_config():
    """Populate the global config: defaults first, then the --config overlay.

    Idempotent: once config is non-empty a second call is a no-op.

    Fix: the bare Python-2 `print` statement is now the parenthesized form,
    which behaves identically on Python 2 and parses on Python 3.
    """
    global config
    if config:
        print('pass load_config if loaded.')
        return
    args = _parse_args()
    # Always load configs/default.py, then overlay the selected module.
    m = __import__('configs.default', fromlist=['default'])
    config.update(m.config)
    m = __import__('configs.%s' % args.config, fromlist=[args.config])
    config.update(m.config)
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,193
|
allenyang79/member-system
|
refs/heads/master
|
/tests/__init__.py
|
# -*- coding: utf-8 -*-
import os, sys
import atexit
import re
import unittest
import mock
#import app.config as config
from app.config import parser, config
import app.server
# MongoBox instance kept module-global so bye() can stop it at exit.
box = None
def setup():
    """Package-level test setup: force the 'test' config and a throwaway
    MongoDB (mongobox) by patching the app's factory hooks."""
    print "custom config"
    def side_effect():
        # Pretend the process was started with `--config test --debug`.
        return parser.parse_args(['--config', 'test', '--debug'])#from
    mock_load_config = mock.patch('app.config._parse_args', side_effect=side_effect)
    mock_load_config.start()
    print "custom db"
    from mongobox import MongoBox
    global box, db, client
    box = MongoBox()
    box.start()
    def side_effect():
        # Point app.db at the sandboxed mongod instead of the real server.
        client = box.client() # pymongo client
        db = client['test']
        return client, db
    mock_init_db = mock.patch('app.db._init_db', side_effect=side_effect)
    mock_init_db.start()
def bye():
    """Stop the throwaway MongoDB when the test process exits."""
    global box
    if box:
        box.stop()
atexit.register(bye)
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,194
|
allenyang79/member-system
|
refs/heads/master
|
/configs/test.py
|
import os, sys
# Test overlay config: intentionally empty — configs/default.py applies and
# the sandbox database is injected by mocking app.db._init_db in tests.
config = {
}
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,195
|
allenyang79/member-system
|
refs/heads/master
|
/app/server.py
|
# -*- coding: utf-8 -*-
import json
import sys
from flask import Flask
from flask import current_app, g
from flask import request, jsonify
import app.config as config
import app.utils as utils
import app.auth as auth
from app.logger import logger
from app.error import InvalidError
from app.auth import AuthManager
from app.view import blueprint
from app.models.models import Admin
class CustomFlask(Flask):
    """Flask subclass that lets views return plain dicts/lists as JSON."""
    def make_response(self, rv):
        # Bare dict/list return value -> 200 JSON response.
        if isinstance(rv, (dict, list)):
            return super(CustomFlask, self).make_response((json.dumps(rv, cls=self.json_encoder), 200, {
                'Content-type': 'application/json; charset=utf-8'
            }))
        # (body, status[, headers]) tuples with a dict/list body -> JSON too.
        elif isinstance(rv, tuple) and isinstance(rv[0], (list, dict)):
            resp = super(CustomFlask, self).make_response((json.dumps(rv[0], cls=self.json_encoder),) + rv[1:])
            resp.headers['Content-type'] = 'application/json; charset=utf-8'
            return resp
        # Everything else follows stock Flask behaviour.
        return super(CustomFlask, self).make_response(rv)
def main():
    """Build and return the configured Flask application.

    Loads config, installs BSON-aware JSON handling, wires the auth manager
    and the view blueprint, ensures a default admin account exists, then
    registers CORS, error-handling and auth routes.
    """
    config.load_config()
    main_app = CustomFlask(__name__)
    main_app.config.update(config.config)
    main_app.json_encoder = utils.BSONJSONEncoder
    main_app.json_decoder = utils.BSONJSONDecoder
    main_app.url_map.converters['ObjectId'] = utils.ObjectIdConverter
    am = auth.init_auth(main_app)
    main_app.register_blueprint(blueprint)
    # init admin
    # NOTE(review): looks up the literal id 'admin' but creates with
    # DEFAULT_ADMIN_USERNAME — these only coincide because the default
    # config sets the username to 'admin'; verify if that value changes.
    admin = Admin.get_one('admin')
    if not admin:
        admin = Admin.create({
            'admin_id': config.config['DEFAULT_ADMIN_USERNAME'],
            'enabled': True,
        })
        admin.update_password(config.config['DEFAULT_ADMIN_PASSWORD'])
    ###############################################
    # CORS OPTIONS request fix
    ###############################################
    @main_app.before_request
    def option_autoreply():
        # Answer CORS preflight requests directly, echoing the requested
        # origin/method/headers back to the browser.
        if request.method == 'OPTIONS':
            resp = current_app.make_default_options_response()
            h = resp.headers
            # Allow the origin which made the XHR
            h['Access-Control-Allow-Origin'] = request.headers.get('Origin', '*')
            # Allow Credentials
            h['Access-Control-Allow-Credentials'] = 'true'
            # Allow the actual method
            h['Access-Control-Allow-Methods'] = request.headers['Access-Control-Request-Method']
            # Allow for cache $n seconds
            h['Access-Control-Max-Age'] = 3600 if config.config["MODE"] == "production" else 1
            # We also keep current headers
            if 'Access-Control-Request-Headers' in request.headers:
                h['Access-Control-Allow-Headers'] = request.headers.get('Access-Control-Request-Headers', '')
            return resp
    @main_app.after_request
    def allow_origin(response):
        # Attach CORS headers to every non-preflight response.
        if request.method == 'OPTIONS':
            return response
        response.headers['Access-Control-Allow-Headers'] = request.headers.get('Access-Control-Request-Headers', '')
        response.headers['Access-Control-Allow-Origin'] = request.headers.get('Origin', '*')
        response.headers['Access-Control-Allow-Credentials'] = 'true'
        response.headers['Access-Control-Allow-Methods'] = 'GET,POST,OPTIONS'
        response.headers['Access-Control-Max-Age'] = 1728000
        return response
    @main_app.errorhandler(InvalidError)
    def handle_invalid_error(error):
        # In production, auth failures are reduced to generic messages.
        if config.config['MODE'] == 'production':
            if isinstance(error, auth.UnauthorizedError):
                return {'success': False, 'message': 'Unauthorized.'}, error.status_code
            elif isinstance(error, auth.LoginFailError):
                return {'success': False, 'message': 'Login fail.'}, error.status_code
        return error.to_dict(), error.status_code
    @main_app.route('/')
    @main_app.route('/index')
    def index():
        # Liveness check.
        return {
            'success': True,
        }
    @main_app.route('/login', methods=['POST'])
    def login():
        # Credential check against the admins collection; on success the
        # auth manager attaches the session/token to the response.
        payload = request.json
        username = str(payload.get('username', '')).strip()
        password = str(payload.get('password', '')).strip()
        if not username or not password:
            return {'success': False, 'message': 'login fail'}, 403
        if Admin.login(username, password):
            resp = am.login_user({'username': username})
            resp.data = json.dumps({
                'success': True,
                'message': 'login success',
                'data': am.me().to_jsonify()
            })
            return resp
        return {'success': False, 'message': 'login fail'}, 403
    @main_app.route('/logout')
    def logout():
        resp = am.logout_user()
        resp.data = json.dumps({
            'success': True,
            'message': 'logout success',
            'data': None
        })
        return resp
    @main_app.route('/user/me')
    @am.login_required
    def user_me():
        #me = g.me
        return {
            'success': True,
            'data': am.me().to_jsonify()
        }
    @main_app.route('/error')
    def rasie_error():
        # Debug endpoint: always raises, to exercise the error handler.
        raise InvalidError('error', 400)
    return main_app
if __name__ == '__main__':
    # Dev entry point: build the app and run Flask's debug server.
    main_app = main()
    main_app.run(debug=True)
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,196
|
allenyang79/member-system
|
refs/heads/master
|
/configs/default.py
|
import os, sys
# Base configuration; overlaid by the module named via --config.
config = {
    'MODE': 'development',
    #'DB_PATH': os.path.join(os.path.dirname(__file__),'../db')
    'DB_HOST': 'localhost',
    'DB_PORT': 27017,
    'DB_NAME': 'church',
    # NOTE(review): credentials and JWT secret hardcoded for development —
    # a production overlay config must override these.
    'DEFAULT_ADMIN_USERNAME': 'admin',
    'DEFAULT_ADMIN_PASSWORD': '1234',
    'JWT_SECRET': '1&2,s@#sa;jd9',
    'JWT_EXPIRE': 86400
}
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,197
|
allenyang79/member-system
|
refs/heads/master
|
/tests/test_server.py
|
# -*- coding: utf-8 -*-
import os
import sys
import json
import unittest
import bson
from app.config import config
from app.server import main
from app.db import db
from app.models.models import Person
from app.models.models import Group
class TestServer(unittest.TestCase):
    """End-to-end tests of the HTTP API through Flask's test client.

    One app instance per class; the person/group collections are wiped
    after each test so tests stay independent.
    """
    @classmethod
    def setUpClass(self):
        # NOTE: the first argument here is the class (conventionally `cls`).
        self.main_app = main()
        self.main_app.debug = True
        self.client = self.main_app.test_client()
    def tearDown(self):
        db.persons.delete_many({})
        db.groups.delete_many({})
    def test_person_create_update(self):
        """/person/create"""
        post = {
            'name': 'Bill',
            'phone_0': '0988'
        }
        r = self.client.post('/person/create', data=json.dumps(post), content_type='application/json')
        self.assertEqual(r.status_code, 200)
        person_id = json.loads(r.data)['data']['person_id']
        # Created person is readable back with the posted values.
        r = self.client.get('/person/one/%s' % person_id)
        self.assertEqual(r.status_code, 200)
        _person = json.loads(r.data)['data']
        self.assertEqual(_person['name'], post['name'])
        self.assertEqual(_person['phone_0'], post['phone_0'])
        post = {
            'phone_1': 'phine_1_update',
            'address_0': 'address_0_update'
        }
        # Partial update only touches the posted fields.
        r = self.client.post('/person/one/%s/update' % person_id, data=json.dumps(post), content_type='application/json')
        self.assertEqual(r.status_code, 200)
        r = self.client.get('/person/one/%s' % person_id)
        self.assertEqual(r.status_code, 200)
        _person = json.loads(r.data)['data']
        self.assertEqual(_person['phone_1'], post['phone_1'])
        self.assertEqual(_person['address_0'], post['address_0'])
    def test_person_build_relation(self):
        """/person/<_id>/relation"""
        db.persons.insert_many([{
            '_id': 'id_0',
            'name': 'Bill'
        }, {
            '_id': 'id_1',
            'name': 'John'
        }])
        post = {
            'rel': 'family',
            'person_id': 'id_1'
        }
        r = self.client.post('/person/id_0/relation', data=json.dumps(post), content_type='application/json')
        self.assertEqual(r.status_code, 200)
        # The relation is expected to be reciprocal (written on both sides).
        for row in db.persons.find():
            if row['_id'] == 'id_0':
                self.assertIn({'rel': 'family', 'person_id': 'id_1'}, row['relations'])
            if row['_id'] == 'id_1':
                self.assertIn({'rel': 'family', 'person_id': 'id_0'}, row['relations'])
        # A duplicate relation must be rejected.
        r = self.client.post('/person/id_0/relation', data=json.dumps(post), content_type='application/json')
        self.assertEqual(r.status_code, 400)
    def test_person_list(self):
        """/person/list"""
        db.persons.insert_many([{
            '_id': 'id_1',
            'name': 'Bill'
        }, {
            '_id': 'id_2',
            'name': 'John'
        }, {
            '_id': 'id_3',
            'name': 'Mary',
        }])
        r = self.client.get('/person/list')
        self.assertEqual(r.status_code, 200)
        result = json.loads(r.data)['data']
        for row in result:
            if row['person_id'] == 'id_1':
                self.assertEqual(row['name'], 'Bill')
            elif row['person_id'] == 'id_2':
                self.assertEqual(row['name'], 'John')
            elif row['person_id'] == 'id_3':
                self.assertEqual(row['name'], 'Mary')
        # `term` filters by (case-insensitive) name search.
        r = self.client.get('/person/list?term=john')
        self.assertEqual(r.status_code, 200)
        result = json.loads(r.data)['data']
        self.assertEqual(result[0]['name'], 'John')
    def test_person_one(self):
        """/person/one/<_id>"""
        db.persons.insert_many([{
            '_id': 'id_1',
            'name': 'Bill'
        }, {
            '_id': 'id_2',
            'name': 'John'
        }])
        r = self.client.get('/person/one/id_1')
        self.assertEqual(r.status_code, 200)
        result = json.loads(r.data)['data']
        self.assertEqual(result['person_id'], 'id_1')
        self.assertEqual(result['name'], 'Bill')
    def test_group(self):
        """/group/create"""
        payload = {
            'name': 'group-1',
            'note': 'this is note'
        }
        r = self.client.post('/group/create', data=json.dumps(payload), content_type='application/json')
        self.assertEqual(r.status_code, 200)
        result = json.loads(r.data)['data']
        group_id = result['group_id']
        _group = db.groups.find_one({'_id': group_id})
        self.assertEqual(_group['name'], payload['name'])
        self.assertEqual(_group['note'], payload['note'])
        payload = {
            'name': 'group-1-update',
        }
        r = self.client.post('/group/one/%s/update' % group_id, data=json.dumps(payload), content_type='application/json')
        self.assertEqual(r.status_code, 200)
        result = json.loads(r.data)['data']
        _group = db.groups.find_one({'_id': result['group_id']})
        self.assertEqual(_group['name'], payload['name'])
    def test_group_list(self):
        """/group/list"""
        db.groups.insert_many([{
            '_id': 'id_0',
            'name': 'group-0'
        }, {
            '_id': 'id_1',
            'name': 'group-1'
        }])
        r = self.client.get('/group/list')
        result = json.loads(r.data)['data']
        for row in result:
            if row['group_id'] == 'id_0':
                self.assertEqual(row['name'], 'group-0')
            elif row['group_id'] == 'id_1':
                self.assertEqual(row['name'], 'group-1')
        r = self.client.get('/group/one/id_1')
        result = json.loads(r.data)['data']
        self.assertEqual(result['name'], 'group-1')
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,198
|
allenyang79/member-system
|
refs/heads/master
|
/app/view.py
|
# -*- coding: utf-8 -*-
import re
from flask import Blueprint#, render_template, abort
from flask import request, jsonify
from app.logger import logger
from app.error import InvalidError
from app.models.models import Person, Group
blueprint = Blueprint('view', __name__)
######################
# Person
######################
@blueprint.route('/person/one/<_id>')
def person_one(_id):
    """Return a single person's JSON representation by id.

    Raises InvalidError (HTTP 400) when no such person exists, consistent
    with `person_one_update`/`person_build_relation`; previously a missing
    person caused `None.to_jsonify()` -> AttributeError (HTTP 500).
    """
    person = Person.get_one(_id)
    if not person:
        raise InvalidError('Person(%s) is not existed.' % _id)
    return {
        'success': True,
        'data': person.to_jsonify()
    }
@blueprint.route('/person/one/<_id>/update', methods=['POST'])
def person_one_update(_id):
    """Update the writable fields of an existing person from the JSON body."""
    target = Person.get_one(_id)
    if not target:
        raise InvalidError('Person(%s) is not existed.' % _id)
    # whitelist of fields the client is allowed to set (order preserved)
    editable_fields = (
        'name',
        'phone_0', 'phone_1', 'phone_2',
        'address_0', 'address_1',
        'email_0', 'email_1',
        'education', 'job',
        'birthday', 'register_day', 'unregister_day',
        'baptize_date', 'baptize_priest',
        'gifts', 'groups', 'events',
        'note',
    )
    target.update_from_jsonify(request.json, editable_fields)
    target.save()
    return {'success': True, 'data': target.to_jsonify()}
@blueprint.route('/person/<_id>/relation', methods=['POST'])
def person_build_relation(_id):
    """Record a mutual relation between person `_id` and payload['person_id']."""
    data = request.json
    if not ('rel' in data and 'person_id' in data):
        raise InvalidError('`rel` and `person_id` should in payload.')
    person = Person.get_one(_id)
    if not person:
        raise InvalidError('Person(%s) is not existed.' % _id)
    # due=True makes the relation symmetric (stored on both persons)
    person.build_relation(data['rel'], data['person_id'], due=True)
    return {'success': True}
@blueprint.route('/person/list')
def person_list():
    """List persons, optionally filtered by a case-insensitive name `term`."""
    term = str(request.values.get('term', ''))
    group = str(request.values.get('group', ''))
    #limit = int(request.values.get('limit', 0))
    #offset = int(request.values.get('offset', 0))
    criteria = {}
    if term:
        criteria['name'] = {'$regex': re.escape(term), '$options': 'i'}
    if group:
        # group filtering is not implemented yet
        pass
        #query['name'] = {'$regex': re.escape(term), '$options': 'i'}
    people = [person.to_jsonify() for person in Person.fetch(criteria)]
    return {
        'success': True,
        'data': people,
    }
@blueprint.route('/person/create', methods=['POST'])
def person_create():
    """Create a new person from the JSON body and return it."""
    created = Person.create(request.json)
    return {'success': True, 'data': created.to_jsonify()}
######################
# group
######################
@blueprint.route('/group/create', methods=['POST'])
def group_create():
    """Create a new group from the JSON body and return it."""
    created = Group.create(request.json)
    return {'success': True, 'data': created.to_jsonify()}
@blueprint.route('/group/one/<_id>/update', methods=['POST'])
def group_one_update(_id):
    """Update an existing group from the JSON body."""
    target = Group.get_one(_id)
    if not target:
        raise InvalidError('Group(%s) is not existed.' % _id)
    target.update_from_jsonify(request.json)
    target.save()
    return {'success': True, 'data': target.to_jsonify()}
@blueprint.route('/group/one/<_id>')
def group_one(_id):
    """Return a single group's JSON representation by id.

    Raises InvalidError (HTTP 400) when the group does not exist, matching
    `group_one_update`; previously a missing group caused
    `None.to_jsonify()` -> AttributeError (HTTP 500).
    """
    group = Group.get_one(_id)
    if not group:
        raise InvalidError('Group(%s) is not existed.' % _id)
    return {
        'success': True,
        'data': group.to_jsonify()
    }
@blueprint.route('/group/list')
def group_list():
    """Return every group as a JSON list."""
    groups = [group.to_jsonify() for group in Group.fetch()]
    return {
        'success': True,
        'data': groups,
    }
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,199
|
allenyang79/member-system
|
refs/heads/master
|
/tests/test_models.py
|
# -*- coding: utf-8 -*-
import os
import sys
import datetime
import unittest
from app.config import config
from app.db import db
from app.models.models import Person
from app.models.models import Group
class TestModel(unittest.TestCase):
    """Integration tests for the Person and Group models against the live db."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_person(self):
        """Person"""
        # create() persists immediately and maps payload keys to model fields
        p = Person.create({
            'name': 'John',
            'phone_0': '0988'
        })
        self.assertEqual(p.name, 'John')
        self.assertEqual(p.phone_0, '0988')
        # attribute assignment mutates the in-memory model only until save()
        p.name = 'Bill'
        self.assertEqual(p.name, 'Bill')
        p.phone_1 = '0989'
        self.assertEqual(p.phone_1, '0989')
        p.birthday = datetime.datetime.strptime('2016-11-12', '%Y-%m-%d')
        p.save()
        # verify the persisted document directly in MongoDB
        raw = db.persons.find_one({'_id': p.get_id()})
        self.assertEqual(raw['name'], 'Bill')
        self.assertEqual(raw['phone_0'], '0988')
        self.assertEqual(raw['birthday'], datetime.datetime.strptime('2016-11-12', '%Y-%m-%d'))
        with self.assertRaises(Exception) as ctx:
            # can only assign datetime object to `birthday`
            p.birthday = 'anything'
        p = Person.get_one(p.get_id())
        p_other = Person.create({
            'name': 'Mary'
        })
        # due=True records the relation symmetrically on both persons
        p.build_relation('family', p_other.get_id(), due=True)
        p = Person.get_one(p.get_id())
        self.assertIn({
            'rel': 'family',
            'person_id': p_other.get_id()
        }, p.relations)
        p_other = Person.get_one(p_other.get_id())
        self.assertIn({
            'rel': 'family',
            'person_id': p.get_id()
        }, p_other.relations)
        # test fetch
        fetch_result = Person.fetch()
        self.assertEqual(fetch_result.total, 2)
        for p in fetch_result:
            self.assertIsInstance(p, Person)
    def test_group(self):
        """Group"""
        payload = {
            'name': 'group-01',
            'note': 'this is my group'
        }
        g = Group.create(payload)
        self.assertEqual(g.name, payload['name'])
        self.assertEqual(g.note, payload['note'])
        # created group must be persisted with matching fields
        raw = db.groups.find_one({'_id': g.get_id()})
        self.assertEqual(g.name, raw['name'])
        self.assertEqual(g.note, raw['note'])
        g.name = 'group-01-fix'
        g.save()
        raw = db.groups.find_one({'_id': g.get_id()})
        self.assertEqual(g.name, 'group-01-fix')
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,200
|
allenyang79/member-system
|
refs/heads/master
|
/app/utils.py
|
# -*- coding: utf-8 -*-
from werkzeug.routing import BaseConverter
import json
import bson
import bson.json_util
class BSONJSONEncoder(json.JSONEncoder):
    """JSON encoder that first tries bson.json_util for BSON-specific types."""
    def default(self, o):
        try:
            # handles ObjectId, datetime, etc. the way pymongo expects
            return bson.json_util.default(o)
        except Exception:
            # not a BSON type: defer to the stock encoder (raises TypeError)
            return super(BSONJSONEncoder, self).default(o)
class BSONJSONDecoder(json.JSONDecoder):
    """JSON decoder hard-wired to bson.json_util.object_hook."""
    def __init__(self, *args, **kargs):
        # any caller-supplied object_hook is deliberately discarded
        kargs.pop('object_hook', None)
        super(BSONJSONDecoder, self).__init__(object_hook=bson.json_util.object_hook, *args, **kargs)
class ObjectIdConverter(BaseConverter):
    """Werkzeug URL converter mapping path segments to bson.ObjectId values."""

    def to_python(self, value):
        # raw URL segment -> ObjectId (raises InvalidId on malformed input)
        return bson.ObjectId(value)

    def to_url(self, value):
        # BUGFIX: the previous code called the *unbound*
        # `BaseConverter.to_url(value['$oid'])` (missing `self`) and indexed
        # an ObjectId like a dict; both raised at runtime. Delegate properly
        # with the string form of the ObjectId instead.
        return super(ObjectIdConverter, self).to_url(str(value))
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,201
|
allenyang79/member-system
|
refs/heads/master
|
/app/models/__init__.py
|
# -*- coding: utf-8 -*-
from .orm import *
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,202
|
allenyang79/member-system
|
refs/heads/master
|
/tests/test_auth.py
|
# -*- coding: utf-8 -*-
import Cookie
import os
import sys
import json
import unittest
import bson
import werkzeug.http
import jwt
import app.server as server
from app.config import config
from app.db import db
from app.error import InvalidError
from app.auth import UnauthorizedError, LoginFailError
from app.models.models import Admin
import mock
class TestServer(unittest.TestCase):
    """End-to-end tests for login / JWT-cookie authentication on the Flask app."""
    @classmethod
    def setUpClass(cls):
        # one shared app instance for the class; debug surfaces tracebacks
        cls.main_app = server.main()
        cls.main_app.debug = True
    def setUp(self):
        self.client = self.main_app.test_client()
    def tearDown(self):
        # wipe the collections the tests may touch between cases
        db.persons.delete_many({})
        db.groups.delete_many({})
    def test_unauthorized_error(self):
        """Test UnauthorizedError is a InvalidError."""
        err = UnauthorizedError("unauthorized.")
        self.assertIsInstance(err, InvalidError)
        err = LoginFailError("login fail.")
        self.assertIsInstance(err, InvalidError)
    def test_heartbeat(self):
        """Test heartbeat."""
        r = self.client.get('/')
        self.assertEqual(json.loads(r.data), {
            'success': True
        })
        r = self.client.get('/index')
        self.assertEqual(json.loads(r.data), {
            'success': True
        })
    def test_default_admin(self):
        """Test default admin."""
        # the server seeds a default admin account from config on startup
        admin = db.admins.find_one({'_id': config['DEFAULT_ADMIN_USERNAME']})
        self.assertTrue(admin)
        post = {
            'username': config['DEFAULT_ADMIN_USERNAME'],
            'password': config['DEFAULT_ADMIN_PASSWORD']
        }
        r = self.client.post('/login', data=json.dumps(post), content_type='application/json')
        self.assertEqual(r.status_code, 200)
    def test_auth(self):
        # login with a known admin must set a decodable `jwt` cookie
        db.admins.insert_many([{
            '_id': 'john',
            'password': Admin.hash_password('1234'),
            'enabled': True
        }])
        post = {
            'username': 'john',
            'password': '1234'
        }
        r = self.client.post('/login', data=json.dumps(post), content_type='application/json')
        self.assertEqual(r.status_code, 200)
        cookies = r.headers.getlist('Set-Cookie')
        encoded = None
        for cookie in cookies:
            # NOTE: `.items()[0]` relies on Python 2 (dict.items() is a list)
            key, value = werkzeug.http.parse_cookie(cookie).items()[0]
            if key == 'jwt':
                encoded = value
        payload = jwt.decode(encoded, self.main_app.config['JWT_SECRET'], algorithms=['HS256'])
        self.assertEqual(payload['username'], 'john')
        # the cookie alone should authenticate subsequent requests
        r = self.client.get('/user/me')
        self.assertEqual(r.status_code, 200)
        result = json.loads(r.data)['data']
        self.assertEqual(result['admin_id'], 'john')
    def test_unauth(self):
        """Test unauth."""
        r = self.client.get('/user/me')
        self.assertEqual(r.status_code, 403)
    def test_raise_error(self):
        """Test raise error."""
        r = self.client.get('/error')
        self.assertEqual(r.status_code, 400)
        self.assertEqual(json.loads(r.data), {'success': False, 'message': 'error'})
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,203
|
allenyang79/member-system
|
refs/heads/master
|
/tests/test_orm.py
|
# -*- coding: utf-8 -*-
import json
import os
import sys
import datetime
from collections import namedtuple
import unittest
from app.config import config
from app.db import db
from app.models import ModelError, ModelInvaldError, ModelDeclareError
from app.models import Meta, Base, ClassReadonlyProperty
from app.models import Field, IDField, StringField, BoolField, IntField, DateField, ListField, TupleField
class TestDB(unittest.TestCase):
    """Tests for the home-grown ORM layer (Base models and Field descriptors)."""
    def tearDown(self):
        # drop every collection so each test starts from an empty database
        for col_name in db.collection_names():
            db[col_name].drop()
    def test_db(self):
        """ Test basic db operator. """
        db.tests.insert_one({'name': 'test-name'})
        r = db.tests.find_one({'name': 'test-name'})
        self.assertEqual(r['name'], 'test-name')
        db.tests.insert_one({'_id': '_id', 'a': 'A', 'b': 'B', 'c': 'c'})
    def test_operator(self):
        """ Test declare a ModelClass. """
        Point = namedtuple('Point', ['x', 'y'], False)
        class Foo(Base):
            _table = ClassReadonlyProperty('foos')
            _primary_key = ClassReadonlyProperty('foo_id')
            foo_id = IDField('_id')
            str_field = StringField()
            default_str_field = StringField(default='hello')
            date_field = DateField()
            int_field = IntField()
            bool_field = BoolField()
            list_field = ListField()
            tuple_field = TupleField(np=Point, default=lambda: Point(x=0, y=0))
        # every declared field must be registered in the class config
        for field_key in ('foo_id', 'str_field', 'default_str_field', 'date_field', 'int_field', 'bool_field', 'list_field', 'tuple_field'):
            self.assertIn(field_key, Foo._config)
        class Bar(Base):
            _table = ClassReadonlyProperty('bars')
            _primary_key = ClassReadonlyProperty('_id')
        # each model class keeps its own, independent config
        self.assertNotEqual(Foo._config, Bar._config)
        self.assertEqual(Foo._primary_key, 'foo_id')
        self.assertEqual(Foo._table, 'foos')
        self.assertEqual(Foo.foo_id.raw_field_key, '_id')
        foo = Foo()
        self.assertEqual(foo._config, Foo._config)
        self.assertTrue(foo.is_new())
        self.assertEqual(foo.default_str_field, 'hello')
        # create() persists and assigns an id
        foo = Foo.create({'str_field': 'any string'})
        self.assertFalse(foo.is_new())
        self.assertIsNotNone(foo.foo_id)
        self.assertEqual(foo.str_field, 'any string')
        self.assertEqual(foo.int_field, 0)
        foo.int_field = 100
        self.assertEqual(foo.int_field, 100)
        # IntField coerces numeric strings on assignment
        foo.int_field = '200'
        self.assertEqual(foo.int_field, 200)
        self.assertIsNone(foo.date_field)
        # NOTE: `01` is a Python-2-only integer literal (kept verbatim)
        foo.date_field = datetime.datetime(2016, 12, 01, 1, 2, 3, 4)
        # DateField truncates a datetime down to a date
        self.assertEqual(foo.date_field, datetime.date(2016, 12, 1))
        self.assertEqual(foo.list_field, [])
        foo.list_field = [0, 1, 2, 3]
        self.assertEqual(foo.list_field, [0, 1, 2, 3])
        foo.str_field = None
        self.assertEqual(foo._attrs['str_field'], None)
        foo.save()
        # the persisted document mirrors the raw attribute dict exactly
        _foo = db.foos.find_one({'_id': foo.foo_id})
        self.assertEqual(_foo, foo._attrs)
        with self.assertRaises(ModelInvaldError):
            foo.date_field = 1234
        with self.assertRaises(ModelError) as ctx:
            # unknown keys are rejected on create
            foo = Foo.create({'other': 'other'})
    def test_jsonify_encode(self):
        """ Test jsonify encode to dict for json dumps."""
        Point = namedtuple('Point', ['x', 'y'], False)
        class Foo(Base):
            _table = ClassReadonlyProperty('foos')
            _primary_key = ClassReadonlyProperty('foo_id')
            foo_id = IDField('_id')
            str_field = StringField(default='this is default')
            date_field = DateField()
            int_field = IntField()
            bool_field = BoolField()
            list_field = ListField()
            tuple_field = TupleField(np=Point)
        foo = Foo.create({
            'int_field': 100,
            'list_field': [1, 2, 3],
        })
        _foo = foo.to_jsonify()
        # the class name is embedded so the payload can be decoded later
        self.assertEqual('Foo', _foo['__class__'])
        self.assertEqual(_foo['foo_id'], foo.foo_id,)
        self.assertEqual(_foo['str_field'], 'this is default')
        self.assertEqual(_foo['int_field'], 100)
        self.assertEqual(_foo['list_field'], [1, 2, 3])
        # unset (None) fields are omitted from the encoded dict
        self.assertNotIn('tuple_field', _foo)
        self.assertNotIn('date_field', _foo)
        Point = namedtuple('Point', ['x', 'y'], False)
        class Foo(Base):
            _table = ClassReadonlyProperty('foos')
            _primary_key = ClassReadonlyProperty('foo_id')
            foo_id = IDField('_id')
            list_field = ListField()
            tuple_field = TupleField(np=Point, default=lambda: Point(x=1, y=2))
        foo = Foo.create({})
        _foo = foo.to_jsonify()
        # defaulted tuple fields serialize to plain dicts
        self.assertEqual(_foo['tuple_field'], {'x': 1, 'y': 2})
        self.assertEqual(_foo['list_field'], [])
    def test_jsonify_decode(self):
        """ Test jsonify decode from dict for json loads."""
        Point = namedtuple('Point', ['x', 'y'], False)
        class Foo(Base):
            _table = ClassReadonlyProperty('foos')
            _primary_key = ClassReadonlyProperty('foo_id')
            foo_id = IDField('_id')
            str_field = StringField(default='this is default')
            date_field = DateField()
            int_field = IntField()
            bool_field = BoolField()
            list_field = ListField()
            tuple_field = TupleField(np=Point)
        json_str = '''{
            "__class__": "Foo",
            "foo_id": "1234",
            "str_field": "anything",
            "int_field": 123,
            "date_field": "2014-12-13",
            "bool_field": false,
            "tuple_field":{
                "x": 1,
                "y": 2
            }
        }'''
        foo = Foo.from_jsonify(json.loads(json_str))
        self.assertEqual(foo.foo_id, '1234')
        self.assertEqual(foo.int_field, 123)
        self.assertEqual(foo.bool_field, False)
        # ISO date strings decode into datetime.date values
        self.assertEqual(foo.date_field, datetime.date(2014, 12, 13))
        Point = namedtuple('Point', ['x', 'y'], False)
        self.assertEqual(foo.tuple_field, Point(x=1, y=2))
    def test_declare_error(self):
        """ Test by error case."""
        # a model without _table/_primary_key must fail at declaration time
        with self.assertRaises(ModelDeclareError) as ctx:
            class Foo(Base):
                pass
        # two IDFields in one model must also fail
        with self.assertRaises(ModelDeclareError) as ctx:
            class Foo(Base):
                _id = IDField()
                _id_2 = IDField()
    def test_fetch(self):
        """Test fetch by Model."""
        class Foo(Base):
            _table = ClassReadonlyProperty('foos')
            _primary_key = ClassReadonlyProperty('_id')
            _id = IDField()
            name = StringField()
            age = IntField()
        foos = [{
            '_id': 'id_0',
            'name': 'Bill',
            'age': 10,
        }, {
            '_id': 'id_1',
            'name': 'John',
            'age': 30
        }, {
            '_id': 'id_2',
            'name': 'Mary',
            'age': 20
        }, {
            '_id': 'id_3',
            'name': 'Tommy',
            'age': 40
        }]
        db.foos.insert_many(foos)
        r = Foo.fetch({})
        self.assertEqual(r.total, 4)
        # NOTE: assertItemsEqual is Python 2 (assertCountEqual in Python 3)
        self.assertItemsEqual([f.name for f in r], [f['name'] for f in foos])
        r = Foo.fetch({'_id': 'id_2'})
        self.assertEqual(r.total, 1)
        self.assertEqual(r[0]._id, 'id_2')
        self.assertEqual(r[0].name, 'Mary')
        self.assertEqual(r[0].age, 20)
        # fetch supports raw Mongo query operators
        r = Foo.fetch({'age': {'$gt': 20}})
        self.assertEqual(r.total, 2)
        self.assertTrue(r[0].age > 20)
        self.assertTrue(r[1].age > 20)
        r = Foo.fetch({'name': 'John'})
        self.assertEqual(r.total, 1)
        self.assertEqual(r[0].name, 'John')
|
{"/app/auth.py": ["/app/models/models.py"], "/app/models/orm.py": ["/app/db.py"], "/app/models/models.py": ["/app/db.py", "/app/models/__init__.py"], "/tests/test_db.py": ["/app/config.py", "/app/db.py"], "/app/db.py": ["/app/config.py"], "/app/server.py": ["/app/config.py", "/app/utils.py", "/app/auth.py", "/app/logger.py", "/app/view.py", "/app/models/models.py"], "/tests/test_server.py": ["/app/config.py", "/app/server.py", "/app/db.py", "/app/models/models.py"], "/app/view.py": ["/app/logger.py", "/app/models/models.py"], "/tests/test_models.py": ["/app/config.py", "/app/db.py", "/app/models/models.py"], "/app/models/__init__.py": ["/app/models/orm.py"], "/tests/test_auth.py": ["/app/server.py", "/app/config.py", "/app/db.py", "/app/auth.py", "/app/models/models.py"]}
|
17,205
|
mhussainphys/J.A.R.V.I.S.
|
refs/heads/master
|
/AutoPilot.py
|
import argparse
import os
import time

import AllModules as am
import ParseFunctions as pf
import TCP_com as tp  #in-built 5s delay in all of them
################################### Important #########################################
######## This parameter defines at what time it is safe to start a new run ############
######## It should be about 30 seconds before the arrival time of each spill ##########
######## Since spills come every minute, this is defined as a number of seconds #######
######## after the start of each clock minute (only meaningful modulo 60 seconds) #####
################ Periodically make sure this value makes sense. #######################
#######################################################################################
# Seconds past the start of each clock minute at which it is safe to start a run.
StartSeconds = 9
# Seconds past the start of each clock minute at which the run is stopped.
StopSeconds = 40
# Number of beam spills collected per run (spills arrive once per minute).
NumSpillsPerRun = 2
#################################Parsing arguments######################################
# Command-line interface.
# BUGFIXES vs. the original: the long description literal was split across
# bare source lines (a SyntaxError) and is now a single triple-quoted string,
# and '--TriggerChannel' used the misspelled keyword `deafult=` (a TypeError
# from argparse), fixed to `default=`.
parser = argparse.ArgumentParser(description='''Information for running the AutoPilot program.
/n /n General Instructions: If using OTSDAQ make sure the start and stop seconds in the beginning of the program are hard coded correctly. /n Make sure to add sensor and configuration after each controlled access and pass it as an argument to this script. /n
/n /n TekScope Specific Instructions: /n Make sure you hard code the dpo_fastframe path. /n If using the OTSDAQ with TekScope make sure the Use_otsdaq boolean is True in dpo_fastframe script. /n Make Sure you pass all four Scope trigger and channel settings.
/n /n Other Digitizer Specific Instructions: /n If not running the TekScope make sure that the run file name in TCP_com is correct.''')
parser.add_argument('-rtm', '--RunTableMode', type=int, default=0, help='Give 1 if you are using the run table', required=False)
parser.add_argument('-ac', '--AlreadyConfigured', type=int, default=0, help='Give 1 if the OTSDAQ is already configured', required=False)
parser.add_argument('-de', '--Debug', type=int, default=0, required=False)
parser.add_argument('-io', '--IsOTSDAQ', type=int, default=0, help='Give 1 if using OTSDAQ', required=False)
parser.add_argument('-it', '--IsTelescope', type=int, default=0, help='Give 1 if using the telescope', required=False)
parser.add_argument('-di', '--Digitizer', type=str, default='TekScope', help='Give VME or DT5742 or TekScope', required=False)
parser.add_argument('-se', '--Sensor', type=int, help='Make sure to add the sensor record in the run table. Give sensor S/N from the run table', required=False)
parser.add_argument('-conf', '--Configuration', type=int, help='Make sure to add the configuration in the run table. Give COnfiguration S/N from the run table', required=False)
parser.add_argument('-sac', '--StopAndContinue', type=int, default=0, help='This bool should be 1 if the OTSDAQ is already in the running state and you want to stop and it and continue running it.', required=False)
######################### Only care about this if using TekScope #########################
parser.add_argument('-tl', '--TriggerLevel', type=float, default=-0.01, help='Trigger level in volts', required=False)
parser.add_argument('-tc', '--TriggerChannel', type=str, default='CH4', help='Channel to trigger on', required=False)
parser.add_argument('-ne', '--NumEvents', type=int, default=50000, help='Number of events', required=False)
parser.add_argument('-tne', '--TotalNumEvents', type=int, default=50000, help='Total number of events', required=False)
args = parser.parse_args()
# Unpack the parsed arguments into the module-level names used below.
RunTableMode = args.RunTableMode
AlreadyConfigured = args.AlreadyConfigured
Debug = args.Debug
# NOTE(review): the flag is stored here as `IsOTS`; later code must use this
# exact name (there is no `UseOTS`).
IsOTS = args.IsOTSDAQ
IsTelescope = args.IsTelescope
Digitizer = args.Digitizer
Sensor = args.Sensor
Configuration = args.Configuration
StopAndContinue = args.StopAndContinue
TriggerLevel = args.TriggerLevel
TriggerChannel = args.TriggerChannel
NumEvents = args.NumEvents
TotalNumEvents = args.TotalNumEvents
########################### Only when Run table is used ############################
# BUGFIX: `IsScope` and `StartScopeCMD` were previously bound only inside the
# `if RunTableMode:` branch, so the main loop raised NameError whenever the
# run table was disabled.  Bind them unconditionally here.
IsScope = (Digitizer == 'TekScope')
if IsScope:
    # dpo_fastframe drives the scope (and the OTSDAQ, when enabled there).
    StartScopeCMD = "python %s --trig=%f --trigCh=%s --numFrames=%i --totalNumber=%i" % (
        am.DPOFastFramePath, TriggerLevel, TriggerChannel, NumEvents, TotalNumEvents)
if RunTableMode:
    # initial status strings for the run-table record
    if IsTelescope:
        Tracking = 'Not started'
    else:
        Tracking = 'N/A'
    if IsScope:
        Conversion = 'Not started'
    else:
        Conversion = 'N/A'
    TimingDAQ = 'Not started'
    TimingDAQNoTracks = 'Not started'
    # Get Sensor ID and Configuration ID list
    if pf.QueryGreenSignal(True):
        # NOTE(review): both lookups pass 'Configuration number' as the field
        # name -- the Sensor lookup presumably should use a sensor-specific
        # field; confirm against the run-table schema before changing.
        SensorID = pf.GetFieldIDOtherTable('Sensor', 'Configuration number', str(Sensor), False)
        ConfigID = pf.GetFieldIDOtherTable('Config', 'Configuration number', str(Configuration), False)
        if not SensorID or not ConfigID:
            raise Exception('\n The sensor and configuration you passed as argument are not in the table!!!!!!!!!!!!!!!!!!!! \n')
            ##### Exit the program ######
#################### CONFIGURING AND INITIALIZING THE OTSDAQ ######################
if not Debug and not AlreadyConfigured and UseOTS:
print 'INTITIALIZING THE OTS-DAQ'
init_ots()
if not Debug and not AlreadyConfigured and UseOTS:
print 'CONFIGURING THE OTS-DAQ'
config_ots()
time.sleep(25)
while True:
if not IsScope and UseOTS and StopAndContinue:
############### Wait until stop time ##################
am.wait_until(StopSeconds)
print "Stopping run at %s" % (am.datetime.now().time())
if not debug: stop_ots(False)
StopAndContinue = False
time.sleep(20)
elif not StopAndContinue:
############ Wait for safe time to start run ##########
am.wait_until(StartSeconds)
if not Debug and IsScope:
# In case of the scope, running the dpo_fastframe script which will take care of the otsdaq.
os.system(StartScopeCMD)
time.sleep(1)
elif not Debug and not IsScope:
################### Starting the run ###################
StartTime = str(am.datetime.now())
print "Starting run at %s" % (StartTime)
RunNumber = tp.start_ots(False)
time.sleep(60*(NumSpillsPerRun-1))
am.wait_until(StopSeconds)
StopTime = str(am.datetime.now())
print "Stopping run at %s" % (StopTime)
if not debug: tp.stop_ots(False)
if RunTableMode:
Duration = StopTime - StartTime
if pf.QueryGreenSignal(True): pf.NewRunRecord(RunNumber, StartTime, Duration, Digitizer, Tracking, Conversion, TimingDAQ, TimingDAQNoTracks, SensorID, ConfigID, False)
|
{"/ProcessExec.py": ["/ProcessCMDs.py", "/ParseFunctions.py"], "/ProcessCMDs.py": ["/ParseFunctions.py"]}
|
17,206
|
mhussainphys/J.A.R.V.I.S.
|
refs/heads/master
|
/ProcessExec.py
|
import AllModules as am
import ProcessCMDs as pc
import ParseFunctions as pf
def ProcessLog(ProcessName, RunNumber, ProcessOutput):
    """Append `ProcessOutput` to the per-run log file of `ProcessName`.

    Uses a `with` block so the handle is closed even if the write fails
    (the original leaked the file handle on error).
    """
    log_path = "/home/daq/fnal_tb_18_11/ProcessLog/%s/run%d.txt" % (ProcessName, RunNumber)
    with open(log_path, "a+") as handle:
        handle.write(ProcessOutput)
def exists_remote(host, path):
    """Return True iff `path` exists as a regular file on `host` (over ssh).

    Raises Exception when ssh itself fails (exit status other than 0/1).
    BUGFIX: `subprocess` and `pipes` were never imported at the top of this
    file (only AllModules/ProcessCMDs/ParseFunctions are), so the original
    raised NameError on first call; import them locally here.
    """
    import pipes       # Python 2 stdlib (shlex.quote in Python 3)
    import subprocess
    status = subprocess.call(['ssh', host, 'test -f {0}'.format(pipes.quote(path))])
    if status == 0:
        return True
    if status == 1:
        return False
    raise Exception('SSH failed')
def TrackFileRemoteExists(RunNumber):
    """Return (exists, path) for the converted tracking file on rulinux.

    BUGFIX: the original built the path in a local variable but then
    returned `am.TrackFilePathRulinux` -- an attribute look-up on the
    AllModules module instead of the local it had just computed.
    """
    TrackFilePathRulinux = am.BaseTrackDirRulinux + 'CMSTimingConverted/Run%i_CMSTiming_converted.root' % RunNumber
    return exists_remote(am.RulinuxSSH, TrackFilePathRulinux), TrackFilePathRulinux
def TrackFileLocalExists(RunNumber):
    """Return (exists, path) for the locally converted tracking file."""
    local_path = am.BaseTrackDirLocal + 'Run%i_CMSTiming_converted.root' % RunNumber
    return am.os.path.exists(local_path), local_path
def FileSizeBool(FilePath, SizeCut):
    """True when `FilePath` is missing or smaller than `SizeCut` bytes."""
    if not am.os.path.exists(FilePath):
        return True
    return am.os.stat(FilePath).st_size < SizeCut
def ProcessExec(OrderOfExecution, PID, SaveWaveformBool, Version):
    """Run one post-processing stage over every pending run.

    PID selects the stage: 1=Tracking (remote over ssh), 2=Conversion,
    3=TimingDAQ (with tracks), 4=TimingDAQNoTracks.  OrderOfExecution==1
    processes runs in ascending run-number order, otherwise descending.
    Each run's status is pushed to the run table ('Processing', then
    'Complete' or 'Failed' based on the output file's existence/size).
    """
    if PID == 1:
        ProcessName = 'Tracking'
        CMDList, ResultFileLocationList, RunList, FieldIDList = pc.TrackingCMDs(False)
        SizeCut = 10000
    elif PID == 2:
        ProcessName = 'Conversion'
        CMDList, ResultFileLocationList, RunList, FieldIDList = pc.ConversionCMDs(False)
        SizeCut = 10000
    elif PID == 3:
        ProcessName = 'TimingDAQ'
        CMDList, ResultFileLocationList, RunList, FieldIDList = pc.TimingDAQCMDs(SaveWaveformBool, Version, True, False)
        SizeCut = 20000
    elif PID == 4:
        ProcessName = 'TimingDAQNoTracks'
        CMDList, ResultFileLocationList, RunList, FieldIDList = pc.TimingDAQCMDs(SaveWaveformBool, Version, False, False)
        SizeCut = 20000
    else:
        # BUGFIX: an unknown PID previously fell through to unbound locals.
        raise ValueError('Unknown PID %r (expected 1-4)' % (PID,))
    RunListInt = [int(r) for r in RunList]
    RunListInt.sort(reverse=(OrderOfExecution != 1))
    if CMDList:
        for run in RunListInt:
            # BUGFIX: RunList holds *strings* while `run` is an int, so the
            # original `RunList.index(run)` always raised ValueError.
            index = RunList.index(str(run))
            CMD = CMDList[index]
            ResultFileLocation = ResultFileLocationList[index]
            # mark the run as in-flight in the run table
            if pf.QueryGreenSignal(True):
                pf.UpdateAttributeStatus(str(FieldIDList[index]), ProcessName, 'Processing', False)
            if PID == 1:
                # tracking runs remotely on rulinux
                session = am.subprocess.Popen(["ssh", am.RulinuxSSH, str(CMD)], stderr=am.subprocess.PIPE, stdout=am.subprocess.PIPE)
            elif PID == 2:
                # BUGFIX: stderr used `am.PIPE` (no such attribute on
                # AllModules); use am.subprocess.PIPE consistently.
                session = am.subprocess.Popen('source %s; %s' % (am.EnvSetupPath, str(CMD)), stdout=am.subprocess.PIPE, stderr=am.subprocess.PIPE, shell=True)
            else:  # PID 3 or 4
                session = am.subprocess.Popen('cd %s; source %s; %s;cd -' % (am.TimingDAQDir, am.EnvSetupPath, str(CMD)), stdout=am.subprocess.PIPE, stderr=am.subprocess.PIPE, shell=True)
            stdout, stderr = session.communicate()
            ProcessLog(ProcessName, run, stdout)
            # too-small or missing output file means the stage failed
            BadProcessExec = FileSizeBool(ResultFileLocation, SizeCut) or not am.os.path.exists(ResultFileLocation)
            status = 'Failed' if BadProcessExec else 'Complete'
            if pf.QueryGreenSignal(True):
                pf.UpdateAttributeStatus(str(FieldIDList[index]), ProcessName, status, False)
|
{"/ProcessExec.py": ["/ProcessCMDs.py", "/ParseFunctions.py"], "/ProcessCMDs.py": ["/ParseFunctions.py"]}
|
17,207
|
mhussainphys/J.A.R.V.I.S.
|
refs/heads/master
|
/TCP_com.py
|
import socket
import sys
import time
import os
import AllModules as am
def init_ots():
    """Send the OTSDAQ 'Initialize' command over UDP, then pause 5 s for it to take effect."""
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp.sendto("OtherRuns0,Initialize", (am.ip_address, am.use_socket))
    time.sleep(5)
def config_ots():
    """Send the OTSDAQ 'Configure' command (T992Config) over UDP, then pause 5 s."""
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp.sendto("OtherRuns0,Configure,T992Config", (am.ip_address, am.use_socket))
    time.sleep(5)
def start_ots(Delay=False):
    """Start an OTSDAQ run and return the run number that was used.

    Fetches the shared run-number file from the DAQ host, increments it,
    pushes it back, then sends the UDP 'Start' command.

    Delay -- when True, sleep 5 s before returning (e.g. to let the run start).
    """
    copy_cmd = 'scp otsdaq@ftbf-daq-08.fnal.gov:' + am.runFileName + ' ' + am.localRunFileName
    os.system(copy_cmd)
    # 'with' guarantees the handles are closed even if parsing fails
    # (the original left them open on an exception path).
    with open(am.localRunFileName) as runFile:
        nextRun = int(runFile.read().strip())
    with open(am.localRunFileName, "w") as incrementRunFile:
        incrementRunFile.write(str(nextRun + 1) + "\n")
    copy_cmd = 'scp ' + am.localRunFileName + ' otsdaq@ftbf-daq-08.fnal.gov:' + am.runFileName
    os.system(copy_cmd)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    MESSAGE = "OtherRuns0,Start, %d" % (nextRun)
    sock.sendto(MESSAGE, (am.ip_address, am.use_socket))
    # BUG FIX: the original placed this sleep AFTER `return nextRun`,
    # making it unreachable; Delay=True silently did nothing.
    if Delay: time.sleep(5)
    return nextRun
def stop_ots(Delay=True):
    """Send the OTSDAQ 'Stop' command over UDP; optionally pause 5 s afterwards."""
    udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    udp.sendto("OtherRuns0,Stop", (am.ip_address, am.use_socket))
    if Delay:
        time.sleep(5)
|
{"/ProcessExec.py": ["/ProcessCMDs.py", "/ParseFunctions.py"], "/ProcessCMDs.py": ["/ParseFunctions.py"]}
|
17,208
|
mhussainphys/J.A.R.V.I.S.
|
refs/heads/master
|
/ProcessCMDs.py
|
import ParseFunctions as pf
import ProcessRuns as pr
import AllModules as am
################################################################################################################################################################################################################
################################################################################################################################################################################################################
##################################These Functions get run lists for various processes from the run table and returns the list of the respective process running commands #######################################
################################################################################################################################################################################################################ ################################################################################################################################################################################################################
def TrackingCMDs(Debug):
    """Build the tracking-reconstruction commands for every run awaiting tracking.

    Returns (command list, expected output-file paths, run list, field-id list).
    Debug is accepted for signature parity with the other *CMDs helpers.
    """
    RunList, FieldIDList = pr.TrackingRuns(False)
    commands = []
    output_paths = []
    for run in RunList or []:
        commands.append('source %s %d' % (am.HyperscriptPath, run))
        output_paths.append(am.BaseTrackDirLocal + 'Run%d_CMSTiming_converted.root' % run)
    return commands, output_paths, RunList, FieldIDList
def ConversionCMDs(Debug):
    """Build the scope-conversion commands for every run awaiting conversion.

    Returns (command list, expected output-file paths, run list, field-id list).
    """
    RunList, FieldIDList = pr.ConversionRuns(False)
    commands = []
    output_paths = []
    for run in RunList or []:
        commands.append(am.ConversionCMD + str(run))
        output_paths.append(am.RawStageTwoLocalPathScope + 'run_scope' + str(run) + '.root')
    return commands, output_paths, RunList, FieldIDList
def TimingDAQCMDs(SaveWaveformBool, Version, DoTracking, Debug):
    """Build the TimingDAQ Dat2Root commands for all pending runs.

    SaveWaveformBool -- append '--save_meas' to each command when True.
    Version          -- reconstruction code version for normal runs
                        ('Redo' runs use their recorded version instead).
    DoTracking       -- include the pixel-tracking input file when True.
    Debug            -- unused; kept for signature parity with the other *CMDs helpers.

    Returns (command list, expected reco output paths, run list, field-id list).
    """
    RunList, FieldIDList, DigitizerList, RedoList, VersionList = pr.TimingDAQRuns(DoTracking, False)
    DatToRootCMDList = []
    ResultFileLocationList = []
    if RunList:
        # enumerate() instead of RunList.index(run): index() returns the first
        # match, which silently misaligns lists when a run number repeats.
        for Index, run in enumerate(RunList):
            RecoLocalPath = None
            RawLocalPath = None
            Digitizer = (DigitizerList[Index])[0]
            # BUG FIX: the original rebound the `Version` parameter on 'Redo'
            # runs, so every subsequent non-Redo run in the same batch silently
            # used the redone version instead of the requested one.
            RunVersion = VersionList[Index] if RedoList[Index] == 'Redo' else Version
            if Digitizer == 'TekScope': Digitizer = 'NetScopeStandalone'
            RecoBaseLocalPath = am.RecoBaseLocalPath + Digitizer + '/' + RunVersion + '/'
            if not DoTracking: RecoBaseLocalPath = RecoBaseLocalPath + 'RecoWithoutTracks/'
            if not am.os.path.exists(RecoBaseLocalPath): am.os.system('mkdir -p %s' % RecoBaseLocalPath)
            if Digitizer == 'VME' or Digitizer == 'DT5742':
                RawBaseLocalPath = am.RawBaseLocalPath + Digitizer + '/' + RunVersion + '/'
                # Glob once and derive run numbers from the SAME list, so the
                # two lists cannot get out of step between glob calls.
                ListRawFilePath = am.glob.glob(RawBaseLocalPath + '*_Run*')
                ListRawRunNumber = [(x.split("_Run")[1].split(".dat")[0].split("_")[0]) for x in ListRawFilePath]
                RawLocalPath = ListRawFilePath[ListRawRunNumber.index(run)]
                RecoLocalPath = RecoBaseLocalPath + RawLocalPath.split(".dat")[0].split("%s/" % RunVersion)[1] + '.root'
            elif Digitizer == 'NetScopeStandalone':
                RawLocalPath = am.RawStageTwoLocalPathScope + 'run_scope' + str(run) + '.root'
                RecoLocalPath = RecoBaseLocalPath + 'run_scope' + str(run) + '_converted.root'
            ResultFileLocationList.append(RecoLocalPath)
            ConfigFilePath = am.ConfigFileBasePath + Digitizer + '_%s.config' % RunVersion
            DatToRootCMD = './' + Digitizer + 'Dat2Root' + ' --config_file=' + ConfigFilePath + ' --input_file=' + RawLocalPath + ' --output_file=' + RecoLocalPath
            if SaveWaveformBool: DatToRootCMD = DatToRootCMD + ' --save_meas'
            if DoTracking:
                TrackFilePathLocal = am.BaseTrackDirLocal + 'Run%i_CMSTiming_converted.root' % run
                DatToRootCMD = DatToRootCMD + ' --pixel_input_file=' + TrackFilePathLocal
            DatToRootCMDList.append(DatToRootCMD)
    return DatToRootCMDList, ResultFileLocationList, RunList, FieldIDList
|
{"/ProcessExec.py": ["/ProcessCMDs.py", "/ParseFunctions.py"], "/ProcessCMDs.py": ["/ParseFunctions.py"]}
|
17,209
|
mhussainphys/J.A.R.V.I.S.
|
refs/heads/master
|
/ParseFunctions.py
|
import os
import time
from datetime import datetime

import AllModules as am
################################################################################################################################################################################################################
################################################################################################################################################################################################################
#########################################These Functions parse the run table and performs function such as record query, record update, record Addition etc ###################################################
################################################################################################################################################################################################################
################################################################################################################################################################################################################
################### Unicode Operations to form CURL commands ###################
def QueryAllow():
    """Rate-limit Airtable queries to at most 5 per rolling minute.

    The state file at am.QueryFilePath alternates lines: a query counter on
    even lines and a timestamp on odd lines.

    Returns (AllowQuery, TimeToWait): whether a query may proceed now, and if
    not, how many seconds to wait (-1 when no wait is needed).
    """
    QueryFile = open(am.QueryFilePath,"a+")
    ScanLines = [line.rstrip('\n') for line in open(am.QueryFilePath)]
    QueryNumberList = []
    QueryTimeList = []
    TimeToWait = -1
    if ScanLines:
        # BUG FIX: the original used ScanLines.index(entry) % 2, which returns
        # the FIRST occurrence — duplicate lines were misclassified.
        for LineNumber, entry in enumerate(ScanLines):
            if LineNumber % 2 == 0:
                QueryNumberList.append(int(entry))
            else:
                QueryTimeList.append(entry) #Absolute time
    else:
        QueryNumberList.append(0)
    # BUG FIX: was QueryNumberList[len(QueryNumberList - 1)], a TypeError
    # (subtracting 1 from a list instead of from its length).
    LastQueryNumber = QueryNumberList[-1]
    if LastQueryNumber < 5:
        AllowQuery = True
        QueryFile.write(str(LastQueryNumber + 1) + "\n")
        QueryFile.write(str(datetime.now()) + "\n")
        QueryFile.close()
    elif LastQueryNumber == 5:
        QueryFile.close()  # done with the initial handle on this path
        TimeSinceFirstQuery = (datetime.now() - datetime.strptime(QueryTimeList[0],"%Y-%m-%d %H:%M:%S.%f")).total_seconds()
        if TimeSinceFirstQuery > 60:
            # The window expired: reset the state file and count this query as #1.
            AllowQuery = True
            os.system("rm %s" % am.QueryFilePath)
            QueryFile = open(am.QueryFilePath,"a+")
            QueryFile.write(str(1) + "\n")
            QueryFile.write(str(datetime.now()) + "\n")
            QueryFile.close()
        else:
            TimeToWait = 65 - TimeSinceFirstQuery
            AllowQuery = False
    else:
        # Defensive: a corrupted counter (>5) previously raised NameError.
        QueryFile.close()
        AllowQuery = False
    return AllowQuery, TimeToWait
def QueryGreenSignal(Bypass):
    """Block until the rate limiter (QueryAllow) permits a query; return True.

    Bypass -- when True, skip the rate limiter entirely.
    """
    # Dead-code cleanup: the original had unreachable `break` statements
    # after each `return True` inside a `while True` loop.
    if Bypass:
        return True
    while True:
        IsQueryAllowed, TimeToWait = QueryAllow()
        if IsQueryAllowed:
            return True
        time.sleep(TimeToWait)
def DoubleQuotes(string):
    """Wrap *string* in URL-encoded double quotes (%22...%22) for Airtable formulas."""
    return '%22{0}%22'.format(string)
def Curly(string):
    """Wrap *string* in URL-encoded curly braces (%7B...%7D) for Airtable field names."""
    return '%7B{0}%7D'.format(string)
def EqualToFunc(string1, string2):
    """Join two formula fragments with a URL-encoded '=' (%3D)."""
    return '{0}%3D{1}'.format(string1, string2)
def ANDFunc(AttributeNameList, AttributeStatusList):
    """Build an Airtable AND(cond1,cond2,...) filter from parallel name/status lists."""
    conditions = []
    for position, attribute_name in enumerate(AttributeNameList):
        conditions.append(
            EqualToFunc(Curly(attribute_name),
                        DoubleQuotes(AttributeStatusList[position])))
    return 'AND(' + ','.join(conditions) + ')'
def ORFunc(AttributeNameList, AttributeStatusList):
    """Build an Airtable OR(cond1,cond2,...) filter from parallel name/status lists."""
    conditions = []
    for position, attribute_name in enumerate(AttributeNameList):
        conditions.append(
            EqualToFunc(Curly(attribute_name),
                        DoubleQuotes(AttributeStatusList[position])))
    return 'OR(' + ','.join(conditions) + ')'
##################### Main Run Table Operaton functions #########################
def ParsingQuery(NumberOfConditions, ConditionAttributeNames, ConditionAttributeStatus, QueryAttributeName, Debug):
    """Query the run table for records matching all given attribute conditions.

    Returns (values of QueryAttributeName per matching record, record IDs).
    When Debug is True, returns (raw response dict, filter formula) instead.
    """
    Output = []
    FieldID = []
    # BUG FIX: FilterByFormula was initialised to None, so the first
    # concatenation below raised TypeError (None + str). Start from ''.
    FilterByFormula = ''
    headers = {'Authorization': 'Bearer %s' % am.MyKey, }
    for i in range (0, NumberOfConditions):
        if i > 0: FilterByFormula = FilterByFormula + ','
        FilterByFormula = FilterByFormula + EqualToFunc(Curly(ConditionAttributeNames[i]), DoubleQuotes(ConditionAttributeStatus[i]))
    if NumberOfConditions > 1: FilterByFormula = 'AND(' + FilterByFormula + ')'
    response = am.requests.get(am.CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)
    # NOTE(review): literal_eval only parses JSON that happens to be valid
    # Python (e.g. no true/false/null) — confirm am.json isn't preferable.
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug: return ResponseDict, FilterByFormula
    for i in ResponseDict["records"]: Output.append(i['fields'][QueryAttributeName])
    for i in ResponseDict["records"]: FieldID.append(i['id'])
    return Output, FieldID
def GetFieldID(ConditionAttributeName, ConditionAttributeStatus, Debug):
    """Return the record IDs in the run table where the given attribute equals the given status.

    When Debug is True, returns (raw response dict, filter formula) instead.
    """
    formula = EqualToFunc(Curly(ConditionAttributeName), DoubleQuotes(ConditionAttributeStatus))
    auth_headers = {'Authorization': 'Bearer %s' % am.MyKey, }
    response = am.requests.get(am.CurlBaseCommand + '?filterByFormula=' + formula, headers=auth_headers)
    parsed = am.ast.literal_eval(response.text)
    if Debug:
        return parsed, formula
    return [record['id'] for record in parsed["records"]]
def UpdateAttributeStatus(FieldID, UpdateAttributeName, UpdateAttributeStatus, Debug):
    """PATCH a single attribute of the run-table record identified by FieldID.

    When Debug is True, returns the parsed response; otherwise returns None.
    """
    request_headers = {
        'Authorization': 'Bearer %s' % am.MyKey,
        'Content-Type': 'application/json',
    }
    payload = '{"fields":{"%s": ["%s"]}}' % (UpdateAttributeName,UpdateAttributeStatus)
    response = am.requests.patch(am.CurlBaseCommand + '/' + FieldID, headers=request_headers, data=payload)
    parsed = am.ast.literal_eval(response.text)
    if Debug:
        return parsed
def GetFieldIDOtherTable(TableName, ConditionAttributeName, ConditionAttributeStatus, Debug):
    """Return matching record IDs from the 'Sensor' or 'Config' table.

    When Debug is True, returns (raw response dict, filter formula) instead.
    Raises ValueError for any other table name.
    """
    if TableName == 'Sensor' :
        CurlBaseCommand = am.CurlBaseCommandSensor
    elif TableName == 'Config':
        CurlBaseCommand = am.CurlBaseCommandConfig
    else:
        # BUG FIX: an unknown table previously crashed later with
        # UnboundLocalError; fail fast with a clear message instead.
        raise ValueError("Unknown table name: %r" % TableName)
    Output = []
    FilterByFormula = EqualToFunc(Curly(ConditionAttributeName), DoubleQuotes(ConditionAttributeStatus))
    headers = {'Authorization': 'Bearer %s' % am.MyKey, }
    response = am.requests.get(CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug: return ResponseDict, FilterByFormula
    for i in ResponseDict["records"]: Output.append(i['id'])
    return Output
def NewRunRecord(RunNumber, StartTime, Duration, Digitizer, Tracking, Conversion, TimingDAQ, TimingDAQNoTracks, SensorID, ConfigID, Debug):
    """POST a new run record to the Airtable run table.

    SensorID and ConfigID are lists of record IDs; only the first element of
    each is written. When Debug is True, returns the parsed response dict;
    otherwise returns None.
    """
    headers = {
        'Authorization': 'Bearer %s' % am.MyKey,
        'Content-Type': 'application/json',
    }
    #Example template of a query response : {'records': [{'createdTime': '2015-02-12T03:40:42.000Z', 'fields': {'Conversion': ['Complete'], 'Time Resolution 1': 30, 'TimingDAQ': ['Failed'], 'Notes': 'Make test beam great again\n', 'HV 1': ['recJRiQqSHzTNZqal'], 'Run number': 4, 'Tracking': ['Processing'], 'Configuration': ['rectY95k7m19likjW'], 'Sensor': ['recNwdccBdzS7iBa5']}, 'id': 'recNsKOMDvYKrJzXd'}]}
    # NOTE(review): the JSON body is hand-assembled; values containing quotes
    # or backslashes would break it — confirm inputs are always plain tokens.
    data = '{"fields":{"Run number": %d,"Start time": "%s", "Duration": "%s", "Digitizer": ["%s"], "Tracking": ["%s"], "Conversion": ["%s"],"TimingDAQ": ["%s"],"TimingDAQNoTracks": ["%s"], "Sensor": ["%s"],"Configuration": ["%s"]}}' % (RunNumber, StartTime, Duration, Digitizer, Tracking, Conversion, TimingDAQ, TimingDAQNoTracks, SensorID[0], ConfigID[0])
    response = am.requests.post(am.CurlBaseCommand, headers=headers, data=data)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug: return ResponseDict
|
{"/ProcessExec.py": ["/ProcessCMDs.py", "/ParseFunctions.py"], "/ProcessCMDs.py": ["/ParseFunctions.py"]}
|
17,215
|
Alesia099/skill4u
|
refs/heads/master
|
/registration/models.py
|
from django.db import models
from django.core.mail import send_mail
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.base_user import AbstractBaseUser, BaseUserManager
from django.utils.translation import ugettext_lazy as _
from .validators import validate_birthday, FullNameValidator
from phonenumber_field.modelfields import PhoneNumberField
from .managers import UserManager
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user model authenticated by e-mail address (USERNAME_FIELD)."""
    # NOTE(review): declared but not attached to any field's validators here.
    full_name_validator = FullNameValidator
    email = models.EmailField(_('email'), unique=True)
    full_name = models.CharField(_('full_name'), max_length=100, blank=True)
    #birthday = models.DateField(_("birthday"), validators=[validate_birthday], blank=True)
    region = models.CharField(_("region"), max_length=50)
    city = models.CharField(_("city"), max_length=50)
    phone = PhoneNumberField(_("phone"))
    place_of_study = models.CharField(_("place of study"), max_length=50)
    place_of_work = models.CharField(_("place of work"), max_length=50)
    education = models.CharField(_("education"), max_length=50)
    motivation = models.TextField(blank=True, verbose_name=_("about"))
    is_active = models.BooleanField(default=True, verbose_name=_("active"))
    is_staff = models.BooleanField(default=False, verbose_name=_("staff"))
    is_teacher = models.BooleanField(default=False, verbose_name=_("teacher"))
    date_joined = models.DateTimeField(auto_now_add=True, verbose_name=_("date joined"))
    avatar = models.ImageField(upload_to='avatars/', default='none/blank.jpg', height_field=None, width_field=None)
    objects = UserManager()
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
    def __str__(self):
        # Displayed name; may be empty since full_name allows blank.
        return self.full_name
    def email_user(self, subject, message, from_email=None, **kwargs):
        '''
        Send an email to this user.
        '''
        send_mail(subject, message, from_email, [self.email], **kwargs)
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,216
|
Alesia099/skill4u
|
refs/heads/master
|
/olympiad/migrations/0002_auto_20190703_2136.py
|
# Generated by Django 2.2.2 on 2019-07-03 21:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-07-03); do not edit by hand."""
    dependencies = [
        ('olympiad', '0001_initial'),
    ]
    operations = [
        # Fixes the 'invated' typo in the Team model's field name.
        migrations.RenameField(
            model_name='team',
            old_name='invated',
            new_name='invited',
        ),
        migrations.AlterField(
            model_name='olympiad',
            name='team',
            field=models.ManyToManyField(to='olympiad.Team', verbose_name='team'),
        ),
        migrations.AlterField(
            model_name='task',
            name='creater',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='creater'),
        ),
        # NOTE(review): on_delete='capitan' is a string, not a deletion
        # strategy — presumably a bug carried over from the model; confirm.
        migrations.AlterField(
            model_name='team',
            name='capitan',
            field=models.ForeignKey(on_delete='capitan', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,217
|
Alesia099/skill4u
|
refs/heads/master
|
/registration/trash.py
|
class UserManager(BaseUserManager):
    """Manager for the e-mail-keyed User model (create_user / create_superuser)."""
    use_in_migrations = True

    def _create_user(
        self,
        email,
        username=None,      # accepted for API compatibility; not stored
        full_name='',
        birthday=None,
        region='',
        city='',
        phone='',
        password=None,
        **extra_fields
    ):
        """Create and save a user with the given e-mail and password."""
        email = self.normalize_email(email)
        full_name = self.model.normalize_username(full_name)
        extra_fields.setdefault("is_staff", False)
        extra_fields.setdefault("is_superuser", False)
        user = self.model(
            email=email,
            full_name=full_name,
            birthday=birthday,
            region=region,
            city=city,
            phone=phone,
            **extra_fields
        )
        user.set_password(password)
        user.save()
        return user

    def create_superuser(
        self,
        email,
        password,
        **extra_fields
    ):
        """Create a superuser; forces is_staff and is_superuser to True."""
        extra_fields.setdefault("is_staff", True)
        extra_fields.setdefault("is_superuser", True)
        if extra_fields.get("is_staff") is not True:
            raise ValueError("Superuser must have is_staff=True.")
        if extra_fields.get("is_superuser") is not True:
            raise ValueError("Superuser must have is_superuser=True.")
        # BUG FIX: the original passed `password` positionally into the
        # `username` slot and omitted the other required positional arguments,
        # so creating a superuser always raised TypeError.
        return self._create_user(
            email=email,
            password=password,
            **extra_fields
        )
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,218
|
Alesia099/skill4u
|
refs/heads/master
|
/olympiad/migrations/0001_initial.py
|
# Generated by Django 2.2.2 on 2019-06-29 18:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration for the olympiad app; do not edit by hand."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('max_participations', models.IntegerField(verbose_name='max participations')),
                # NOTE(review): on_delete='capitam' is a string, not a deletion
                # strategy — presumably a bug carried over from the model; confirm.
                ('capitan', models.ForeignKey(on_delete='capitam', to=settings.AUTH_USER_MODEL)),
                ('invated', models.ManyToManyField(related_name='invated', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('task', models.TextField()),
                ('input_data', models.CharField(max_length=250, verbose_name='input data')),
                ('answer', models.CharField(max_length=250, verbose_name='answer')),
                ('creater', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='crater')),
            ],
        ),
        migrations.CreateModel(
            name='Olympiad',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='')),
                ('duration', models.IntegerField(verbose_name='')),
                ('start_olympiad', models.DateTimeField(verbose_name='date of the start olympiad')),
                ('end_olympiad', models.DateTimeField(verbose_name='date of the end olympiad')),
                ('participation_count', models.IntegerField(verbose_name='participation count')),
                ('max_teams', models.IntegerField(verbose_name='max teams')),
                ('creater', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('task', models.ManyToManyField(to='olympiad.Task')),
                ('team', models.ManyToManyField(to='olympiad.Team', verbose_name='teams')),
            ],
        ),
    ]
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,219
|
Alesia099/skill4u
|
refs/heads/master
|
/olympiad/views.py
|
from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from rest_framework.authentication import TokenAuthentication
from .serializers import (TaskSerializer, TeamSerializer, OlympiadSerializer)
from .models import (Task, Team, Olympiad)
from registration.models import User
class TaskAPI(APIView):
    """Olympiad task"""
    permission_classes = (permissions.IsAuthenticated,)
    authentication_classes = (TokenAuthentication,)

    def get(self, request):
        """Return all tasks, serialized."""
        return Response(TaskSerializer(Task.objects.all(), many=True).data)

    def post(self, request):
        """Create a task owned by the requesting user; all fields required."""
        payload = request.data
        task = payload.get("task")
        input_data = payload.get("input_data")
        answer = payload.get("answer")
        if task and input_data and answer:
            Task.objects.create(creater=request.user, task=task, input_data=input_data, answer=answer)
            return Response(status=201)
        return Response({'error': 'Enter all fields.'},
                        status=400)
class TeamAPI(APIView):
    """List teams and create a team attached to an olympiad."""
    permission_classes = (permissions.IsAuthenticated,)
    authentication_classes = (TokenAuthentication,)

    def get(self, request):
        """Return all teams, serialized."""
        team = Team.objects.all()
        serializer = TeamSerializer(team, many=True)
        return Response(serializer.data)

    def post(self, request):
        """Create a team captained by the requester and attach it to the olympiad."""
        olympiad_id = request.data.get("olympiad_id")
        max_participations = request.data.get("max_participations")
        if not olympiad_id:
            return Response({'error': 'Olympiad is absent.'},
                            status=400)
        # BUG FIX: the original fetched the Olympiad BEFORE validating
        # olympiad_id, so a missing id produced an unhandled 500.
        try:
            olympiad = Olympiad.objects.get(id=olympiad_id)
        except Olympiad.DoesNotExist:
            return Response({'error': 'Olympiad is absent.'},
                            status=400)
        # BUG FIX: use the instance returned by create() directly; the
        # original re-queried get(capitan=...) which raises
        # MultipleObjectsReturned once a user captains a second team.
        team = Team.objects.create(capitan=request.user, max_participations=max_participations)
        olympiad.team.add(team)
        return Response(status=201)
class InviteToTeam(APIView):
    """"Invite user to Team"""
    permission_classes = (permissions.IsAuthenticated,)
    authentication_classes = (TokenAuthentication,)

    def get(self, request):
        """List the teams the requesting user captains."""
        user = request.user
        team = Team.objects.filter(capitan=user)
        serializer = TeamSerializer(team, many=True)
        return Response(serializer.data)

    def post(self, request):
        """Add the user `invited_id` to team `team_id`."""
        team_id = request.data.get("team_id")
        invited_id = request.data.get("invited_id")
        if not team_id or not invited_id:
            return Response({'error': 'Enter all fields.'},
                            status=400)
        # BUG FIX: .get() raises DoesNotExist rather than returning a falsy
        # value, so the original `if not team or not user` branch was
        # unreachable and a bad id produced an unhandled 500 instead of 400.
        try:
            team = Team.objects.get(id=team_id)
            user = User.objects.get(id=invited_id)
        except (Team.DoesNotExist, User.DoesNotExist):
            return Response(status=400)
        team.invited.add(user)
        return Response(status=201)
class OlympiadAPI(APIView):
    """Return Olympiad list"""
    permission_classes = (permissions.IsAuthenticated,)
    authentication_classes = (TokenAuthentication,)

    def get(self, request):
        """Return all olympiads, serialized."""
        olympiad = Olympiad.objects.all()
        serializer = OlympiadSerializer(olympiad, many=True)
        return Response(serializer.data)

    def post(self, request):
        """Create an olympiad owned by the requesting user; all fields required."""
        name = request.data.get("name")
        creater = request.user
        duration = request.data.get("duration")
        start_olympiad = request.data.get("start_olympiad")
        end_olympiad = request.data.get("end_olympiad")
        participation_count = request.data.get("participation_count")
        max_teams = request.data.get("max_teams")
        # NOTE(review): truthiness rejects legitimate 0 values for the integer
        # fields — confirm whether 0 should be accepted before changing.
        if not name or not creater or not duration or not start_olympiad or not end_olympiad or not participation_count or not max_teams:
            # BUG FIX: the error response previously went out with HTTP 200.
            return Response({"error":"Enter all fields."}, status=400)
        else:
            Olympiad.objects.create(name=name,
                                    creater=creater,
                                    duration=duration,
                                    start_olympiad=start_olympiad,
                                    end_olympiad=end_olympiad,
                                    participation_count=participation_count,
                                    max_teams=max_teams
                                    )
            return Response(status=201)
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,220
|
Alesia099/skill4u
|
refs/heads/master
|
/registration/serializer.py
|
from rest_framework import serializers
from .models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializer for creating and listing users, with password confirmation.

    NOTE(review): there is no ``create`` override calling ``set_password``,
    so the password appears to be saved as given — confirm hashing is handled
    elsewhere before relying on Django's auth machinery.
    """
    password = serializers.CharField(write_only=True, style={'input_type': 'password'})
    password_confirm = serializers.CharField(write_only=True, style={'input_type': 'password'})
    class Meta:
        model = User
        fields = ('email', 'full_name', 'password', 'is_teacher', 'password_confirm', 'avatar')
    def validate(self, attrs):
        """Reject mismatched passwords; strip the confirmation field before save."""
        data = super(UserSerializer, self).validate(attrs)
        if data['password'] != data['password_confirm']:
            raise serializers.ValidationError('Password mismatch')
        del data['password_confirm']
        return data
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,221
|
Alesia099/skill4u
|
refs/heads/master
|
/olympiad/models.py
|
from django.db import models
from registration.models import User
from django.utils.translation import ugettext_lazy as _
class Task(models.Model):
    """"Olympiad task"""
    # Problem statement text.
    task = models.TextField(blank=False)
    creater = models.ForeignKey(User, verbose_name=_("creater"), on_delete=models.CASCADE)
    input_data = models.CharField(_("input data"), blank=False, max_length=250)
    answer = models.CharField(_("answer"), blank=False, max_length=250)
    objects = models.Manager()
class Team(models.Model):
    """Teams for Olympiad"""
    # BUG FIX: the lazy verbose string was passed positionally into
    # ForeignKey's on_delete slot, so no deletion strategy was set
    # (migration 0001 shows on_delete='capitam'). Use keywords explicitly.
    capitan = models.ForeignKey(User, verbose_name=_("capitan"), on_delete=models.CASCADE)
    # BUG FIX: the lazy verbose string was passed positionally into
    # ManyToManyField's related_name slot; related_name must be a plain
    # string ('invated' matches migration 0001's reverse accessor).
    invited = models.ManyToManyField(User, related_name='invated', verbose_name=_("invited"))
    max_participations = models.IntegerField(_("max participations"), blank=False)
    def __str__(self):
        return '%s %s' % (self.capitan, self.invited)
class Olympiad(models.Model):
    """An olympiad event: a set of tasks attempted by competing teams."""
    # NOTE(review): the empty verbose_name strings on name/duration look
    # unintentional (they blank out the admin/form labels) — confirm.
    name = models.CharField(_(""), max_length=50)
    creater = models.ForeignKey(User, on_delete=models.CASCADE)
    task = models.ManyToManyField(Task)
    duration = models.IntegerField(_("")) # in minutes
    start_olympiad = models.DateTimeField(_("date of the start olympiad"),
                                          auto_now=False,
                                          auto_now_add=False
                                          )
    end_olympiad = models.DateTimeField(_("date of the end olympiad"),
                                        auto_now=False,
                                        auto_now_add=False
                                        )
    participation_count = models.IntegerField(_("participation count"))
    team = models.ManyToManyField(Team, verbose_name=_("team"),)
    max_teams = models.IntegerField(_("max teams"))
    def __str__(self):
        return self.name
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,222
|
Alesia099/skill4u
|
refs/heads/master
|
/olympiad/apps.py
|
from django.apps import AppConfig
class OlympiadConfig(AppConfig):
    """Django application configuration for the ``olympiad`` app."""
    name = 'olympiad'
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,223
|
Alesia099/skill4u
|
refs/heads/master
|
/registration/views.py
|
from .models import User
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated, AllowAny
from registration.serializer import UserSerializer
from django.contrib.auth import authenticate
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK
)
@csrf_exempt
@api_view(["POST"])
@permission_classes((AllowAny,))
def login(request):
    """Authenticate by e-mail and password; return a DRF auth token.

    Responses: 400 when a field is missing, 404 on bad credentials,
    200 with {'token': ...} on success.
    """
    email = request.data.get("email")
    password = request.data.get("password")
    if email is None or password is None:
        return Response({'error': 'Please provide both email and password'},
                        status=HTTP_400_BAD_REQUEST)
    # BUG FIX: the original did User.objects.get(email=..., password=...),
    # comparing the raw password against the stored hash — it could never
    # match a properly hashed password. authenticate() (already imported,
    # previously unused) performs the correct hashed check; USERNAME_FIELD
    # is 'email', so the username kwarg carries the e-mail.
    user = authenticate(username=email, password=password)
    if user is None:
        return Response({'error': 'Invalid Credentials'},
                        status=HTTP_404_NOT_FOUND)
    token, _ = Token.objects.get_or_create(user=user)
    return Response({'token': token.key},
                    status=HTTP_200_OK)
@api_view(['GET', 'POST'])
@permission_classes((AllowAny,))
def create(request):
    """
    List all users (GET) or register a new user (POST).
    """
    if request.method == 'GET':
        user = User.objects.all()
        serializer = UserSerializer(user, many=True)
        return Response(serializer.data)
    elif request.method == 'POST':
        serializer = UserSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data)
        # BUG FIX: validation errors previously went out with HTTP 200,
        # so clients could not distinguish success from failure.
        return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)
@api_view(['GET'])
@authentication_classes((TokenAuthentication,))
@permission_classes((IsAuthenticated,))
def example_view(request, format=None):
    """Token-protected smoke-test endpoint; always returns the string 'Hello'."""
    return Response('Hello')
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,224
|
Alesia099/skill4u
|
refs/heads/master
|
/registration/migrations/0002_auto_20190628_1837.py
|
# Generated by Django 2.2.2 on 2019-06-28 18:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (2019-06-28); do not edit by hand."""
    dependencies = [
        ('registration', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='full_name',
            field=models.CharField(blank=True, max_length=100, verbose_name='full_name'),
        ),
    ]
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,225
|
Alesia099/skill4u
|
refs/heads/master
|
/olympiad/serializers.py
|
from rest_framework import serializers
from .models import Task, Team, Olympiad
from registration.serializer import UserSerializer
class TaskSerializer(serializers.ModelSerializer):
    """Serializes a Task; deliberately omits 'answer' so solutions aren't exposed."""
    class Meta:
        model = Task
        fields = ('task', 'input_data')
class TeamSerializer(serializers.ModelSerializer):
    """Serializes a Team with its captain and invited users nested inline."""
    capitan = UserSerializer()
    invited = UserSerializer(many=True)
    class Meta:
        model = Team
        fields = ('id', 'capitan', 'invited', 'max_participations')
class OlympiadSerializer(serializers.ModelSerializer):
    """Serializes an Olympiad with its teams nested inline."""
    team = TeamSerializer(many=True)
    class Meta:
        model = Olympiad
        fields = ('id', 'team', 'name', 'participation_count')
|
{"/olympiad/views.py": ["/olympiad/serializers.py", "/olympiad/models.py", "/registration/models.py"], "/registration/serializer.py": ["/registration/models.py"], "/olympiad/models.py": ["/registration/models.py"], "/registration/views.py": ["/registration/models.py", "/registration/serializer.py"], "/olympiad/serializers.py": ["/olympiad/models.py", "/registration/serializer.py"]}
|
17,226
|
myoung859/Tank_Attack_580.200
|
refs/heads/master
|
/TankAttack.py
|
import pygame
import random as rd
from helpers import Tank
from helpers import Shell
import helpers as TA
# Initial parameter setup: read the five game parameters from options.csv,
# one value per line, in this fixed order:
#   window width, window height, gravity, drag, maximum wind speed.
# A with-statement guarantees the file is closed even if int()/float()
# raises on a malformed line (the original leaked the handle in that case).
with open('options.csv', 'r', newline='') as filer:
    x_dim = int(filer.readline())
    y_dim = int(filer.readline())
    gravity = float(filer.readline())
    drag = float(filer.readline())
    wind_max = float(filer.readline())
pygame.init()
pygame.display.set_caption("Tank Attack")
print("Welcome to Tank Attack!")
def show(p1, p2, screen):
    """Redraw the playfield: sky, ground strip, both tanks and their labels.

    Relies on the module-level x_dim/y_dim window dimensions.
    """
    screen.fill([0, 0, 156])  # sky backdrop
    label_font = pygame.font.SysFont(None, 14)
    # Ground strip along the bottom of the window.
    pygame.draw.rect(screen, [0, 56, 0], (0, y_dim - 50, x_dim, y_dim), 0)
    # Player 1: sprite, then its red label just below.
    screen.blit(p1.showtank(), (p1.position(), y_dim - 85))
    label1 = label_font.render('P1', True, (255, 0, 0), None)
    screen.blit(label1, (p1.position() + 15, y_dim - 50))
    # Player 2: sprite, then its green label just below.
    label2 = label_font.render('P2', True, (0, 255, 0), None)
    screen.blit(p2.showtank(), (p2.position(), y_dim - 85))
    screen.blit(label2, (p2.position() + 15, y_dim - 50))
# Main menu / game loop.
# NOTE(review): indentation was reconstructed from the original comments and
# control flow -- confirm the nesting of the turn loop against the original.
# Repeatedly prompts the user until they type 'o' or 'p'
while(True):
    start = input("To begin, type (P)lay. To change parameters type (O)ptions.")
    #if options, redo the parameters
    if start[0].lower() == 'o':
        TA.options_prompt('options.csv',x_dim,y_dim,gravity,drag, wind_max)
        # Re-read the parameters the prompt just wrote back to disk.
        filer=open('options.csv', 'r',newline = '')
        x_dim = int(filer.readline())
        y_dim = int(filer.readline())
        gravity = float(filer.readline())
        drag = float(filer.readline())
        wind_max = float(filer.readline())
        filer.close()
    if start[0].lower() == 'p':
        field = [int(x_dim) , int(y_dim)]
        # Random spawn positions, kept 50 px clear of either screen edge.
        ip1 = rd.randint(50,int(x_dim) - 50)
        ip2 = rd.randint(50,int(x_dim) - 50)
        #Adds in the players
        p1 = Tank(ip1, x_dim, y_dim, 1, 'p1tank.png')
        p2 = Tank(ip2, x_dim, y_dim, 2, 'p2tank.png')
        pygame.init()
        b=rd.random()
        windy=b*wind_max
        p = 1  # player whose turn it is (1 or 2)
        screen = pygame.display.set_mode(field)
        show(p1,p2, screen)
        pygame.display.flip()
        col = False  # becomes True once a shell hits a tank
        # Initial wind: random magnitude up to wind_max, random direction.
        a=rd.random()
        b=rd.random()
        windy=b*wind_max
        if a<0.5:
            v_wind=windy
            print('The wind is blowing %.2f mph to the right.'%windy)
        else:
            v_wind=windy*-1
            print('The wind is blowing %.2f mph to the left.'%windy)
        # Turn loop: runs until one tank is hit.
        while col == False:
            #Checks for window closing, then updates display
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.display.quit()
                    pygame.quit()
                    break
            screen = pygame.display.set_mode(field)
            show(p1,p2, screen)
            pygame.display.flip()
            #Prompts the user until they select a mode
            opt = 'IFYOUREADTHISGIVEUSANA'  # sentinel so the prompt loop runs at least once
            while (not (opt[0].lower() in ['f','m','q'])):
                print("---Player " + str(p) +"---")
                print("If you want to fire a shell from your tank, input (F)ire.")
                print("If you want to move your tank up to 50 meters, input (M)ove.")
                opt = str(input())
            #Sets up shell spawning
            if (opt[0].lower() == 'f'):
                v_0 = float(input("Input the initial velocity: "))
                angle = float(input("Input the angle of your shot (degrees): "))
                pygame.display.flip()
                #Fires shell, then checks after each iteration fot outofbounds/hit
                if p == 1:
                    shot = Shell(v_0, angle, p1)
                    # Step the shell in 0.05 s increments until it leaves the
                    # field or collides with the opposing tank.
                    while shot.y_pos < 450 and shot.x_pos > 0 and shot.y_pos > -1*(y_dim-50) and shot.x_pos < shot.Tank.x_max and col==False:
                        shot.Fire(drag, v_wind, gravity, 0.05)
                        yposition = shot.y_pos
                        if shot.y_pos < 0:
                            yposition = shot.y_pos*-1
                        screen = pygame.display.set_mode(field)
                        show(p1,p2, screen)
                        fire = pygame.draw.rect(screen,shot.color,[shot.x_pos,yposition,10,10],0)
                        col = pygame.Rect.colliderect(fire, p2.rect)
                        if col == True:
                            screen.blit(pygame.image.load('dead.png'), (p2.position(),y_dim-85))
                        pygame.display.flip()
                elif p == 2: #...and does the same if its player 2's turn
                    shot = Shell(v_0, angle, p2)
                    col = False
                    while shot.y_pos < 450 and shot.x_pos > 0 and shot.y_pos > -1*(y_dim-50) and shot.x_pos < shot.Tank.x_max and col==False:
                        shot.Fire(drag, v_wind, gravity, 0.05)
                        yposition = shot.y_pos
                        if shot.y_pos < 0:
                            yposition = shot.y_pos*-1
                        screen = pygame.display.set_mode(field)
                        show(p1,p2, screen)
                        fire = pygame.draw.rect(screen,shot.color,[shot.x_pos,yposition,10,10],0)
                        col = pygame.Rect.colliderect(fire, p1.rect)
                        if col == True:
                            screen.blit(pygame.image.load('dead.png'), (p1.position(),y_dim-85))
                        pygame.display.flip()
                if col == True:
                    print("Congratulations, Player " + str(p) +".")
                    print("You totally annihilated the other player.")
                    print("I hope you're happy with yourself.")
                    break
            elif (opt[0].lower() == 'm'):
                if p == 1:
                    p1.move() #defined in helpers.py
                elif p == 2:
                    p2.move()
                screen = pygame.display.set_mode(field)
                show(p1,p2, screen)
                pygame.display.flip()
            #Switches player and recalculates wind
            if p == 1:
                p = 2
            elif p == 2:
                p = 1
            a=rd.random()
            b=rd.random()
            windy=b*wind_max
            if a<0.5:
                v_wind=windy
                print('The wind is blowing %.2f mph to the right.'%windy)
            else:
                v_wind=windy*-1
                print('The wind is blowing %.2f mph to the left.'%windy)
|
{"/TankAttack.py": ["/helpers.py"]}
|
17,227
|
myoung859/Tank_Attack_580.200
|
refs/heads/master
|
/helpers.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 13 17:35:38 2018
@author: Mike
"""
import pygame
from math import radians,sin,cos
import csv
import random
def options_prompt(filename, x_dim, y_dim, gravity, drag, wind_max):
    """Interactively ask for new game parameters and write them to `filename`.

    Each value is written as its own single-column CSV row, in the order the
    game reads them back: width, height, gravity, drag, max wind speed. The
    current values are shown in each prompt for reference.

    Raises ValueError if a typed value cannot be parsed as int/float.
    """
    # with-statement fixes the original resource leak: the file was opened
    # but never closed, so buffered rows could be lost at interpreter exit.
    with open(filename, 'w', newline='') as filew:
        output = csv.writer(filew)
        output.writerow([int(input("Please input the horizontal window size (Current value is "+ str(x_dim) +"): "))])
        output.writerow([int(input("Please input the vertical window size (Current value is "+ str(y_dim) +"): "))])
        output.writerow([float(input("Please input the gravity strength (Current value is "+ str(gravity) +"): "))])
        output.writerow([float(input("Please input the drag constant (Current value is "+ str(drag) +"): "))])
        output.writerow([float(input("Please input the maximum wind speed (Current value is "+ str(wind_max) +"): "))])
class Tank(pygame.sprite.Sprite):
    """A player's tank sprite: position, colour and image for one player."""

    def __init__(self, pos_x, x_dim, y_dim, player, img):
        """Create the tank for `player` (1 or 2) at horizontal pixel pos_x."""
        super().__init__()
        self.image = pygame.image.load(img)
        self.rect = self.image.get_rect()
        self.ymax = y_dim
        self.positx= pos_x  # left edge of the tank; updated by move()
        self.rect.center = (self.positx + 15, y_dim-63) #bottom of tank is on ground
        self.posx = pos_x+15  # shell spawn x (tank centre at creation time)
        if player == 1:
            self.color = [255,0,0]  # NOTE: this attribute shadows the color() method below
        elif player == 2:
            self.color = [0,255,0]
        self.posy = y_dim-63  # shell spawn y
        self.player = player
        self.x_max = x_dim  # right playfield boundary

    def move(self):
        """Prompt for a move of up to +/-50 m and shift the tank accordingly.

        NOTE(review): only positx is updated; posx, posy and rect keep their
        spawn values, so shells still originate from -- and collisions still
        test against -- the original position. Confirm this is intended.
        """
        dist = 516  # sentinel outside [-50, 50] so the prompt loop runs at least once
        while (dist > 50 or dist < -50):
            dist = int(input("Please enter the distance (positive-RIGHT or negative-LEFT) to move, up to 50 meters: "))
        self.positx = self.positx + int(2.5*dist) #Inspired by https://bit.ly/2KkNOp8
        # Clamp to the playfield edges.
        if (self.positx <= 20):
            self.positx = 0
            print("You can't get out of this one.")
        if (self.positx >= self.x_max - 20):
            self.positx = self.x_max
            print("You can't get out of this one.")
        return self.positx

    def showtank(self):
        """Return the tank's sprite image."""
        pic = self.image
        return pic

    def position(self):
        """Return the tank's current x position (left edge)."""
        return self.positx

    def color(self):
        # NOTE(review): dead code -- the self.color list assigned in __init__
        # shadows this method, so instances can never call it.
        return self.color

    def fire(self):
        # Placeholder; firing is implemented by the Shell class instead.
        None
class Shell(pygame.sprite.Sprite):
    """A projectile fired from a Tank, stepped with simple Euler physics."""

    def __init__(self, v_0, angle, Tank):
        """Spawn a shell at `Tank`'s position, speed v_0 at `angle` degrees."""
        super().__init__()
        self.image = pygame.image.load('bullet.png')
        self.color = [255,0,255]
        self.rect = self.image.get_rect()
        self.Tank = Tank
        # Start just above the owning tank's centre.
        self.rect.center = (self.Tank.rect.centerx, self.Tank.rect.centery - 6)
        self.player = getattr(self.Tank, 'player')
        # Decompose the muzzle velocity into horizontal/vertical components.
        theta = radians(angle)
        self.v_x = cos(theta) * v_0
        self.v_y = sin(theta) * v_0
        self.mass = 10
        self.x_pos = self.Tank.posx
        self.y_pos = self.Tank.posy

    def Fire(self, drag, v_wind, gravity, dt):
        """Advance the shell one time step dt under drag, wind and gravity."""
        # Velocity update first (uses the previous velocities) ...
        self.v_x -= (drag * (self.v_x + v_wind) / self.mass) * dt
        self.v_y -= (drag * self.v_y / self.mass) * dt + gravity * dt
        # ... then the position update with the freshly computed velocities.
        self.x_pos += dt * self.v_x
        self.y_pos -= dt * self.v_y
|
{"/TankAttack.py": ["/helpers.py"]}
|
17,231
|
hybby/sreport
|
refs/heads/main
|
/tests/test_urls.py
|
"""
Unit tests for the sreport.py utility relating to URL processing
"""
import pytest
import requests
from sreport import validate_url, process_url
def test_valid_url():
    """The validator should accept a well-formed https URL."""
    assert validate_url("https://www.google.com") is True
def test_invalid_url():
    """The validator should reject a URL with a bogus scheme."""
    assert validate_url("bad://address") is False
def test_missing_url():
    """An empty URL should raise ValueError rather than return False."""
    with pytest.raises(ValueError, match="No url provided"):
        validate_url("")
def test_invalid_url_output():
    """An invalid URL should produce a report dict with just Url and Error."""
    url = "bad://address"
    assert process_url(url) == {"Url": url, "Error": "invalid url"}
def test_ssl_error_output(requests_mock):
    """A request that raises SSLError (e.g. bad cert) reports 'ssl error'."""
    url = "https://badcert.com"
    requests_mock.get(url, exc=requests.exceptions.SSLError)
    assert process_url(url) == {"Url": url, "Error": "ssl error"}
def test_connection_error_output(requests_mock):
    """A refused connection (e.g. DNS failure) reports 'connection error'."""
    url = "http://not.exists.bbc.co.uk"
    requests_mock.get(url, exc=requests.exceptions.ConnectionError)
    assert process_url(url) == {"Url": url, "Error": "connection error"}
def test_connection_timeout_output(requests_mock):
    """A request exceeding the timeout should report 'timed out'."""
    url = "http://slowsite.com"
    requests_mock.get(url, exc=requests.exceptions.Timeout)
    assert process_url(url) == {"Url": url, "Error": "timed out"}
def test_too_many_redirects_output(requests_mock):
    """
    Tests whether we output the correct format of message for a URL which
    exceeds the redirect limit (TooManyRedirects).

    (The original docstring was copy-pasted from the connection-error test
    and wrongly described DNS failures.)
    """
    url = "http://here.there.everywhere.com"
    requests_mock.get(
        url,
        exc=requests.exceptions.TooManyRedirects
    )
    expected_output = {
        "Url": url,
        "Error": "too many redirects",
    }
    assert process_url(url) == expected_output
def test_40x_50x_output(requests_mock):
    """Common HTTP 4xx/5xx responses report Url, Status_code and Date."""
    url = "http://not.exists.bbc.co.uk/"
    date = 'Sat, 03 Oct 2020 17:32:59 GMT'
    # bad request, unauthorized, forbidden, not found, internal server error,
    # bad gateway, service unavailable, gateway timeout
    for status in (400, 401, 403, 404, 500, 502, 503, 504):
        requests_mock.get(url, status_code=status, headers={'Date': date})
        assert process_url(url) == {
            "Url": url,
            "Status_code": status,
            "Date": date,
        }
def test_200_output(requests_mock):
    """HTTP 200 responses report Url, Status_code, Content_length and Date.

    Redirects are always followed, so HTTP 301 is not tested separately.
    """
    url = "http://www.example.com"
    headers = {
        'Date': 'Sat, 03 Oct 2020 17:32:59 GMT',
        'Content-Length': '12345'
    }
    requests_mock.get(url, status_code=200, headers=headers)
    assert process_url(url) == {
        "Url": url,
        "Status_code": 200,
        "Content_length": '12345',
        "Date": "Sat, 03 Oct 2020 17:32:59 GMT",
    }
|
{"/tests/test_urls.py": ["/sreport.py"], "/tests/test_io.py": ["/sreport.py"], "/tests/test_summary.py": ["/sreport.py"]}
|
17,232
|
hybby/sreport
|
refs/heads/main
|
/sreport.py
|
#!/usr/bin/env python3
"""
A utility to make HTTP(S) requests to specified URLs and report on the results
"""
import sys
import json
import requests
from validator_collection import checkers
USAGE = "Usage: ./sreport.py < urls.txt"
def parse_input(input_):
    """Split `input_` on newline boundaries and return the resulting list.

    Raises ValueError when the input is empty.
    """
    if not input_:
        raise ValueError("No input provided")
    return input_.splitlines()
def validate_url(url):
    """Return True when `url` is a well-formed URL, False otherwise.

    Raises ValueError when no url is supplied at all.
    """
    if url:
        return checkers.is_url(url)
    raise ValueError("No url provided")
def process_url(url, timeout_secs=10):
    """Fetch `url` and report the outcome as a dict.

    The result always contains 'Url', plus either an 'Error' description
    (invalid url / ssl / redirects / connection / timeout) or the response's
    'Status_code' and, when the headers carry them, 'Content_length' and
    'Date'. Redirects are followed; other exceptions propagate.
    """
    info = {'Url': url}
    if not validate_url(url):
        info['Error'] = 'invalid url'
        return info
    # attempt a request and deal with common exceptions we may encounter and
    # wish to report upon. erroring out on other exceptions seems reasonable
    # https://requests.readthedocs.io/en/master/_modules/requests/exceptions
    # NOTE: the except order matters -- SSLError subclasses ConnectionError,
    # and ConnectTimeout derives from both ConnectionError and Timeout.
    try:
        response = requests.get(
            url,
            allow_redirects=True,
            timeout=timeout_secs
        )
    except requests.exceptions.SSLError:
        info['Error'] = "ssl error"
    except requests.exceptions.TooManyRedirects:
        info['Error'] = "too many redirects"
    except requests.exceptions.ConnectionError:
        # catches dns failures and refused connections
        info['Error'] = "connection error"
    except requests.exceptions.Timeout:
        # catches connection timeouts and read timeouts
        info['Error'] = "timed out"
    else:
        # build our output message, adding attributes if they're available
        if response.status_code:
            info['Status_code'] = response.status_code
        if 'Content-Length' in response.headers:
            info['Content_length'] = response.headers['Content-Length']
        if 'Date' in response.headers:
            info['Date'] = response.headers['Date']
    return info
def generate_summary(summary):
    """Turn a {status_code: count} dict into a summary report list.

    Produces one {'Status_code', 'Number_of_responses'} entry per status code
    (in insertion order), followed by a final entry with the overall count.
    Raises TypeError for non-dict input and ValueError when any key or count
    is not an integer.
    """
    if not isinstance(summary, dict):
        raise TypeError("input must be dict")
    report = []
    total = 0
    for code, count in summary.items():
        if not isinstance(code, int):
            raise ValueError("bad input; response codes must be integers")
        if not isinstance(count, int):
            raise ValueError("bad input; response counts must be integers")
        total += count
        report.append({
            'Status_code': code,
            'Number_of_responses': count
        })
    report.append({
        'Number_of_responses': total
    })
    return report
def output_json(output):
    """Pretty-print `output` (a dict or list) to stdout as 4-space JSON.

    Raises TypeError for any other input type.
    """
    if isinstance(output, (dict, list)):
        print(json.dumps(output, indent=4))
    else:
        raise TypeError("input must be dict or list")
if __name__ == "__main__":
    # requirement: program is run from command line and takes input from stdin
    if sys.stdin.isatty():
        raise ValueError(
            "This program only accepts input via stdin\n{}".format(USAGE)
        )
    with sys.stdin as stdin:
        urls = parse_input(stdin.read())

    stats = {}
    for url in urls:
        result = process_url(url)
        output_json(result)
        # A 'Status_code' key marks a valid response; tally one per code.
        if 'Status_code' in result:
            code = result['Status_code']
            stats[code] = stats.get(code, 0) + 1

    # build and emit our summary document
    output_json(generate_summary(stats))
|
{"/tests/test_urls.py": ["/sreport.py"], "/tests/test_io.py": ["/sreport.py"], "/tests/test_summary.py": ["/sreport.py"]}
|
17,233
|
hybby/sreport
|
refs/heads/main
|
/tests/test_io.py
|
"""
Unit tests for the sreport.py utility relating to input/output operations
"""
import pytest
from sreport import parse_input, output_json
def test_split_newlines_input():
    """Newline-separated input becomes a list with one entry per line."""
    assert parse_input("foo\nbar") == ['foo', 'bar']
def test_no_newlines_input():
    """Input without any newline becomes a single-item list."""
    assert parse_input("foobar") == ['foobar']
def test_no_input():
    """Empty input should raise ValueError with the expected message."""
    with pytest.raises(ValueError, match="No input provided"):
        parse_input("")
def test_json_output(capsys):
    """Objects print as 4-space-indented JSON with a trailing newline."""
    output_json({"foo": "bar"})
    captured = capsys.readouterr()
    assert captured.out == '{\n    "foo": "bar"\n}\n'
def test_invalid_json_output():
    """Non-dict/list input should raise TypeError."""
    with pytest.raises(TypeError, match="input must be dict or list"):
        output_json("")
|
{"/tests/test_urls.py": ["/sreport.py"], "/tests/test_io.py": ["/sreport.py"], "/tests/test_summary.py": ["/sreport.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.