index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
37,049
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/tests/test_example.py
|
import os
from django.core import management
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.test.client import Client, RequestFactory
from django.utils import timezone
from django.db.models.fields import BigIntegerField, BooleanField, CharField, \
CommaSeparatedIntegerField, DateField, DateTimeField, DecimalField, \
EmailField, FilePathField, FloatField, IPAddressField, IntegerField, \
NullBooleanField, PositiveIntegerField, PositiveSmallIntegerField, \
SlugField, SmallIntegerField, TextField, AutoField
from django.db.models.fields.files import ImageField
from django.db.models.fields.related import OneToOneField
from jmbo.models import ModelBase, Image, ModelBaseImage
# Directory holding static test resources, relative to this test module.
RES_DIR = os.path.join(os.path.dirname(__file__), "res")
# Sample JPEG used by set_image() below.
IMAGE_PATH = os.path.join(RES_DIR, "image.jpg")
def set_image(obj):
    """Attach the test JPEG at IMAGE_PATH to ``obj`` via a ModelBaseImage row.

    Creates a fresh ``Image`` instance on every call; the caller remains
    responsible for saving ``obj`` afterwards.
    """
    image = Image.objects.create(title="Title")
    # Use a context manager so the source file handle is always closed
    # (the original leaked it).
    with open(IMAGE_PATH, "rb") as fp:
        image.image.save(
            os.path.basename(IMAGE_PATH),
            ContentFile(fp.read())
        )
    # Link image to the instance; the link row itself is not needed by
    # callers, so it is not kept in a local.
    ModelBaseImage.objects.create(modelbase=obj, image=image)
class TestExample(TestCase):
    """Smoke tests for the skeleton project: common URLs render and every
    Jmbo content type's detail page renders.

    NOTE(review): the ``print`` statements below are Python 2 syntax, so
    this module targets Python 2 (consistent with the old
    ``django.core.urlresolvers`` import).
    """

    @classmethod
    def setUpClass(cls):
        super(TestExample, cls).setUpClass()
        # Shared request factory and test client for all tests.
        cls.request = RequestFactory()
        cls.client = Client()
        # Post-syncdb steps
        management.call_command("load_photosizes", interactive=False)

    @classmethod
    def setUpTestData(cls):
        super(TestExample, cls).setUpTestData()
        # Editor user that owns the fabricated content instances.
        cls.editor = get_user_model().objects.create(
            username="editor",
            email="editor@test.com"
        )
        cls.editor.set_password("password")
        cls.editor.save()

    def test_common_urls(self):
        """High-level test to confirm common set of URLs render"""
        # todo: restore
        #urls = [
        #    (reverse("auth:login"), 200),
        #    (reverse("auth:logout"), 302),
        #    (reverse("auth:password_reset"), 200),
        #]
        urls = [
            (reverse("mote:home"), 200),
            ("/api/v1/", 200),
        ]
        # Each entry pairs a path with its expected HTTP status code.
        for url, code in urls:
            print "Checking path %s" % url
            response = self.client.get(url)
            self.assertEqual(response.status_code, code)

    def test_detail_pages(self):
        """Create an instance of each Jmbo content type and render detail
        page"""
        modelbase_fieldnames = [f.name for f in ModelBase._meta.fields]
        for ct in ContentType.objects.all():
            model_class = ct.model_class()
            if (model_class is not None) \
                and issubclass(model_class, ModelBase):
                # Minimal required kwargs shared by all ModelBase subclasses.
                di = dict(
                    title=model_class.__name__,
                    description="Description",
                    state="published",
                    owner=self.editor,
                )
                # Set not null fields if possible
                skip = False
                for field in model_class._meta.fields:
                    # Fields inherited from ModelBase are already covered.
                    if field.name in modelbase_fieldnames:
                        continue
                    if field.name in di:
                        continue
                    if not field.null:
                        # Fabricate a type-appropriate dummy value per field
                        # class. NOTE(review): DateField is checked before
                        # DateTimeField, so datetime fields receive a full
                        # timestamp only because isinstance matches DateField
                        # first for dates — order is intentional here.
                        if isinstance(field, (IntegerField, SmallIntegerField,
                            BigIntegerField, PositiveIntegerField,
                            PositiveSmallIntegerField)):
                            di[field.name] = 1
                        elif isinstance(field, (CharField, TextField)):
                            di[field.name] = "a"
                        elif isinstance(field, FloatField):
                            di[field.name] = 1.0
                        elif isinstance(field, DateField):
                            di[field.name] = timezone.now().date()
                        elif isinstance(field, DateTimeField):
                            di[field.name] = timezone.now()
                        elif isinstance(field, (BooleanField, NullBooleanField)):
                            di[field.name] = True
                        elif isinstance(field, (AutoField, ImageField,
                            OneToOneField)):
                            pass
                        else:
                            # Unknown required field type: cannot fabricate
                            # a safe value, so skip this content type.
                            skip = True
                            break
                # Skip if issues expected
                if skip:
                    continue
                # Save. Continue on error. We did our best.
                try:
                    obj = model_class.objects.create(**di)
                except TypeError:
                    continue
                obj.sites = [1]
                set_image(obj)
                obj.save()
                # Test
                print "Checking %s detail page %s" \
                    % (model_class.__name__, obj.get_absolute_url())
                response = self.client.get(obj.get_absolute_url())
                self.assertEqual(response.status_code, 200)
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,050
|
praekelt/mobius-skeleton
|
refs/heads/develop
|
/skeleton/urls.py
|
from django.conf.urls import url, include
from django.views.generic.base import TemplateView
# Map the site root to the static skeleton home page.
home_view = TemplateView.as_view(template_name="skeleton/home.html")

urlpatterns = [
    url(r"^$", home_view, name="home"),
]
|
{"/skeleton/channels/routing.py": ["/skeleton/channels/consumers.py"], "/project/settings.py": ["/project/settings_local.py"], "/skeleton/admin.py": ["/skeleton/models.py"]}
|
37,057
|
MingzXIE/FinalProject
|
refs/heads/master
|
/navie_bayesian.py
|
from numpy import *
# Create the vocabulary list
def create_vocabulary_list(dataset):
    """Return the list of unique words appearing in any document of ``dataset``."""
    vocabulary = set()
    for document in dataset:
        vocabulary.update(document)
    return list(vocabulary)
# convert words to vector
def words_to_vector(vocabulary_list, input_set):
    """Return a binary bag-of-words vector for ``input_set``.

    Element ``i`` is 1 when ``vocabulary_list[i]`` occurs in ``input_set``,
    otherwise 0. Unknown words are reported and ignored.
    """
    vector_created = [0] * len(vocabulary_list)
    for word in input_set:
        if word in vocabulary_list:
            vector_created[vocabulary_list.index(word)] = 1
        else:
            # Bug fix: interpolate the actual word (the original formatted
            # the literal string 'word').
            print('the word: %s is not in my vocabulary! ' % word)
    return vector_created
def train_nb(matrix_to_train, labels):
    """Train a two-class naive Bayes model from word-count vectors.

    Returns (log P(word|class 0) vector, log P(word|class 1) vector,
    P(class 1) prior). Counts start at one and denominators at two
    (Laplace smoothing), exactly as in the original implementation.
    """
    doc_count = len(matrix_to_train)
    word_count = len(matrix_to_train[0])
    # Prior probability of the abusive (class 1) documents.
    abusive_probability = sum(labels) / float(doc_count)
    class_counts = {0: ones(word_count), 1: ones(word_count)}
    class_denoms = {0: 2.0, 1: 2.0}
    for row, label in zip(matrix_to_train, labels):
        key = 1 if label == 1 else 0
        class_counts[key] += row
        class_denoms[key] += sum(row)
    probability_1_vector = log(class_counts[1] / class_denoms[1])
    probability_0_vector = log(class_counts[0] / class_denoms[0])
    return probability_0_vector, probability_1_vector, abusive_probability
def nb_classify(vector_to_classify, frequency_0_vector, frequency_1_vector, probability_class_1):
    """Return 1 when the class-1 log-posterior beats class 0, else 0."""
    log_p0 = sum(vector_to_classify * frequency_0_vector) + log(1.0 - probability_class_1)
    log_p1 = sum(vector_to_classify * frequency_1_vector) + log(probability_class_1)
    return 0 if log_p0 > log_p1 else 1
def bag_of_words_model(vocabulary_list, input_set):
    """Return a word-count (bag-of-words) vector for ``input_set``.

    Element ``i`` counts how many times ``vocabulary_list[i]`` occurs in
    ``input_set``. Unknown words are reported and ignored.
    """
    vector_created = [0] * len(vocabulary_list)
    for word in input_set:
        if word in vocabulary_list:
            vector_created[vocabulary_list.index(word)] += 1
        else:
            # Bug fix: interpolate the actual word (the original formatted
            # the literal string 'word').
            print('the word: %s is not in my vocabulary! ' % word)
    return vector_created
# create the word list
def text_parse(big_string):
    r"""Tokenize ``big_string`` into lowercase words longer than two chars.

    Bug fix: split on runs of non-word characters (``\W+``). The original
    ``\W*`` pattern can match the empty string, which on Python 3.7+
    splits between every character and destroys the tokens.
    """
    import re
    list_of_tokens = re.split(r'\W+', big_string)
    return [tok.lower() for tok in list_of_tokens if len(tok) > 2]
def spam_test(number_of_files, spam_folder_name, ham_folder_name, testing_rate):
    """Train and evaluate the naive Bayes spam filter.

    Reads ``number_of_files`` spam and ham emails (named ``1.txt``,
    ``2.txt``, ...) from the two folders, holds out roughly
    ``number_of_files * testing_rate`` random documents for testing and
    prints the resulting error rate.
    """
    import os
    doc_list = []
    label_list = []
    full_text = []
    # Bug fix: read from the folders passed in; the original hard-coded one
    # absolute spam path and never read the ham folder at all.
    for i in range(1, number_of_files + 1):
        with open(os.path.join(spam_folder_name, '%d.txt' % i)) as fp:
            word_list = text_parse(fp.read())
        doc_list.append(word_list)
        full_text.extend(word_list)
        # label the spam 1
        label_list.append(1)
        with open(os.path.join(ham_folder_name, '%d.txt' % i)) as fp:
            word_list = text_parse(fp.read())
        doc_list.append(word_list)
        full_text.extend(word_list)
        # label the ham 0
        label_list.append(0)
    vocabulary_list = create_vocabulary_list(doc_list)  # create vocabulary list
    # Select the testing set randomly; the rest is used for training.
    # Bug fixes: range() must be materialised as a list before deleting from
    # it, and the test-set size must be an int (both TypeErrors on Python 3).
    training_set = list(range(len(doc_list)))
    test_set = []
    number_of_testing = int(number_of_files * testing_rate)
    for i in range(number_of_testing):
        rand_index = int(random.uniform(0, len(training_set)))
        test_set.append(training_set[rand_index])
        del training_set[rand_index]
    # Build the training matrix.
    train_matrix = []
    train_labels = []
    for doc_index in training_set:
        train_matrix.append(bag_of_words_model(vocabulary_list, doc_list[doc_index]))
        train_labels.append(label_list[doc_index])
    p0V, p1V, pSpam = train_nb(array(train_matrix), array(train_labels))
    # Test and report the error rate.
    error_count = 0
    for doc_index in test_set:
        word_vector = bag_of_words_model(vocabulary_list, doc_list[doc_index])
        # Bug fix: classify the held-out document's vector; the original
        # mistakenly passed the whole training matrix.
        if nb_classify(array(word_vector), p0V, p1V, pSpam) != label_list[doc_index]:
            error_count += 1
            print("classification error", doc_list[doc_index])
    print('the error rate is: ', float(error_count) / len(test_set))
spam_test(25, 'nbtesting/spam/', 'nbtesting/ham/', 0.1)
|
{"/algorithm_integration.py": ["/kNN.py", "/decision_tree.py"]}
|
37,058
|
MingzXIE/FinalProject
|
refs/heads/master
|
/algorithm_integration.py
|
from kNN import *
from decision_tree import *
|
{"/algorithm_integration.py": ["/kNN.py", "/decision_tree.py"]}
|
37,059
|
MingzXIE/FinalProject
|
refs/heads/master
|
/logistic_regression.py
|
from numpy import *
def sigmoid(inx):
    """Logistic function: map any real (or elementwise array) input to (0, 1)."""
    denominator = 1 + exp(-inx)
    return 1.0 / denominator
def grad_ascent(data_mat_in, class_labels):  # e.g. a 100x3 input matrix
    """Batch gradient ascent for logistic regression.

    ``data_mat_in`` rows are samples and columns features. Returns the
    learned (n, 1) weight matrix after a fixed 500 iterations.
    """
    samples = mat(data_mat_in)                # numpy matrix: rows=samples, cols=features
    targets = mat(class_labels).transpose()   # column vector of labels
    _rows, cols = shape(samples)
    alpha = 0.001       # step length
    max_cycles = 500    # iteration count
    weights = ones((cols, 1))
    for _ in range(max_cycles):
        predicted = sigmoid(samples * weights)
        residual = targets - predicted        # difference between real and predicted
        weights = weights + alpha * samples.transpose() * residual
    return weights                            # best parameters found
def stoc_grad_ascent1(data_matrix, class_labels, num_iter=150):
    """Stochastic gradient ascent: one randomly drawn sample per update.

    NOTE(review): as in the original, ``rand_index`` indexes ``data_matrix``
    directly rather than through the shrinking index list — preserved
    verbatim to keep behaviour identical.
    """
    rows, cols = shape(data_matrix)
    weights = ones(cols)
    for sweep in range(num_iter):
        remaining = list(range(rows))
        for step in range(rows):
            # Learning rate decays with progress but never reaches zero.
            alpha = 4 / (1.0 + sweep + step) + 0.01
            pick = int(random.uniform(0, len(remaining)))
            h = sigmoid(sum(data_matrix[pick] * weights))
            residual = class_labels[pick] - h
            weights = weights + alpha * residual * data_matrix[pick]
            del remaining[pick]
    return weights
def classifyVector(in_x, weights):
    """Threshold the sigmoid activation at 0.5 to produce a 0.0/1.0 label."""
    return 1.0 if sigmoid(sum(in_x * weights)) > 0.5 else 0.0
def de_predict(file1, file2):
    """Train logistic regression on ``file1`` and report the error rate on ``file2``.

    Each line holds 21 tab-separated feature columns followed by the class
    label. Returns the test error rate as a float.
    """
    training_set = []
    training_labels = []
    # Context managers close the file handles (the original leaked both).
    with open(file1) as fr_train:
        for line in fr_train.readlines():
            curr_line = line.strip().split('\t')
            line_arr = [float(curr_line[i]) for i in range(21)]
            training_set.append(line_arr)
            # Bug fix: append the label column (index 21); the original
            # appended the feature row itself as the label.
            training_labels.append(float(curr_line[21]))
    train_weights = stoc_grad_ascent1(array(training_set), training_labels, 500)
    error_count = 0
    num_test_vec = 0.0
    with open(file2) as fr_test:
        for line in fr_test.readlines():
            num_test_vec += 1.0
            curr_line = line.strip().split('\t')
            line_arr = [float(curr_line[i]) for i in range(21)]
            if int(classifyVector(array(line_arr), train_weights)) != int(curr_line[21]):
                error_count += 1
    error_rate = float(error_count) / num_test_vec
    print("the error rate of this test is:%f" % error_rate)
    return error_rate
#
def multi_test(file1, file2):
    """Run de_predict() ten times and print the average error rate."""
    num_tests = 10
    error_sum = 0.0
    for k in range(num_tests):
        error_sum += de_predict(file1, file2)
    # Bug fix: the original had a bare `print` followed by the message on
    # its own line, so the string was evaluated but never printed.
    print("after %d iterations the average error rate is: %f"
          % (num_tests, error_sum / float(num_tests)))
# Module-level runs: one train/test pass, then the ten-run average.
de_predict('logisticregtraining.txt', 'logisticregtesting.txt')
multi_test('logisticregtraining.txt', 'logisticregtesting.txt')
|
{"/algorithm_integration.py": ["/kNN.py", "/decision_tree.py"]}
|
37,060
|
MingzXIE/FinalProject
|
refs/heads/master
|
/decision_tree.py
|
from math import log
import operator
def calculate_shannon_entropy(dataset):
    """Shannon entropy (base 2) of the class labels in ``dataset``.

    The class label is the last element of each feature vector.
    """
    counts = {}
    for feature_vector in dataset:
        label = feature_vector[-1]
        counts[label] = counts.get(label, 0) + 1
    total = len(dataset)
    entropy = 0.0
    for count in counts.values():
        p = float(count) / total
        entropy -= p * log(p, 2)
    return entropy
def split_dataset(dataset, axis, value):
    """Rows of ``dataset`` whose ``axis`` column equals ``value``, with that column removed."""
    return [row[:axis] + row[axis + 1:] for row in dataset if row[axis] == value]
def choose_best_feature_to_split(dataset):
    """Index of the feature with the highest information gain (-1 if none helps)."""
    feature_count = len(dataset[0]) - 1
    base_entropy = calculate_shannon_entropy(dataset)
    best_gain = 0.0
    best_feature = -1
    total = float(len(dataset))
    for feature_index in range(feature_count):
        # Weighted entropy of the partition induced by this feature.
        unique_vals = set([row[feature_index] for row in dataset])
        split_entropy = 0.0
        for value in unique_vals:
            subset = split_dataset(dataset, feature_index, value)
            weight = len(subset) / total
            split_entropy += weight * calculate_shannon_entropy(subset)
        gain = base_entropy - split_entropy
        if gain > best_gain:  # larger gain is better
            best_gain = gain
            best_feature = feature_index
    return best_feature
# vote function
def vote_function(class_list):
    """Majority vote: return the most frequent class label in ``class_list``."""
    tallies = {}
    for vote in class_list:
        tallies[vote] = tallies.get(vote, 0) + 1
    winner, _count = max(tallies.items(), key=operator.itemgetter(1))
    return winner
# Create the decision tree via recursion
def create_tree(dataset, labels):
    """Recursively grow an ID3 decision tree as nested dicts.

    Leaves are class labels; internal nodes map a feature label to a dict
    of feature-value -> subtree.
    """
    classes = [row[-1] for row in dataset]
    # All rows share one class: make a leaf.
    if classes.count(classes[0]) == len(classes):
        return classes[0]
    # No features left to split on: majority-vote leaf.
    if len(dataset[0]) == 1:
        return vote_function(classes)
    split_index = choose_best_feature_to_split(dataset)
    split_label = labels[split_index]
    remaining_labels = labels[:]
    del remaining_labels[split_index]
    branches = {}
    for value in set([row[split_index] for row in dataset]):
        branches[value] = create_tree(split_dataset(dataset, split_index, value), remaining_labels)
    return {split_label: branches}
def dt_predict(trained_tree, label_list, vector_to_predict):
    """Classify ``vector_to_predict`` by walking the nested-dict decision tree."""
    root_feature = next(iter(trained_tree.keys()))
    subtree = trained_tree[root_feature]
    # Map the feature label back to its position in the input vector.
    branch = subtree[vector_to_predict[label_list.index(root_feature)]]
    if isinstance(branch, dict):
        return dt_predict(branch, label_list, vector_to_predict)
    return branch
# store and grab the tree with file
def store_tree(input_tree, file_name):
    """Serialize ``input_tree`` to ``file_name`` with pickle.

    Uses a context manager so the handle is closed even if dumping raises
    (the original's explicit close was skipped on error); plain ``'wb'``
    replaces the unnecessary ``'wb+'`` mode.
    """
    import pickle
    with open(file_name, 'wb') as fw:
        pickle.dump(input_tree, fw)
def grab_tree(file_name):
    """Load a pickled decision tree from ``file_name``.

    Uses a context manager so the file handle is closed (the original
    leaked it).
    """
    import pickle
    with open(file_name, 'rb') as fr:
        return pickle.load(fr)
def file_to_tree(file_name, label_list):
    """Read a tab-separated training file and grow a decision tree from it.

    Uses a context manager so the file handle is closed (the original
    leaked it).
    """
    with open(file_name) as fr:
        target_dataset = [inst.strip().split('\t') for inst in fr.readlines()]
    return create_tree(target_dataset, label_list)
# Module-level demo: build a tree from the lenses-style dataset and
# classify one sample.
new_label = ['age', 'prescript', 'astigmatic', 'tearRate']
new_tree = file_to_tree('decisiontreetesting.txt', new_label)
print(dt_predict(new_tree, new_label, ['presbyopic', 'hyper', 'yes', 'normal']))
|
{"/algorithm_integration.py": ["/kNN.py", "/decision_tree.py"]}
|
37,061
|
MingzXIE/FinalProject
|
refs/heads/master
|
/kNN.py
|
from numpy import *
import operator
def file_to_matrix(file_name, n):  # n is the number of the attributes
    """Parse a tab-separated data file into a feature matrix and label list.

    Args:
        file_name: path to a file whose lines hold ``n`` feature columns
            followed by an integer class label.
        n: number of feature columns per line.

    Returns:
        (numpy array of shape (num_lines, n), list of int labels).
    """
    # Context manager closes the handle (the original left it open).
    with open(file_name) as fr:
        lines = fr.readlines()
    matrix_created = zeros((len(lines), n))
    classify_label_vector = []
    for index, line in enumerate(lines):
        line = line.strip()  # delete all the '\n'
        elements_list = line.split('\t')
        matrix_created[index, :] = elements_list[0: n]
        classify_label_vector.append(int(elements_list[-1]))  # the last element is the label
    return matrix_created, classify_label_vector
# Principle of normalization: new_value = (old_value-min)/(max-min)
# Principle of normalization: new_value = (old_value - min) / (max - min)
def normalization(dataset):
    """Min-max normalize ``dataset`` column-wise into [0, 1].

    Returns (normalized dataset, per-column ranges, per-column minimums);
    the last two let callers normalize future query points consistently.
    """
    min_value = dataset.min(0)
    max_value = dataset.max(0)
    ranges = max_value - min_value
    rows = dataset.shape[0]
    shifted = dataset - tile(min_value, (rows, 1))
    normalized_dataset = shifted / tile(ranges, (rows, 1))
    return normalized_dataset, ranges, min_value
def knn_classify(unlabelled_data, dataset, labels, k):
    """Classify one sample by majority vote among its k nearest neighbours."""
    rows = dataset.shape[0]
    # Euclidean distance from the query to every training row.
    diffs = tile(unlabelled_data, (rows, 1)) - dataset
    distances = (diffs ** 2).sum(axis=1) ** 0.5
    nearest = distances.argsort()
    votes = {}
    # Tally the labels of the k closest rows.
    for rank in range(k):
        label = labels[nearest[rank]]
        votes[label] = votes.get(label, 0) + 1
    ranked = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
    # The winning label classifies the unlabelled data.
    return ranked[0][0]
# return the error rate
# return the error rate
def knn_test(file_input, k, n, test_ratio):
    """Hold out the first ``test_ratio`` fraction of rows and return the kNN error rate."""
    matrix, labels = file_to_matrix(file_input, n)
    normalized, _ranges, _minimum = normalization(matrix)
    total = normalized.shape[0]
    test_count = int(total * test_ratio)
    errors = 0.0
    for i in range(test_count):
        predicted = knn_classify(normalized[i, :], normalized[test_count:total, :], labels[test_count:total], k)
        print("the result classified by classifier is: %d, the real answer is: %d" % (predicted, labels[i]))
        if predicted != labels[i]:
            errors += 1.0
    return errors / float(test_count)
# Module-level smoke run: 3 features, k=3, half the file held out for testing.
real_error_rate = knn_test('testset/knntesting.txt', 3, 3, 0.5)
print(real_error_rate)
def string_to_list(string_input, n):
    """Parse a comma-separated string of numbers into a list of floats.

    NOTE(review): ``n`` is unused; kept for interface compatibility with
    existing callers.
    """
    return [float(token) for token in string_input.strip().split(',')]
def knn_predict(array_to_predict, file_input, k, n):
    """Normalize a query point against the training data and classify it with kNN."""
    matrix, labels = file_to_matrix(file_input, n)
    normalized, ranges, minimum = normalization(matrix)
    scaled_query = (array_to_predict - minimum) / ranges
    return knn_classify(scaled_query, normalized, labels, k)
# array_to_predict_input = [10.0, 10000.0, 0.5 ]
# result = knn_predict(array_to_predict_input, 'testset/knntesting.txt', 3,3)
# print(result)
# string_input = "10,10000.0,0.5 "
# ntext = 3
# matrix_test = string_to_list(string_input, ntext)
# result = knn_predict(matrix_test, 'testset/knntesting.txt', 3,3)
# print(result)
#
# print(matrix_test)
|
{"/algorithm_integration.py": ["/kNN.py", "/decision_tree.py"]}
|
37,062
|
MingzXIE/FinalProject
|
refs/heads/master
|
/regression.py
|
from numpy import *
def load_dataset(file_name):
    """Load a tab-separated regression dataset.

    All columns except the last are features; the last column is the
    target. Returns (list of feature rows, list of float targets).
    """
    # Read the file once with a context manager; the original opened it
    # twice (once just for the header) and closed neither handle.
    with open(file_name) as fr:
        lines = fr.readlines()
    if not lines:
        return [], []
    num_of_feature = len(lines[0].split('\t')) - 1
    data_matrix = []
    label_matrix = []
    for line in lines:
        cur_line = line.strip().split('\t')
        data_matrix.append([float(cur_line[i]) for i in range(num_of_feature)])
        label_matrix.append(float(cur_line[-1]))
    return data_matrix, label_matrix
def stand_regression(xArr, yArr):
    """Ordinary least squares: solve for weights via the normal equations.

    Returns the weight column matrix, or None (after a warning) when
    X'X is singular and cannot be inverted.
    """
    x_matrix = mat(xArr)
    y_matrix = mat(yArr).T
    gram = x_matrix.T * x_matrix
    if linalg.det(gram) == 0.0:
        print("This matrix is singular, cannot do inverse")
        return
    return gram.I * (x_matrix.T * y_matrix)
# Module-level demo: fit OLS on the sample file and predict one input.
x_array, y_array = load_dataset('regressiontesting.txt')
print(x_array)
print(y_array)
ws = stand_regression(x_array, y_array)
print(ws)
# Single query point: bias term 1.0 plus one feature value.
user_input = [1.0, 0.92577]
x_input = mat(user_input)
y_predict = x_input * ws
print(y_predict)
|
{"/algorithm_integration.py": ["/kNN.py", "/decision_tree.py"]}
|
37,063
|
MingzXIE/FinalProject
|
refs/heads/master
|
/logistic_regression1.py
|
from numpy import *
# def load_dataset(file_name):
# data_matrix=[];label_matrix=[]
# fr=open(file_name)
# for line in fr.readlines():
# line_array = line.strip().split()
# data_matrix.append([1.0,float(line_array[0]),float(line_array[1])])
# label_matrix.append(int(line_array[2]))
# return data_matrix,label_matrix
def sigmoid(inx):
    """Logistic function; works elementwise on numpy inputs as well as scalars."""
    return 1.0 / (exp(-inx) + 1)
def grad_ascent(data_matrix_input, labels_input, iterate_num):
    """Batch gradient ascent for logistic regression.

    Args:
        data_matrix_input: sample rows (one feature per column).
        labels_input: 0/1 class labels, one per sample.
        iterate_num: number of gradient steps to run.

    Returns:
        The learned (n, 1) weight matrix.
    """
    # convert the set into numpy matrix
    data_matrix = mat(data_matrix_input)
    label_mat = mat(labels_input).transpose()
    m, n = shape(data_matrix)
    alpha = 0.001  # learning rate
    # Bug fix: honour the caller-supplied iteration count; the original
    # accepted iterate_num but ignored it in favour of a hard-coded 500.
    max_cycles = iterate_num
    weights = ones((n, 1))
    # iterate
    for k in range(max_cycles):
        h = sigmoid(data_matrix * weights)  # predictions as a column vector
        error = (label_mat - h)  # difference between real and predicted type
        weights = weights + alpha * data_matrix.transpose() * error
    return weights  # return the best parameter
def stoc_grad_ascent(data_matrix, class_labels, num_iter=150):
    """Stochastic gradient ascent: update on one randomly drawn sample at a time.

    NOTE(review): as in the original, ``rand_index`` indexes ``data_matrix``
    directly rather than through the shrinking index list — preserved
    verbatim to keep behaviour identical.
    """
    rows, cols = shape(data_matrix)
    weights = ones(cols)
    for sweep in range(num_iter):
        unused = list(range(rows))  # python3 change: dataIndex=range(m)
        for step in range(rows):
            # Learning rate decays as the iterations rise but never hits zero.
            alpha = 4 / (1.0 + sweep + step) + 0.01
            pick = int(random.uniform(0, len(unused)))
            h = sigmoid(sum(data_matrix[pick] * weights))
            residual = class_labels[pick] - h
            weights = weights + alpha * residual * data_matrix[pick]
            del unused[pick]
    return weights
def classify_vector(inX, weights):
    """Threshold the sigmoid activation at 0.5 to produce a 0.0/1.0 label."""
    return 1.0 if sigmoid(sum(inX * weights)) > 0.5 else 0.0
def logistic_reg_train(train_file, test_file):
    """Train on ``train_file`` and return the error rate on ``test_file``.

    Each line holds 21 tab-separated feature columns followed by a 0/1
    class label.
    """
    training_set = []
    training_labels = []
    # Context managers close the handles (the original leaked both files).
    with open(train_file) as fr_train:
        for line in fr_train.readlines():
            split_line = line.strip().split('\t')
            line_arr = [float(split_line[i]) for i in range(21)]
            training_set.append(line_arr)
            training_labels.append(float(split_line[21]))
    train_weights = stoc_grad_ascent(array(training_set), training_labels, 500)
    error_count = 0
    num_test_vec = 0.0
    with open(test_file) as fr_test:
        for line in fr_test.readlines():
            num_test_vec += 1.0
            curr_line = line.strip().split('\t')
            line_arr = [float(curr_line[i]) for i in range(21)]
            if int(classify_vector(array(line_arr), train_weights)) != int(curr_line[21]):
                error_count += 1
    error_rate = (float(error_count) / num_test_vec)
    print("the error rate of this test is: %f" % error_rate)
    return error_rate
def multi_test(train_file, test_file):
    """Average the error rate of logistic_reg_train() over ten runs."""
    runs = 10
    total_error = 0.0
    for _ in range(runs):
        total_error += logistic_reg_train(train_file, test_file)
    print("after %d iterations the average error rate is: %f" % (runs, total_error / float(runs)))
# Module-level runs: one train/test pass, then the ten-run average.
logistic_reg_train('logisticregtraining.txt', 'logisticregtesting.txt')
multi_test('logisticregtraining.txt', 'logisticregtesting.txt')
|
{"/algorithm_integration.py": ["/kNN.py", "/decision_tree.py"]}
|
37,064
|
MingzXIE/FinalProject
|
refs/heads/master
|
/support_vector_machines.py
|
from numpy import *
import time
def load_dataset(file_name, k):
    """Load a tab-separated SVM dataset with ``k`` feature columns.

    Returns (list of k-element feature rows, list of float labels). For
    k == 2 the label is read from column 2; for k in 3..5 it is read from
    the last column, matching the original's intent.
    """
    data_matrix = []
    label_matrix = []
    # Context manager closes the file handle (the original leaked it).
    with open(file_name) as fr:
        for line in fr.readlines():
            line_array = line.strip().split('\t')
            if k == 2:
                data_matrix.append([float(line_array[0]), float(line_array[1])])
                label_matrix.append(float(line_array[2]))
            elif k in (3, 4, 5):
                # Bug fix: the original called list.append() with several
                # arguments here (a TypeError at runtime); the features
                # must be collected into a single row list.
                data_matrix.append([float(line_array[col]) for col in range(k)])
                label_matrix.append(float(line_array[-1]))
    return data_matrix, label_matrix
def select_rand(i, m):
    """Return a random integer index in [0, m) that differs from ``i``."""
    candidate = i
    while candidate == i:
        candidate = int(random.uniform(0, m))
    return candidate
def clip_alpha(aj, H, L):
    """Clamp ``aj`` into [L, H]; like the original, L wins when L > H."""
    return max(L, min(aj, H))
def smo(data_matrix_in, class_labels, constant, tolerate, iterate):
    """Simplified SMO (sequential minimal optimization) for a linear SVM.

    Args:
        data_matrix_in: sample rows (features per column).
        class_labels: +1/-1 labels, one per sample.
        constant: box constraint C on the alphas.
        tolerate: KKT violation tolerance.
        iterate: number of consecutive sweeps without any alpha change
            required before terminating.

    Returns:
        (b, alphas): the bias term and the (m, 1) alpha column matrix.
    """
    data_matrix = mat(data_matrix_in); label_matrix = mat(class_labels).transpose()
    b = 0; m,n = shape(data_matrix)
    alphas = mat(zeros((m,1)))
    # NOTE(review): `iter` shadows the builtin; counts alpha-stable sweeps.
    iter = 0
    while (iter < iterate):
        alpha_pairs_changed = 0
        for i in range(m):
            # Decision value and error for sample i under current alphas.
            fXi = float(multiply(alphas,label_matrix).T*(data_matrix * data_matrix[i,:].T)) + b
            Ei = fXi - float(label_matrix[i])
            # Only optimise alphas that violate the KKT conditions by more
            # than the tolerance and are still inside the box (0, C).
            if ((label_matrix[i]*Ei < -tolerate) and (alphas[i] < constant)) or ((label_matrix[i]*Ei > tolerate) and (alphas[i] > 0)):
                # Pick a random second alpha j != i and compute its error.
                j = select_rand(i,m)
                fXj = float(multiply(alphas,label_matrix).T*(data_matrix * data_matrix[j,:].T)) + b
                Ej = fXj - float(label_matrix[j])
                alpha_Iold = alphas[i].copy()
                alpha_Jold = alphas[j].copy();
                # Bounds L, H keep alpha_j inside the box while preserving
                # the linear constraint sum(alpha_i * y_i) = 0.
                if (label_matrix[i] != label_matrix[j]):
                    L = max(0, alphas[j] - alphas[i])
                    H = min(constant, constant + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - constant)
                    H = min(constant, alphas[j] + alphas[i])
                if L==H:
                    print("L==H")
                    continue
                # eta is the (negated) second derivative of the objective
                # along the constraint line; must be negative to proceed.
                eta = 2.0 * data_matrix[i,:] * data_matrix[j, :].T - data_matrix[i, :] * data_matrix[i,:].T - data_matrix[j,:] * data_matrix[j,:].T
                if eta >= 0: print("eta>=0"); continue
                # Update alpha_j along the constraint and clip into [L, H].
                alphas[j] -= label_matrix[j]*(Ei - Ej)/eta
                alphas[j] = clip_alpha(alphas[j], H, L)
                if abs(alphas[j] - alpha_Jold) < 0.00001:
                    print("j not moving enough")
                    continue
                # Move alpha_i by the same amount in the opposite direction.
                alphas[i] += label_matrix[j] * label_matrix[i] * (alpha_Jold - alphas[j])
                # Recompute the bias from whichever alpha is interior.
                b1 = b - Ei- label_matrix[i]*(alphas[i]-alpha_Iold) * data_matrix[i,:] * data_matrix[i,:].T - label_matrix[j] * (alphas[j]-alpha_Jold) * data_matrix[i,:] * data_matrix[j,:].T
                b2 = b - Ej- label_matrix[i]*(alphas[i]-alpha_Iold) * data_matrix[i,:] * data_matrix[j,:].T - label_matrix[j]*(alphas[j]-alpha_Jold)*data_matrix[j,:]*data_matrix[j,:].T
                if (0 < alphas[i]) and (constant > alphas[i]): b = b1
                elif (0 < alphas[j]) and (constant > alphas[j]): b = b2
                else: b = (b1 + b2)/2.0
                alpha_pairs_changed += 1
                print("iter: %d i:%d, pairs changed %d" % (iter, i , alpha_pairs_changed))
        # A sweep with no changes counts toward convergence; any change
        # resets the counter.
        if alpha_pairs_changed == 0:
            iter += 1
        else:
            iter = 0
        print("iteration number: %d" % iter)
    return b, alphas
# Module-level timed smoke run on the bundled 2-feature dataset.
start_time = time.time()
data, label = load_dataset('svm_testSet.txt', 2)
b, alphas = smo(data, label, 0.6, 0.001, 40)
print(b)
# Shape of the support-vector alphas (those strictly greater than zero);
# NOTE(review): the result is computed but not printed.
shape(alphas[alphas>0])
end_time = time.time()
print(end_time - start_time)
|
{"/algorithm_integration.py": ["/kNN.py", "/decision_tree.py"]}
|
37,098
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/metrics/metrics.py
|
from __future__ import absolute_import, division
import numpy as np
def center_error(rects1, rects2):
    r"""Center error: Euclidean distance between box centers.

    Boxes are (..., 4) arrays laid out as (x, y, w, h).
    """
    half_extent1 = (rects1[..., 2:] - 1) / 2
    half_extent2 = (rects2[..., 2:] - 1) / 2
    delta = (rects1[..., :2] + half_extent1) - (rects2[..., :2] + half_extent2)
    return np.sqrt(np.sum(np.power(delta, 2), axis=-1))
def rect_iou(rects1, rects2):
    r"""Intersection over union of (x, y, w, h) boxes, clipped to [0, 1]."""
    assert rects1.shape == rects2.shape
    inter = _intersection(rects1, rects2)
    inter_area = np.prod(inter[..., 2:], axis=-1)
    union_area = (np.prod(rects1[..., 2:], axis=-1)
                  + np.prod(rects2[..., 2:], axis=-1)
                  - inter_area)
    # eps guards against division by zero for degenerate boxes.
    ious = inter_area / (union_area + np.finfo(float).eps)
    return np.clip(ious, 0.0, 1.0)
def _intersection(rects1, rects2):
r"""Rectangle intersection.
"""
assert rects1.shape == rects2.shape
x1 = np.maximum(rects1[..., 0], rects2[..., 0])
y1 = np.maximum(rects1[..., 1], rects2[..., 1])
x2 = np.minimum(rects1[..., 0] + rects1[..., 2],
rects2[..., 0] + rects2[..., 2])
y2 = np.minimum(rects1[..., 1] + rects1[..., 3],
rects2[..., 1] + rects2[..., 3])
w = np.maximum(x2 - x1, 0)
h = np.maximum(y2 - y1, 0)
return np.stack([x1, y1, w, h]).T
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,099
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/datasets/uav.py
|
from __future__ import absolute_import, print_function
import os
import glob
import numpy as np
import io
import six
from itertools import chain
class UAV(object):
    """UAV123 tracking-benchmark sequence loader.

    Expects ``root_dir`` to contain ``anno/UAV123/<seq>.txt`` annotation
    files and ``data_seq/UAV123/<seq>/`` frame folders.
    """

    def __init__(self, root_dir, sequences=None):
        super(UAV, self).__init__()
        self.root_dir = root_dir
        # Sequence names are derived from the annotation file names
        # (stripping the trailing ".txt").
        seq_names = [os.path.basename(seq)[:-4] for seq in glob.glob(os.path.join(self.root_dir, "anno", "UAV123", "*.txt"))]
        # Optionally restrict to a caller-supplied subset of sequences.
        self.seq_names = seq_names if sequences is None else [seq for seq in seq_names if seq in sequences]
        self.anno_files = [os.path.join(root_dir, "anno", "UAV123", s + ".txt") for s in self.seq_names]
        self.seq_dirs = [os.path.join(root_dir, "data_seq", "UAV123", seq_name) for seq_name in self.seq_names]

    def __getitem__(self, index):
        """Return (sorted jpg frame paths, (N, 4) annotation array) for one sequence."""
        img_files = sorted(glob.glob(os.path.join(self.seq_dirs[index], '*.jpg')))
        # to deal with different delimiters: normalise commas to spaces
        # before handing the text to loadtxt
        with open(self.anno_files[index], 'r') as f:
            anno = np.loadtxt(io.StringIO(f.read().replace(',', ' ')))
        # One annotation row per frame, four values (a bounding box) per row.
        assert len(img_files) == len(anno)
        assert anno.shape[1] == 4
        return img_files, anno

    def __len__(self):
        return len(self.seq_names)
if __name__ == "__main__":
    # Manual smoke test: load the first UAV123 sequence from the configured path.
    from configs import cfg
    uav = UAV(cfg.PATH.UAV, sequences=None)
    uav[0]
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,100
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/datasets/coco.py
|
import torch
import cv2
from torch.utils.data import Dataset
import os
from utils import IoU
from transforms.transforms import *
from pycocotools.coco import COCO
class CocoDetection(Dataset):
    """Wrapper over pycocotools COCO annotations that samples
    (image path, bbox, mask, category_id) tuples."""

    def __init__(self, root, annFile):
        # root: image directory; annFile: COCO JSON annotation file.
        self.root = root
        self.coco = COCO(annFile)
        self.ids = list(self.coco.imgs.keys())

    def getFromCategory(self, category_id):
        """Sample a random image containing ``category_id`` and draw an annotation from it."""
        imgIds = self.coco.getImgIds(catIds=category_id);
        img_id = imgIds[np.random.randint(len(imgIds))]
        return self.getFromImageId(img_id, category_id=category_id)

    def getFromImageId(self, img_id, category_id=None):
        """Pick one random, sufficiently large annotation from an image.

        Returns (img_file, bbox in x1y1x2y2, binary mask, category_id);
        resamples a random dataset item when no suitable annotation exists.
        """
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        anns = self.coco.loadAnns(ann_ids)
        img_name = self.coco.loadImgs(img_id)[0]['file_name']
        img_file = os.path.join(self.root, img_name)
        if category_id is not None:
            anns = list(filter(lambda x: x["category_id"] == category_id, anns))
        # Deal with the case where no annotation exists in the image
        if len(anns) == 0:
            return self.getRandom()
        anno = anns[np.random.randint(len(anns))]
        # Deal with the case of too small objects
        if anno["area"] < 500:
            return self.getRandom()
        catgory_id = anno["category_id"]
        bbox = np.array(anno["bbox"])
        mask = self.coco.annToMask(anno)
        # Convert bounding box to x1y1x2y2 format.
        bbox = format_from_to(bbox, "x1y1wh", "x1y1x2y2")
        return img_file, bbox, mask, catgory_id

    def __getitem__(self, index):
        # Index into the stable list of COCO image ids.
        img_id = self.ids[index]
        return self.getFromImageId(img_id)

    def getRandom(self):
        """Sample a uniformly random item from the dataset."""
        return self[np.random.randint(len(self))]

    def __len__(self):
        return len(self.ids)
class COCODistractor(Dataset):
    """Positive-pair sampler that pastes a same-category distractor into the
    search image, to make the pair harder.

    Returns (img_z, img_x, bbox_z, bbox_x, bbox_xprev) with CHW float images
    in [0, 1] and boxes in percentage x1y1x2y2 coordinates.
    """

    def __init__(self, cocodataset, size):
        """
        cocodataset: a CocoDetection-like sampler.
        size: virtual epoch length (samples are drawn randomly each call).
        """
        self.dataset = cocodataset
        self.size = size
        self.transform = Compose([
            ConvertFromInts(),
            ToAbsoluteCoords(),
            Crop(center_at_pred=True, context_amount=1.5),
            ToPercentCoords(),
            Resize(300),
        ])

    def __getitem__(self, index):
        # Draw a random target instance; `index` is ignored on purpose.
        img_file, bbox, _, catgory_id = self.dataset.getRandom()
        img = cv2.imread(img_file, cv2.IMREAD_COLOR)
        bbox = to_percentage_coords(bbox, img.shape)
        img_z, bbox_z, _ = self.transform(img, bbox)
        bbox_xprev = jitter_transform(bbox)
        img_x, bbox_x, bbox_xprev = self.transform(img, bbox, bbox_xprev)
        H, W, _ = img_x.shape
        abs_bbox = to_absolute_coords(bbox_x, img_x.shape)
        w, h = abs_bbox[2:] - abs_bbox[:2]
        # Import a distractor (an instance from the same category).
        img_file2, bbox2, mask2, _ = self.dataset.getFromCategory(catgory_id)
        img2 = cv2.imread(img_file2, cv2.IMREAD_COLOR)
        bbox2 = bbox2.astype(int)
        w2, h2 = bbox2[2:] - bbox2[:2]
        cropped_img = img2[bbox2[1]:bbox2[3], bbox2[0]:bbox2[2]]
        cropped_mask = mask2[bbox2[1]:bbox2[3], bbox2[0]:bbox2[2]]
        # Scale the distractor image so that it has the same size as the first one.
        ratio = np.sqrt(w * h) / np.sqrt(w2 * h2)
        if np.isinf(ratio) or np.isnan(ratio):
            # Degenerate (zero-area) box: resample a different pair.
            return self[np.random.randint(len(self))]
        w2, h2 = min(int(w2 * ratio), W - 1), min(int(h2 * ratio), H - 1)
        cropped_img = cv2.resize(cropped_img, (w2, h2))
        # BUGFIX: np.bool was deprecated and removed in NumPy >= 1.24; the
        # builtin `bool` is the documented replacement and is equivalent here.
        cropped_mask = cv2.resize(cropped_mask, (w2, h2)).astype(bool)
        # Try up to 10 placements (best effort; last try is kept regardless).
        for _ in range(10):
            x = np.random.randint(W - w2)
            y = np.random.randint(H - h2)
            bbox2 = to_percentage_coords(np.array([x, y, x + w2, y + h2]), img_x.shape)
            # Avoid too difficult cases where the distractor completely occludes the main instance
            if IoU(bbox_x, bbox2) < 0.30:
                break
        # Paste only the masked (object) pixels of the distractor.
        img_x[y:y + h2, x:x + w2][cropped_mask] = cropped_img[cropped_mask]
        # BGR -> RGB, scale to [0, 1], and convert HWC -> CHW tensors.
        img_z = cv2.cvtColor(img_z, cv2.COLOR_BGR2RGB) / 255.
        img_z = torch.from_numpy(img_z).permute(2, 0, 1).float()
        img_x = cv2.cvtColor(img_x, cv2.COLOR_BGR2RGB) / 255.
        img_x = torch.from_numpy(img_x).permute(2, 0, 1).float()
        bbox_z, bbox_x = torch.from_numpy(bbox_z).float(), torch.from_numpy(bbox_x).float()
        bbox_xprev = torch.from_numpy(bbox_xprev).float()
        return img_z, img_x, bbox_z, bbox_x, bbox_xprev

    def __len__(self):
        return self.size
class COCONegativePair(Dataset):
    """Negative-pair sampler: the exemplar and the search image show different
    instances of the same category, so the returned search-image boxes are
    zero placeholders (there is no groundtruth to regress to).
    """

    def __init__(self, cocodataset, size, cfg, transform=None):
        """
        cocodataset: a CocoDetection-like sampler.
        size: virtual epoch length.
        cfg: config node; cfg.MODEL.INPUT_RANGE rescales the [0, 1] pixels.
        transform: one transform (shared by both branches) or a [z, x] pair.
        """
        self.dataset = cocodataset
        self.size = size
        self.cfg = cfg
        if transform is not None and not isinstance(transform, list):
            self.transform = [transform, transform]
        else:
            self.transform = transform

    def __getitem__(self, index):
        # Exemplar: a random instance; search: another image of that category.
        file_z, box_z, _, cat_id = self.dataset.getRandom()
        file_x, box_x, _, _ = self.dataset.getFromCategory(cat_id)
        frame_z = cv2.imread(file_z, cv2.IMREAD_COLOR)
        frame_x = cv2.imread(file_x, cv2.IMREAD_COLOR)
        box_xprev = box_x  # previous-frame guide box (same as the current one)
        # Work in percentage coordinates from here on.
        box_z = to_percentage_coords(box_z, frame_z.shape)
        box_x = to_percentage_coords(box_x, frame_x.shape)
        box_xprev = to_percentage_coords(box_xprev, frame_x.shape)
        if self.transform is not None:
            frame_z, box_z, _ = self.transform[0](frame_z, box_z)
            frame_x, box_x, box_xprev = self.transform[1](frame_x, box_x, box_xprev)
        # BGR -> RGB, rescaled to [0, INPUT_RANGE].
        frame_z = self.cfg.MODEL.INPUT_RANGE * cv2.cvtColor(frame_z, cv2.COLOR_BGR2RGB) / 255.
        frame_x = self.cfg.MODEL.INPUT_RANGE * cv2.cvtColor(frame_x, cv2.COLOR_BGR2RGB) / 255.
        # HWC numpy arrays -> CHW float tensors.
        frame_z = torch.from_numpy(frame_z).permute(2, 0, 1).float()
        frame_x = torch.from_numpy(frame_x).permute(2, 0, 1).float()
        box_z = torch.from_numpy(box_z).float()
        # The search image doesn't contain the exemplar: zero groundtruth boxes.
        return frame_z, frame_x, box_z, torch.zeros(4), torch.zeros(4)

    def __len__(self):
        return self.size
class COCOPositivePair(Dataset):
    """Positive-pair sampler: exemplar and search are two crops of the *same*
    instance (the same image/box fed through two transforms).
    """

    def __init__(self, cocodataset, size, cfg, transform=None):
        """
        cocodataset: a CocoDetection-like sampler with a getRandom() method.
        size: virtual epoch length.
        cfg: config node; cfg.MODEL.INPUT_RANGE rescales the [0, 1] pixels.
        transform: one transform (shared by both branches) or a [z, x] pair;
            may be None (the raw image is then used for both branches).
        """
        self.dataset = cocodataset
        self.size = size
        self.cfg = cfg
        self.transform = transform
        if transform is not None and not isinstance(transform, list):
            self.transform = [transform, transform]

    def __getitem__(self, index):
        img_file, bbox, _, _ = self.dataset.getRandom()
        img = cv2.imread(img_file, cv2.IMREAD_COLOR)
        # Convert to percentage coordinates.
        bbox = to_percentage_coords(bbox, img.shape)
        if self.transform is not None:
            img_z, bbox_z, _ = self.transform[0](img, bbox)
            img_x, bbox_x, bbox_xprev = self.transform[1](img, bbox, bbox)
        else:
            # BUGFIX: these names were undefined when transform was None,
            # raising NameError below. Fall back to the untransformed pair.
            img_z, bbox_z = img, bbox
            img_x, bbox_x, bbox_xprev = img, bbox, bbox
        # Convert to RGB image, and scale values to [0, INPUT_RANGE].
        img_z = self.cfg.MODEL.INPUT_RANGE * cv2.cvtColor(img_z, cv2.COLOR_BGR2RGB) / 255.
        img_x = self.cfg.MODEL.INPUT_RANGE * cv2.cvtColor(img_x, cv2.COLOR_BGR2RGB) / 255.
        # Convert to PyTorch tensors (images go HWC -> CHW).
        img_z = torch.from_numpy(img_z).permute(2, 0, 1).float()
        img_x = torch.from_numpy(img_x).permute(2, 0, 1).float()
        bbox_z = torch.from_numpy(bbox_z).float()
        bbox_x = torch.from_numpy(bbox_x).float()
        bbox_xprev = torch.from_numpy(bbox_xprev).float()
        return img_z, img_x, bbox_z, bbox_x, bbox_xprev

    def __len__(self):
        return self.size
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,101
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/layers/resnet.py
|
import torch.nn as nn
from torchvision import models
import torch
class ResNet(nn.Module):
    """ResNet-50 feature trunk truncated after layer2.

    Input is normalized with ImageNet mean/std (so pixels are presumably in
    [0, 1] RGB — confirm against the caller); output is the layer2 feature map
    (1/8 resolution, 512 channels).
    """

    def __init__(self):
        super().__init__()
        backbone = models.resnet50(pretrained=True)
        # Attribute names are kept so existing checkpoints still load.
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu  # 1/2, 64
        self.maxpool = backbone.maxpool
        self.res2 = backbone.layer1  # 1/4, 256
        self.res3 = backbone.layer2  # 1/8, 512
        # Normalization constants as buffers so .to(device)/.cuda() moves them.
        self.register_buffer('mean', torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def forward(self, x):
        """Normalize `x` and run it through the truncated trunk."""
        out = (x - self.mean) / self.std
        for stage in (self.conv1, self.bn1, self.relu, self.maxpool, self.res2, self.res3):
            out = stage(out)
        return out
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,102
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/datasets/pairwise.py
|
from __future__ import absolute_import
import numpy as np
import torch
from torch.utils.data import Dataset
import cv2
from utils.bbox_utils import to_percentage_coords
class PairSampler(Dataset):
    """Samples (exemplar, search) frame pairs from one or several video datasets.

    Each underlying dataset item must be (img_files, anno), where `anno` holds
    one absolute x1y1x2y2 box per frame. Returned boxes are percentage
    coordinates; images are CHW float tensors scaled by cfg.MODEL.INPUT_RANGE.
    """

    def __init__(self, datasets, cfg, transform=None, pairs_per_video=1, frame_range=100, causal=False):
        """
        datasets: one dataset (reused for both branches) or a [z, x] pair.
        transform: one transform (shared) or a [z_transform, x_transform] pair.
        pairs_per_video: how many pairs to draw per sequence per epoch.
        frame_range: max |z - x| frame distance; 0 means search == exemplar.
        causal: when True, the search frame always comes after the exemplar.
        """
        super().__init__()
        self.datasets = datasets
        if not isinstance(datasets, list):
            self.datasets = [datasets, datasets]
        self.cfg = cfg
        self.transform = transform
        if transform is not None and not isinstance(transform, list):
            self.transform = [transform, transform]
        self.pairs_per_video = pairs_per_video
        self.frame_range = frame_range
        self.causal = causal
        # Flat index -> (dataset id, sequence id) lookup tables.
        self.dataset_indices, self.sequence_indices = self._merge_datasets(self.datasets, pairs_per_video)

    def __getitem__(self, index):
        if index >= len(self):
            raise IndexError('list index out of range')
        dataset_id = self.dataset_indices[index]
        sequence_id = self.sequence_indices[index]
        img_files, anno = self.datasets[dataset_id][sequence_id]
        rand_z, rand_x = self._sample_pair(len(img_files))
        img_z = cv2.imread(img_files[rand_z], cv2.IMREAD_COLOR)
        img_x = cv2.imread(img_files[rand_x], cv2.IMREAD_COLOR)
        bbox_z = anno[rand_z, :]
        bbox_x = anno[rand_x, :]
        bbox_xprev = anno[max(rand_x - 1, 0), :]  # Previous frame bounding box, to be used as guide
        # Convert to percentage coordinates.
        bbox_z = to_percentage_coords(bbox_z, img_z.shape)
        bbox_x = to_percentage_coords(bbox_x, img_x.shape)
        bbox_xprev = to_percentage_coords(bbox_xprev, img_x.shape)
        if self.transform is not None:
            img_z, bbox_z, _ = self.transform[0](img_z, bbox_z)
            img_x, bbox_x, bbox_xprev = self.transform[1](img_x, bbox_x, bbox_xprev)
        # Convert to RBG image, and scale values to [0, 1].
        img_z = self.cfg.MODEL.INPUT_RANGE * cv2.cvtColor(img_z, cv2.COLOR_BGR2RGB) / 255.
        img_x = self.cfg.MODEL.INPUT_RANGE * cv2.cvtColor(img_x, cv2.COLOR_BGR2RGB) / 255.
        # Convert to PyTorch Tensors (in particular for images, (w, h, c) is transformed to (c, w, h)).
        img_z = torch.from_numpy(img_z).permute(2, 0, 1).float()
        img_x = torch.from_numpy(img_x).permute(2, 0, 1).float()
        bbox_z = torch.from_numpy(bbox_z).float()
        bbox_x = torch.from_numpy(bbox_x).float()
        bbox_xprev = torch.from_numpy(bbox_xprev).float()
        return img_z, img_x, bbox_z, bbox_x, bbox_xprev

    def __len__(self):
        return len(self.sequence_indices)

    def _sample_pair(self, n):
        """Pick (exemplar, search) frame indices from a sequence of length n."""
        if self.causal:
            # Leave room for at least one later frame to act as the search.
            rand_z = np.random.choice(n - 1)
        else:
            rand_z = np.random.choice(n)
        if self.frame_range == 0:
            return rand_z, rand_z
        # Candidate search frames within +-frame_range of the exemplar.
        possible_x = np.arange(
            max(rand_z - self.frame_range, 1),  # Keep one previous frame (so that we can use it as guide)
            rand_z + self.frame_range + 1)
        possible_x = np.intersect1d(possible_x, np.arange(n))
        if self.causal:
            possible_x = possible_x[possible_x > rand_z]
        else:
            possible_x = possible_x[possible_x != rand_z]
        if possible_x.size > 0:
            rand_x = np.random.choice(possible_x)
        else:
            rand_x = n-1  # To avoid errors when the list of possible x is empty
        return rand_z, rand_x

    @staticmethod
    def _merge_datasets(datasets, pairs_per_video):
        """Build flat (dataset id, sequence id) index arrays over all datasets."""
        dataset_indices = np.concatenate(
            [np.repeat(i, len(dataset) * pairs_per_video) for i, dataset in enumerate(datasets)]).ravel()
        sequences_indices = np.concatenate(
            [np.tile(np.arange(len(dataset)), pairs_per_video) for dataset in datasets]).ravel()
        return dataset_indices, sequences_indices
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,103
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/layers/correlate.py
|
import torch
from torch.nn import functional as F
def correlate(x, z, padding=0):
    """Batch-wise cross-correlation: each search map x[i] is correlated with
    its own exemplar kernel z[i] via conv2d; results are re-stacked on dim 0.
    """
    responses = [
        F.conv2d(x[i:i + 1], z[i:i + 1], padding=padding)
        for i in range(x.size(0))
    ]
    return torch.cat(responses, dim=0)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,104
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/utils/visualize.py
|
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from .bbox_utils import to_absolute_coords, format_from_to
from itertools import cycle
import io
from PIL import Image
from torchvision import transforms
def draw_rectangle(bbox, color="r"):
    """
    Draw `bbox` as an unfilled rectangle on the current matplotlib axes.

    bbox: x1y1x2y2 format.
    """
    x1, y1, w, h = format_from_to(bbox, "x1y1x2y2", "x1y1wh")
    axes = plt.gca()
    axes.add_patch(
        patches.Rectangle((x1, y1), w, h, linewidth=2, edgecolor=color, fill=False))
def plot_sample(img, bbox, title=None, gt_box=None, anchor=None, prev_bbox=None, anchor_id=None):
    """
    Show one sample image with its predicted and optional auxiliary boxes.

    img: (Tensor) CHW image.
    bbox: x1y1x2y2 format, percentage coordinates (like all other boxes).
    """
    plt.imshow(img.permute(1, 2, 0))
    draw_rectangle(to_absolute_coords(bbox, img.shape))
    # Optional overlays: groundtruth (blue), anchor (yellow), previous (green).
    for box, box_color in ((gt_box, "b"), (anchor, "y"), (prev_bbox, "g")):
        if box is not None:
            draw_rectangle(to_absolute_coords(box, img.shape), box_color)
    if anchor_id is not None:
        plt.gca().text(0.95, 0.95, "anchor_id: {}".format(anchor_id), transform = plt.gca().transAxes,
                       verticalalignment='top', horizontalalignment='right',
                       bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 10})
    if title:
        plt.gca().set_title(title)
    plt.axis("off")
def plot_pair(exemplar, search, title=None, gt_box=None, prev_bbox=None, anchor=None, anchor_id=None, correlation=None):
    """Plots a pair of samples (exemplar/search), plus an optional correlation map."""
    plt.tight_layout()
    if title:
        plt.suptitle(title)
    # Two panels normally, three when a correlation map is supplied.
    ncols = 2 if correlation is None else 3
    plt.subplot(1, ncols, 1)
    plot_sample(*exemplar, title="Exemplar")
    plt.subplot(1, ncols, 2)
    plot_sample(*search, title="Search", gt_box=gt_box, prev_bbox=prev_bbox, anchor=anchor, anchor_id=anchor_id)
    if correlation is not None:
        plt.subplot(1, ncols, 3)
        plt.imshow(correlation[0])
        plt.gca().set_title("Correlation map")
def plot_bboxes(anchors, format="x1y1wh", title=None, random_color=True):
    """
    Plots a list of bounding boxes on a unit axis (y flipped, image convention).
    """
    plt.xlim(0, 1)
    plt.ylim(1, 0)
    plt.gca().set_aspect('equal', adjustable='box')
    palette = cycle('bgrcmk')
    for box in anchors:
        # Cycle through colors, or draw everything in red.
        draw_rectangle(format_from_to(box, format, "x1y1x2y2"),
                       color=next(palette) if random_color else "r")
    if title:
        plt.gca().set_title(title)
def plot_to_tensor():
    """Render the current matplotlib figure to a (1, C, H, W) float tensor
    via an in-memory JPEG round-trip."""
    with io.BytesIO() as buffer:
        plt.savefig(buffer, format="jpeg")
        buffer.seek(0)
        pil_image = Image.open(buffer)
        # ToTensor reads the pixel data, so the buffer can close afterwards.
        tensor = transforms.ToTensor()(pil_image).unsqueeze(0)
    return tensor
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,105
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/siamConcatRPN.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.layers import *
from utils import utils, bbox_utils
class SiamConcatRPN(nn.Module):
    """Siamese RPN that fuses exemplar (z) and search (x) features by channel
    concatenation, then predicts SSD-style multi-scale box regressions (loc)
    and 2-class scores (conf) from several source feature maps.
    """

    def __init__(self, cfg):
        super().__init__()
        self.base = BaseNet()  # shared feature extractor for both branches
        self.cfg = cfg
        self.use_mask = cfg.TRAIN.USE_MASK
        self.use_correlation_guide = cfg.TRACKING.USE_CORRELATION_GUIDE
        # Global-convolution fusion blocks with shrinking kernels, each
        # followed by a 3x3 conv used as a residual branch.
        self.GC1 = GC(2048, 512, 512, kh=11, kw=11)
        self.convG1 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.GC2 = GC(512, 256, 512, kh=9, kw=9)
        self.convG2 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.GC3 = GC(512, 256, 512, kh=7, kw=7)
        self.convG3 = nn.Conv2d(512, 512, kernel_size=3, padding=1)
        self.RF = Refine(512, 256, 512)  # merges the skip feature r3 back in
        # Extra (mostly strided) convs producing progressively smaller source
        # maps; every second layer's output is collected as a source.
        self.extras = nn.ModuleList([
            nn.Conv2d(512, 256, kernel_size=1),
            nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),
            nn.Conv2d(512, 256, kernel_size=1),
            nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),
            nn.Conv2d(512, 128, kernel_size=1),
            nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
            nn.Conv2d(256, 128, kernel_size=1),
            nn.Conv2d(128, 256, kernel_size=3),
            nn.Conv2d(256, 128, kernel_size=1),
            nn.Conv2d(128, 256, kernel_size=3)
        ])
        # Per-source heads; channel counts look like 4*(anchors per location)
        # for loc and 2*(anchors) for conf (16/24 vs 8/12) — confirm against
        # the anchor configuration.
        self.loc = nn.ModuleList([
            nn.Conv2d(512, 16, kernel_size=3, padding=1),
            nn.Conv2d(512, 24, kernel_size=3, padding=1),
            nn.Conv2d(512, 24, kernel_size=3, padding=1),
            nn.Conv2d(256, 24, kernel_size=3, padding=1),
            nn.Conv2d(256, 16, kernel_size=3, padding=1),
            nn.Conv2d(256, 16, kernel_size=3, padding=1)
        ])
        self.conf = nn.ModuleList([
            nn.Conv2d(512, 8, kernel_size=3, padding=1),
            nn.Conv2d(512, 12, kernel_size=3, padding=1),
            nn.Conv2d(512, 12, kernel_size=3, padding=1),
            nn.Conv2d(256, 12, kernel_size=3, padding=1),
            nn.Conv2d(256, 8, kernel_size=3, padding=1),
            nn.Conv2d(256, 8, kernel_size=3, padding=1)
        ])

    def forward(self, z, z_mask, x, x_mask):
        """Training pass on a (z, x) batch.

        Returns (loc, conf) with shapes (B, num_anchors, 4) and (B, num_anchors, 2).
        """
        sources = list()
        loc = list()
        conf = list()
        # Extract features; r3 is a skip feature used later by the Refine block.
        z, _ = self.base(z, z_mask)
        x, r3 = self.base(x, x_mask, use_mask=self.use_mask)
        # Fuse the two branches by channel concatenation.
        x = torch.cat((x, z), dim=1)
        # Three GC stages, each with a residual 3x3 conv branch.
        x = self.GC1(x)
        r = self.convG1(F.relu(x))
        x = x + r
        x = self.GC2(F.relu(x))
        r = self.convG2(F.relu(x))
        x = x + r
        x = self.GC3(F.relu(x))
        r = self.convG3(F.relu(x))
        x = x + r
        x = self.RF(r3, x)
        sources.append(x)
        # apply extra layers and cache source layer outputs
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                sources.append(x)
        # apply multibox head to source layers
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        # Flatten per-source predictions and concatenate along the anchor axis.
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        return loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1, 2)

    def temple(self, z, z_mask):
        """Cache the exemplar embedding (and SiamFC guide state) for tracking."""
        if self.use_correlation_guide:
            self.anchors = utils.generate_anchors(self.cfg).cuda()
            self.point_form_anchors = bbox_utils.point_form(self.anchors).cuda()
            self.siamFC = SiamFC(root_pretrained=self.cfg.PATH.PRETRAINED_SIAMFC).cuda()
            self.siamFC.train()
            # Central 151x151 crop of the exemplar — presumably the SiamFC
            # template region; confirm against the SiamFC input size.
            self.z_cropped = z.unsqueeze(0)[:, :, 75:75 + 151, 75:75 + 151]
        self.z_embedding, _ = self.base(z.unsqueeze(0), z_mask.unsqueeze(0))

    def infer(self, x, x_mask):
        """Tracking pass on a single search image; uses the cached exemplar.

        Returns (loc, conf) flattened to (num_anchors, 4) and (num_anchors, 2).
        """
        sources = list()
        loc = list()
        conf = list()
        if self.use_correlation_guide:
            # Run SiamFC on the raw (0-255) crops and locate the peak response.
            correlation = self.siamFC(self.z_cropped * 255., x.unsqueeze(0) * 255.)
            padding = 11
            map_dim = 38
            full_map_dim = 60
            index = correlation.argmax()
            # Peak position as a fraction of the (padded) full map.
            i, j = (padding + index // map_dim).float() / full_map_dim, (padding + index % map_dim).float() / full_map_dim
            # Anchors whose boxes contain the correlation peak.
            anchor_indices = utils.inside((j, i), self.point_form_anchors)
        x, r3 = self.base(x.unsqueeze(0), x_mask.unsqueeze(0), use_mask=self.use_mask)
        x = torch.cat((x, self.z_embedding), dim=1)
        # Same fusion trunk as forward().
        x = self.GC1(x)
        r = self.convG1(F.relu(x))
        x = x + r
        x = self.GC2(F.relu(x))
        r = self.convG2(F.relu(x))
        x = x + r
        x = self.GC3(F.relu(x))
        r = self.convG3(F.relu(x))
        x = x + r
        x = self.RF(r3, x)
        sources.append(x)
        # apply extra layers and cache source layer outputs
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                sources.append(x)
        # apply multibox head to source layers
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        if self.use_correlation_guide:
            # Force anchors away from the correlation peak to background.
            conf = conf.view(-1, 2)
            conf[~anchor_indices, 0] = 1e5
            conf[~anchor_indices, 1] = -1e5
        return loc.view(-1, 4), conf.view(-1, 2)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,106
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/trackers/siamRPNBIG.py
|
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from . import Tracker
import utils
import cv2
def generate_anchor(total_stride, scales, ratios, score_size):
    """Build the SiamRPN anchor grid.

    Returns a float32 array of shape (score_size**2 * len(ratios) * len(scales), 4)
    in (cx, cy, w, h) form, centered on a score_size x score_size grid spaced
    by total_stride and symmetric around the origin.
    """
    anchor_num = len(ratios) * len(scales)
    base = np.zeros((anchor_num, 4), dtype=np.float32)
    size = total_stride * total_stride
    row = 0
    for ratio in ratios:
        # Base width/height for this aspect ratio (area ~= stride**2).
        ws = int(np.sqrt(size / ratio))
        hs = int(ws * ratio)
        for scale in scales:
            base[row, 2] = ws * scale
            base[row, 3] = hs * scale
            row += 1
    # Replicate the base anchors over every grid cell.
    anchor = np.tile(base, score_size * score_size).reshape((-1, 4))
    ori = - (score_size // 2) * total_stride
    coords = [ori + total_stride * step for step in range(score_size)]
    xx, yy = np.meshgrid(coords, coords)
    xx = np.tile(xx.flatten(), (anchor_num, 1)).flatten()
    yy = np.tile(yy.flatten(), (anchor_num, 1)).flatten()
    anchor[:, 0] = xx.astype(np.float32)
    anchor[:, 1] = yy.astype(np.float32)
    return anchor
def get_subwindow_tracking(im, pos, model_sz, original_sz, avg_chans, out_mode='torch'):
    """Crop a square window of side `original_sz` centered at `pos` from `im`,
    pad out-of-image regions with the per-channel mean `avg_chans`, and resize
    the patch to `model_sz`.

    Returns a CHW float tensor when out_mode selects torch, otherwise the raw
    HWC numpy patch.
    """
    if isinstance(pos, float):
        pos = [pos, pos]
    sz = original_sz
    im_sz = im.shape
    c = (original_sz+1) / 2
    # Window corners in image coordinates; they may fall outside the image.
    context_xmin = round(pos[0] - c)  # floor(pos(2) - sz(2) / 2);
    context_xmax = context_xmin + sz - 1
    context_ymin = round(pos[1] - c)  # floor(pos(1) - sz(1) / 2);
    context_ymax = context_ymin + sz - 1
    # Padding needed on each side to cover the out-of-image part.
    left_pad = int(max(0., -context_xmin))
    top_pad = int(max(0., -context_ymin))
    right_pad = int(max(0., context_xmax - im_sz[1] + 1))
    bottom_pad = int(max(0., context_ymax - im_sz[0] + 1))
    # Shift the window into the padded canvas' coordinate frame.
    context_xmin = context_xmin + left_pad
    context_xmax = context_xmax + left_pad
    context_ymin = context_ymin + top_pad
    context_ymax = context_ymax + top_pad
    # zzp: a more easy speed version
    r, c, k = im.shape  # note: `c` is reused here as the image width
    if any([top_pad, bottom_pad, left_pad, right_pad]):
        # Build a padded canvas and fill the borders with the mean color.
        te_im = np.zeros((r + top_pad + bottom_pad, c + left_pad + right_pad, k), np.uint8)  # 0 is better than 1 initialization
        te_im[top_pad:top_pad + r, left_pad:left_pad + c, :] = im
        if top_pad:
            te_im[0:top_pad, left_pad:left_pad + c, :] = avg_chans
        if bottom_pad:
            te_im[r + top_pad:, left_pad:left_pad + c, :] = avg_chans
        if left_pad:
            te_im[:, 0:left_pad, :] = avg_chans
        if right_pad:
            te_im[:, c + left_pad:, :] = avg_chans
        im_patch_original = te_im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
    else:
        im_patch_original = im[int(context_ymin):int(context_ymax + 1), int(context_xmin):int(context_xmax + 1), :]
    if not np.array_equal(model_sz, original_sz):
        im_patch = cv2.resize(im_patch_original, (model_sz, model_sz))
    else:
        im_patch = im_patch_original
    # NOTE(review): `out_mode in 'torch'` is a substring test, so any substring
    # of "torch" (e.g. 'or', '') also selects tensor output — probably `==` was
    # intended; callers in this file always use the default, so it is benign.
    return im_to_torch(im_patch) if out_mode in 'torch' else im_patch
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; tensors pass through unchanged.

    Raises ValueError for anything that is neither.
    """
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if torch.is_tensor(ndarray):
        return ndarray
    raise ValueError("Cannot convert {} to torch tensor".format(type(ndarray)))
def im_to_torch(img):
    """Convert an HWC image array to a CHW float tensor."""
    chw = np.transpose(img, (2, 0, 1))  # C*H*W
    return to_torch(chw).float()
def cxy_wh_2_rect(pos, sz):
    """Center (cx, cy) + size (w, h) -> 0-indexed [x, y, w, h] rect."""
    half_w, half_h = sz[0] / 2, sz[1] / 2
    return np.array([pos[0] - half_w, pos[1] - half_h, sz[0], sz[1]])
def rect_2_cxy_wh(rect):
    """0-indexed [x, y, w, h] rect -> (center (cx, cy), size (w, h)) arrays."""
    center = np.array([rect[0] + rect[2] / 2, rect[1] + rect[3] / 2])
    size = np.array([rect[2], rect[3]])
    return center, size
class TrackerConfig(object):
    """Hyper-parameters for the SiamRPN tracker."""
    # These are the default hyper-params for DaSiamRPN 0.3827
    windowing = 'cosine'  # to penalize large displacements [cosine/uniform]
    # Params from the network architecture, have to be consistent with the training
    exemplar_size = 127  # input z size
    instance_size = 271  # input x size (search region)
    total_stride = 8  # network output stride
    score_size = int((instance_size-exemplar_size)/total_stride+1)  # score map side length
    context_amount = 0.5  # context amount for the exemplar
    ratios = [0.33, 0.5, 1, 2, 3]  # anchor aspect ratios
    scales = [8, ]  # anchor scales
    anchor_num = len(ratios) * len(scales)  # anchors per score-map location
    anchor = []  # filled by generate_anchor() in TrackerSiamRPNBIG.init()
    penalty_k = 0.055  # strength of the size/ratio-change penalty
    window_influence = 0.42  # weight of the cosine window in the final score
    lr = 0.295  # base interpolation rate for the size update
class TrackerSiamRPNBIG(Tracker):
def __init__(self, net, checkpoint=None, cfg=None):
super(TrackerSiamRPNBIG, self).__init__("TrackerSiamRPNBIG", image_mode="BGR")
self.net = net
if checkpoint is None:
print("Loading pretrained weights.")
self.net.load_state_dict(torch.load(cfg.PATH.PRETRAINED_SIAMRPN))
else:
utils.load_checkpoint(checkpoint, self.net)
self.net.eval()
if torch.cuda.is_available():
self.net.cuda()
def init(self, im, init_rect):
target_pos, target_sz = rect_2_cxy_wh(init_rect)
self.state = dict()
p = TrackerConfig()
self.state['im_h'] = im.shape[0]
self.state['im_w'] = im.shape[1]
if ((target_sz[0] * target_sz[1]) / float(self.state['im_h'] * self.state['im_w'])) < 0.004:
p.instance_size = 287 # small object big search region
p.score_size = int((p.instance_size - p.exemplar_size) / p.total_stride + 1)
p.anchor = generate_anchor(p.total_stride, p.scales, p.ratios, p.score_size)
avg_chans = np.mean(im, axis=(0, 1))
wc_z = target_sz[0] + p.context_amount * sum(target_sz)
hc_z = target_sz[1] + p.context_amount * sum(target_sz)
s_z = round(np.sqrt(wc_z * hc_z))
# initialize the exemplar
z_crop = get_subwindow_tracking(im, target_pos, p.exemplar_size, s_z, avg_chans)
z = Variable(z_crop.unsqueeze(0))
self.net.temple(z.cuda())
if p.windowing == 'cosine':
window = np.outer(np.hanning(p.score_size), np.hanning(p.score_size))
elif p.windowing == 'uniform':
window = np.ones((p.score_size, p.score_size))
window = np.tile(window.flatten(), p.anchor_num)
self.state['p'] = p
self.state['avg_chans'] = avg_chans
self.state['window'] = window
self.state['target_pos'] = target_pos
self.state['target_sz'] = target_sz
def update(self, im, iter=0):
p = self.state['p']
avg_chans = self.state['avg_chans']
window = self.state['window']
target_pos = self.state['target_pos']
target_sz = self.state['target_sz']
wc_z = target_sz[1] + p.context_amount * sum(target_sz)
hc_z = target_sz[0] + p.context_amount * sum(target_sz)
s_z = np.sqrt(wc_z * hc_z)
scale_z = p.exemplar_size / s_z
d_search = (p.instance_size - p.exemplar_size) / 2
pad = d_search / scale_z
s_x = s_z + 2 * pad
# extract scaled crops for search region x at previous target position
x_crop = Variable(get_subwindow_tracking(im, target_pos, p.instance_size, round(s_x), avg_chans).unsqueeze(0))
target_pos, target_sz, score = self.tracker_eval(self.net, x_crop.cuda(), target_pos, target_sz * scale_z, window,
scale_z, p)
target_pos[0] = max(0, min(self.state['im_w'], target_pos[0]))
target_pos[1] = max(0, min(self.state['im_h'], target_pos[1]))
target_sz[0] = max(10, min(self.state['im_w'], target_sz[0]))
target_sz[1] = max(10, min(self.state['im_h'], target_sz[1]))
self.state['target_pos'] = target_pos
self.state['target_sz'] = target_sz
self.state['score'] = score
res = cxy_wh_2_rect(self.state['target_pos'], self.state['target_sz'])
return res
def tracker_eval(self, net, x_crop, target_pos, target_sz, window, scale_z, p):
    """Score the search crop and regress the best anchor into a new box.

    *target_sz* is expected in the scaled (exemplar) coordinate frame; the
    returned position/size are in original-image coordinates.
    """
    delta, score = net.infer(x_crop)
    delta = delta.permute(1, 2, 3, 0).contiguous().view(4, -1).data.cpu().numpy()
    score = F.softmax(
        score.permute(1, 2, 3, 0).contiguous().view(2, -1), dim=0).data[1, :].cpu().numpy()

    # Decode the regression offsets against the anchor grid.
    delta[0, :] = delta[0, :] * p.anchor[:, 2] + p.anchor[:, 0]
    delta[1, :] = delta[1, :] * p.anchor[:, 3] + p.anchor[:, 1]
    delta[2, :] = np.exp(delta[2, :]) * p.anchor[:, 2]
    delta[3, :] = np.exp(delta[3, :]) * p.anchor[:, 3]

    def _sym_ratio(r):
        # Symmetric ratio: >= 1, identical for r and 1/r.
        return np.maximum(r, 1. / r)

    def _padded_size(w, h):
        pad = (w + h) * 0.5
        return np.sqrt((w + pad) * (h + pad))

    # Penalize large scale / aspect-ratio changes relative to the last box.
    scale_pen = _sym_ratio(_padded_size(delta[2, :], delta[3, :])
                           / _padded_size(target_sz[0], target_sz[1]))
    ratio_pen = _sym_ratio((target_sz[0] / target_sz[1]) / (delta[2, :] / delta[3, :]))
    penalty = np.exp(-(ratio_pen * scale_pen - 1.) * p.penalty_k)
    pscore = penalty * score
    # Blend in the cosine-window prior around the previous position.
    pscore = pscore * (1 - p.window_influence) + window * p.window_influence
    best = np.argmax(pscore)

    # Map the winning box back to original-image scale and smooth the size.
    target = delta[:, best] / scale_z
    prev_sz = target_sz / scale_z
    lr = penalty[best] * score[best] * p.lr
    new_pos = np.array([target[0] + target_pos[0], target[1] + target_pos[1]])
    new_sz = np.array([prev_sz[0] * (1 - lr) + target[2] * lr,
                       prev_sz[1] * (1 - lr) + target[3] * lr])
    return new_pos, new_sz, score[best]
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,107
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/configs/__init__.py
|
from .defaults import cfg
from .demo import demo_cfg
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,108
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/layers/__init__.py
|
from .base import BaseNet
from .global_conv import GC
from .refine import Refine, Refine2
from .correlate import correlate
from .siamFC import SiamFC
from .resnet import ResNet
from .alexnet import AlexNet
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,109
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/siamRPNBIG.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
Original SiamRPN network.
During training: use forward(z, x).
For tracking: call temple() once on the exemplar, and use infer on the search images afterwards.
"""
def correlate(x, z):
    """Per-sample cross-correlation: convolve x[i] with its own kernel bank z[i].

    x: (N, C, H, W) search features; z: (N, out, C, kH, kW) kernels.
    Returns the concatenated responses, shape (N, out, H', W').
    """
    responses = [F.conv2d(x[i].unsqueeze(0), z[i]) for i in range(x.size(0))]
    return torch.cat(responses, dim=0)
class SiamRPNBIG(nn.Module):
    """SiamRPN network with the large AlexNet-style backbone.

    Training: call forward(z, x) with paired exemplar/search batches.
    Tracking: call temple(z) once on the exemplar, then infer(x) per frame.
    """

    def __init__(self, cfg, feat_in=512, feature_out=512, anchor=5):
        super(SiamRPNBIG, self).__init__()
        self.anchor = anchor            # anchors per spatial position
        self.feature_out = feature_out  # channels of the correlation features
        # Shared backbone. The layer indices matter: freeze_params() refers
        # to conv layers [0, 4, 8] and batch-norm layers [1, 5, 9] below.
        self.featureExtract = nn.Sequential(
            nn.Conv2d(3, 192, 11, stride=2),
            nn.BatchNorm2d(192),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, stride=2),
            nn.Conv2d(192, 512, 5),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(3, stride=2),
            nn.Conv2d(512, 768, 3),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
            nn.Conv2d(768, 768, 3),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
            nn.Conv2d(768, 512, 3),
            nn.BatchNorm2d(512),
        )
        self.cfg = cfg
        # Regression branch: conv_r1 maps exemplar features to per-anchor
        # box-regression kernels; conv_r2 projects the search features.
        self.conv_r1 = nn.Conv2d(feat_in, feature_out * 4 * anchor, 3)
        self.conv_r2 = nn.Conv2d(feat_in, feature_out, 3)
        # Classification branch (2 logits per anchor).
        self.conv_cls1 = nn.Conv2d(feat_in, feature_out * 2 * anchor, 3)
        self.conv_cls2 = nn.Conv2d(feat_in, feature_out, 3)
        self.regress_adjust = nn.Conv2d(4 * anchor, 4 * anchor, 1)
        # Correlation kernels produced by temple(); empty until an exemplar
        # has been seen.
        self.r1_kernel = []
        self.cls1_kernel = []
        # Load pretrained AlexNet weights
        self.reset_params()
        self.freeze_params()

    def reset_params(self):
        """Overwrite matching state-dict entries with the pretrained
        AlexNet-BIG weights at cfg.PATH.ALEXNETBIG_WEIGHTS."""
        model_dict = self.state_dict()
        model_dict.update(torch.load(self.cfg.PATH.ALEXNETBIG_WEIGHTS))
        self.load_state_dict(model_dict)

    def load_pretrained(self):
        """Load a fully trained model from cfg.PATH.PRETRAINED_MODEL."""
        self.load_state_dict(torch.load(self.cfg.PATH.PRETRAINED_MODEL))

    def freeze_params(self):
        # As stated in the paper, freeze the first 3 conv layers.
        for i in [0, 4, 8]:
            for p in self.featureExtract[i].parameters():
                p.requires_grad = False
        # Set the associated batch norm layers to evaluation mode.
        # NOTE(review): assigning `.requires_grad` on an nn.Module only sets
        # a plain attribute — it does NOT freeze the BN affine parameters.
        # Only the `.eval()` call has an effect here; confirm intent.
        for i in [1, 5, 9]:
            self.featureExtract[i].requires_grad = False
            self.featureExtract[i].eval()

    def infer(self, x):
        """Tracking-time pass over a search batch; requires a prior temple()
        call. Returns (regression map, classification map)."""
        x_f = self.featureExtract(x)
        return self.regress_adjust(F.conv2d(self.conv_r2(x_f), self.r1_kernel)), \
            F.conv2d(self.conv_cls2(x_f), self.cls1_kernel)

    def temple(self, z):
        """Precompute the per-anchor correlation kernels from exemplar z."""
        z_f = self.featureExtract(z)
        r1_kernel_raw = self.conv_r1(z_f)
        cls1_kernel_raw = self.conv_cls1(z_f)
        kernel_size = r1_kernel_raw.data.size()[-1]
        # Reshape raw maps into conv2d weight layout (out, in, kH, kW).
        self.r1_kernel = r1_kernel_raw.view(self.anchor * 4, self.feature_out, kernel_size, kernel_size)
        self.cls1_kernel = cls1_kernel_raw.view(self.anchor * 2, self.feature_out, kernel_size, kernel_size)

    def forward(self, z, x):
        """Training pass: correlate each search sample's features with the
        kernels generated from its paired exemplar."""
        z_f = self.featureExtract(z)
        x_f = self.featureExtract(x)
        r1_kernel_raw = self.conv_r1(z_f)
        cls1_kernel_raw = self.conv_cls1(z_f)
        batch_size, kernel_size = z.size(0), r1_kernel_raw.size(-1)
        r1_kernel = r1_kernel_raw.view(batch_size, self.anchor * 4, self.feature_out, kernel_size, kernel_size)
        cls1_kernel = cls1_kernel_raw.view(batch_size, self.anchor * 2, self.feature_out, kernel_size, kernel_size)
        return (self.regress_adjust(correlate(self.conv_r2(x_f), r1_kernel)),
                correlate(self.conv_cls2(x_f), cls1_kernel))
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,110
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/trackers/__init__.py
|
import cv2
import numpy as np
import time
from utils.openvot_viz import show_frame
class Tracker(object):
    """Abstract tracker interface: subclasses implement init() and update().

    track() drives a whole sequence, timing every frame.
    """

    def __init__(self, name, image_mode="RGB"):
        self.name = name
        self.image_mode = image_mode

    def init(self, image, init_rect):
        raise NotImplementedError()

    def update(self, image, iter):
        raise NotImplementedError()

    def track(self, img_files, init_rect, visualize=False):
        """Run the tracker over *img_files*; returns (bndboxes, speed_fps)."""
        n_frames = len(img_files)
        bndboxes = np.zeros((n_frames, 4))
        bndboxes[0, :] = init_rect
        speed_fps = np.zeros(n_frames)
        for f, path in enumerate(img_files):
            image = cv2.imread(path, cv2.IMREAD_COLOR)
            if self.image_mode == "RGB":
                # OpenCV loads BGR; convert when the tracker expects RGB.
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            t0 = time.time()
            if f == 0:
                self.init(image, init_rect)
            else:
                bndboxes[f, :] = self.update(image, f)
            speed_fps[f] = 1. / (time.time() - t0)
            if visualize:
                show_frame(image, bndboxes[f, :], fig_n=1)
        return bndboxes, speed_fps
from .tracker import TrackerDefault
from .siamRPNBIG import TrackerSiamRPNBIG
def load_tracker(net, checkpoint, cfg):
    """Build the tracker matching cfg.MODEL.NET around *net*.

    An empty checkpoint path is treated as "no checkpoint".
    """
    ckpt = None if checkpoint == "" else checkpoint
    if cfg.MODEL.NET == "SiamRPNBIG":
        return TrackerSiamRPNBIG(net, ckpt, cfg)
    return TrackerDefault(net, ckpt, cfg)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,111
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/datasets/__init__.py
|
from .imagenet import ImageNetVID
from .trackingnet import TrackingNet
from .simple import SimpleSampler
from .pairwise import PairSampler
from .otb import OTB
from .coco import CocoDetection, COCODistractor, COCOPositivePair, COCONegativePair
from .uav import UAV
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,112
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/datasets/simple.py
|
from __future__ import absolute_import
import numpy as np
import torch
from torch.utils.data import Dataset
import cv2
from utils.bbox_utils import to_percentage_coords
class SimpleSampler(Dataset):
    """Samples one random annotated frame per video from a base dataset.

    Each base video appears *pairs_per_video* times per epoch.
    """

    def __init__(self, base_dataset, transform=None, pairs_per_video=1):
        super().__init__()
        self.base_dataset = base_dataset
        self.transform = transform
        self.pairs_per_video = pairs_per_video
        base_indices = np.arange(len(self.base_dataset), dtype=int)
        self.indices = np.tile(base_indices, pairs_per_video)

    def __getitem__(self, index):
        if index >= len(self):
            raise IndexError('list index out of range')
        video_idx = self.indices[index]
        img_files, anno = self.base_dataset[video_idx]
        frame = np.random.choice(len(img_files))
        img = cv2.imread(img_files[frame], cv2.IMREAD_COLOR)
        # Work in percentage coordinates so transforms are size-agnostic.
        bbox = to_percentage_coords(anno[frame, :], img.shape)
        if self.transform is not None:
            img, bbox = self.transform(img, bbox)
        # BGR -> RGB, scale to [0, 1], channels-first for PyTorch.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.
        return (torch.from_numpy(img).permute(2, 0, 1).float(),
                torch.from_numpy(bbox).float())

    def __len__(self):
        return len(self.indices)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,113
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/train.py
|
import argparse
import models
import trainers
from configs import cfg
# Command-line entry point: build the configured model and start training.
parser = argparse.ArgumentParser(description="Reference Guided RPN Training")
parser.add_argument(
    "--config-file",
    default="",
    metavar="FILE",
    help="path to config file",
    type=str,
)
parser.add_argument(
    # All remaining positional tokens are collected as config overrides.
    "opts",
    help="Modify config options using the command-line",
    default=None,
    nargs=argparse.REMAINDER,
)
args = parser.parse_args()

# Layer the optional config file, then the CLI overrides, onto the defaults
# and freeze so later code cannot mutate the config.
if args.config_file:
    cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()

# NOTE(review): this runs at import time — consider wrapping in a main()
# with an `if __name__ == "__main__":` guard so importing is side-effect free.
net = models.load_net(cfg.MODEL.NET, cfg)
trainer = trainers.Trainer(net, cfg)
trainer.train()
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,114
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/__init__.py
|
from .siamRPNBIG import SiamRPNBIG
from .siamConcatRPN import SiamConcatRPN
from .siamBroadcastRPN import SiamBroadcastRPN
def load_net(model_name, cfg):
    """Instantiate the model class registered in this module under *model_name*.

    Raises Exception("No model named ...") when the name is unknown.
    """
    try:
        # Only the name lookup is guarded: a failure inside the model's own
        # constructor (e.g. a missing weight file) must surface as its real
        # error instead of being masked as an unknown-model error, which is
        # what the previous blanket `except Exception` did.
        net_cls = globals()[model_name]
    except KeyError:
        raise Exception("No model named {}".format(model_name))
    return net_cls(cfg)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,115
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/datasets/imagenet.py
|
from __future__ import absolute_import, division
import os
import glob
import xml.etree.ElementTree as ET
import numpy as np
import six
import random
class ImageNetVID(object):
    """ILSVRC 2015 VID dataset.

    Indexing returns (img_files, anno) for one randomly chosen track of a
    video; bounding boxes are in x1y1x2y2 format.
    """

    def __init__(self, root_dir, subset='train', rand_choice=True):
        super(ImageNetVID, self).__init__()
        self.root_dir = root_dir
        self.rand_choice = rand_choice
        if not self._check_integrity():
            raise Exception('Dataset not found or corrupted. ')
        if subset == 'val':
            pattern = os.path.join(self.root_dir, 'Data/VID/val/ILSVRC2015_val_*')
            self.seq_dirs = sorted(glob.glob(pattern))
            self.seq_names = [os.path.basename(d) for d in self.seq_dirs]
            self.anno_dirs = [os.path.join(self.root_dir, 'Annotations/VID/val', name)
                              for name in self.seq_names]
        elif subset == 'train':
            pattern = os.path.join(self.root_dir, 'Data/VID/train/ILSVRC*/ILSVRC*')
            self.seq_dirs = sorted(glob.glob(pattern))
            self.seq_names = [os.path.basename(d) for d in self.seq_dirs]
            # Annotation paths mirror the last two path components of the data dirs.
            self.anno_dirs = [os.path.join(self.root_dir, 'Annotations/VID/train',
                                           *d.split('/')[-2:])
                              for d in self.seq_dirs]
        else:
            raise Exception('Unknown subset.')

    def __getitem__(self, index):
        if isinstance(index, six.string_types):
            if index not in self.seq_names:
                raise Exception('Sequence {} not found.'.format(index))
            index = self.seq_names.index(index)
        elif self.rand_choice:
            # Ignore the numeric index and pick a random sequence instead.
            index = np.random.randint(len(self.seq_names))
        anno_files = sorted(glob.glob(os.path.join(self.anno_dirs[index], '*.xml')))
        per_frame = [ET.ElementTree(file=f).findall('object') for f in anno_files]
        # Pick, uniformly at random, a track id that occurs at least twice.
        ids, counts = np.unique(
            [o.find('trackid').text for frame in per_frame for o in frame],
            return_counts=True)
        track_id = random.choice(ids[counts >= 2])
        frames, boxes = [], []
        for f, frame in enumerate(per_frame):
            for o in frame:
                if o.find('trackid').text != track_id:
                    continue
                frames.append(f)
                boxes.append([int(o.find('bndbox/xmin').text),
                              int(o.find('bndbox/ymin').text),
                              int(o.find('bndbox/xmax').text),
                              int(o.find('bndbox/ymax').text)])
        img_files = [os.path.join(self.seq_dirs[index], '%06d.JPEG' % f) for f in frames]
        return img_files, np.array(boxes)

    def __len__(self):
        return len(self.seq_names)

    def _check_integrity(self):
        # The root must exist and be non-empty.
        return os.path.isdir(self.root_dir) and len(os.listdir(self.root_dir)) > 0
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,116
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/trackers/tracker.py
|
import torch
import torch.nn.functional as F
from . import Tracker
import utils
from torch import optim
from utils.bbox_utils import format_from_to, to_percentage_coords, to_absolute_coords, decode
from transforms.transforms import *
import matplotlib.pyplot as plt
from loss import MultiBoxLoss
class TrackerDefault(Tracker):
    """Default tracker around a siamese RPN network.

    init() registers the exemplar template; update() crops a search region
    around the previous position, runs the network, decodes the anchors and
    applies the SiamRPN post-processing heuristics.
    """

    def __init__(self, net, checkpoint, cfg):
        super().__init__("TrackerDefault")
        self.cfg = cfg
        self.net = net
        if checkpoint is not None:
            utils.load_checkpoint(checkpoint, self.net)
        self.net.eval()
        self.anchors = utils.generate_anchors(cfg)
        if torch.cuda.is_available():
            self.net.cuda()
            self.anchors = self.anchors.cuda()
        # Exemplar pipeline: crop with context, back to percent coords, resize.
        self.z_transform = Compose([
            ToAbsoluteCoords(),
            Crop(context_amount=cfg.TRAIN.CROP_CONTEXT_AMOUNT_Z, make_square=False),
            ToPercentCoords(),
            Resize(cfg.MODEL.Z_SIZE),
        ])
        self.x_crop = Crop(context_amount=cfg.TRAIN.CROP_CONTEXT_AMOUNT_X, return_rect=True, make_square=True)
        self.x_resize = Resize(size=cfg.MODEL.X_SIZE)
        self.z_crop = Crop(context_amount=cfg.TRAIN.CROP_CONTEXT_AMOUNT_Z, return_rect=True, make_square=False)
        self.z_resize = Resize(size=cfg.MODEL.Z_SIZE)
        self.criterion = MultiBoxLoss(self.anchors, self.cfg)

    def init(self, img, init_rect):
        """Register the first-frame target given as (x, y, w, h)."""
        self.init_size = init_rect[2:]
        # Convert bounding boxes to x1y1x2y2 format.
        bbox = format_from_to(init_rect, "x1y1wh", "x1y1x2y2")
        # Convert to percentage coordinates.
        self.bbox = to_percentage_coords(bbox, img.shape)
        img_z, bbox_z, _ = self.z_transform(img, self.bbox)
        self.z = self.cfg.MODEL.INPUT_RANGE * torch.from_numpy(img_z).permute(2, 0, 1).float().cuda() / 255.
        bbox_z = torch.from_numpy(bbox_z).float().cuda()
        self.net.temple(self.z, utils.mask_img(self.z, bbox_z))
        self.window = self.build_cosine_window()

    def update(self, _img, iter=0):
        """Track the target into frame *_img*; returns (x, y, w, h) absolute."""
        bbox_abs = to_absolute_coords(self.bbox, _img.shape)
        crop_img, bbox, _, crop_rect = self.x_crop(_img, bbox_abs)
        bbox = to_percentage_coords(bbox, crop_img.shape)
        img, bbox, _ = self.x_resize(crop_img, bbox)
        x = self.cfg.MODEL.INPUT_RANGE * torch.from_numpy(img).permute(2, 0, 1).float().cuda() / 255.
        bbox = torch.from_numpy(bbox).float().cuda()
        with torch.no_grad():
            loc_pred, conf_pred = self.net.infer(x, utils.mask_img(x, bbox, use_mask=self.cfg.TRAIN.USE_MASK))
        conf_pred = F.softmax(conf_pred, dim=1)[:, 1].cpu()
        conf_pred = conf_pred.numpy()
        pred_bboxs = decode(loc_pred, self.anchors, self.cfg.MODEL.ANCHOR_VARIANCES).cpu().numpy()
        # Map the bounding box coordinates to the entire image space.
        pred_bboxs = to_absolute_coords(pred_bboxs, crop_img.shape)
        pred_bboxs[:, :2] += crop_rect[:2]
        pred_bboxs[:, 2:] += crop_rect[:2]
        bbox_abs = format_from_to(bbox_abs, "x1y1x2y2", "x1y1wh")
        pred_bboxs = format_from_to(pred_bboxs, "x1y1x2y2", "x1y1wh")

        # SiamRPN post-processing heuristics ("engineering").
        def change(r):
            return np.maximum(r, 1. / r)

        def sz(w, h):
            pad = (w + h) * 1.0
            sz2 = (w + pad) * (h + pad)
            return np.sqrt(sz2)

        def sz_wh(wh):
            pad = (wh[0] + wh[1]) * 1.0
            sz2 = (wh[0] + pad) * (wh[1] + pad)
            return np.sqrt(sz2)

        if self.cfg.TRACKING.USE_ENGINEERING:
            # size penalty
            s_c = change(sz(pred_bboxs[:, 2], pred_bboxs[:, 3]) / (sz_wh(bbox_abs[2:])))  # scale penalty
            r_c = change((bbox_abs[2] / bbox_abs[3]) / (pred_bboxs[:, 2] / pred_bboxs[:, 3]))  # ratio penalty
            penalty = np.exp(-(r_c * s_c - 1.) * self.cfg.TRACKING.PENALTY_K)
            score = penalty * conf_pred
            # cosine window
            score = score * (1 - self.cfg.TRACKING.WINDOW_INFLUENCE) + self.window * self.cfg.TRACKING.WINDOW_INFLUENCE
        else:
            score = conf_pred
        best_score_id = np.argmax(score)
        pred_bbox = pred_bboxs[best_score_id]
        if self.cfg.TRACKING.USE_ENGINEERING:
            lr = penalty[best_score_id] * conf_pred[best_score_id] * self.cfg.TRACKING.LR
        else:
            lr = 1.0
        # Exponential smoothing of the box size.
        pred_bbox[2:] = bbox_abs[2:] * (1 - lr) + pred_bbox[2:] * lr
        # Prevent too large increase or decrease of the bounding box size
        pred_bbox[2:] = np.clip(pred_bbox[2:], self.init_size / 3, 3 * self.init_size)
        # Snap to image boundaries. _img.shape is (H, W, C) while
        # pred_bbox[:2] is (x, y): reverse to (W, H) so x is clipped by the
        # width and y by the height (bugfix: was `_img.shape[:2]`, i.e.
        # (H, W), which swapped the limits on non-square frames).
        pred_bbox[:2] = np.clip(pred_bbox[:2], 0., _img.shape[1::-1])
        # Save the predicted bbox in percentage x1y1x2y2 format.
        self.bbox = to_percentage_coords(format_from_to(pred_bbox, "x1y1wh", "x1y1x2y2"), _img.shape)
        return pred_bbox

    def build_cosine_window(self):
        """Flattened Hanning windows replicated per anchor, matching the
        per-feature-map anchor counts of utils.generate_anchors."""
        N = len(self.cfg.MODEL.FEATURE_MAPS_DIM)
        nb_anchors = []
        for k, f in enumerate(self.cfg.MODEL.FEATURE_MAPS_DIM):
            num_11_anchors = 2 if self.cfg.MODEL.ANCHOR_MAX_SIZES[k] != self.cfg.MODEL.ANCHOR_MIN_SIZES[k] else 1
            nb_anchors.append(num_11_anchors + 2 * len(self.cfg.MODEL.ANCHOR_ASPECT_RATIOS[k]))
        windows = [np.outer(np.hanning(dim), np.hanning(dim)) for dim in self.cfg.MODEL.FEATURE_MAPS_DIM]
        windows = [np.repeat(windows[i].flatten(), nb_anchors[i]) for i in range(N)]
        return np.concatenate(windows)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,117
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/utils/utils.py
|
from math import sqrt as sqrt
from itertools import product as product
import torch
import os
import shutil
def generate_anchors(cfg):
    """SSD-style prior boxes in normalized (cx, cy, w, h), clamped to [0, 1].

    One grid of priors per feature map in cfg.MODEL.FEATURE_MAPS_DIM; per
    cell: one square min-size box, optionally one intermediate square box,
    and a pair of boxes per aspect ratio.
    """
    model = cfg.MODEL
    priors = []
    for k, fmap_dim in enumerate(model.FEATURE_MAPS_DIM):
        # Grid resolution implied by the map's stride (invariant per map).
        f_k = model.X_SIZE / model.FEATURE_MAPS_STRIDES[k]
        s_k = model.ANCHOR_MIN_SIZES[k] / model.X_SIZE
        for i, j in product(range(fmap_dim), repeat=2):
            cx = (j + 0.5) / f_k
            cy = (i + 0.5) / f_k
            # Square box at the minimum size.
            priors.append([cx, cy, s_k, s_k])
            # Intermediate square box sqrt(s_k * s_k'), only when distinct.
            if model.ANCHOR_MAX_SIZES[k] != model.ANCHOR_MIN_SIZES[k]:
                s_prime = sqrt(s_k * (model.ANCHOR_MAX_SIZES[k] / model.X_SIZE))
                priors.append([cx, cy, s_prime, s_prime])
            # Both orientations for every extra aspect ratio.
            for ar in model.ANCHOR_ASPECT_RATIOS[k]:
                root = sqrt(ar)
                priors.append([cx, cy, s_k * root, s_k / root])
                priors.append([cx, cy, s_k / root, s_k * root])
    output = torch.tensor(priors, dtype=torch.float).view(-1, 4)
    output.clamp_(max=1, min=0)
    return output
def mask_img(img, bbox, use_mask=True):
    """
    Build a single-channel binary mask for ``img`` covering ``bbox``.

    img: (Tensor) image tensor of shape (..., H, W)
    bbox: (Tensor) box in point form, normalized to [0, 1] (it is scaled by
        the image size below)
    Output: mask of shape (1, H, W); all ones when ``use_mask`` is False.
    """
    height_width = img.shape[-2:]
    if use_mask is False:
        return img.new_ones(height_width).unsqueeze(0)
    mask = img.new_zeros(height_width)
    scale = img.new_tensor(height_width).float().repeat(2)
    # Scale to pixels, then clamp into the valid index range.
    coords = torch.clamp(torch.min((bbox * scale).floor(), scale - 1), min=0).int()
    mask[coords[1]:coords[3] + 1, coords[0]:coords[2] + 1] = 1
    return mask.unsqueeze(0)
def mask_imgs(imgs, bboxs, use_mask=True):
    """
    Batched version of :func:`mask_img`: one binary mask per image.

    imgs: (Tensor) batch of images, shape (batch, C, H, W)
    bboxs: (Tensor) normalized point-form boxes, shape (batch, 4)
    Output: masks of shape (batch, 1, H, W); all ones when ``use_mask`` is False.
    """
    batch_size, _, w, h = imgs.shape
    if use_mask is False:
        return imgs.new_ones(batch_size, w, h).unsqueeze(1)
    masks = imgs.new_zeros(batch_size, w, h)
    scale = imgs.new_tensor([w, h]).float().repeat(2)
    coords = torch.clamp(torch.min((bboxs * scale).floor(), scale - 1), min=0).int()
    for b in range(batch_size):
        x1, y1, x2, y2 = coords[b, 0], coords[b, 1], coords[b, 2], coords[b, 3]
        masks[b, y1:y2 + 1, x1:x2 + 1] = 1
    return masks.unsqueeze(1)
def save_checkpoint(state, data_dir, run_id=None, is_best=False):
    """Serialize ``state`` to data_dir/checkpoints[/run_id]/last.pth.tar, and
    copy it to best.pth.tar as well when ``is_best`` is set.

    Based on: https://github.com/cs230-stanford/cs230-code-examples
    """
    parts = [data_dir, "checkpoints"] + ([run_id] if run_id is not None else [])
    checkpoint_dir = os.path.join(*parts)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    filepath = os.path.join(checkpoint_dir, 'last.pth.tar')
    print("Saving checkpoint to: {}".format(filepath))
    torch.save(state, filepath)
    if is_best:
        shutil.copyfile(filepath, os.path.join(checkpoint_dir, 'best.pth.tar'))
def load_model(file_name, model):
    """Load a serialized state_dict from ``file_name`` into ``model`` and move
    it to the best available device.

    Args:
        file_name: (string) path to a saved state_dict
        model: (torch.nn.Module) model to load the parameters into

    Raises:
        FileNotFoundError: if ``file_name`` does not exist.
    """
    if not os.path.exists(file_name):
        # The original `raise("...")` would itself fail with a TypeError
        # (strings are not exceptions); raise a proper exception type.
        raise FileNotFoundError("File doesn't exist {}".format(file_name))
    print("Loading model: {}".format(file_name))
    # Fall back to CPU so the helper also works on machines without CUDA.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.load_state_dict(torch.load(file_name, map_location=device))
    model.to(device)
def load_checkpoint(checkpoint, model, optimizer=None):
    """Loads model parameters (state_dict) from file_path. If optimizer is provided, loads state_dict of
    optimizer assuming it is present in checkpoint.

    Args:
        checkpoint: (string) filename which needs to be loaded
        model: (torch.nn.Module) model for which the parameters are loaded
        optimizer: (torch.optim) optional: resume optimizer from checkpoint

    Returns:
        The epoch stored in the checkpoint.

    Raises:
        FileNotFoundError: if ``checkpoint`` does not exist.
    """
    if not os.path.exists(checkpoint):
        # The original `raise("...")` would itself fail with a TypeError
        # (strings are not exceptions); raise a proper exception type.
        raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
    print("Loading checkpoint: {}".format(checkpoint))
    # Fall back to CPU so checkpoints can be loaded on machines without CUDA.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(checkpoint, map_location=device)
    model.load_state_dict(checkpoint['state_dict'])
    model.to(device)
    if optimizer:
        optimizer.load_state_dict(checkpoint['optimizer'])
    return checkpoint['epoch']
def IoU(a, b):
    """Intersection-over-union of two boxes in (x1, y1, x2, y2) form."""
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    inter_w = max(0, min(a[2], b[2]) - max(a[0], b[0]))
    inter_h = max(0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = inter_w * inter_h
    return inter / (area_a + area_b - inter)
def IoUs(a, b):
    """Element-wise IoU between two equally-sized batches of point-form boxes."""
    areas_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    areas_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    inter_w = (torch.min(a[:, 2], b[:, 2]) - torch.max(a[:, 0], b[:, 0])).clamp(min=0)
    inter_h = (torch.min(a[:, 3], b[:, 3]) - torch.max(a[:, 1], b[:, 1])).clamp(min=0)
    inter = inter_w * inter_h
    return inter / (areas_a + areas_b - inter)
def inside(p, bbox):
    """
    p: point (x, y)
    bbox: batch of boxes in x1y1x2y2 format
    Returns a boolean mask marking the boxes that contain the point.
    """
    x_inside = (bbox[:, 0] <= p[0]) & (p[0] <= bbox[:, 2])
    y_inside = (bbox[:, 1] <= p[1]) & (p[1] <= bbox[:, 3])
    return x_inside & y_inside
def inside_margin(p, bbox):
    """
    p: point (x, y)
    bbox: batch of boxes in cxcywh format
    Returns a boolean mask marking the boxes whose center lies within 15% of
    the box size from the point.
    """
    dx = (bbox[:, 0] - p[0]).abs()
    dy = (bbox[:, 1] - p[1]).abs()
    return (dx < 0.15 * bbox[:, 2]) & (dy < 0.15 * bbox[:, 3])
def compute_accuracy(ground_truth, prediction, cls):
    """
    Fraction of ground-truth entries of class ``cls`` whose top-scoring
    prediction is ``cls``. Returns 1.0 when the class is absent.
    """
    mask = ground_truth == cls
    if mask.sum() == 0:  # no ground-truth element of the given class
        return 1.0
    # Sorting descending and taking column 0 yields each row's top class index.
    _, order = torch.sort(prediction[mask], descending=True, dim=1)
    top_class = order[:, 0]
    return (top_class == cls).float().sum() / mask.float().sum()
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,118
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/benchmark.py
|
import argparse
import models
import trackers
import experiments
from configs import cfg

# Command-line interface for evaluating a trained tracker on the OTB benchmark.
parser = argparse.ArgumentParser(description='Benchmark SiamBroadcastRPN on a dataset.')
parser.add_argument("--checkpoint")
parser.add_argument("--visualize", type=bool, default=False)
parser.add_argument("--sequences", nargs='+', default=[])
parser.add_argument("--version", default=2015)
parser.add_argument("--config-file", type=str, default="", metavar="FILE",
                    help="path to config file")
parser.add_argument("opts", default=None, nargs=argparse.REMAINDER,
                    help="Modify config options using the command-line")
args = parser.parse_args()

# Layer configuration: optional file first, then CLI overrides, then freeze.
if args.config_file:
    cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()

# An empty --sequences list means "evaluate every sequence".
if not args.sequences:
    args.sequences = None

net = models.load_net(cfg.MODEL.NET, cfg)
tracker = trackers.load_tracker(net, args.checkpoint, cfg)
experiment = experiments.ExperimentOTB(cfg, version=args.version, sequences=args.sequences)
experiment.run(tracker, visualize=args.visualize)
experiment.report([tracker.name], args=args)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,119
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/metrics/__init__.py
|
from .metrics import rect_iou, center_error
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,120
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/transforms/__init__.py
|
from .transforms import Transform
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,121
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/layers/base.py
|
import torch.nn as nn
from torchvision import models
import torch
class BaseNet(nn.Module):
    """ResNet-50 trunk (through layer3) with an extra single-channel stem that
    injects a mask encoding into the output of the first convolution."""

    def __init__(self):
        super().__init__()
        # Stem for the 1-channel mask input; its output is summed with conv1's.
        self.conv1_m = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=True)
        backbone = models.resnet50(pretrained=True)
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu      # 1/2, 64
        self.maxpool = backbone.maxpool
        self.res2 = backbone.layer1    # 1/4, 256
        self.res3 = backbone.layer2    # 1/8, 512
        self.res4 = backbone.layer3    # 1/16, 1024
        # ImageNet normalization constants, registered as buffers so they
        # follow the module across devices and into state_dicts.
        self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def forward(self, x, m=None, use_mask=True):
        """
        Input: frame ``x`` and optional mask ``m`` (required when use_mask).
        Returns the layer3 (1/16) and layer2 (1/8) feature maps.
        """
        x = (x - self.mean) / self.std
        x = self.conv1(x)
        if use_mask:
            x = x + self.conv1_m(m)
        x = self.bn1(x)
        x = self.relu(x)     # 1/2, 64
        x = self.maxpool(x)  # 1/4, 64
        x = self.res2(x)     # 1/4
        r3 = self.res3(x)    # 1/8
        x = self.res4(r3)    # 1/16
        return x, r3
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,122
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/experiments/__init__.py
|
from .otb import ExperimentOTB
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,123
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/loss.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import compute_accuracy
from utils.bbox_utils import point_form, jaccard, encode, decode
class MultiBoxLoss(nn.Module):
    """SSD-style multibox loss for the RPN head.

    Matches anchors to the single ground-truth box per batch element by IoU,
    then computes a regression loss over positive matches and a cross-entropy
    classification loss with hard negative mining.
    """

    def __init__(self, anchors, cfg):
        """
        Args:
            anchors: (Tensor) anchor boxes, shape [num_anchors, 4]; passed
                through point_form() below, so presumably cxcywh — confirm.
            cfg: project config node; cfg.TRAIN.REGRESSION_LOSS selects the
                regression criterion ("smooth_l1" or "l1").
        """
        super().__init__()
        self.anchors = anchors
        # Anchors converted once to corner (point) form for IoU matching.
        self.point_form_anchors = point_form(anchors)
        self.cfg = cfg
        if cfg.TRAIN.REGRESSION_LOSS == "smooth_l1":
            self.regression_loss = F.smooth_l1_loss
        elif cfg.TRAIN.REGRESSION_LOSS == "l1":
            self.regression_loss = F.l1_loss
        else:
            raise Exception("Unknown regression loss.")

    def forward(self, pred, gt_boxes):
        """Compute loss terms and monitoring statistics.

        Args:
            pred: (loc_pred, conf_pred) — per-anchor box offsets and 2-class
                confidence logits.
            gt_boxes: (Tensor) one ground-truth box per batch element, in the
                same coordinate space as point_form_anchors.

        Returns:
            (loss_loc, loss_cls, pos_accuracy, neg_accuracy, position_error,
             size_errors, N) where N is the total number of positive matches.
        """
        loc_pred, conf_pred = pred
        batch_size = conf_pred.size(0)
        """
        Labels: overlap => th_high : 1
        th_low < overlap < th_high : -1
        overlap <= th_low : 0
        """
        overlaps = jaccard(gt_boxes, self.point_form_anchors)  # Shape: [batch_size, num_anchors]
        # 1 = positive, -1 = ignored (between thresholds), 0 = negative.
        labels = (overlaps >= self.cfg.TRAIN.TH_HIGH).long() - ((self.cfg.TRAIN.TH_LOW < overlaps) &
                                                                (overlaps < self.cfg.TRAIN.TH_HIGH)).long()
        pos = labels == 1  # Shape: [batch_size, num_anchors]
        neg = labels == 0  # Shape: [batch_size, num_anchors]
        N = pos.sum().item()
        """Regression loss."""
        # Repeat the anchors on the batch dimension [batch_size, num_anchors, 4], and select only the positive matches
        matched_anchors = self.anchors.expand(batch_size, -1, -1)[pos]  # Shape: [num_pos, 4]
        # Indices of ground-truth boxes corresponding to positives matches
        i = pos.nonzero()[:, 0]  # Shape: [num_pos]
        # Repeat the ground-truth boxes according to the number of positive matches
        gt_boxes_repeat = gt_boxes[i]  # Shape: [num_pos, 4]
        loc_gt = encode(gt_boxes_repeat, matched_anchors, self.cfg.MODEL.ANCHOR_VARIANCES)
        loss_loc = self.regression_loss(loc_pred[pos], loc_gt, reduction="mean")
        """Classification loss."""
        # Hard negative mining, compute intermediate loss. Shape: [batch_size, num_anchors]
        loss_cls = F.cross_entropy(conf_pred.view(-1, 2), pos.long().view(-1), reduction="none").view(batch_size, -1)
        loss_cls[~neg] = 0  # Filter out non negative boxes
        # Double argsort: idx_rank holds each anchor's rank by descending loss.
        _, loss_idx = loss_cls.sort(1, descending=True)
        _, idx_rank = loss_idx.sort(1)
        num_pos = pos.sum(dim=1, keepdim=True)
        # Keep NEGPOS_RATIO * num_pos negatives, at least 10, at most all-but-one.
        num_neg = torch.clamp(torch.clamp(self.cfg.TRAIN.NEGPOS_RATIO * num_pos, min=10), max=pos.size(1) - 1)
        neg = idx_rank < num_neg.expand_as(idx_rank)  # Update negatives by picking the ones w. highest confidence loss
        # Classification loss including Positive and Negative examples
        pos_idx = pos.unsqueeze(2).expand_as(conf_pred)
        neg_idx = neg.unsqueeze(2).expand_as(conf_pred)
        conf_picked = conf_pred[(pos_idx + neg_idx).gt(0)].view(-1, 2)
        labels_picked = labels[(pos + neg).gt(0)]
        # Down-weight the (more numerous) negative class by 1/3.
        weight_balance = loc_pred.new_tensor([1/3, 1.])
        loss_cls = F.cross_entropy(conf_picked, labels_picked, weight=weight_balance, reduction="mean")
        # Compute accuracy and pixel error metrics
        pos_accuracy = compute_accuracy(labels_picked, conf_picked, 1)
        neg_accuracy = compute_accuracy(labels_picked, conf_picked, 0)
        img_size = self.cfg.MODEL.X_SIZE
        decoded_loc_pred = decode(loc_pred[pos], matched_anchors, self.cfg.MODEL.ANCHOR_VARIANCES)
        # Mean center distance and mean |w, h| error, both scaled to pixels.
        position_error = torch.norm((gt_boxes_repeat[:, :2] - decoded_loc_pred[:, :2]) * img_size, dim=1).mean()
        size_errors = (gt_boxes_repeat[:, 2:] - decoded_loc_pred[:, 2:]).abs().mean(dim=0) * img_size
        return loss_loc, loss_cls, pos_accuracy, neg_accuracy, position_error, size_errors, N
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,124
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/layers/refine.py
|
import torch.nn as nn
import torch.nn.functional as F
class Refine(nn.Module):
    """Refinement head: residual-refine a skip feature map, add the upsampled
    previous map, and residual-refine the merge."""

    def __init__(self, inplanes, planes, outplanes, scale_factor=2):
        super(Refine, self).__init__()
        self.convFS1 = nn.Conv2d(inplanes, outplanes, kernel_size=3, padding=1)
        self.convFS2 = nn.Conv2d(outplanes, planes, kernel_size=3, padding=1)
        self.convFS3 = nn.Conv2d(planes, outplanes, kernel_size=3, padding=1)
        self.convMM1 = nn.Conv2d(outplanes, planes, kernel_size=3, padding=1)
        self.convMM2 = nn.Conv2d(planes, outplanes, kernel_size=3, padding=1)
        self.scale_factor = scale_factor

    def forward(self, f, pm):
        # Residual refinement of the projected skip feature.
        skip = self.convFS1(f)
        residual = self.convFS3(F.relu(self.convFS2(F.relu(skip))))
        skip = skip + residual
        # Fuse with the upsampled previous map and refine once more.
        merged = skip + F.interpolate(pm, scale_factor=self.scale_factor, mode='bilinear')
        merged_residual = self.convMM2(F.relu(self.convMM1(F.relu(merged))))
        return merged + merged_residual
class Refine2(nn.Module):
    """Refinement head variant that projects the previous map with a 1x1 conv
    and upsamples it to the skip feature's spatial size.

    NOTE(review): convFS2 consumes convFS1's output (out_dim channels) but is
    declared with in_dim1 input channels, so this module only works when
    in_dim1 == out_dim — confirm against callers.
    """

    def __init__(self, in_dim1, in_dim2, mid_dim, out_dim):
        super(Refine2, self).__init__()
        self.convFS1 = nn.Conv2d(in_dim1, out_dim, kernel_size=3, padding=1)
        self.convFS2 = nn.Conv2d(in_dim1, mid_dim, kernel_size=3, padding=1)
        self.convFS3 = nn.Conv2d(mid_dim, out_dim, kernel_size=3, padding=1)
        self.convFS4 = nn.Conv2d(in_dim2, out_dim, kernel_size=1)
        self.convMM1 = nn.Conv2d(out_dim, mid_dim, kernel_size=3, padding=1)
        self.convMM2 = nn.Conv2d(mid_dim, out_dim, kernel_size=3, padding=1)

    def forward(self, f, pm):
        # Residual refinement of the projected skip feature.
        skip = self.convFS1(f)
        residual = self.convFS3(F.relu(self.convFS2(F.relu(skip))))
        skip = skip + residual
        # Project the previous map, then upsample it to the skip's resolution.
        projected = F.relu(self.convFS4(pm))
        merged = skip + F.interpolate(projected, size=f.size(-1), mode='bilinear')
        merged_residual = self.convMM2(F.relu(self.convMM1(F.relu(merged))))
        return merged + merged_residual
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,125
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/transforms/transforms.py
|
import cv2
import numpy as np
from utils.bbox_utils import format_from_to, to_absolute_coords, to_percentage_coords
MEAN = (104, 117, 123)
def base_transform(image, size):
    """Resize ``image`` to (size, size) and return it as float32.

    The original performed the float32 cast twice (once inside the resize
    expression and once again on the result); a single cast suffices.
    """
    return cv2.resize(image, (size, size)).astype(np.float32)
def jitter_transform(bbox):
    """Randomly jitter a box (x1y1x2y2 in, x1y1x2y2 out): gaussian noise on the
    corner position and on the size, each scaled by a quarter of the box size."""
    jittered = format_from_to(bbox.copy(), "x1y1x2y2", "x1y1wh")
    jittered[:2] += 0.25 * jittered[2:] * np.random.randn(2)
    jittered[2:] += 0.25 * jittered[2:] * np.random.randn(2)
    return format_from_to(jittered, "x1y1wh", "x1y1x2y2")
def get_image_size(image):
    """Return the (width, height) of an OpenCV-style (H, W, ...) image array."""
    height, width = image.shape[:2]
    return (width, height)
def crop(image, rect, fill):
    """
    Crop ``rect`` out of ``image``, padding with ``fill`` wherever the
    rectangle extends beyond the frame.

    rect: absolute coordinates; indexed as x1y1x2y2 below (the original
    comment said x1y1wh — presumably stale; confirm against callers).
    """
    # Largest overflow of the rect beyond any image border.
    overflow = np.concatenate((-rect[:2], rect[2:] - get_image_size(image)))
    pad = max(0, int(overflow.max()))
    padded = cv2.copyMakeBorder(image, pad, pad, pad, pad, cv2.BORDER_CONSTANT, value=fill)
    shifted = pad + rect
    return padded[shifted[1]:shifted[3], shifted[0]:shifted[2]]
def adjust_bbox(bbox, rect):
    """Clip ``bbox`` to ``rect`` and translate it into rect-local coordinates
    (both in absolute x1y1x2y2 form)."""
    clipped = bbox.copy()
    clipped[:2] = np.maximum(clipped[:2], rect[:2]) - rect[:2]
    clipped[2:] = np.minimum(clipped[2:], rect[2:]) - rect[:2]
    return clipped
class ConvertFromInts(object):
    """Cast the image to float32; boxes pass through unchanged."""

    def __call__(self, image, bbox, prev_bbox=None):
        as_float = image.astype(np.float32)
        return as_float, bbox, prev_bbox
class Compose(object):
    """Apply a sequence of transforms in order. ``False`` entries are skipped,
    which lets callers build pipelines with conditionally-enabled steps."""

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, bbox, prev_bbox=None):
        for transform in self.transforms:
            if transform is False:
                continue
            image, bbox, prev_bbox = transform(image, bbox, prev_bbox)
        return image, bbox, prev_bbox
class ToAbsoluteCoords(object):
    """Convert boxes from percentage to absolute pixel coordinates."""

    def __call__(self, image, bbox, prev_bbox=None):
        abs_bbox = to_absolute_coords(bbox.copy(), image.shape)
        abs_prev = None if prev_bbox is None else to_absolute_coords(prev_bbox.copy(), image.shape)
        return image, abs_bbox, abs_prev
class ToPercentCoords(object):
    """Convert boxes from absolute pixel to percentage coordinates."""

    def __call__(self, image, bbox, prev_bbox=None):
        pct_bbox = to_percentage_coords(bbox.copy(), image.shape)
        pct_prev = None if prev_bbox is None else to_percentage_coords(prev_bbox.copy(), image.shape)
        return image, pct_bbox, pct_prev
class Crop(object):
    """Crop a context region around a box (optionally with random resize and
    translation augmentation) and remap box coordinates into the crop.

    All boxes are in absolute x1y1x2y2 coordinates at this stage.
    """

    def __init__(self, mean=MEAN, context_amount=0.5,
                 random_translate=False, random_translate_range=0.3,
                 random_resize=False, random_resize_scale_min=0.35, random_resize_scale_max=1.5,
                 return_rect=False, center_at_pred=False, make_square=False):
        # Fill color used for out-of-frame padding.
        self.mean = mean
        # Amount of context added around the target box.
        self.context_amount = context_amount
        self.random_translate = random_translate
        self.random_translate_range = random_translate_range
        self.random_resize = random_resize
        self.random_resize_scale_min = random_resize_scale_min
        self.random_resize_scale_max = random_resize_scale_max
        # When True, __call__ also returns the crop rect (4-tuple return).
        self.return_rect = return_rect
        # When True and prev_bbox is given, crop around the predicted box.
        self.center_at_pred = center_at_pred
        # When True, scale w/h directly (preserves aspect ratio); otherwise the
        # sqrt-context branch below produces a square crop.
        self.make_square = make_square

    def __call__(self, image, bbox, prev_bbox=None):
        # If prev_bbox is provided (and center_at_pred is set), use it as the cropping area
        if self.center_at_pred and prev_bbox is not None:
            rect = prev_bbox.copy()
        else:
            rect = bbox.copy()
        # Convert to cxcywh
        rect = format_from_to(rect, "x1y1x2y2", "cxcywh")
        # Add context to the cropping area
        if not self.make_square:
            # SiamFC-style context: side = sqrt((w + ctx) * (h + ctx)) — a square.
            context = self.context_amount * rect[2:].sum()
            rect[2:] = np.sqrt((rect[2:] + context).prod())
        else:
            rect[2:] += 2 * self.context_amount * rect[2:]
        if self.random_resize:
            rect[2:] *= np.random.uniform(self.random_resize_scale_min, self.random_resize_scale_max)
        if self.random_translate:
            displacement = np.random.uniform(-1, 1, 2) * self.random_translate_range * rect[2:]
            rect[:2] -= displacement
        # Convert back to x1y1x2y2 format
        rect = format_from_to(rect, "cxcywh", "x1y1x2y2").astype(int)
        # Crop the image
        image = crop(image, rect, self.mean)
        # Adjust bounding box coordinates
        bbox = adjust_bbox(bbox, rect)
        if prev_bbox is not None:
            prev_bbox = adjust_bbox(prev_bbox, rect)
        # NOTE(review): return arity differs with return_rect — callers must match.
        if not self.return_rect:
            return image, bbox, prev_bbox
        else:
            return image, bbox, prev_bbox, rect
class Resize(object):
    """Resize the image to (size, size); box coordinates are untouched (they
    are expected to be percentage-based at this point in the pipeline)."""

    def __init__(self, size):
        self.size = size

    def __call__(self, image, bbox, prev_bbox=None):
        resized = cv2.resize(image, (self.size, self.size))
        return resized, bbox, prev_bbox
class RandomMirror(object):
    """With probability 1/2, flip the image horizontally and mirror the box
    x-coordinates (boxes are absolute x1y1x2y2 here)."""

    def __call__(self, image, bbox, prev_bbox=None):
        _, width, _ = image.shape
        if np.random.randint(2):
            image = image[:, ::-1]
            # New (x1, x2) = width - (x2, x1); y-coordinates are untouched.
            bbox[0::2] = width - bbox.copy()[2::-2]
            if prev_bbox is not None:
                prev_bbox[0::2] = width - prev_bbox.copy()[2::-2]
        return image, bbox, prev_bbox
class PhotometricDistort(object):
    """Photometric augmentation. Only random brightness is currently active;
    the contrast/hue/saturation pipeline is built but deliberately disabled."""

    def __init__(self):
        self.pd = [
            RandomContrast(),
            ConvertColor(transform='HSV'),
            RandomSaturation(),
            RandomHue(),
            ConvertColor(current='HSV', transform='BGR'),
            RandomContrast()
        ]
        self.rand_brightness = RandomBrightness()
        self.rand_light_noise = RandomLightingNoise()

    def __call__(self, image, bbox, prev_bbox=None):
        image = image.copy()
        image, bbox, prev_bbox = self.rand_brightness(image, bbox, prev_bbox)
        # Hue/saturation distortion intentionally disabled:
        # if np.random.randint(2):
        #     distort = Compose(self.pd[:-1])
        # else:
        #     distort = Compose(self.pd[1:])
        # image, bbox, prev_bbox = distort(image, bbox, prev_bbox)
        return image, bbox, prev_bbox
class RandomSaturation(object):
    """With probability 1/2, scale the saturation channel (channel 1 of an HSV
    float image) by a random factor in [lower, upper]."""

    def __init__(self, lower=0.8, upper=1.2):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "contrast upper must be >= lower."
        assert self.lower >= 0, "contrast lower must be non-negative."

    def __call__(self, image, bbox, prev_bbox=None):
        if np.random.randint(2):
            factor = np.random.uniform(self.lower, self.upper)
            image[:, :, 1] *= factor
        return image, bbox, prev_bbox
class RandomHue(object):
    """With probability 1/2, shift the hue channel (channel 0) by a uniform
    delta, wrapping values back into [0, 360)."""

    def __init__(self, delta=15.0):
        assert delta >= 0.0 and delta <= 360.0
        self.delta = delta

    def __call__(self, image, bbox, prev_bbox=None):
        if np.random.randint(2):
            shift = np.random.uniform(-self.delta, self.delta)
            hue = image[:, :, 0]  # view: mutations below hit `image` in place
            hue += shift
            hue[hue > 360.0] -= 360.0
            hue[hue < 0.0] += 360.0
        return image, bbox, prev_bbox
class RandomLightingNoise(object):
    """With probability 1/2, apply a random permutation of the color channels."""

    def __init__(self):
        # All six permutations of three channels.
        self.perms = ((0, 1, 2), (0, 2, 1),
                      (1, 0, 2), (1, 2, 0),
                      (2, 0, 1), (2, 1, 0))

    def __call__(self, image, bbox, prev_bbox=None):
        if np.random.randint(2):
            chosen = self.perms[np.random.randint(len(self.perms))]
            image = SwapChannels(chosen)(image)
        return image, bbox, prev_bbox
class ConvertColor(object):
    """Convert the image between BGR and HSV color spaces."""

    def __init__(self, current='BGR', transform='HSV'):
        self.transform = transform
        self.current = current

    def __call__(self, image, bbox, prev_bbox=None):
        conversion = (self.current, self.transform)
        if conversion == ('BGR', 'HSV'):
            image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        elif conversion == ('HSV', 'BGR'):
            image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
        else:
            raise NotImplementedError
        return image, bbox, prev_bbox
class RandomContrast(object):
    """With probability 1/2, scale the whole image by a random contrast factor
    drawn from [lower, upper]. Expects a float image."""

    def __init__(self, lower=0.5, upper=1.5):
        self.lower = lower
        self.upper = upper
        assert self.upper >= self.lower, "contrast upper must be >= lower."
        assert self.lower >= 0, "contrast lower must be non-negative."

    def __call__(self, image, bbox, prev_bbox=None):
        if np.random.randint(2):
            alpha = np.random.uniform(self.lower, self.upper)
            image *= alpha  # in-place: the caller's array is modified
        return image, bbox, prev_bbox
class RandomBrightness(object):
    """With probability 1/2, add a uniform brightness offset in [-delta, delta].
    Expects a float image."""

    def __init__(self, delta=32):
        assert 0.0 <= delta <= 255.0
        self.delta = delta

    def __call__(self, image, bbox, prev_bbox=None):
        if np.random.randint(2):
            offset = np.random.uniform(-self.delta, self.delta)
            image += offset  # in-place: the caller's array is modified
        return image, bbox, prev_bbox
class SwapChannels(object):
    """Reorder an image's channels according to the given permutation.

    Args:
        swaps (int triple): final order of channels, e.g. (2, 1, 0)
    """

    def __init__(self, swaps):
        self.swaps = swaps

    def __call__(self, image):
        """
        Args:
            image: (H, W, C) array/tensor to reorder
        Return:
            the image with channels permuted according to ``swaps``
        """
        return image[:, :, self.swaps]
class MotionBlur(object):
    """Apply a linear motion blur to roughly half of the images.

    NOTE(review): the blur direction (horizontal vs vertical) is drawn once at
    construction time, not per image — confirm that is intended.
    """

    def __init__(self):
        size = 9
        kernel = np.zeros((size, size))
        mid = int((size - 1) / 2)
        if np.random.randint(2):
            kernel[mid, :] = np.ones(size)  # horizontal streak
        else:
            kernel[:, mid] = np.ones(size)  # vertical streak
        self.kernel_motion_blur = kernel / size

    def __call__(self, image, bbox, prev_bbox=None):
        # Blur every second image on average.
        if np.random.randint(2):
            image = cv2.filter2D(image, -1, self.kernel_motion_blur)
        return image, bbox, prev_bbox
class Transform(object):
    """Full training-time augmentation pipeline: photometric distortion, a
    context crop, conversion to percentage coordinates, optional motion blur,
    and a final resize; the previous-frame box is additionally jittered."""

    def __init__(self, context_amount=0.5,
                 random_translate=False, random_translate_range=0.3,
                 random_resize=False, random_resize_scale_min=0.35, random_resize_scale_max=1.5,
                 size=300, mean=MEAN,
                 motion_blur=False, make_square=False):
        crop_step = Crop(mean=mean, context_amount=context_amount,
                         random_translate=random_translate,
                         random_translate_range=random_translate_range,
                         random_resize=random_resize,
                         random_resize_scale_min=random_resize_scale_min,
                         random_resize_scale_max=random_resize_scale_max,
                         make_square=make_square)
        # `motion_blur and MotionBlur()` yields False when disabled, which
        # Compose skips (and short-circuits the randint in MotionBlur.__init__).
        self.transform = Compose([
            ConvertFromInts(),
            ToAbsoluteCoords(),
            PhotometricDistort(),
            crop_step,
            ToPercentCoords(),
            motion_blur and MotionBlur(),
            Resize(size),
        ])

    def __call__(self, image, bbox, prev_bbox=None):
        image, bbox, prev_bbox = self.transform(image, bbox, prev_bbox)
        if prev_bbox is not None:
            prev_bbox = jitter_transform(prev_bbox)
        return image, bbox, prev_bbox
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,126
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/layers/alexnet.py
|
import torch.nn as nn
from torchvision import models
import torch
class AlexNet(nn.Module):
    """AlexNet-style convolutional backbone producing a 512-channel feature map.

    Inputs are normalized with ImageNet channel statistics (stored as
    buffers so they follow the module's device) before the conv stack.
    """

    def __init__(self):
        super().__init__()
        stack = [
            # Stage 1: 3 -> 192
            nn.Conv2d(3, 192, kernel_size=11, stride=2),
            nn.BatchNorm2d(192),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            # Stage 2: 192 -> 512
            nn.Conv2d(192, 512, kernel_size=5),
            nn.BatchNorm2d(512),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
            # Stage 3: 512 -> 768
            nn.Conv2d(512, 768, kernel_size=3),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
            # Stage 4: 768 -> 768
            nn.Conv2d(768, 768, kernel_size=3),
            nn.BatchNorm2d(768),
            nn.ReLU(inplace=True),
            # Stage 5: 768 -> 512 (no trailing ReLU)
            nn.Conv2d(768, 512, kernel_size=3),
            nn.BatchNorm2d(512),
        ]
        self.featureExtract = nn.Sequential(*stack)
        # ImageNet channel statistics, shaped to broadcast over NCHW input.
        self.register_buffer('mean', torch.FloatTensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def forward(self, x, m=None, use_mask=True):
        """Normalize *x* and return its convolutional features.

        ``m`` and ``use_mask`` are accepted for interface compatibility
        with other backbones but are ignored here.
        """
        normalized = (x - self.mean) / self.std
        return self.featureExtract(normalized)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,127
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/siamBroadcastRPN.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.layers import *
class SiamBroadcastRPN(nn.Module):
    """Siamese RPN head that fuses template and search features by broadcasting.

    The template embedding z is max-pooled to 1x1, broadcast across the
    search feature map x, concatenated channel-wise (1024 channels total,
    so each ResNet branch is expected to output 512), then passed through
    a relation module and SSD-style extra layers feeding two multibox
    heads (box regression and 2-class scores).
    """

    def __init__(self, cfg):
        super().__init__()
        # Separate (non-weight-shared) backbones for search (X) and template (Z).
        self.resnetX = ResNet()
        self.resnetZ = ResNet()
        self.cfg = cfg
        # Fuses the concatenated features and upsamples them 2x
        # (ConvTranspose2d with stride 2, kernel 4, padding 1).
        self.relation = nn.Sequential(
            nn.Conv2d(1024, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(256, 256, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True)
        )
        # Two stride-2 bottleneck stages; the output of each stage's final
        # ReLU (every 5th module) is cached as a multibox source map.
        self.extras = nn.ModuleList([
            nn.Conv2d(256, 64, kernel_size=1),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 256, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 64, kernel_size=1),
            nn.BatchNorm2d(64),
            nn.Conv2d(64, 256, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        ])
        # Box-regression heads: 24 = 6 anchors x 4 offsets per location.
        self.loc = nn.ModuleList([
            nn.Conv2d(256, 24, kernel_size=3, padding=1),
            nn.Conv2d(256, 24, kernel_size=3, padding=1)
        ])
        # Classification heads: 12 = 6 anchors x 2 classes per location.
        self.conf = nn.ModuleList([
            nn.Conv2d(256, 12, kernel_size=3, padding=1),
            nn.Conv2d(256, 12, kernel_size=3, padding=1),
        ])

    def forward(self, z, z_mask, x, x_mask):
        """Training forward pass.

        Args:
            z, x: template and search image batches.
            z_mask, x_mask: accepted for interface compatibility; not used here.

        Returns:
            (loc, conf): box offsets of shape (N, A, 4) and class scores of
            shape (N, A, 2) over all anchors A.
        """
        sources = list()
        loc = list()
        conf = list()
        z = self.resnetZ(z)
        x = self.resnetX(x)
        # Collapse the template map to 1x1 and broadcast it over x.
        # NOTE(review): kernel_size=8 assumes an 8x8 template feature map — confirm.
        z = F.max_pool2d(z, kernel_size=8)
        z = z.expand_as(x)
        x = torch.cat((x, z), dim=1)
        x = self.relation(x)
        # apply extra layers and cache source layer outputs
        for k, v in enumerate(self.extras):
            x = v(x)
            if k % 5 == 4:
                sources.append(x)
        # apply multibox head to source layers
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        return loc.view(loc.size(0), -1, 4), conf.view(conf.size(0), -1, 2)

    def temple(self, z, z_mask):
        """Embed the template once for tracking; cached in self.z."""
        z = self.resnetZ(z.unsqueeze(0))
        z = F.max_pool2d(z, kernel_size=8)
        # NOTE(review): the broadcast size 32x32 must match the search
        # feature-map size produced in infer() — confirm.
        self.z = z.expand(-1, -1, 32, 32)

    def infer(self, x, x_mask):
        """Single-image inference using the template cached by temple()."""
        sources = list()
        loc = list()
        conf = list()
        x = self.resnetX(x.unsqueeze(0))
        x = torch.cat((x, self.z), dim=1)
        x = self.relation(x)
        # apply extra layers and cache source layer outputs
        for k, v in enumerate(self.extras):
            x = v(x)
            if k % 5 == 4:
                sources.append(x)
        # apply multibox head to source layers
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        return loc.view(-1, 4), conf.view(-1, 2)
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,128
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/configs/defaults.py
|
from yacs.config import CfgNode as CN
_C = CN()
"""
MODEL PARAMETERS
"""
_C.MODEL = CN()
# Name of the network class to instantiate.
_C.MODEL.NET = "Net"
# Template (z) and search (x) input sizes, in pixels.
_C.MODEL.Z_SIZE = 300
_C.MODEL.X_SIZE = 300
# SSD-style anchor configuration: one entry per source feature map.
_C.MODEL.FEATURE_MAPS_DIM = [38, 19, 10, 5, 3, 1]
_C.MODEL.FEATURE_MAPS_STRIDES = [8, 16, 32, 64, 100, 300]
_C.MODEL.ANCHOR_MIN_SIZES = [60, 99, 120, 150, 190, 220]
_C.MODEL.ANCHOR_MAX_SIZES = [85, 110, 140, 180, 210, 250]
_C.MODEL.ANCHOR_ASPECT_RATIOS = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
# Variances used when encoding/decoding box offsets against anchors.
_C.MODEL.ANCHOR_VARIANCES = [0.1, 0.2]
# Input tensor should be values between 0 and 1.0
_C.MODEL.INPUT_RANGE = 1.0
"""
TRACKING PARAMETERS
"""
_C.TRACKING = CN()
# Enable hand-engineered tracking heuristics (penalties/windowing below).
_C.TRACKING.USE_ENGINEERING = True
# Box-update smoothing rate (presumably SiamRPN-style — verify in tracker).
_C.TRACKING.LR = 0.295
# Scale/ratio-change penalty coefficient (presumably SiamRPN-style).
_C.TRACKING.PENALTY_K = 0.055
# Weight of the cosine window applied to the score map.
_C.TRACKING.WINDOW_INFLUENCE = 0.42
# Template update rate (0.0 = never update the template).
_C.TRACKING.UPDATE_RATE = 0.0
_C.TRACKING.USE_CORRELATION_GUIDE = False
"""
TRAINING META-PARAMETERS
"""
_C.TRAIN = CN()
_C.TRAIN.BATCH_SIZE = 16
_C.TRAIN.LR = 1e-3
_C.TRAIN.WEIGHT_DECAY = 0.
# StepLR schedule; the trainer steps the scheduler every iteration,
# so STEP_SIZE counts batches, not epochs.
_C.TRAIN.SCHEDULER_STEP_SIZE = 1000
_C.TRAIN.SCHEDULER_GAMMA = 0.99
_C.TRAIN.NUM_EPOCHS = 50
# Number of (template, search) pairs sampled from each video per epoch.
_C.TRAIN.PAIRS_PER_VIDEO = 1
# Checkpoint path to resume from ("" = start from scratch).
_C.TRAIN.RESUME_CHECKPOINT = ""
_C.TRAIN.LAMBDA = 1.0  # Loss = loss_classification + lambda * loss_regression
_C.TRAIN.NEGPOS_RATIO = 3  # Negative/positive ratio during training
_C.TRAIN.REGRESSION_LOSS = "smooth_l1"  # Smooth L1 or L1 loss.
# Positive/negative examples sampling
# IoU thresholds used to pick positive/negative anchors (see MultiBoxLoss).
_C.TRAIN.TH_HIGH = 0.6
_C.TRAIN.TH_LOW = 0.3
# Debugging
# Index of a single sequence to overfit on (-1 = disabled).
_C.TRAIN.DEBUG_SEQ = -1
# Mask the search image with the previous-frame box (see mask_imgs usage).
_C.TRAIN.USE_MASK = True
# Cropping
_C.TRAIN.CROP_CONTEXT_AMOUNT_Z = 1.0
_C.TRAIN.CROP_CONTEXT_AMOUNT_X = 1.0
# Data augmentation
_C.TRAIN.DATA_AUG_TRANSLATE_RANGE = 0.3
_C.TRAIN.DATA_AUG_RESIZE_SCALE_MIN = 0.35
_C.TRAIN.DATA_AUG_RESIZE_SCALE_MAX = 1.5
# Maximum frame distance between the two images of a training pair.
_C.TRAIN.FRAME_RANGE = 100
"""
PATHS
"""
_C.PATH = CN()
# TrackingNet dataset root path
_C.PATH.TRACKINGNET = "/PATH/TO/TRACKINGNET"
# UAV dataset root path
_C.PATH.UAV = "/PATH/TO/UAV"
# OTB dataset root path
_C.PATH.OTB = "/PATH/TO/OTB"
# ILSVRC dataset root path
_C.PATH.ILSVRC = "/PATH/TO/ILSVRC"
# COCO Detection 2014 root path
_C.PATH.COCO = "/PATH/TO/COCO"
# COCO annotation JSON file path
_C.PATH.COCO_ANN_FILE = "/PATH/TO/COCO_ANNOTATION_FILE"
# Where to save checkpoints, tensorboard runs...
_C.PATH.DATA_DIR = "/PATH/TO/PROJECT_DIRECTORY"
# Pretrained models
_C.PATH.PRETRAINED_SIAMRPN = "/PATH/TO/pretrained/SiamRPNBIG.model"
_C.PATH.PRETRAINED_SIAMFC = "/PATH/TO/pretrained/siamfc"
# AlexnetBIG weights
_C.PATH.ALEXNETBIG_WEIGHTS = "/PATH/TO/pretrained/alexnetBIG.pth"
"""
DEBUG
"""
# When True the trainer skips logging, archiving and checkpointing.
_C.DEBUG = False
# Exporting as cfg is a nice convention
cfg = _C
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,129
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/models/layers/siamFC.py
|
import torch.nn as nn
import torch
import torch.nn.functional as F
import math
from scipy import io
import os
def initialize_weights(model):
    """Initialize *model*'s layers in place.

    Conv2d weights get He (fan-out) normal init with zeroed biases,
    BatchNorm2d is set to identity (weight 1, bias 0), and Linear layers
    get N(0, 0.01) weights with zeroed biases.
    """
    for module in model.modules():
        if isinstance(module, nn.Conv2d):
            fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
            module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.weight.data.fill_(1)
            module.bias.data.zero_()
        elif isinstance(module, nn.Linear):
            module.weight.data.normal_(0, 0.01)
            module.bias.data.zero_()
def load_matconvnet(filename):
    """Read a MatConvNet ``net.params`` struct from a .mat file.

    Returns:
        (names, values): parameter names as a list of str, and the
        corresponding parameter arrays, in file order.
    """
    net_struct = io.loadmat(filename).get('net')
    params = net_struct['params'][0][0]
    raw_names = params['name'][0]
    raw_values = params['value'][0]
    names = [raw_names[i][0] for i in range(raw_names.size)]
    values = [raw_values[i] for i in range(raw_values.size)]
    return names, values
def load_siamfc_from_matconvnet(filename, model):
    """Load pretrained SiamFC weights exported from MatConvNet into *model*.

    Parameter names in the .mat file follow the MatConvNet convention
    ``<prefix><layer><f|b|m|x>`` (filter / bias / BN scale-shift / BN
    moments); the prefix depends on which AlexNet variant the model uses.
    Returns the mutated *model*.
    """
    assert isinstance(model.branch, (AlexNetV1, AlexNetV2))
    if isinstance(model.branch, AlexNetV1):
        p_conv = 'conv'
        p_bn = 'bn'
        p_adjust = 'adjust_'
    elif isinstance(model.branch, AlexNetV2):
        p_conv = 'br_conv'
        p_bn = 'br_bn'
        p_adjust = 'fin_adjust_bn'
    params_names_list, params_values_list = load_matconvnet(filename)
    params_values_list = [torch.from_numpy(p) for p in params_values_list]
    for l, p in enumerate(params_values_list):
        param_name = params_names_list[l]
        # Conv filters are stored H x W x In x Out; convert to PyTorch's
        # Out x In x H x W, then drop singleton dimensions everywhere.
        if 'conv' in param_name and param_name[-1] == 'f':
            p = p.permute(3, 2, 0, 1)
        p = torch.squeeze(p)
        params_values_list[l] = p
    net = (
        model.branch.conv1,
        model.branch.conv2,
        model.branch.conv3,
        model.branch.conv4,
        model.branch.conv5)
    for l, layer in enumerate(net):
        # layer[0] is the Conv2d; layer[1] (when present) its BatchNorm2d.
        layer[0].weight.data[:] = params_values_list[
            params_names_list.index('%s%df' % (p_conv, l + 1))]
        layer[0].bias.data[:] = params_values_list[
            params_names_list.index('%s%db' % (p_conv, l + 1))]
        if l < len(net) - 1:
            layer[1].weight.data[:] = params_values_list[
                params_names_list.index('%s%dm' % (p_bn, l + 1))]
            layer[1].bias.data[:] = params_values_list[
                params_names_list.index('%s%db' % (p_bn, l + 1))]
            # BN moments come as columns [mean, std]; variance = std ** 2.
            bn_moments = params_values_list[
                params_names_list.index('%s%dx' % (p_bn, l + 1))]
            layer[1].running_mean[:] = bn_moments[:, 0]
            layer[1].running_var[:] = bn_moments[:, 1] ** 2
        # NOTE(review): this elif chains off `if l < len(net) - 1`, so the
        # adjust/normalization parameters are loaded only on the LAST loop
        # iteration (the final conv layer, which has no BN of its own).
        elif model.norm.norm == 'bn':
            model.norm.bn.weight.data[:] = params_values_list[
                params_names_list.index('%sm' % p_adjust)]
            model.norm.bn.bias.data[:] = params_values_list[
                params_names_list.index('%sb' % p_adjust)]
            bn_moments = params_values_list[
                params_names_list.index('%sx' % p_adjust)]
            model.norm.bn.running_mean[:] = bn_moments[0]
            model.norm.bn.running_var[:] = bn_moments[1] ** 2
        elif model.norm.norm == 'linear':
            model.norm.linear.weight.data[:] = params_values_list[
                params_names_list.index('%sf' % p_adjust)]
            model.norm.linear.bias.data[:] = params_values_list[
                params_names_list.index('%sb' % p_adjust)]
    return model
class XCorr(nn.Module):
    """Per-sample cross-correlation of a template batch z over a search batch x.

    Each z[i] (C x kh x kw) is used as a single conv filter over x[i],
    yielding a one-channel response map per sample.
    """

    def __init__(self):
        super(XCorr, self).__init__()

    def forward(self, z, x):
        responses = [
            F.conv2d(x[i, :].unsqueeze(0), z[i, :].unsqueeze(0))
            for i in range(z.size(0))
        ]
        return torch.cat(responses, dim=0)
class Adjust2d(nn.Module):
    """Normalization head applied to a cross-correlation response map.

    norm:
        'bn'        - BatchNorm2d over the single response channel.
        'linear'    - learnable scale + bias (1x1 conv).
        'cosine'    - rescale the raw correlation into [0, 1] using the
                      norms of z and of each k x k window of x.
        'euclidean' - turn the correlation into a distance-like map.
        None        - identity.
    """

    def __init__(self, norm='bn'):
        super(Adjust2d, self).__init__()
        assert norm in [None, 'bn', 'cosine', 'euclidean', 'linear']
        self.norm = norm
        if norm == 'bn':
            self.bn = nn.BatchNorm2d(1)
        elif norm == 'linear':
            self.linear = nn.Conv2d(1, 1, 1, bias=True)
        self._initialize_weights()

    def forward(self, out, z=None, x=None):
        """Normalize response *out*; 'cosine'/'euclidean' also need z and x."""
        if self.norm == 'bn':
            out = self.bn(out)
        elif self.norm == 'linear':
            out = self.linear(out)
        elif self.norm == 'cosine':
            n, k = out.size(0), z.size(-1)
            # ||z|| per sample; k*k*avg_pool2d gives the sliding-window
            # sum of squares of x, i.e. ||x_window||^2 per location.
            norm_z = torch.sqrt(
                torch.pow(z, 2).view(n, -1).sum(1)).view(n, 1, 1, 1)
            norm_x = torch.sqrt(
                k * k * F.avg_pool2d(torch.pow(x, 2), k, 1).sum(1, keepdim=True))
            out = out / (norm_z * norm_x + 1e-32)
            # Map cosine similarity from [-1, 1] to [0, 1].
            out = (out + 1) / 2
        elif self.norm == 'euclidean':
            n, k = out.size(0), z.size(-1)
            # NOTE(review): ||z - w||^2 = ||z||^2 + ||w||^2 - 2<z, w>; this
            # ADDS the correlation term — confirm `out` is already -2<z, w>
            # upstream, otherwise the sign looks wrong.
            sqr_z = torch.pow(z, 2).view(n, -1).sum(1).view(n, 1, 1, 1)
            sqr_x = k * k * \
                F.avg_pool2d(torch.pow(x, 2), k, 1).sum(1, keepdim=True)
            out = out + sqr_z + sqr_x
            out = out.clamp(min=1e-32).sqrt()
        elif self.norm == None:
            out = out
        return out

    def _initialize_weights(self):
        # BN starts as identity; the linear head starts with a small gain
        # (1e-3) and zero bias.
        if self.norm == 'bn':
            self.bn.weight.data.fill_(1)
            self.bn.bias.data.zero_()
        elif self.norm == 'linear':
            self.linear.weight.data.fill_(1e-3)
            self.linear.bias.data.zero_()
class AlexNetV1(nn.Module):
    """SiamFC AlexNet branch, v1 variant (stride-2 pool after conv2,
    256-channel output).  Grouped convs mirror the original two-GPU split."""

    def __init__(self):
        super(AlexNetV1, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=11, stride=2),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.conv2 = nn.Sequential(
            nn.Conv2d(96, 256, kernel_size=5, stride=1, groups=2),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.conv3 = nn.Sequential(
            nn.Conv2d(256, 384, kernel_size=3, stride=1),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            nn.Conv2d(384, 384, kernel_size=3, stride=1, groups=2),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True))
        # Final stage: plain conv, no BN/ReLU.
        self.conv5 = nn.Sequential(
            nn.Conv2d(384, 256, kernel_size=3, stride=1, groups=2))
        initialize_weights(self)

    def forward(self, x):
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            x = stage(x)
        return x
class AlexNetV2(nn.Module):
    """SiamFC AlexNet branch, v2 variant (stride-1 pool after conv2,
    32-channel output).  Grouped convs mirror the original two-GPU split."""

    def __init__(self):
        super(AlexNetV2, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 96, kernel_size=11, stride=2),
            nn.BatchNorm2d(96),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2))
        self.conv2 = nn.Sequential(
            nn.Conv2d(96, 256, kernel_size=5, stride=1, groups=2),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=1))
        self.conv3 = nn.Sequential(
            nn.Conv2d(256, 384, kernel_size=3, stride=1),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True))
        self.conv4 = nn.Sequential(
            nn.Conv2d(384, 384, kernel_size=3, stride=1, groups=2),
            nn.BatchNorm2d(384),
            nn.ReLU(inplace=True))
        # Final stage: plain conv to 32 channels, no BN/ReLU.
        self.conv5 = nn.Sequential(
            nn.Conv2d(384, 32, kernel_size=3, stride=1, groups=2))
        initialize_weights(self)

    def forward(self, x):
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            x = stage(x)
        return x
class SiamFC(nn.Module):
    """Siamese fully-convolutional correlation network.

    Embeds template z and search x with a shared AlexNetV2 branch,
    cross-correlates them and normalizes the response.  Pretrained
    MatConvNet weights are loaded from *root_pretrained* at construction.
    """

    def __init__(self, root_pretrained):
        super(SiamFC, self).__init__()
        self.root_pretrained = root_pretrained
        self.branch = AlexNetV2()
        self.norm = Adjust2d(norm="bn")
        self.xcorr = XCorr()
        self.load_weights()

    def load_weights(self):
        """Load the pretrained MatConvNet checkpoint into this model."""
        net_path = os.path.join(self.root_pretrained, "baseline-conv5_e55.mat")
        load_siamfc_from_matconvnet(net_path, self)

    def forward(self, z, x):
        # Both inputs must share batch size and channel count.
        assert z.size()[:2] == x.size()[:2]
        embed_z = self.branch(z)
        embed_x = self.branch(x)
        response = self.xcorr(embed_z, embed_x)
        response = self.norm(response, embed_z, embed_x)
        return response
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,130
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/trainers/trainer.py
|
import matplotlib; matplotlib.use('Agg')
import matplotlib.pyplot as plt; plt.figure(figsize=(20, 10))
import torch
from torch import nn
from torch import optim
from torch.utils.data import DataLoader, ConcatDataset
from tensorboardX import SummaryWriter
from datasets import TrackingNet, ImageNetVID, PairSampler, CocoDetection, COCODistractor, COCONegativePair, COCOPositivePair
from transforms import Transform
from utils import generate_anchors, mask_imgs
from utils.bbox_utils import decode
import utils
from utils import visualize
from loss import MultiBoxLoss
import os
from datetime import datetime
import time
from distutils.dir_util import copy_tree
class Trainer(object):
    """Training driver: builds the data pipeline, loss, optimizer and
    scheduler from *cfg*, then train() runs the epoch loop with
    TensorBoard logging, validation and checkpointing for *net*."""

    def __init__(self, net, cfg):
        self.cfg = cfg
        self.net = net
        self.anchors = generate_anchors(cfg)
        if torch.cuda.is_available():
            self.net.cuda()
            self.anchors = self.anchors.cuda()
        # Dataset transform
        # index 0: template (z) crop; index 1: search (x) crop with heavier
        # augmentation (random translate/resize + motion blur).
        transform = [
            Transform(context_amount=cfg.TRAIN.CROP_CONTEXT_AMOUNT_Z, size=cfg.MODEL.Z_SIZE),
            Transform(context_amount=cfg.TRAIN.CROP_CONTEXT_AMOUNT_X, size=cfg.MODEL.X_SIZE,
                      random_translate=True, random_resize=True, motion_blur=True,
                      random_translate_range=cfg.TRAIN.DATA_AUG_TRANSLATE_RANGE,
                      random_resize_scale_min=cfg.TRAIN.DATA_AUG_RESIZE_SCALE_MIN,
                      random_resize_scale_max=cfg.TRAIN.DATA_AUG_RESIZE_SCALE_MAX
                      )
        ]
        # Training dataset
        trackingnet = TrackingNet(cfg.PATH.TRACKINGNET, subset="train", debug_seq=cfg.TRAIN.DEBUG_SEQ)
        imagenet = ImageNetVID(cfg.PATH.ILSVRC, subset="train")
        sampler = PairSampler([trackingnet, imagenet], cfg=cfg, transform=transform, pairs_per_video=cfg.TRAIN.PAIRS_PER_VIDEO,
                              frame_range=cfg.TRAIN.FRAME_RANGE)
        # Distractor dataset
        coco = CocoDetection(cfg.PATH.COCO, cfg.PATH.COCO_ANN_FILE)
        # coco_distractor = COCODistractor(coco, 4000)
        coco_positive = COCOPositivePair(coco, 4000, cfg=cfg, transform=transform)
        coco_negative = COCONegativePair(coco, 12000, cfg=cfg, transform=transform)
        dataset = ConcatDataset([sampler, coco_positive, coco_negative])
        self.dataloader = DataLoader(dataset, batch_size=cfg.TRAIN.BATCH_SIZE, num_workers=4, shuffle=True,
                                     pin_memory=True, drop_last=True)
        # Validation dataset
        val_trackingnet = TrackingNet(cfg.PATH.TRACKINGNET, subset="val")
        val_imagenet = ImageNetVID(cfg.PATH.ILSVRC, subset="val")
        validation_sampler = PairSampler([val_trackingnet, val_imagenet], cfg=cfg, transform=transform,
                                         pairs_per_video=1, frame_range=cfg.TRAIN.FRAME_RANGE)
        val_coco_positive = COCOPositivePair(coco, 100, cfg=cfg, transform=transform)
        val_dataset = ConcatDataset([validation_sampler, val_coco_positive])
        if cfg.TRAIN.DEBUG_SEQ >= 0:  # When debugging on a single sequence, the validation is performed on the same one
            val_dataset = PairSampler([trackingnet], cfg=cfg, transform=transform, pairs_per_video=200)
        self.validation_dataloader = DataLoader(val_dataset, batch_size=min(cfg.TRAIN.BATCH_SIZE, 20), num_workers=4,
                                                shuffle=True, pin_memory=True, drop_last=False)
        # Loss
        self.criterion = MultiBoxLoss(self.anchors, cfg)
        self.optimizer = optim.Adam(self.net.parameters(), lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WEIGHT_DECAY)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=cfg.TRAIN.SCHEDULER_STEP_SIZE,
                                                   gamma=cfg.TRAIN.SCHEDULER_GAMMA)
        # Summary Writer
        # run_id doubles as the directory name for logs, config and archives.
        self.run_id = datetime.now().strftime('%b%d_%H-%M-%S')
        if not cfg.DEBUG:
            self.save_config()
            self.save_code()
            self.writer = SummaryWriter(log_dir=os.path.join(cfg.PATH.DATA_DIR, "runs", self.run_id))
        self.start_epoch = 0
        if cfg.TRAIN.RESUME_CHECKPOINT:
            self.start_epoch = utils.load_checkpoint(cfg.TRAIN.RESUME_CHECKPOINT, self.net, self.optimizer)
        if torch.cuda.is_available():
            self.net = nn.DataParallel(self.net)
        self.best_IOU = 0.

    def train(self):
        """Run the full training loop: iterate epochs, log metrics,
        validate and checkpoint (tracking the best mean IoU)."""
        print("Training model {} with configuration:".format(type(self.net).__name__))
        print(self.cfg)
        for epoch in range(self.start_epoch, self.cfg.TRAIN.NUM_EPOCHS):
            epoch_size = len(self.dataloader)
            print("Epoch {} / {}, {} iterations".format(epoch + 1, self.cfg.TRAIN.NUM_EPOCHS, epoch_size))
            """Training."""
            self.net.train()
            for batch_idx, batch in enumerate(self.dataloader):
                # NOTE(review): the scheduler is stepped once per batch, so
                # SCHEDULER_STEP_SIZE counts iterations, not epochs.
                self.scheduler.step()
                z, x, z_bbox, x_bbox, xprev_bbox = batch
                if torch.cuda.is_available():
                    z, x, = z.cuda(), x.cuda()
                    z_bbox, x_bbox, xprev_bbox = z_bbox.cuda(), x_bbox.cuda(), xprev_bbox.cuda()
                # 20% black-and-white data-augmentation
                if torch.rand(1) < 0.2:
                    x = x.mean(dim=1, keepdim=True).expand_as(x)
                    z = z.mean(dim=1, keepdim=True).expand_as(z)
                # Adding masks using ground truth bounding boxes
                z_mask, x_mask = mask_imgs(z, z_bbox), mask_imgs(x, xprev_bbox, use_mask=self.cfg.TRAIN.USE_MASK)
                self.optimizer.zero_grad()
                s = time.time()
                loc_pred, conf_pred = self.net.forward(z, z_mask, x, x_mask)
                forward_time = time.time() - s
                loss_loc, loss_cls, pos_accuracy, neg_accuracy, position_err, size_errors, pos_matches = self.criterion((loc_pred, conf_pred), x_bbox)
                loss = loss_cls + self.cfg.TRAIN.LAMBDA * loss_loc
                if loss > 0:
                    loss.backward()
                    torch.nn.utils.clip_grad_norm_(self.net.parameters(), 1.)  # Prevent exploding gradients
                    self.optimizer.step()
                iter_idx = epoch * epoch_size + batch_idx
                if not self.cfg.DEBUG:
                    self.log_metrics(iter_idx, loss_loc.item(), loss_cls.item(), loss.item(), pos_accuracy,
                                     neg_accuracy, position_err, size_errors, pos_matches, forward_time,
                                     self.optimizer.param_groups[0]['lr'])
            if not self.cfg.DEBUG:
                """Validation."""
                self.net.eval()
                with torch.no_grad():
                    mean_IOU = self.compute_validation_metrics(epoch_size * (epoch + 1))
                is_best = mean_IOU > self.best_IOU
                self.best_IOU = max(mean_IOU, self.best_IOU)
                """Save Checkpoint."""
                utils.save_checkpoint({
                    'epoch': epoch + 1,
                    'state_dict': self.net.module.state_dict(),  # Save the 'module' layer from a DataParallel model
                    'optimizer': self.optimizer.state_dict(),
                }, self.cfg.PATH.DATA_DIR, run_id=self.run_id, is_best=is_best)

    def log_metrics(self, iter_idx, loss_l, loss_c, loss, pos_accuracy, neg_accuracy, position_error, size_errors,
                    pos_matches, forward_time, lr):
        """Write one training iteration's scalar metrics to TensorBoard."""
        self.writer.add_scalar("/train/loss/localization", loss_l, iter_idx)
        self.writer.add_scalar("/train/loss/classification", loss_c, iter_idx)
        self.writer.add_scalar("/train/loss/total", loss, iter_idx)
        self.writer.add_scalar("/train/metrics/pos_accuracy", pos_accuracy, iter_idx)
        self.writer.add_scalar("/train/metrics/neg_accuracy", neg_accuracy, iter_idx)
        self.writer.add_scalar("/train/metrics/position_error", position_error, iter_idx)
        self.writer.add_scalar("/train/metrics/w_error", size_errors[0], iter_idx)
        self.writer.add_scalar("/train/metrics/h_error", size_errors[1], iter_idx)
        self.writer.add_scalar("/train/forward_time", forward_time, iter_idx)
        self.writer.add_scalar("/train/pos_matches", pos_matches, iter_idx)
        self.writer.add_scalar("/train/learning_rate", lr, iter_idx)

    def compute_validation_metrics(self, iter_idx):
        """Run the validation set through the network, log mean/median IoU
        (and the first 30 qualitative pair plots), and return the mean IoU."""
        IoUs = []
        for i, (z, x, z_bbox, x_bbox, xprev_bbox) in enumerate(self.validation_dataloader):
            if torch.cuda.is_available():
                z, x, = z.cuda(), x.cuda()
                # NOTE(review): x_bbox is moved to GPU twice here; harmless but redundant.
                z_bbox, x_bbox, x_bbox, xprev_bbox = z_bbox.cuda(), x_bbox.cuda(), x_bbox.cuda(), xprev_bbox.cuda()
            # 20% black-and-white data-augmentation
            if torch.rand(1) < 0.2:
                x = x.mean(dim=1, keepdim=True).expand_as(x)
                z = z.mean(dim=1, keepdim=True).expand_as(z)
            z_mask, x_mask = mask_imgs(z, z_bbox), mask_imgs(x, xprev_bbox, use_mask=self.cfg.TRAIN.USE_MASK)
            loc_pred, conf_pred = self.net.forward(z, z_mask, x, x_mask)
            # Pick, per sample, the anchor with the highest positive-class score
            # and decode its box prediction.
            best_ids = conf_pred[:, :, 1].argmax(dim=1)
            best_anchors = self.criterion.point_form_anchors[best_ids]
            indices = best_ids.view(-1, 1, 1).expand(-1, -1, 4)
            pred_bboxs = decode(loc_pred.gather(1, indices).squeeze(1), self.anchors[best_ids],
                                self.cfg.MODEL.ANCHOR_VARIANCES)
            IoUs.append(utils.IoUs(x_bbox, pred_bboxs))
            # Display the first 30 images from the validation set.
            if i < 30:
                visualize.plot_pair((z[0].cpu(), z_bbox[0].cpu()), (x[0].cpu(), pred_bboxs[0].cpu()),
                                    gt_box=x_bbox[0].cpu(), prev_bbox=xprev_bbox[0].cpu(),
                                    anchor=best_anchors[0].cpu(), anchor_id=best_ids[0].cpu())
                self.writer.add_image("Image_{}".format(i), visualize.plot_to_tensor(), iter_idx)
                plt.clf()
        IoUs = torch.cat(IoUs)
        self.writer.add_scalar("/validation/metrics/mean_IoU", IoUs.mean(), iter_idx)
        self.writer.add_scalar("/validation/metrics/median_IoU", IoUs.median(), iter_idx)
        return IoUs.mean()

    def save_code(self):
        """Archive the current working tree alongside the run for reproducibility."""
        archive_dir = os.path.join(self.cfg.PATH.DATA_DIR, "archive", self.run_id)
        copy_tree(".", archive_dir)

    def save_config(self):
        """Dump the resolved YAML config for this run.

        NOTE(review): opened in append mode, so re-using a run_id would
        concatenate configs into one file.
        """
        config_file = os.path.join(self.cfg.PATH.DATA_DIR, "configs", self.run_id + ".yaml")
        with open(config_file, "a") as f:
            f.write(self.cfg.dump())
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,131
|
Silencesss/SiamBroadcastRPN
|
refs/heads/master
|
/datasets/trackingnet.py
|
import numpy as np
import os
from os import scandir
class TrackingNet(object):
    """
    TrackingNet dataset.
    Bounding boxes are in x1y1wh format.

    One chunk (TRAIN_5) is held out: its first 300 sequences form the
    validation split; the remainder is folded back into training.

    Args:
        root_dir (str): dataset root containing TRAIN_* chunk directories.
        subset (str): "train" or "val".
        debug_seq (int): if >= 0, restrict the dataset to that single sequence.
    """

    def __init__(self, root_dir, subset="train", debug_seq=-1):
        self.root_dir = root_dir
        validation_chunk = "TRAIN_5"
        validation_size = 300
        # Sequences are identified as "<chunk>:<sequence_name>".
        val_chunk_seq_names = [validation_chunk + ":" + f.name for f in
                               scandir(os.path.join(self.root_dir, validation_chunk, "frames")) if f.is_dir()]
        if subset == "val":
            self.seq_names = val_chunk_seq_names[:validation_size]
        elif subset == "train":
            chunks = [f.name for f in scandir(self.root_dir) if f.is_dir() and "TRAIN" in f.name
                      and f.name != validation_chunk]
            self.seq_names = [chunk + ":" + f.name for chunk in chunks for f in
                              scandir(os.path.join(self.root_dir, chunk, "frames")) if f.is_dir()]
            # Fold the unused tail of the validation chunk back into training.
            self.seq_names.extend(val_chunk_seq_names[validation_size:])
        else:
            raise Exception('Unknown subset.')
        if debug_seq >= 0:
            self.seq_names = [self.seq_names[debug_seq]]

    def __len__(self):
        return len(self.seq_names)

    def __getitem__(self, idx):
        """Return (image_files, anno) for sequence *idx*.

        image_files are the frame paths "<i>.jpg" in frame order; anno is
        an (N, 4) float array converted to x1y1x2y2 format.
        """
        chunk, seq_id = self.seq_names[idx].split(":")
        num_image_files = len([f.name for f in scandir(os.path.join(self.root_dir, chunk, "frames", seq_id))])
        image_files = [os.path.join(self.root_dir, chunk, "frames", seq_id, "{}.jpg".format(i)) for i in range(num_image_files)]
        # BUG FIX: np.loadtxt returns a 1-D array for a single-line annotation
        # file, which made the column arithmetic below crash; force 2-D.
        anno = np.atleast_2d(np.loadtxt(os.path.join(self.root_dir, chunk, "anno", seq_id + ".txt"), delimiter=","))
        # Convert bounding boxes from x1y1wh to x1y1x2y2 format.
        anno[:, 2] += anno[:, 0]
        anno[:, 3] += anno[:, 1]
        return image_files, anno
|
{"/datasets/uav.py": ["/configs/__init__.py"], "/datasets/coco.py": ["/transforms/transforms.py"], "/models/siamConcatRPN.py": ["/models/layers/__init__.py"], "/trackers/siamRPNBIG.py": ["/trackers/__init__.py"], "/configs/__init__.py": ["/configs/defaults.py"], "/models/layers/__init__.py": ["/models/layers/base.py", "/models/layers/refine.py", "/models/layers/correlate.py", "/models/layers/siamFC.py", "/models/layers/resnet.py", "/models/layers/alexnet.py"], "/trackers/__init__.py": ["/trackers/tracker.py", "/trackers/siamRPNBIG.py"], "/datasets/__init__.py": ["/datasets/imagenet.py", "/datasets/trackingnet.py", "/datasets/simple.py", "/datasets/pairwise.py", "/datasets/coco.py", "/datasets/uav.py"], "/train.py": ["/models/__init__.py", "/configs/__init__.py"], "/models/__init__.py": ["/models/siamRPNBIG.py", "/models/siamConcatRPN.py", "/models/siamBroadcastRPN.py"], "/trackers/tracker.py": ["/trackers/__init__.py", "/transforms/transforms.py", "/loss.py"], "/benchmark.py": ["/models/__init__.py", "/trackers/__init__.py", "/experiments/__init__.py", "/configs/__init__.py"], "/metrics/__init__.py": ["/metrics/metrics.py"], "/transforms/__init__.py": ["/transforms/transforms.py"], "/models/siamBroadcastRPN.py": ["/models/layers/__init__.py"], "/trainers/trainer.py": ["/datasets/__init__.py", "/transforms/__init__.py", "/loss.py"]}
|
37,134
|
sfxz035/detection-of-arbitrarily-shaped-fiducial-markers
|
refs/heads/master
|
/parxml/read.py
|
import numpy as np
import matplotlib.pyplot as plt
import cv2 as cv
# %matplotlib inline
# path
def load(file_path):
    """Load one raw ultrasound frame and return it as a float image in [0, 1).

    File layout: a 320-byte header followed by a square image of 16-bit
    little-endian pixels (low byte first).

    Fixes vs. the original:
    - the file handle is now closed (context manager);
    - the O(size^2) Python pixel loop is replaced by a single vectorized
      `np.frombuffer` decode (identical values, orders of magnitude faster).
    """
    # Read the whole raw file; the first 320 bytes are a header we skip.
    with open(file_path, 'rb') as f:
        data = f.read()
    image_data = data[320:]
    # The image is square: size = sqrt(byte_count / 2).
    image_size = np.sqrt((len(image_data) / 2)).astype(int).tolist()
    # Original code stored pixel index i*size+j at image[j, i], i.e. the
    # row-major little-endian decode, transposed.  The slice guards against
    # trailing bytes when the payload is not exactly 2*size*size long.
    image = (
        np.frombuffer(image_data[:2 * image_size * image_size], dtype='<u2')
        .astype(float)
        .reshape(image_size, image_size)
        .T
    )
    # Map the 16-bit range into [0, 1).
    image_maxmin = image / (256 * 256)
    print('convert done')
    return image_maxmin
if __name__ == '__main__':
    # Smoke test: load one raw frame, flip it vertically, and display it.
    file_path = 'E:/code/segment/data/liver_cases_rawdata/chendarong/A_101_LI_1489568524_311000_UNPROCESSED_IBRST_00'
    a = load(file_path)
    # flipCode=0 flips around the x-axis (the raw data is stored upside down
    # relative to display orientation -- see the same flip in procXml.py)
    a = cv.flip(a,0,dst=None)
    plt.imshow(a, cmap=plt.cm.gray)
    plt.show()
|
{"/main.py": ["/networks/U_net.py"], "/parxml/procXml.py": ["/parxml/read.py"], "/networks/U_net.py": ["/networks/ops.py"], "/utils/evalu.py": ["/utils/postproce.py"]}
|
37,135
|
sfxz035/detection-of-arbitrarily-shaped-fiducial-markers
|
refs/heads/master
|
/main.py
|
import tensorflow as tf
import os
import numpy as np
import dataset
import networks.U_net as U_net
import cv2 as cv
from utils import losses
from utils import evalu
from utils import postproce
os.environ["CUDA_VISIBLE_DEVICES"] = '1'  # restrict TF to the second GPU
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.9  # cap GPU memory usage at 90%
config.gpu_options.allow_growth = True  # allocate GPU memory on demand
sess = tf.InteractiveSession(config = config)
# Training hyper-parameters and data locations.
epoch = 2000000
batch_size = 8
learning_rate = 0.0001
savenet_path = './libSaveNet/save_unet/'
trainfile_dir = './data/data2/train/'
testfile_dir = './data/data2/test/'
input_name = 'img'       # key of the image array inside each .npz
label_name = 'recmask'   # key of the rectangle-mask label inside each .npz
channel = 1
# Load the full train/test sets into memory up front.
x_train,y_train = dataset.get_data(trainfile_dir, input_name, label_name)
x_test,y_test = dataset.get_data(testfile_dir, input_name, label_name)
# Labels need an explicit channel axis: (N, H, W) -> (N, H, W, 1).
y_train = np.expand_dims(y_train,-1)
y_test = np.expand_dims(y_test,-1)
def train():
    """Build the H_DenseUnet training graph and run the optimisation loop.

    Uses the module-level session and data (sess, x_train/y_train,
    x_test/y_test).  Logs train/test loss every 50 steps to TensorBoard and
    checkpoints roughly every 10000 steps.
    """
    x = tf.placeholder(tf.float32, shape=[batch_size, 1024, 1024, channel])
    y_ = tf.placeholder(tf.float32, shape=[batch_size, 1024, 1024, 1])
    y = U_net.H_DenseUnet(x, grow_date=32)
    y_pred = tf.nn.sigmoid(y)
    # BUG FIX: mixedLoss is declared as (y_true, y_pred, alpha).  The
    # original call passed the prediction first, so focal_loss treated the
    # sigmoid output as the ground truth (comparing it to the literal 1/0).
    loss = losses.mixedLoss(y_, y_pred, alpha=0.5)
    summary_op = tf.summary.scalar('trainloss', loss)
    summary_op2 = tf.summary.scalar('testloss', loss)
    # Batch-norm moving statistics must be updated alongside the train step.
    batch_norm_updates_op = tf.group(*tf.get_collection(tf.GraphKeys.UPDATE_OPS))
    with tf.control_dependencies([batch_norm_updates_op]):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    variables_to_restore = list(tf.global_variables())
    saver = tf.train.Saver(variables_to_restore, write_version=tf.train.SaverDef.V2, max_to_keep=8)
    writer = tf.summary.FileWriter('./my_graph/train', sess.graph)
    writer2 = tf.summary.FileWriter('./my_graph/test')
    tf.global_variables_initializer().run()
    count, m = 0, 0
    for ep in range(epoch):
        batch_idxs = len(x_train) // batch_size
        for idx in range(batch_idxs):
            batch_input, batch_labels = dataset.random_batch(x_train, y_train, batch_size)
            sess.run(train_step, feed_dict={x: batch_input, y_: batch_labels})
            count += 1
            if count % 50 == 0:
                m += 1
                batch_input_test, batch_labels_test = dataset.random_batch(x_test, y_test, batch_size)
                loss1 = sess.run(loss, feed_dict={x: batch_input, y_: batch_labels})
                loss2 = sess.run(loss, feed_dict={x: batch_input_test, y_: batch_labels_test})
                print("Epoch: [%2d], step: [%2d], train_loss: [%.8f]" \
                      % ((ep + 1), count, loss1), "\t", 'test_loss:[%.8f]' % (loss2))
                writer.add_summary(sess.run(summary_op, feed_dict={x: batch_input, y_: batch_labels}), m)
                writer2.add_summary(sess.run(summary_op2, feed_dict={x: batch_input_test,
                                                                     y_: batch_labels_test}), m)
            if (count + 1) % 10000 == 0:
                saver.save(sess, os.path.join(savenet_path, 'conv_unet%d.ckpt-done' % (count)))
def test():
    """Restore a trained U-Net checkpoint and evaluate it on the test set.

    For each test image: run inference, morphologically close the
    prediction, display image/label/prediction windows, and accumulate
    per-box IoU hit/miss statistics via evalu.calcu2.
    """
    savepath = 'E:\code\segment\libSaveNet\save_unet\conv_unet79999.ckpt-done'
    x = tf.placeholder(tf.float32, shape=[1, 1024, 1024, channel])
    y_ = tf.placeholder(tf.float32, shape=[1, 1024, 1024, 1])
    y = U_net.inference(x, is_training=False)
    loss = tf.reduce_mean(tf.square(y - y_))
    saver = tf.train.Saver(list(tf.global_variables()),
                           write_version=tf.train.SaverDef.V2, max_to_keep=None)
    tf.global_variables_initializer().run()
    saver.restore(sess, savepath)
    # BUG FIX: iterate over the *test*-set size.  The original used
    # np.shape(x_train)[0], which indexes past the end of x_test whenever
    # the test set is smaller than the training set.
    nub = np.shape(x_test)[0]
    predicList = []
    ycList = []
    for i in range(nub):
        inputTest = x_test[i:i + 1, :, :, :]
        labelTest = y_test[i:i + 1, :, :, :]
        output = sess.run(y, feed_dict={x: inputTest})
        loss_test = sess.run(loss, feed_dict={x: inputTest, y_: labelTest})
        # Inputs/labels are in [0, 1]; rescale to 8-bit for display.
        img = ((np.squeeze(inputTest)) * 255).astype(np.uint8)
        # NOTE(review): casting the raw network output to uint8 *before*
        # multiplying by 255 truncates values in (0, 1) to 0, acting as a
        # hard >=1 threshold.  Kept as-is to preserve the original
        # behaviour -- confirm this is intended.
        out = np.squeeze(output).astype(np.uint8)
        out = out * 255
        label = (np.squeeze(labelTest) * 255).astype(np.uint8)
        kernel = cv.getStructuringElement(cv.MORPH_RECT, (5, 5))   # structuring element
        outclosing = cv.morphologyEx(out, cv.MORPH_CLOSE, kernel)  # morphological closing
        # Draw predicted boxes on the input image.
        postproce.contourmask(img, outclosing)
        cv.namedWindow('imgrec', 0)
        cv.resizeWindow('imgrec', 500, 500)
        cv.imshow('imgrec', img)
        cv.namedWindow('label', 0)
        cv.resizeWindow('label', 500, 500)
        cv.imshow('label', label)
        cv.namedWindow('output', 0)
        cv.resizeWindow('output', 500, 500)
        cv.imshow('output', out)
        cv.waitKey(0)
        cv.destroyAllWindows()
        # Per-box detection metrics.
        predic, iouList = evalu.calcu2(outclosing, label)
        if predic == -1 and iouList == -1:
            print('loss: %g, wrong index:%g' % (loss_test, i))
            ycList.append(i)
        else:
            predicList.extend(predic)
            print(i)
            print('loss: %g' % (loss_test), iouList)
    pos = predicList.count(1)
    # Renamed from `all` (shadowed the builtin); guard against an empty
    # list to avoid ZeroDivisionError.
    total = len(predicList)
    precision = pos / total if total else 0.0
    print('1-nub: %g, all nub:%g' % (pos, total))
    print(precision)
if __name__ == '__main__':
    train()
    # test()  # switch to evaluate a saved checkpoint instead of training
|
{"/main.py": ["/networks/U_net.py"], "/parxml/procXml.py": ["/parxml/read.py"], "/networks/U_net.py": ["/networks/ops.py"], "/utils/evalu.py": ["/utils/postproce.py"]}
|
37,136
|
sfxz035/detection-of-arbitrarily-shaped-fiducial-markers
|
refs/heads/master
|
/parxml/procXml.py
|
import xml.dom.minidom
import numpy as np
import cv2 as cv
import os
import parxml.read as read
import matplotlib.pyplot as plt
def generadata(readpath, writepath):
    """Walk patient folders under `readpath`, pair each raw frame with its
    annotation XML, and save (image, rect-mask, boxes) triples as .npz files.

    Each patient directory is expected to contain alternating files
    (raw frame, annotation xml) once sorted by name; output files are named
    `<writepath><patient_index>.<frame_index>.npz`.
    """
    indexPatient = -1
    # BUG FIX: os.listdir returns entries in arbitrary (filesystem) order,
    # but the pairing below relies on file_name[2*i] being the raw frame and
    # file_name[2*i+1] its xml -- sort both levels deterministically.
    for file1 in sorted(os.listdir(readpath)):
        file_dir = readpath + file1 + '/'
        indexPatient += 1
        file_name = [file_dir + file2 for file2 in sorted(os.listdir(file_dir))]
        nubFile = len(file_name)
        for i in range(nubFile // 2):
            path1 = file_name[i * 2]      # raw frame
            path2 = file_name[i * 2 + 1]  # annotation xml
            imge = read.load(path1)
            img_shape = np.shape(imge)
            # Vertical flip (the raw frames are stored upside down relative
            # to the annotation coordinates).
            img = imge[::-1, :].copy()
            mask, areaList = prcxml(path2, img_shape, img)
            np.savez(writepath + str(indexPatient) + '.' + str(i) + '.npz',
                     img=img, recmask=mask, areaList=areaList)
def prcxml(xmlPath, im_shape, imge):
    """Parse a PascalVOC-style annotation file and rasterise its boxes.

    Returns (mask, areaList): `mask` is a uint8 image of shape
    (im_shape[0], im_shape[1]) with each annotated rectangle filled with
    255, and `areaList` holds [(xmin, ymin), (xmax, ymax)] per box.
    `imge` is accepted for parity with the (commented-out) debug overlay
    but is not modified.
    """
    dom = xml.dom.minidom.parse(xmlPath)
    root = dom.documentElement
    xmins = root.getElementsByTagName('xmin')
    xmaxs = root.getElementsByTagName('xmax')
    ymins = root.getElementsByTagName('ymin')
    ymaxs = root.getElementsByTagName('ymax')
    mask = np.zeros([im_shape[0], im_shape[1]], dtype='uint8')
    areaList = []
    for i in range(len(xmins)):
        top_left = (int(xmins[i].firstChild.data), int(ymins[i].firstChild.data))
        bottom_right = (int(xmaxs[i].firstChild.data), int(ymaxs[i].firstChild.data))
        areaList.append([top_left, bottom_right])
        # thickness -1 => filled rectangle
        cv.rectangle(mask, top_left, bottom_right, 255, -1)
    return mask, areaList
# src = cv.imread(path2)
# cv.namedWindow('input_image', cv.WINDOW_AUTOSIZE)
# cv.namedWindow('input_image',0)
# cv.resizeWindow('input_image', 500, 500)
# cv.imshow('input_image',src)
# cv.waitKey(0)
# cv.destroyAllWindows()
if __name__ == '__main__':
    # Source directory of raw patient data and the .npz output prefix.
    readpath = 'E:/code/segment/data/liver_cases_rawdata/'
    writepath = 'E:/code/segment/data/data4/train/data.'
    generadata(readpath,writepath)
|
{"/main.py": ["/networks/U_net.py"], "/parxml/procXml.py": ["/parxml/read.py"], "/networks/U_net.py": ["/networks/ops.py"], "/utils/evalu.py": ["/utils/postproce.py"]}
|
37,137
|
sfxz035/detection-of-arbitrarily-shaped-fiducial-markers
|
refs/heads/master
|
/utils/postproce.py
|
import tensorflow as tf
import sklearn as sk
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
def connectComp(img):
    """Split a predicted mask (255-peak) into per-component boolean masks.

    Thresholds `img` at 200, runs 8-connected component analysis, and
    returns an array of boolean masks, one per component with at least
    27 pixels (smaller blobs are treated as noise and dropped).
    """
    binary = np.greater(img, 200).astype(np.uint8)
    n_labels, labels, stats, _centroids = cv.connectedComponentsWithStats(binary, connectivity=8)
    kept = []
    # Label 0 is the background; stats row layout is [x, y, w, h, area].
    for label in range(1, n_labels):
        if stats[label][-1] >= 27:
            kept.append(labels == label)
    return np.asarray(kept)
def filterFewPoint(mask):
    """Denoise a mask by connected-component size.

    Thresholds `mask` at 200, then zeroes every 8-connected component
    smaller than 19 pixels and sets the surviving ones to 255.
    Returns the relabelled integer array from connectedComponentsWithStats.
    """
    binary = np.greater(mask, 200).astype(np.uint8)
    n_labels, labels, stats, _centroids = cv.connectedComponentsWithStats(binary, connectivity=8)
    # Label 0 is the background; stats[label][-1] is the component area.
    for label in range(1, n_labels):
        component = labels == label
        labels[component] = 0 if stats[label][-1] < 19 else 255
    return labels
def contourmask(img, mask):
    """Draw a bounding box on `img` around every denoised component of `mask`.

    Modifies `img` in place.  NOTE(review): the two-value findContours
    unpacking assumes OpenCV 4.x (OpenCV 3 returns three values) -- confirm
    the pinned cv2 version.
    """
    cleaned = filterFewPoint(mask).astype(np.uint8)
    contours, _hierarchy = cv.findContours(cleaned, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        x, y, w, h = cv.boundingRect(contour)
        cv.rectangle(img, (x, y), (x + w, y + h), (153, 153, 0), 1)
|
{"/main.py": ["/networks/U_net.py"], "/parxml/procXml.py": ["/parxml/read.py"], "/networks/U_net.py": ["/networks/ops.py"], "/utils/evalu.py": ["/utils/postproce.py"]}
|
37,138
|
sfxz035/detection-of-arbitrarily-shaped-fiducial-markers
|
refs/heads/master
|
/utils/losses.py
|
import tensorflow as tf
from keras import backend as K
def dice_coe(output, target, loss_type='sorensen', axis=(1, 2, 3), smooth=1e-5):
    """Mean soft Dice coefficient between `output` and `target`.

    `output` is expected to already be a probability (the caller applies
    the sigmoid).  `loss_type` selects the denominator: 'jaccard' uses
    squared sums, 'sorensen' plain sums; anything else raises.
    `smooth` stabilises the ratio when both masks are (nearly) empty.
    """
    intersection = tf.reduce_sum(output * target, axis=axis)
    if loss_type == 'jaccard':
        left = tf.reduce_sum(output * output, axis=axis)
        right = tf.reduce_sum(target * target, axis=axis)
    elif loss_type == 'sorensen':
        left = tf.reduce_sum(output, axis=axis)
        right = tf.reduce_sum(target, axis=axis)
    else:
        raise Exception("Unknow loss_type")
    per_sample = (2. * intersection + smooth) / (left + right + smooth)
    return tf.reduce_mean(per_sample)
def focal_loss(y_true, y_pred, gamma=2., alpha=0.25):
    """Binary focal loss (Lin et al., 2017), summed over all pixels.

    `y_pred` must already be a probability (sigmoid applied by the caller);
    `y_true` is the 0/1 ground-truth mask.

    BUG FIX: the probabilities are clipped to [eps, 1-eps] before the logs.
    Without this, a saturated prediction (pt exactly 0 or 1) produces
    log(0) = -inf and poisons the whole loss with NaN.
    """
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    eps = K.epsilon()
    pt_1 = K.clip(pt_1, eps, 1. - eps)
    pt_0 = K.clip(pt_0, eps, 1. - eps)
    focal_loss_fixed = -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
                       - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
    return focal_loss_fixed
def mixedLoss(y_ture, y_pred, alpha):
    """Combined loss: alpha * focal_loss - log(dice).

    Note the argument order: the ground truth (`y_ture`, sic) comes first,
    the sigmoid prediction second.
    """
    focal_term = focal_loss(y_ture, y_pred)
    dice_term = K.log(dice_coe(y_pred, y_ture))
    return alpha * focal_term - dice_term
|
{"/main.py": ["/networks/U_net.py"], "/parxml/procXml.py": ["/parxml/read.py"], "/networks/U_net.py": ["/networks/ops.py"], "/utils/evalu.py": ["/utils/postproce.py"]}
|
37,139
|
sfxz035/detection-of-arbitrarily-shaped-fiducial-markers
|
refs/heads/master
|
/networks/U_net.py
|
from networks.ops import *
import tensorflow as tf
import numpy as np
#paramaters
FILTER_DIM = 64
OUTPUT_C = 1
#deep 5
def inference(images,is_training=True,reuse = False,name='UNet'):
    """Classic 5-level U-Net.

    Encoder: two conv-BN-ReLU blocks per level with 2x2 max-pool
    downsampling; channel widths FILTER_DIM * (1, 2, 4, 8, 16).
    Decoder: transposed-conv upsampling, concatenation with the matching
    encoder feature map (skip connection), then two conv-BN-ReLU blocks.
    Returns raw logits (final `conv_b`, no activation) with OUTPUT_C
    channels -- the caller is expected to apply a sigmoid/threshold.
    """
    with tf.variable_scope(name, reuse=reuse):
        # ---------- encoder ----------
        L1_1 = ReLU(conv_bn(images, FILTER_DIM, k_h=3,is_train=is_training, name='Conv2d_1_1'),name='ReLU_1_1')
        L1_2 = ReLU(conv_bn(L1_1, FILTER_DIM, k_h=3,is_train=is_training, name='Conv2d_1_2'),name='ReLU_1_2')
        L2_1 = tf.nn.max_pool(L1_2, [1, 2, 2, 1], [1, 2, 2, 1], padding = 'SAME',name = 'MaxPooling1')
        L2_2 = ReLU(conv_bn(L2_1, FILTER_DIM*2, k_h=3, is_train=is_training,name='Conv2d_2_1'),name='ReLU_2_1')
        L2_3 = ReLU(conv_bn(L2_2, FILTER_DIM*2, k_h=3, is_train=is_training,name='Conv2d_2_2'),name='ReLU_2_2')
        L3_1 = tf.nn.max_pool(L2_3, [1, 2, 2, 1], [1, 2, 2, 1], padding = 'SAME',name = 'MaxPooling2')
        L3_2 = ReLU(conv_bn(L3_1, FILTER_DIM*4, k_h=3, is_train=is_training,name='Conv2d_3_1'),name='ReLU_3_1')
        L3_3 = ReLU(conv_bn(L3_2, FILTER_DIM*4, k_h=3, is_train=is_training,name='Conv2d_3_2'),name='ReLU_3_2')
        L4_1 = tf.nn.max_pool(L3_3, [1, 2, 2, 1], [1, 2, 2, 1], padding = 'SAME',name = 'MaxPooling3')
        L4_2 = ReLU(conv_bn(L4_1, FILTER_DIM*8, k_h=3, is_train=is_training,name='Conv2d_4_1'),name='ReLU_4_1')
        L4_3 = ReLU(conv_bn(L4_2, FILTER_DIM*8, k_h=3, is_train=is_training,name='Conv2d_4_2'),name='ReLU_4_2')
        L5_1 = tf.nn.max_pool(L4_3, [1, 2, 2, 1], [1, 2, 2, 1], padding = 'SAME',name = 'MaxPooling4')
        L5_2 = ReLU(conv_bn(L5_1, FILTER_DIM*16, k_h=3, is_train=is_training,name='Conv2d_5_1'),name='ReLU_5_1')
        L5_3 = ReLU(conv_bn(L5_2, FILTER_DIM*16, k_h=3, is_train=is_training,name='Conv2d_5_2'),name='ReLU_5_2')
        # ---------- decoder (upsample + skip concat + 2x conv) ----------
        L4_U1 = ReLU(Deconv2d_bn(L5_3, L4_3.get_shape().as_list(),k_h = 3,is_train=is_training,name = 'Deconv2d4'),name='DeReLU4')
        L4_U1 = tf.concat((L4_3, L4_U1), -1)
        L4_U2 = ReLU(conv_bn(L4_U1, FILTER_DIM * 8, k_h=3, is_train=is_training,name='Conv2d_4_u1'),name='ReLU_4_u1')
        L4_U3 = ReLU(conv_bn(L4_U2, FILTER_DIM * 8, k_h=3, is_train=is_training,name='Conv2d_4_u2'),name='ReLU_4_u2')
        L3_U1 = ReLU(Deconv2d_bn(L4_U3,L3_3.get_shape().as_list(),k_h = 3,is_train=is_training,name = 'Deconv2d3'),name = 'DeReLU3')
        L3_U1 = tf.concat((L3_3, L3_U1), -1)
        L3_U2 = ReLU(conv_bn(L3_U1, FILTER_DIM*4, k_h=3,is_train=is_training, name='Conv2d_3_u1'), name='ReLU_3_u1')
        L3_U3 = ReLU(conv_bn(L3_U2, FILTER_DIM*4, k_h=3,is_train=is_training, name='Conv2d_3_u2'), name='ReLU_3_u2')
        L2_U1 = ReLU(Deconv2d_bn(L3_U3,L2_3.get_shape().as_list(), k_h = 3,is_train=is_training,name = 'Deconv2d2'),name='DeReLU2')
        L2_U1 = tf.concat((L2_3, L2_U1), -1)
        L2_U2 = ReLU(conv_bn(L2_U1, FILTER_DIM*2, k_h=3, is_train=is_training,name='Conv2d_2_u1'),name='ReLU_2_u1')
        L2_U3 = ReLU(conv_bn(L2_U2, FILTER_DIM*2, k_h=3, is_train=is_training,name='Conv2d_2_u2'),name='ReLU_2_u2')
        L1_U1 = ReLU(Deconv2d_bn(L2_U3, L1_2.get_shape().as_list(),k_h=3,is_train=is_training,name='Deconv2d1'),name='DeReLU1')
        L1_U1 = tf.concat((L1_2, L1_U1), 3)
        L1_U2 = ReLU(conv_bn(L1_U1, FILTER_DIM, k_h=3, is_train=is_training,name='Conv1d_1_u1'),name='ReLU_1_u1')
        L1_U3 = ReLU(conv_bn(L1_U2, FILTER_DIM, k_h=3, is_train=is_training,name='Conv1d_1_u2'),name='ReLU_1_u2')
        # Head: reduce to 2 channels, then a biased 3x3 conv to OUTPUT_C logits.
        conv1 = ReLU(conv_bn(L1_U3, 2, k_h=3, is_train=is_training,name='Conv2d_1'),name='ReLU_1')
        out = conv_b(conv1, OUTPUT_C,name='Conv1d_out')
        return out
def H_DenseUnet(images,grow_date=32,compression=0.5,is_training=True,reuse = False,name='DenseUnet'):
    """Dense-U-Net hybrid with global-context (GC) attention blocks.

    Encoder: an initial conv stem with GC blocks, then four dense blocks
    (6/12/36/24 layers, growth rate `grow_date`) separated by transition
    blocks that compress channels by `compression` and halve resolution.
    Decoder: transposed-conv upsampling, skip concatenation with the dense
    block outputs, 1x1 channel fusion and a 3x3 conv per level.
    Returns a 1-channel batch-normalised map (no activation) -- the caller
    applies the sigmoid.
    """
    with tf.variable_scope(name, reuse=reuse):
        nb_layers = [6,12,36,24]  # layers per dense block (DenseNet-161-like)
        # ---------- stem ----------
        L1_1 = ReLU(conv_bn(images, FILTER_DIM,is_train=is_training, name='Conv2d_1_1'),name='ReLU_1_1')
        L1_1 = GC_Block(L1_1, 1, is_training=is_training, name='GC_block1')
        L1_2 = ReLU(conv_bn(L1_1, FILTER_DIM,is_train=is_training, name='Conv2d_1_2'),name='ReLU_1_2')
        L1_2 = GC_Block(L1_2, 1, is_training=is_training, name='GC_block2')
        L2_1 = tf.nn.max_pool(L1_2, [1, 2, 2, 1], [1, 2, 2, 1], padding = 'SAME',name = 'MaxPooling1')
        # ---------- dense encoder ----------
        L2_d = Denseblock(L2_1,nb_layers[0],grow_date=grow_date,is_training=is_training,name='dense_block1')
        L2_t = transition_block(L2_d,compression=compression,is_training=is_training,name='trans_block1')
        L3_d = Denseblock(L2_t,nb_layers[1],grow_date=grow_date,is_training=is_training,name='dense_block2')
        L3_t = transition_block(L3_d,compression=compression,is_training=is_training,name='trans_block2')
        L4_d = Denseblock(L3_t,nb_layers[2],grow_date=grow_date,is_training=is_training,name='dense_block3')
        L4_t = transition_block(L4_d,compression=compression,is_training=is_training,name='trans_block3')
        L5_d = Denseblock(L4_t,nb_layers[3],grow_date=grow_date,is_training=is_training,name='dense_block4')
        L5_d = GC_Block(L5_d,is_training=is_training, name='GC_block5')
        # Static shapes of the skip tensors, used as deconv output shapes.
        shape_list2,shape_list3,shape_list4 = L2_d.get_shape().as_list(),L3_d.get_shape().as_list(),L4_d.get_shape().as_list()
        # ---------- decoder ----------
        L4_U1 = ReLU(Deconv2d_bn(L5_d, shape_list4,k_h = 3,is_train=is_training,name = 'Deconv2d4'),name='DeReLU4')
        L4_U1 = tf.concat((L4_d, L4_U1), -1)
        L4_U1 = ReLU(conv_bn(L4_U1, shape_list4[-1],k_w=1,k_h=1,is_train=is_training, name='Conv2d_4_u1'),name='ReLU_4_u1')
        L4_U2 = ReLU(conv_bn(L4_U1, shape_list3[-1], k_h=3, is_train=is_training,name='Conv2d_4_u2'),name='ReLU_4_u2')
        L3_U1 = ReLU(Deconv2d_bn(L4_U2, shape_list3,k_h = 3,is_train=is_training,name = 'Deconv2d3'),name='DeReLU3')
        L3_U1 = tf.concat((L3_d, L3_U1), -1)
        L3_U1 = ReLU(conv_bn(L3_U1, shape_list3[-1],k_w=1,k_h=1,is_train=is_training, name='Conv2d_3_u1'),name='ReLU_3_u1')
        L3_U2 = ReLU(conv_bn(L3_U1, shape_list2[-1], k_h=3, is_train=is_training,name='Conv2d_3_u2'),name='ReLU_3_u2')
        L2_U1 = ReLU(Deconv2d_bn(L3_U2, shape_list2,k_h = 3,is_train=is_training,name = 'Deconv2d2'),name='DeReLU2')
        L2_U1 = tf.concat((L2_d, L2_U1), -1)
        L2_U1 = ReLU(conv_bn(L2_U1, shape_list2[-1],k_w=1,k_h=1,is_train=is_training, name='Conv2d_2_u1'),name='ReLU_2_u1')
        L2_U2 = ReLU(conv_bn(L2_U1, FILTER_DIM, k_h=3, is_train=is_training,name='Conv2d_2_u2'),name='ReLU_2_u2')
        L1_U1 = ReLU(Deconv2d_bn(L2_U2, L1_2.get_shape().as_list(),k_h = 3,is_train=is_training,name = 'Deconv2d1'),name='DeReLU1')
        L1_U1 = tf.concat((L1_2, L1_U1), -1)
        L1_U2 = ReLU(conv_bn(L1_U1, FILTER_DIM, k_h=3, is_train=is_training,name='Conv1d_1_u1'),name='ReLU_1_u1')
        L1_U2 = GC_Block(L1_U2,is_training=is_training, name='GC_block_L1U')
        L1_U3 = ReLU(conv_bn(L1_U2, FILTER_DIM, k_h=3, is_train=is_training,name='Conv1d_1_u2'),name='ReLU_1_u2')
        L1_U3 = GC_Block(L1_U3,is_training=is_training, name='GC_block_L1U2')
        out = conv_bn(L1_U3,1,k_w=1,k_h=1,is_train=is_training, name='Conv2d_out')
        return out
|
{"/main.py": ["/networks/U_net.py"], "/parxml/procXml.py": ["/parxml/read.py"], "/networks/U_net.py": ["/networks/ops.py"], "/utils/evalu.py": ["/utils/postproce.py"]}
|
37,140
|
sfxz035/detection-of-arbitrarily-shaped-fiducial-markers
|
refs/heads/master
|
/networks/ops.py
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
def weight_variable(shape, name=None, trainable=True, decay_mult=0.0):
    """Create (or reuse, under the current variable scope) a float32 weight
    tensor initialised from a truncated normal (stddev 0.1).

    `decay_mult` is accepted for API compatibility but currently unused
    (the L2 regularizer is disabled).
    """
    return tf.get_variable(
        name,
        shape,
        tf.float32,
        trainable=trainable,
        initializer=tf.truncated_normal_initializer(stddev=0.1),
    )
def bias_variable(shape, name=None, bias_start=0.0, trainable=True, decay_mult=0.0):
    """Create (or reuse) a float32 bias tensor, constant-initialised to
    `bias_start`.

    `decay_mult` is accepted for API compatibility but currently unused
    (the L2 regularizer is disabled).
    """
    return tf.get_variable(
        name,
        shape,
        tf.float32,
        trainable=trainable,
        initializer=tf.constant_initializer(bias_start, dtype=tf.float32),
    )
def conv_bn(inpt ,output_dim, k_h = 3, k_w = 3, strides = [1, 1, 1, 1], is_train = True, name='Conv2d'):
    """SAME-padded convolution followed by batch norm; no bias, no activation.

    NOTE: the mutable default `strides` is shared across calls but never
    mutated here, so it is harmless in practice.
    """
    with tf.variable_scope(name):
        filter_ = weight_variable([k_h,k_w,inpt.get_shape()[-1],output_dim],name='weights')
        conv = tf.nn.conv2d(inpt, filter=filter_, strides=strides, padding="SAME")
        # switched from tf.contrib to tf.layers batch norm (original note);
        # the caller must run the UPDATE_OPS collection during training.
        batch_norm = tf.layers.batch_normalization(conv, training=is_train)
        return batch_norm
def BatchNorm(
    value, is_train = True, name = 'BatchNorm',
    epsilon = 1e-5, momentum = 0.9
):
    """Legacy batch-norm helper built on tf.contrib.layers.batch_norm.

    conv_bn/Deconv2d_bn use tf.layers.batch_normalization instead; this
    wrapper is kept for compatibility.  `momentum` maps to the moving
    average `decay`.
    """
    with tf.variable_scope(name):
        return tf.contrib.layers.batch_norm(
            value,
            decay = momentum,
            epsilon = epsilon,
            scale = True,
            is_training = is_train,
            scope = name
        )
def conv_relu(inpt, output_dim, k_h = 3, k_w = 3, strides = [1, 1, 1, 1],name='Conv2d'):
    """SAME-padded convolution with bias, followed by ReLU (no batch norm)."""
    with tf.variable_scope(name):
        filter_ = weight_variable([k_h, k_w, inpt.get_shape()[-1], output_dim],name='weights')
        conv = tf.nn.conv2d(inpt, filter=filter_, strides=strides, padding="SAME")
        biases = bias_variable(output_dim,name='biases')
        pre_relu = tf.nn.bias_add(conv, biases)
        out = tf.nn.relu(pre_relu)
        return out
def conv_b(inpt, output_dim, k_h = 3, k_w = 3, strides = [1, 1, 1, 1],name='Conv2d'):
    """SAME-padded convolution with bias and NO activation.

    Used as the logits head (e.g. `inference`'s final layer) and for the
    1x1 projections inside GC_Block.
    """
    with tf.variable_scope(name):
        filter_ = weight_variable([k_h, k_w, inpt.get_shape()[-1], output_dim],name='weights')
        conv = tf.nn.conv2d(inpt, filter=filter_, strides=strides, padding="SAME")
        biases = bias_variable(output_dim,name='biases')
        out = tf.nn.bias_add(conv, biases)
        return out
def ReLU(value, name='ReLU'):
    """ReLU wrapped in its own variable scope so graph nodes stay grouped."""
    with tf.variable_scope(name):
        activated = tf.nn.relu(value)
    return activated
def Deconv2d(
    value, output_shape, k_h = 3, k_w = 3, strides =[1, 2, 2, 1],
    name = 'Deconv2d', with_w = False
):
    """Transposed convolution (default 2x upsampling) with bias.

    output_shape: full static [N, H, W, C] shape of the result.
    Returns the deconv output; with_w=True additionally returns the
    weights and biases.
    """
    with tf.variable_scope(name):
        weights = weight_variable(
            name='weights',
            # conv2d_transpose filter layout: [k_h, k_w, out_ch, in_ch]
            shape=[k_h, k_w, output_shape[-1], value.get_shape()[-1]],
            decay_mult = 1.0
        )
        deconv = tf.nn.conv2d_transpose(
            value, weights, output_shape, strides = strides
        )
        biases = bias_variable(name='biases', shape=[output_shape[-1]])
        deconv = tf.nn.bias_add(deconv, biases)
        if with_w:
            return deconv, weights, biases
        else:
            return deconv
def Deconv2d_bn(
    value, output_shape, k_h = 3, k_w = 3, strides =[1, 2, 2, 1],
    is_train=True, name = 'Deconv2d', with_w = False
):
    """Transposed convolution (default 2x upsampling) followed by batch norm.

    No bias is added (batch norm supplies the shift).  Unlike Deconv2d,
    with_w=True returns only (bn_output, weights).
    """
    with tf.variable_scope(name):
        weights = weight_variable(
            name='weights',
            # conv2d_transpose filter layout: [k_h, k_w, out_ch, in_ch]
            shape=[k_h, k_w, output_shape[-1], value.get_shape()[-1]],
            decay_mult = 1.0
        )
        deconv = tf.nn.conv2d_transpose(
            value, weights, output_shape, strides = strides
        )
        # switched from tf.contrib to tf.layers batch norm (original note)
        batch_norm = tf.layers.batch_normalization(deconv, training=is_train)
        if with_w:
            return batch_norm, weights
        else:
            return batch_norm
def Denseblock(x,nb_layers,grow_date,is_training=True,name='dense_block'):
    """DenseNet-style block: nb_layers of (1x1 bottleneck -> 3x3 conv),
    each output concatenated onto the running feature map.

    `grow_date` (sic, growth rate) channels are appended per layer, so the
    output has in_channels + nb_layers * grow_date channels.

    NOTE(review): each bottleneck consumes `x` (the previous layer's 3x3
    output) rather than `concat_feat`, unlike canonical DenseNet which
    feeds the full concatenation -- confirm this is intended.
    """
    with tf.variable_scope(name):
        concat_feat = x
        for i in range(nb_layers):
            # 1x1 convolution (bottleneck layer, 4x growth-rate channels)
            x = ReLU(conv_bn(x,grow_date*4,k_h = 1, k_w = 1,is_train=is_training,name='conv1'+str(i+1)),name='ReLU1'+str(i+1))
            # 3x3 convolution producing `grow_date` new feature channels
            x = ReLU(conv_bn(x,grow_date,is_train=is_training,name='conv2'+str(i+1)),name='ReLU2'+str(i+1))
            concat_feat = tf.concat((concat_feat,x),-1)
        return concat_feat
def transition_block(x, compression=0.5,is_training=True, name='tran_block'):
    """DenseNet transition: 1x1 conv compressing channels by `compression`,
    then 2x2 average pooling that halves the spatial resolution."""
    with tf.variable_scope(name):
        features = x.get_shape()[-1]
        x = ReLU(conv_bn(x, int(int(features)*compression), k_h=1, k_w=1, is_train=is_training, name='conv_trans'),
                 name='ReLU_trans')
        x = tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], padding = 'SAME',name = 'AvgPooling')
        return x
def GC_Block(net,ration=16,softmax=True,is_training=True,name='NonLocal'):
    """Global-context style attention block (GCNet-like) with residual add.

    A 1x1 conv (`embA`) produces a context map; its (optionally softmaxed)
    weights pool the input features into a (B, 1, 1-ish, C) context vector,
    which is transformed through a bottleneck of `channels // ration` and
    added back onto `net`.
    """
    with tf.variable_scope(name):
        input_shape = net.get_shape().as_list()
        a = conv_b(net, 1, 1, 1, name='embA')
        g_orig = g = net
        if softmax:
            f = tf.nn.softmax(a)
        else:
            f = a / tf.cast(tf.shape(a)[-1], tf.float32)
        # Flatten (B, H, W, C) -> (B, HW, C) for the attention matmul.
        f_flat = tf.reshape(f, [tf.shape(f)[0], -1, tf.shape(f)[-1]])
        g_flat = tf.reshape(g, [tf.shape(g)[0], -1, tf.shape(g)[-1]])
        # Restore the static shapes TF lost track of during the reshape.
        f_flat.set_shape([a.shape[0], a.shape[1] * a.shape[2] if None not in a.shape[1:3] else None, a.shape[-1]])
        g_flat.set_shape([g.shape[0], g.shape[1] * g.shape[2] if None not in g.shape[1:3] else None, g.shape[-1]])
        # Attention-weighted sum of the features -> (B, 1(C_a), C).
        fg = tf.matmul(tf.transpose(f_flat, [0, 2, 1]), g_flat)
        fg = tf.expand_dims(fg, 1)
        # BUG FIX: `/` produces a float channel count under Python 3, which
        # tf.get_variable rejects as a filter dimension; use floor division.
        fg = conv_bn(fg, input_shape[-1] // ration, 1, 1, is_train=is_training, name='bottleneck')
        fg = conv_b(fg, input_shape[-1], 1, 1, name='transform')
        res = fg + net
        return res
|
{"/main.py": ["/networks/U_net.py"], "/parxml/procXml.py": ["/parxml/read.py"], "/networks/U_net.py": ["/networks/ops.py"], "/utils/evalu.py": ["/utils/postproce.py"]}
|
37,141
|
sfxz035/detection-of-arbitrarily-shaped-fiducial-markers
|
refs/heads/master
|
/utils/evalu.py
|
import tensorflow as tf
import sklearn as sk
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from utils.postproce import *
def dice_coef_theoretical(y_pred, y_true, threvalu=0.5):
    """Soft Dice coefficient as a graph op.

    Args:
        y_pred: raw logits; a sigmoid is applied internally.
        y_true: 0/1 ground-truth mask.
        threvalu: currently unused (hard thresholding is disabled).
    Returns:
        Scalar Dice tensor; defined as 1 when both masks are empty.
    """
    y_true_f = tf.cast(tf.reshape(y_true, [-1]), tf.float32)
    y_pred_f = tf.nn.sigmoid(y_pred)
    y_pred_f = tf.cast(tf.reshape(y_pred_f, [-1]), tf.float32)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    union = tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f)
    dice = (2. * intersection) / (union + 0.00001)
    # BUG FIX: the original used a Python `and` over two tensor comparisons,
    # which raises "Using a tf.Tensor as a Python bool" in graph mode.
    # Express the both-empty special case (dice := 1) as a graph op instead.
    both_empty = tf.logical_and(tf.equal(tf.reduce_sum(y_pred), 0),
                                tf.equal(tf.reduce_sum(y_true), 0))
    dice = tf.where(both_empty, tf.ones_like(dice), dice)
    return dice
def pix_RePre(y_pred, y_true,threvalu=0.5):
    """Pixel-level recall and precision as graph ops.

    Assumes `y_pred` is already a probability/binary mask -- the hard
    thresholding step is disabled, so `threvalu` is currently unused.
    Returns (recall, precision); both divide by sums that can be zero, so
    the caller must guarantee non-empty masks.
    """
    y_true_f = tf.cast(tf.reshape(y_true, [-1]), tf.float32)
    y_pred_f = tf.cast(tf.reshape(y_pred, [-1]), tf.float32)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    P = tf.reduce_sum(y_true_f)      # positives in the ground truth
    P_pre = tf.reduce_sum(y_pred_f)  # positives in the prediction
    recall = intersection/P
    precison = intersection/P_pre
    return recall,precison
def Iou_tf(y_pred,y_true,threvalu=0.5):
    """Soft IoU between prediction and ground truth, as a graph op.

    `threvalu` is currently unused (thresholding is left to the caller).
    Defined as 1 when both masks are empty.
    """
    y_true_f = tf.cast(tf.reshape(y_true, [-1]), tf.float32)
    y_pred_f = tf.cast(tf.reshape(y_pred, [-1]), tf.float32)
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    union = tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) - intersection
    iou = intersection / union
    # BUG FIX: `(tensor == 0) and (tensor == 0)` raised "Using a tf.Tensor
    # as a Python bool" in graph mode; express the both-empty special case
    # (IoU := 1, which also masks the 0/0 above) as a graph op.
    both_empty = tf.logical_and(tf.equal(tf.reduce_sum(y_pred), 0),
                                tf.equal(tf.reduce_sum(y_true), 0))
    iou = tf.where(both_empty, tf.ones_like(iou), iou)
    return iou
def Iou_np(y_pred, y_true):
    """Pixel IoU between two binary masks (NumPy version).

    Returns 1 when both masks are empty, otherwise
    |pred & true| / |pred | true| over the flattened masks.
    """
    y_true_f = np.reshape(y_true, [-1]).astype(np.float32)
    y_pred_f = np.reshape(y_pred, [-1]).astype(np.float32)
    # BUG FIX: the original tested `np.sum(y_true == 0)`, which counts
    # zero-valued pixels and therefore forced iou=1 whenever the prediction
    # was empty and the ground truth merely *contained* a zero pixel.
    # Checking emptiness first also avoids the 0/0 division.
    if np.sum(y_pred_f) == 0 and np.sum(y_true_f) == 0:
        return 1
    intersection = np.sum(y_pred_f * y_true_f)
    union = np.sum(y_true_f) + np.sum(y_pred_f) - intersection
    iou = intersection / union
    return iou
def calcu(y_pre,y_ture):
    """Component-wise detection scoring (mask-based variant).

    Splits prediction and ground truth into connected components via
    connectComp and compares them pairwise *by list position*.
    NOTE(review): this assumes both decompositions enumerate components in
    the same spatial order -- verify against connectedComponentsWithStats.

    Returns (predic, iouList): predic[i] is 1 when the i-th pair's IoU
    exceeds 0.4, else 0.  Returns (-1, -1) when the component counts
    differ badly enough to trigger an IndexError.
    """
    arrArea_pre = connectComp(y_pre)
    arrArea_true = connectComp(y_ture)
    nub1 = np.shape(arrArea_true)[0]
    nub2 = np.shape(arrArea_pre)[0]
    if nub1 != nub2:
        print('nub != nub2')
        nub = max(nub1,nub2)
    else:
        nub = nub1
    predic = []
    iouList = []
    for i in range(nub):
        try:
            area_true = arrArea_true[i]
            area_pre = arrArea_pre[i]
            iou = Iou_np(area_pre,area_true)
            iouList.append(iou)
            if iou >0.4:
                predic.append(1)
            else:
                predic.append(0)
                print('0!!!!!!!!!!!')
        except(IndexError):
            # Count mismatch: one side ran out of components.
            return -1,-1
    return predic,iouList
def calcu2(y_pre,y_ture):
    """Component-wise detection scoring (bounding-box variant).

    Denoises both masks with filterFewPoint, extracts external contours,
    and compares predicted vs. ground-truth bounding boxes pairwise *by
    contour list position*.
    NOTE(review): like calcu, this assumes both contour lists come out in
    the same spatial order -- verify against cv.findContours ordering.

    Returns (predic, iouList): predic[i] is 1 when the i-th box pair's IoU
    exceeds 0.4, else 0.  Returns (-1, -1) on contour-count mismatch
    (IndexError).
    """
    maskFilt_pre = filterFewPoint(y_pre)
    maskFilt_pre = maskFilt_pre.astype(np.uint8)
    contours_pre, hierarchy_pre = cv.findContours(maskFilt_pre, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    maskFilt_true = filterFewPoint(y_ture)
    maskFilt_true = maskFilt_true.astype(np.uint8)
    contours_true, hierarchy_true = cv.findContours(maskFilt_true, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    nub1,nub2 = len(contours_pre),len(contours_true)
    if nub1 != nub2:
        print('nub != nub2')
        nub = max(nub1,nub2)
    else:
        nub = nub1
    predic = []
    iouList = []
    for i in range(0, nub):
        try:
            # Axis-aligned boxes: (xL, yL) top-left, (xR, yR) bottom-right.
            x1L, y1L, w1, h1 = cv.boundingRect(contours_pre[i])
            x1R, y1R = x1L+w1, y1L+h1
            x2L, y2L, w2, h2 = cv.boundingRect(contours_true[i])
            x2R, y2R = x2L+w2, y2L+h2
            # Intersection rectangle (clamped at zero when disjoint).
            xL,yL = max(x1L,x2L),max(y1L,y2L)
            xR,yR = min(x1R,x2R),min(y1R,y2R)
            intersection1 = max(xR-xL,0)
            intersection2 = max(yR-yL,0)
            inter = intersection1*intersection2
            union_square = w1*h1+w2*h2-inter
            iou = inter/union_square
            iouList.append(iou)
            if iou >0.4:
                predic.append(1)
            else:
                predic.append(0)
                print('0!!!!!!!!!!!')
        except(IndexError):
            # Count mismatch: one side ran out of contours.
            return -1,-1
    return predic,iouList
#### 待处理
def tf_confusion_metrics(predict, real, session, feed_dict):
    """Evaluate confusion-matrix statistics for one-hot class predictions.

    predict/real: (batch, classes) tensors; the class is the argmax over
    axis 1, and class 1 is treated as "positive".  Runs the four counting
    ops in `session` with `feed_dict` and returns a dict of the raw counts
    and derived rates.

    NOTE(review): the divisions assume the batch contains at least one
    actual positive, one actual negative, and one predicted positive;
    otherwise they raise ZeroDivisionError.
    """
    predictions = tf.argmax(predict, 1)
    actuals = tf.argmax(real, 1)
    ones_like_actuals = tf.ones_like(actuals)
    zeros_like_actuals = tf.zeros_like(actuals)
    ones_like_predictions = tf.ones_like(predictions)
    zeros_like_predictions = tf.zeros_like(predictions)
    tp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, ones_like_actuals),
                tf.equal(predictions, ones_like_predictions)
            ),
            "float"
        )
    )
    tn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, zeros_like_actuals),
                tf.equal(predictions, zeros_like_predictions)
            ),
            "float"
        )
    )
    fp_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, zeros_like_actuals),
                tf.equal(predictions, ones_like_predictions)
            ),
            "float"
        )
    )
    fn_op = tf.reduce_sum(
        tf.cast(
            tf.logical_and(
                tf.equal(actuals, ones_like_actuals),
                tf.equal(predictions, zeros_like_predictions)
            ),
            "float"
        )
    )
    tp, tn, fp, fn = session.run([tp_op, tn_op, fp_op, fn_op], feed_dict)
    tpr = float(tp) / (float(tp) + float(fn))
    fpr = float(fp) / (float(fp) + float(tn))
    fnr = float(fn) / (float(tp) + float(fn))
    accuracy = (float(tp) + float(tn)) / (float(tp) + float(fp) + float(fn) + float(tn))
    recall = tpr
    precision = float(tp) / (float(tp) + float(fp))
    f1_score = (2 * (precision * recall)) / (precision + recall)
    # BUG FIX: the original computed every metric and then returned None,
    # discarding all of the work.
    return {
        'tp': tp, 'tn': tn, 'fp': fp, 'fn': fn,
        'tpr': tpr, 'fpr': fpr, 'fnr': fnr,
        'accuracy': accuracy, 'recall': recall,
        'precision': precision, 'f1_score': f1_score,
    }
|
{"/main.py": ["/networks/U_net.py"], "/parxml/procXml.py": ["/parxml/read.py"], "/networks/U_net.py": ["/networks/ops.py"], "/utils/evalu.py": ["/utils/postproce.py"]}
|
37,187
|
rbooker/capstone1
|
refs/heads/main
|
/models.py
|
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
bcrypt = Bcrypt()
db = SQLAlchemy()
class User(db.Model):
    """Application user.

    Owns quizzes and questions; both relationships cascade deletes, so
    removing a user removes their content.
    """

    __tablename__ = "users"

    # surrogate primary key
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # login name, must be unique
    username = db.Column(db.Text, nullable=False, unique=True)
    # bcrypt hash, never the plain password
    password = db.Column(db.Text,nullable=False)
    quizzes = db.relationship('Quiz', cascade="all, delete")
    questions = db.relationship('Question', cascade="all, delete")

    @classmethod
    def signup(cls, username, password):
        """Create a new user with a bcrypt-hashed password.

        Adds the user to the current session but does NOT commit --
        the caller is responsible for committing (adapted from the
        'Warbler' app).
        """
        hashed_pwd = bcrypt.generate_password_hash(password).decode('UTF-8')
        user = User(
            username=username,
            password=hashed_pwd,
        )
        db.session.add(user)
        return user

    @classmethod
    def authenticate(cls, username, password):
        """Return the user matching `username`/`password`, else False.

        Looks the user up by username and verifies the bcrypt hash.
        Returns False both for an unknown username and for a wrong
        password (adapted from the 'Warbler' app).
        """
        user = cls.query.filter_by(username=username).first()
        if user:
            is_auth = bcrypt.check_password_hash(user.password, password)
            if is_auth:
                return user
        return False

    @classmethod
    def change_password(cls, username, password, new_password):
        """Re-hash and store `new_password` after verifying the old one.

        Commits the session on success.  Returns True on success, False
        when authentication fails.
        """
        user = cls.authenticate(username, password)
        if user:
            hashed_pwd = bcrypt.generate_password_hash(new_password).decode('UTF-8')
            user.password = hashed_pwd
            db.session.commit()
            return True
        return False
class Quiz(db.Model):
    """A quiz owned by a user; linked to questions through QuizQuestion."""

    __tablename__ = "quizzes"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    name = db.Column(db.String(50), nullable=False)
    description = db.Column(db.String(250), nullable=True)
    # Number of rounds; QuizQuestion.round values range over 1..rounds.
    rounds = db.Column(db.Integer, nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)

    # Association objects (QuizQuestion), not Question rows directly.
    questions = db.relationship("QuizQuestion", back_populates="quiz", cascade="all, delete")
class QuizQuestion(db.Model):
    """Mapping of a quiz to a question, tagged with the round it appears in."""

    __tablename__ = "quiz_questions"

    # Composite primary key: a question may appear at most once per quiz.
    quiz_id = db.Column(db.Integer, db.ForeignKey('quizzes.id'), primary_key=True)
    question_id = db.Column(db.Integer, db.ForeignKey('questions.id'), primary_key=True)
    round = db.Column(db.Integer, nullable=False)

    question = db.relationship("Question", back_populates="quizzes")
    quiz = db.relationship("Quiz", back_populates="questions")
class Question(db.Model):
    """A trivia question/answer pair owned by a user."""

    __tablename__ = "questions"

    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    question = db.Column(db.Text, nullable=False)
    answer = db.Column(db.Text, nullable=False)
    # Difficulty level; the forms and API mapping use the range 1-5.
    difficulty = db.Column(db.Integer, nullable=False)
    category = db.Column(db.Text, nullable=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)

    # Association objects (QuizQuestion) linking this question to quizzes.
    quizzes = db.relationship("QuizQuestion", back_populates="question", cascade="all, delete")
def connect_db(app):
    """Connect to database: bind the shared SQLAlchemy instance to *app*."""
    db.app = app
    db.init_app(app)
|
{"/test_quiz_views.py": ["/models.py", "/app.py"], "/tools.py": ["/models.py"], "/test_question_views.py": ["/models.py", "/app.py"], "/app.py": ["/tools.py", "/models.py", "/forms.py"]}
|
37,188
|
rbooker/capstone1
|
refs/heads/main
|
/test_quiz_views.py
|
"""Quiz View Tests"""
# run these tests like:
#
# FLASK_ENV=production python -m unittest test_quiz_views.py
import os
from unittest import TestCase
from models import db, connect_db, User, Quiz, Question, QuizQuestion
# Must be set BEFORE importing app, so app binds to the test database.
os.environ['DATABASE_URL'] = "postgresql:///trivia-test"
from app import app, CURR_USER_KEY
db.create_all()
# Tests post forms without CSRF tokens.
app.config['WTF_CSRF_ENABLED'] = False
class QuizViewTestCase(TestCase):
    """View tests for the quiz CRUD routes."""

    def setUp(self):
        """Create test client, add sample data."""
        db.drop_all()
        db.create_all()
        self.client = app.test_client()
        self.testuser = User.signup(username="testuser",
                                    password="testuser")
        # Pin a known id so session login can reference it deterministically.
        self.testuser_id = 6969
        self.testuser.id = self.testuser_id
        db.session.commit()

    def tearDown(self):
        """Roll back any open transaction after each test."""
        resp = super().tearDown()
        db.session.rollback()
        return resp

    def test_create_quiz(self):
        """POST /quizzes/create builds a quiz with rounds x qs_per_round questions."""
        with self.client as c:
            with c.session_transaction() as sess:
                # Simulate a logged-in user.
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.post("/quizzes/create", data={"name": "testquiz",
                                                   "description": "a test quiz",
                                                   "rounds": 5,
                                                   "qs_per_round": 5,
                                                   "round_one_diff": 1,
                                                   "round_two_diff": 2,
                                                   "round_three_diff": 3,
                                                   "round_four_diff": 4,
                                                   "round_five_diff": 5})
            # Redirects to the new quiz's show page on success.
            self.assertEqual(resp.status_code, 302)
            quiz = Quiz.query.one()
            self.assertEqual(quiz.name, "testquiz")
            self.assertEqual(quiz.description, "a test quiz")
            self.assertEqual(quiz.rounds, 5)
            # 5 rounds x 5 questions per round.
            self.assertEqual(len(quiz.questions), 25)

    def setup_quizzes(self):
        """Create one quiz with a single known question in round 1."""
        quiz = Quiz(name="testquiz",
                    description="a test quiz",
                    rounds=1,
                    user_id=self.testuser.id)
        db.session.add(quiz)
        db.session.commit()
        db.session.refresh(quiz)
        question = Question(question="What is the answer to life, the universe, and everything?",
                            answer="Forty-two",
                            difficulty=5,
                            user_id=self.testuser.id)
        db.session.add(question)
        db.session.commit()
        db.session.refresh(question)
        quiz_question = QuizQuestion(quiz_id=quiz.id,
                                     question_id=question.id,
                                     round=1)
        db.session.add(quiz_question)
        db.session.commit()

    def test_show_quizzes(self):
        """GET /quizzes/show lists the user's quizzes."""
        self.setup_quizzes()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.get("/quizzes/show")
            self.assertEqual(resp.status_code, 200)
            self.assertIn('testquiz', str(resp.data))
            self.assertIn('a test quiz', str(resp.data))

    def test_show_quiz(self):
        """GET /quizzes/show/<id> renders the quiz's questions and answers."""
        self.setup_quizzes()
        test_quiz = Quiz.query.filter(Quiz.name == "testquiz").one()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.get(f"/quizzes/show/{test_quiz.id}")
            self.assertEqual(resp.status_code, 200)
            self.assertIn('testquiz', str(resp.data))
            self.assertIn('What is the answer to life, the universe, and everything?', str(resp.data))
            self.assertIn('Forty-two', str(resp.data))
            self.assertIn('Difficulty:</strong> 5', str(resp.data))

    def test_edit_quiz_replace_question(self):
        """POST /quizzes/edit/<id> swaps checked questions for new same-difficulty ones."""
        self.setup_quizzes()
        test_quiz = Quiz.query.filter(Quiz.name == "testquiz").one()
        test_question = Question.query.filter(Question.answer == "Forty-two").one()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.post(f"/quizzes/edit/{test_quiz.id}", data={"checked_questions": f"{test_question.id}"})
            self.assertEqual(resp.status_code, 200)
            self.assertIn('testquiz', str(resp.data))
            # The question should be different, but of the same difficulty
            self.assertNotIn('What is the answer to life, the universe, and everything?', str(resp.data))
            self.assertNotIn('Forty-two', str(resp.data))
            self.assertIn('Difficulty:</strong> 5', str(resp.data))

    def test_edit_quiz_delete_question(self):
        """POST /quizzes/remove_questions/<id> removes the checked questions."""
        self.setup_quizzes()
        test_quiz = Quiz.query.filter(Quiz.name == "testquiz").one()
        test_question = Question.query.filter(Question.answer == "Forty-two").one()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.post(f"/quizzes/remove_questions/{test_quiz.id}", data={"checked_questions": f"{test_question.id}"}, follow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            self.assertIn('testquiz', str(resp.data))
            # The question should be gone
            self.assertNotIn('What is the answer to life, the universe, and everything?', str(resp.data))
            self.assertNotIn('Forty-two', str(resp.data))

    def test_delete_quiz(self):
        """POST /quizzes/delete/<id> removes the quiz entirely."""
        self.setup_quizzes()
        test_quiz = Quiz.query.filter(Quiz.name == "testquiz").one()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.post(f"/quizzes/delete/{test_quiz.id}", follow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            # Redirects to "show all quizzes page" - testquiz should be gone
            self.assertNotIn('testquiz', str(resp.data))
            # And, because it was the only quiz, "You have no saved quizzes" should be displayed
            self.assertIn('You have no saved quizzes', str(resp.data))
|
{"/test_quiz_views.py": ["/models.py", "/app.py"], "/tools.py": ["/models.py"], "/test_question_views.py": ["/models.py", "/app.py"], "/app.py": ["/tools.py", "/models.py", "/forms.py"]}
|
37,189
|
rbooker/capstone1
|
refs/heads/main
|
/tools.py
|
import requests
from math import ceil
from models import Question
def get_quiz_data(difficulty, total_questions, user_id):
    """Fetch trivia questions from the jService API.

    difficulty: list of acceptable difficulty levels
    total_questions: number of Question objects to return
    user_id: owner id stamped on each created Question (needed for instantiation)

    Requests random batches (5x oversampled) and keeps only questions whose
    point value maps to one of the requested difficulties (value/200, rounded
    up). NOTE(review): loops indefinitely if the API never yields matching
    questions — confirm this is acceptable.
    """
    collected = []
    while len(collected) < total_questions:
        resp = requests.get(f"http://jservice.io/api/random?count={total_questions * 5}")
        for item in resp.json():
            if item["value"] is None:
                continue
            level = ceil(int(item["value"]) / 200)
            if level in difficulty:
                collected.append(Question(question=item["question"],
                                          answer=item["answer"],
                                          category=item["category"]["title"],
                                          difficulty=level,
                                          user_id=user_id))
                if len(collected) == total_questions:
                    break
    return collected
|
{"/test_quiz_views.py": ["/models.py", "/app.py"], "/tools.py": ["/models.py"], "/test_question_views.py": ["/models.py", "/app.py"], "/app.py": ["/tools.py", "/models.py", "/forms.py"]}
|
37,190
|
rbooker/capstone1
|
refs/heads/main
|
/test_question_views.py
|
"""Quiz View Tests"""
# run these tests like:
#
# FLASK_ENV=production python -m unittest test_question_views.py
import os
from unittest import TestCase
from models import db, connect_db, User, Quiz, Question, QuizQuestion
# Must be set BEFORE importing app, so app binds to the test database.
os.environ['DATABASE_URL'] = "postgresql:///trivia-test"
from app import app, CURR_USER_KEY
db.create_all()
# Tests post forms without CSRF tokens.
app.config['WTF_CSRF_ENABLED'] = False
class QuestionViewTestCase(TestCase):
    """View tests for the question CRUD routes."""

    def setUp(self):
        """Create test client, add sample data."""
        db.drop_all()
        db.create_all()
        self.client = app.test_client()
        self.testuser = User.signup(username="testuser",
                                    password="testuser")
        # Pin a known id so session login can reference it deterministically.
        self.testuser_id = 6969
        self.testuser.id = self.testuser_id
        db.session.commit()

    def tearDown(self):
        """Roll back any open transaction after each test."""
        resp = super().tearDown()
        db.session.rollback()
        return resp

    def test_create_question(self):
        """Test the create question route"""
        with self.client as c:
            with c.session_transaction() as sess:
                # Simulate a logged-in user.
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.post("/questions/create", data={"question": "What is the answer to life, the universe, and everything?",
                                                     "answer": "Forty-two",
                                                     "difficulty": 5},
                          follow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            # Assert the question/answer pair is there and that the difficulty is correct
            self.assertIn('What is the answer to life, the universe, and everything?', str(resp.data))
            self.assertIn('Forty-two', str(resp.data))
            self.assertIn('<strong>Difficulty:</strong> 5', str(resp.data))

    def setup_quiz_and_question(self):
        """Set up a quiz and question for subsequent tests"""
        quiz = Quiz(name="testquiz",
                    description="a test quiz",
                    rounds=1,
                    user_id=self.testuser.id)
        db.session.add(quiz)
        db.session.commit()
        db.session.refresh(quiz)
        question = Question(question="What is the answer to life, the universe, and everything?",
                            answer="Forty-two",
                            difficulty=5,
                            user_id=self.testuser.id)
        db.session.add(question)
        db.session.commit()
        db.session.refresh(question)
        quiz_question = QuizQuestion(quiz_id=quiz.id,
                                     question_id=question.id,
                                     round=1)
        db.session.add(quiz_question)
        db.session.commit()

    def test_edit_question(self):
        """Test edit question route"""
        self.setup_quiz_and_question()
        test_question = Question.query.filter(Question.answer == "Forty-two").one()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.post(f"/questions/edit/{test_question.id}", data={"question": "What is the question of life, the universe, and everything?",
                                                                       "answer": "Six times nine",
                                                                       "difficulty": 4}, follow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            # Assert that the old question/answer/difficulty are not there
            self.assertNotIn('What is the answer to life, the universe, and everything?', str(resp.data))
            self.assertNotIn('Forty-two', str(resp.data))
            self.assertNotIn('<strong>Difficulty:</strong> 5', str(resp.data))
            # Assert the question/answer pair is there and that the difficulty is correct
            self.assertIn('What is the question of life, the universe, and everything?', str(resp.data))
            self.assertIn('Six times nine', str(resp.data))
            self.assertIn('<strong>Difficulty:</strong> 4', str(resp.data))

    def test_show_questions(self):
        """Test show all questions"""
        self.setup_quiz_and_question()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.get("/questions/show")
            self.assertEqual(resp.status_code, 200)
            # Assert the question/answer pair is there and that the difficulty is correct
            self.assertIn('What is the answer to life, the universe, and everything?', str(resp.data))
            self.assertIn('Forty-two', str(resp.data))
            self.assertIn('<strong>Difficulty:</strong> 5', str(resp.data))

    def test_show_question(self):
        """Test show question - The GET route for questions/show/<int:question_id>"""
        self.setup_quiz_and_question()
        test_question = Question.query.filter(Question.answer == "Forty-two").one()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.get(f"/questions/show/{test_question.id}")
            self.assertEqual(resp.status_code, 200)
            # Assert the question/answer pair is there and that the difficulty is correct
            self.assertIn('What is the answer to life, the universe, and everything?', str(resp.data))
            self.assertIn('Forty-two', str(resp.data))
            self.assertIn('<strong>Difficulty:</strong> 5', str(resp.data))

    def test_add_question_to_quiz(self):
        """Test adding question to quiz - The POST route for questions/show/<int:question_id>"""
        self.setup_quiz_and_question()
        test_quiz = Quiz.query.filter(Quiz.name == "testquiz").one()
        test_quiz_id = test_quiz.id
        # Create new question to add to quiz
        new_test_question = Question(question="What is the question of life, the universe, and everything?",
                                     answer="Six times nine",
                                     difficulty=5,
                                     user_id=self.testuser.id)
        db.session.add(new_test_question)
        db.session.commit()
        db.session.refresh(new_test_question)
        new_test_question_id = new_test_question.id
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.post(f"/questions/show/{new_test_question_id}", data={"quiz": test_quiz_id,
                                                                           "round": 1},
                          follow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            # Route redirects to page displaying quiz the question was added to - assert correct quiz
            self.assertIn('testquiz', str(resp.data))
            # Assert the question/answer pair is there and that the difficulty is correct
            self.assertIn('What is the question of life, the universe, and everything?', str(resp.data))
            self.assertIn('Six times nine', str(resp.data))
            self.assertIn('<strong>Difficulty:</strong> 5', str(resp.data))

    def test_delete_question(self):
        """POST /questions/delete/<id> removes the question."""
        self.setup_quiz_and_question()
        test_question = Question.query.filter(Question.answer == "Forty-two").one()
        with self.client as c:
            with c.session_transaction() as sess:
                sess[CURR_USER_KEY] = self.testuser.id
            resp = c.post(f"/questions/delete/{test_question.id}", follow_redirects=True)
            self.assertEqual(resp.status_code, 200)
            # Redirects to "show all questions page" - test_question should be gone
            self.assertNotIn('What is the answer to life, the universe, and everything?', str(resp.data))
            self.assertNotIn('Forty-two', str(resp.data))
            self.assertNotIn('<strong>Difficulty:</strong> 5', str(resp.data))
|
{"/test_quiz_views.py": ["/models.py", "/app.py"], "/tools.py": ["/models.py"], "/test_question_views.py": ["/models.py", "/app.py"], "/app.py": ["/tools.py", "/models.py", "/forms.py"]}
|
37,191
|
rbooker/capstone1
|
refs/heads/main
|
/forms.py
|
from wtforms import SelectField, StringField, SelectMultipleField, RadioField, PasswordField, widgets
from flask_wtf import FlaskForm
from wtforms.validators import InputRequired, DataRequired, Length, Optional
class MultiCheckboxField(SelectMultipleField):
    """
    A multiple-select, except displays a list of checkboxes.

    Iterating the field will produce subfields, allowing custom rendering of
    the enclosed checkbox fields.
    Taken from the WTForms website.
    """
    # Render options as an unlabeled list of checkbox inputs.
    widget = widgets.ListWidget(prefix_label=False)
    option_widget = widgets.CheckboxInput()
class CreateQuizForm(FlaskForm):
    """Form for creating quizzes.

    Each round gets its own multi-checkbox of acceptable difficulties (1-5);
    create_quiz() reads only the first `rounds` of the five difficulty fields.
    """
    name = StringField("Quiz Name", validators=[InputRequired(message="Quiz Name can't be blank"), Length(max=50, message="Quiz Name can't exceed 50 characters")])
    description = StringField("Description", validators=[Optional(), Length(max=250, message="Quiz Description can't exceed 250 characters")])
    rounds = SelectField("Number of Rounds", choices=[(1,1),(2,2),(3,3),(4,4),(5,5)], coerce=int)
    qs_per_round = SelectField("Questions per Round", choices=[(5,5),(10,10),(15,15),(20,20)], coerce=int)
    round_one_diff = MultiCheckboxField("Round One Difficulty", choices=[(1,1),(2,2),(3,3),(4,4),(5,5)], coerce=int, validators=[DataRequired(message="Select at least one question difficulty level")])
    round_two_diff = MultiCheckboxField("Round Two Difficulty", choices=[(1,1),(2,2),(3,3),(4,4),(5,5)], coerce=int, validators=[DataRequired(message="Select at least one question difficulty level")])
    round_three_diff = MultiCheckboxField("Round Three Difficulty", choices=[(1,1),(2,2),(3,3),(4,4),(5,5)], coerce=int, validators=[DataRequired(message="Select at least one question difficulty level")])
    round_four_diff = MultiCheckboxField("Round Four Difficulty", choices=[(1,1),(2,2),(3,3),(4,4),(5,5)], coerce=int, validators=[DataRequired(message="Select at least one question difficulty level")])
    round_five_diff = MultiCheckboxField("Round Five Difficulty", choices=[(1,1),(2,2),(3,3),(4,4),(5,5)], coerce=int, validators=[DataRequired(message="Select at least one question difficulty level")])
class AddQuestionToQuiz(FlaskForm):
    """Form for adding a question to a quiz.

    `quiz` choices are populated per-request in the view; `round` choices are
    generated client-side, hence validate_choice=False.
    """
    quiz = SelectField("Add Question To Quiz:", coerce=int)
    round = SelectField('Add Question To Round:', coerce=int, validate_choice=False)
class EditQuestion(FlaskForm):
    """Form for editing a question."""
    question = StringField("Question", validators=[InputRequired(message="Question can't be blank")])
    answer = StringField("Answer", validators=[InputRequired(message="Answer can't be blank")])
    difficulty = RadioField("Difficulty", choices=[(1,1),(2,2),(3,3),(4,4),(5,5)], coerce=int, validators=[DataRequired(message="Select a difficulty")])
class AddQuestion(FlaskForm):
    """Form for adding a question (same fields as EditQuestion)."""
    question = StringField("Question", validators=[InputRequired(message="Question can't be blank")])
    answer = StringField("Answer", validators=[InputRequired(message="Answer can't be blank")])
    difficulty = RadioField("Difficulty", choices=[(1,1),(2,2),(3,3),(4,4),(5,5)], coerce=int, validators=[DataRequired(message="Select a difficulty")])
class NewUserForm(FlaskForm):
    """Form for adding a user."""
    username = StringField('Username', validators=[DataRequired(message="Enter a name")])
    password = PasswordField('Password', validators=[Length(min=6, message="Password must be at least six characters long")])
class LogInForm(FlaskForm):
    """Form for logging in a user."""
    username = StringField('Username', validators=[DataRequired(message="Enter a name")])
    password = PasswordField('Password', validators=[Length(min=6, message="Password must be at least six characters long")])
class ChangeUsernameForm(FlaskForm):
    """Form for changing username (requires the current password)."""
    username = StringField('New Username', validators=[DataRequired(message="Enter a name")])
    password = PasswordField('Password', validators=[Length(min=6, message="Password must be at least six characters long")])
class ChangePasswordForm(FlaskForm):
    """Form for changing password (requires the current password)."""
    new_password = PasswordField('New Password', validators=[Length(min=6, message="Password must be at least six characters long")])
    password = PasswordField('Current Password', validators=[Length(min=6, message="Password must be at least six characters long")])
|
{"/test_quiz_views.py": ["/models.py", "/app.py"], "/tools.py": ["/models.py"], "/test_question_views.py": ["/models.py", "/app.py"], "/app.py": ["/tools.py", "/models.py", "/forms.py"]}
|
37,192
|
rbooker/capstone1
|
refs/heads/main
|
/app.py
|
import os
from flask import Flask, render_template, request, flash, redirect, session, g
from flask_debugtoolbar import DebugToolbarExtension
from tools import get_quiz_data
from models import db, connect_db, User, Quiz, QuizQuestion, Question
from forms import CreateQuizForm, AddQuestionToQuiz, EditQuestion, AddQuestion, NewUserForm, LogInForm, ChangeUsernameForm, ChangePasswordForm
from sqlalchemy.exc import IntegrityError
import re
# Session key under which the logged-in user's id is stored.
CURR_USER_KEY = "curr_user"

app = Flask(__name__)

# Heroku-style DATABASE_URL may use the deprecated "postgres://" scheme;
# SQLAlchemy only accepts "postgresql://", so rewrite the prefix once.
uri = (os.environ.get('DATABASE_URL', 'postgresql:///trivia'))
if uri.startswith("postgres://"):
    uri = uri.replace("postgres://", "postgresql://", 1)

app.config['SQLALCHEMY_DATABASE_URI'] = uri
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_ECHO'] = True  # logs all SQL; development setting

connect_db(app)
db.create_all()

app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'shh')
debug = DebugToolbarExtension(app)
######################################################
#Login/Logout routes
######################################################
@app.before_request
def add_user_to_g():
    """Before each request, attach the logged-in user (or None) to Flask's `g`."""
    user_id = session.get(CURR_USER_KEY)
    g.user = User.query.get(user_id) if user_id is not None else None
def do_login(user):
    """Log in user: store the user's id in the session."""
    session[CURR_USER_KEY] = user.id
def do_logout():
    """Log out the current user by clearing the session key, if present."""
    session.pop(CURR_USER_KEY, None)
@app.route('/signup', methods=["GET", "POST"])
def signup():
    """Handle user signup.

    GET: render the signup form.
    POST: create the user and log them in. On a duplicate username the
    failed transaction is rolled back and the form is re-rendered.
    """
    form = NewUserForm()
    if form.validate_on_submit():
        try:
            user = User.signup(
                username=form.username.data,
                password=form.password.data
            )
            db.session.commit()
        except IntegrityError:
            # Bug fix: without a rollback the session stays in a failed
            # state and every subsequent flush on it would error.
            db.session.rollback()
            flash("Username already taken", 'danger')
            return render_template('signup.html', form=form)
        do_login(user)
        return redirect("/")
    else:
        return render_template('signup.html', form=form)
@app.route('/login', methods=["GET", "POST"])
def login():
    """Handle user login: authenticate and redirect home, or re-render the form."""
    form = LogInForm()
    if not form.validate_on_submit():
        return render_template('login.html', form=form)
    user = User.authenticate(form.username.data, form.password.data)
    if not user:
        flash("Invalid credentials.", 'danger')
        return render_template('login.html', form=form)
    do_login(user)
    return redirect("/")
@app.route('/logout')
def logout():
    """Handle logout of user: clear the session and redirect to /login."""
    do_logout()
    flash("You have been successfully logged out. Goodbye!", "success")
    return redirect("/login")
@app.route('/deleteprofile')
def delete_profile_page():
    """Show the delete-profile confirmation page (contains warnings)."""
    if g.user:
        return render_template("delete_profile.html")
    flash("Access unauthorized.", "danger")
    return redirect("/")
@app.route('/delete', methods=["POST"])
def delete_user():
    """Delete the current user's account (cascades to quizzes and questions)."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    account = g.user
    do_logout()
    db.session.delete(account)
    db.session.commit()
    return redirect("/signup")
######################################################
#Quiz routes
######################################################
@app.route("/quizzes/create", methods=["GET", "POST"])
def create_quiz():
    """Create a new quiz.

    GET: render the creation form.
    POST: create the Quiz row, then fetch `qs_per_round` questions per round
    from the jService API at that round's chosen difficulties and link them
    via QuizQuestion rows. Redirects to the new quiz's show page.
    """
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    user = g.user
    form = CreateQuizForm()
    if form.validate_on_submit():
        # Get form data
        user_id = user.id
        name = form.name.data
        description = form.description.data
        rounds = form.rounds.data
        qs_per_round = form.qs_per_round.data
        # Index i-1 holds the difficulty list for round i; only the first
        # `rounds` entries are consumed below.
        difficulty_levels = [form.round_one_diff.data,
                             form.round_two_diff.data,
                             form.round_three_diff.data,
                             form.round_four_diff.data,
                             form.round_five_diff.data]
        # Create new quiz and add to db
        quiz = Quiz(name=name,
                    description=description,
                    rounds=rounds,
                    user_id=user_id)
        db.session.add(quiz)
        db.session.commit()
        db.session.refresh(quiz)  # ensure quiz.id is populated
        quiz_id = quiz.id
        # Get questions and add to db
        for round_no in range(1, quiz.rounds + 1):
            quiz_data = get_quiz_data(difficulty_levels[round_no - 1], qs_per_round, user_id)
            db.session.add_all(quiz_data)
            db.session.commit()
            # Create associations between quiz and questions
            for quiz_datum in quiz_data:
                db.session.refresh(quiz_datum)  # pick up the generated id
                quiz_question = QuizQuestion(quiz_id=quiz_id,
                                             question_id=quiz_datum.id,
                                             round=round_no)
                db.session.add(quiz_question)
                db.session.commit()
        flash("Quiz successfully created!", "success")
        return redirect(f"/quizzes/show/{quiz_id}")
    else:
        return render_template("create_quiz.html", form=form)
@app.route("/quizzes/show")
def show_quizzes():
    """List every quiz belonging to the logged-in user, ordered by id."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    owned = Quiz.query.filter(Quiz.user_id == g.user.id).order_by(Quiz.id).all()
    return render_template("show_all_quizzes.html", quizzes=owned)
@app.route("/quizzes/show/<int:quiz_id>")
def show_quiz(quiz_id):
    """Display one quiz, with its questions grouped into one list per round."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    quiz = Quiz.query.get_or_404(quiz_id)
    # Bucket the quiz's questions by round number (1-based).
    grouped = [
        [link.question for link in quiz.questions if link.round == round_no]
        for round_no in range(1, quiz.rounds + 1)
    ]
    return render_template("show_quiz.html", quiz_questions=grouped, quiz=quiz)
@app.route("/quizzes/edit/<int:quiz_id>", methods=["GET", "POST"])
def edit_quiz(quiz_id):
    """Edit the quiz with the given id.

    GET: render the quiz's questions grouped by round.
    POST: for each checked question, fetch a replacement of the same
    difficulty from the API, link it into the same round, and unlink the
    old question (the old Question row itself is kept).
    """
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    if request.method == 'POST':
        q_ids = request.form.getlist("checked_questions")
        quiz = Quiz.query.get_or_404(quiz_id)
        replacement_question_ids = []
        for q_id in q_ids:
            q_to_replace = Question.query.get_or_404(int(q_id))
            # Find the link row joining this question to THIS quiz.
            for quiz_question in q_to_replace.quizzes:
                qq = quiz_question.quiz
                if qq.id == quiz_id:
                    quiz_question_to_replace = quiz_question
            # get replacement question - will have same difficulty and round as old question
            replacement_question_array = get_quiz_data([q_to_replace.difficulty], 1, q_to_replace.user_id)
            replacement_question = replacement_question_array[0]
            # add new question to db
            db.session.add(replacement_question)
            db.session.commit()
            db.session.refresh(replacement_question)
            # Collected so the template can highlight the new questions.
            replacement_question_ids.append(replacement_question.id)
            # add new question to quiz
            new_quiz_question = QuizQuestion(quiz_id=quiz_id,
                                             question_id=replacement_question.id,
                                             round=quiz_question_to_replace.round)
            db.session.add(new_quiz_question)
            db.session.commit()
            # remove question from quiz - don't delete it, though
            db.session.delete(quiz_question_to_replace)
            db.session.commit()
        # Regroup questions by round for rendering.
        quiz_questions = []
        for round_no in range(1, quiz.rounds + 1):
            round = []
            for quiz_question in quiz.questions:
                if quiz_question.round == round_no:
                    round.append(quiz_question.question)
            quiz_questions.append(round)
        flash("Questions successfully replaced. New questions highlighted in yellow.", "success")
        return render_template("edit_quiz.html", quiz_questions=quiz_questions, quiz=quiz, rq_ids=replacement_question_ids)
    else:
        quiz = Quiz.query.get_or_404(quiz_id)
        quiz_questions = []
        for round_no in range(1, quiz.rounds + 1):
            round = []
            for quiz_question in quiz.questions:
                if quiz_question.round == round_no:
                    round.append(quiz_question.question)
            quiz_questions.append(round)
        return render_template("edit_quiz.html", quiz_questions=quiz_questions, quiz=quiz, rq_ids=None)
@app.route("/quizzes/remove_questions/<int:quiz_id>", methods=["POST"])
def remove_question(quiz_id):
    """Remove selected questions from the quiz with the given id.

    Only the QuizQuestion link rows are deleted; the Question rows are kept.
    """
    # Bug fix: this mutating route was missing the login guard that every
    # other route performs, allowing anonymous POSTs to edit any quiz.
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    q_ids = request.form.getlist("checked_questions")
    quiz = Quiz.query.get_or_404(quiz_id)
    for q_id in q_ids:
        q_to_remove = Question.query.get_or_404(int(q_id))
        # Find the link row joining this question to THIS quiz.
        for quiz_question in q_to_remove.quizzes:
            qq = quiz_question.quiz
            if qq.id == quiz_id:
                quiz_question_to_remove = quiz_question
        # remove question from quiz - don't delete it, though
        db.session.delete(quiz_question_to_remove)
        db.session.commit()
    flash("Questions successfully removed", "success")
    return redirect(f"/quizzes/edit/{quiz_id}")
@app.route("/quizzes/delete/<int:quiz_id>", methods=["POST"])
def delete_quiz(quiz_id):
    """Delete the quiz with the given id and redirect to the quiz list."""
    # Bug fix: add the login guard used by every other mutating route;
    # previously anonymous POSTs could delete any quiz.
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    quiz_to_delete = Quiz.query.get_or_404(quiz_id)
    db.session.delete(quiz_to_delete)
    db.session.commit()
    flash("Quiz deleted.", "success")
    return redirect("/quizzes/show")
######################################################
#Question routes
######################################################
@app.route("/questions/create", methods=["GET", "POST"])
def create_question():
    """Create a user-authored question and redirect to its show page."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    form = AddQuestion()
    if not form.validate_on_submit():
        return render_template("create_question.html", form=form)
    created = Question(question=form.question.data,
                       answer=form.answer.data,
                       difficulty=form.difficulty.data,
                       user_id=g.user.id)
    db.session.add(created)
    db.session.commit()
    db.session.refresh(created)
    flash("New question successfully created.", 'success')
    return redirect(f"/questions/show/{created.id}")
@app.route("/questions/edit/<int:question_id>", methods=["GET", "POST"])
def edit_question(question_id):
    """Edit an existing question's text, answer, and difficulty."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    target = Question.query.get_or_404(question_id)
    form = EditQuestion()
    if form.validate_on_submit():
        target.question = form.question.data
        target.answer = form.answer.data
        target.difficulty = form.difficulty.data
        db.session.commit()
        flash("Question successfully edited.", 'success')
        return redirect(f"/questions/show/{question_id}")
    return render_template("edit_question.html", question=target, form=form)
@app.route("/questions/show")
def show_questions():
    """List every question belonging to the logged-in user, ordered by id."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    owned = Question.query.filter(Question.user_id == g.user.id).order_by(Question.id).all()
    return render_template("show_all_questions.html", questions=owned)
@app.route("/questions/show/<int:question_id>", methods=["GET", "POST"])
def show_question(question_id):
    """Show question - Also allow it to be added to a quiz.

    GET: render the question with a form offering only the quizzes it is
    not already on. POST: link the question into the chosen quiz/round and
    redirect to that quiz's page.
    """
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    question = Question.query.get_or_404(question_id)
    form = AddQuestionToQuiz()
    # Get all the quizzes that the question is on
    quizzes_with_question = [quiz_question.quiz.id for quiz_question in question.quizzes]
    # Then use this to get all the quizzes it isn't on
    quiz_choice_data = db.session.query(Quiz.id, Quiz.name, Quiz.rounds).filter(Quiz.id.notin_(quizzes_with_question)).all()
    # These are the options for the quiz select in the form
    quiz_choices = [(quiz_datum.id, quiz_datum.name) for quiz_datum in quiz_choice_data]
    # These help dynamically generate the rounds in the quiz selected that the question can be added to
    quiz_rounds = [(quiz_datum.id, quiz_datum.rounds) for quiz_datum in quiz_choice_data]
    # Choices must be assigned before validate_on_submit so the posted value
    # is accepted.
    form.quiz.choices = quiz_choices
    if form.validate_on_submit():
        quiz_id = form.quiz.data
        round = form.round.data
        # Add the question to the selected quiz and round and show the amended quiz
        new_quiz_question = QuizQuestion(quiz_id=quiz_id, question_id=question_id, round=round)
        db.session.add(new_quiz_question)
        db.session.commit()
        # Get quiz for sake of showing its name in flash message
        quiz = Quiz.query.get_or_404(quiz_id)
        flash(f"Question ID:{question.id} added to Round {round} of Quiz {quiz.name}", "success")
        return redirect(f"/quizzes/show/{quiz.id}")
    return render_template("show_question.html", question=question, form=form, quiz_rounds=quiz_rounds)
@app.route("/questions/delete/<int:question_id>", methods=["POST"])
def delete_question(question_id):
    """Delete a question and redirect back to the question list."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    doomed = Question.query.get_or_404(question_id)
    db.session.delete(doomed)
    db.session.commit()
    flash("Question deleted.", "success")
    return redirect("/questions/show")
######################################################
#Change username/password routes
######################################################
@app.route("/change_username", methods=["GET", "POST"])
def change_username():
    """Change username.

    Requires the current password. On a duplicate username the failed
    transaction is rolled back so the session remains usable.
    """
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    user = g.user
    user_name = user.username  # shown in the template before any change
    form = ChangeUsernameForm()
    if form.validate_on_submit():
        if User.authenticate(user.username, form.password.data):
            try:
                user.username = form.username.data
                db.session.commit()
            except IntegrityError:
                # Bug fix: without a rollback the session stays in a failed
                # state and later queries/commits would error.
                db.session.rollback()
                flash("Username already taken.", 'danger')
                return render_template('change_username.html', form=form, user_name=user_name)
            flash("Username successfully changed.", 'success')
            return redirect("/")
        flash("Password incorrect. Please try again.", 'danger')
    return render_template('change_username.html', form=form, user_name=user_name)
@app.route("/change_password", methods=["GET", "POST"])
def change_password():
    """Let the logged-in user update their password."""
    # Guard: anonymous visitors are bounced back to the landing page.
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/")
    current_user = g.user
    form = ChangePasswordForm()
    if form.validate_on_submit():
        changed = User.change_password(current_user.username,
                                       form.password.data,
                                       form.new_password.data)
        if changed:
            flash("Password successfully changed", 'success')
            return redirect("/")
        flash("Password incorrect. Please try again.", 'danger')
    return render_template('change_password.html', form=form)
######################################################
#Homepage/About/FAQ routes
######################################################
@app.route('/')
def homepage():
    """Render the landing page; logged-in users get the full home view."""
    if not g.user:
        return render_template('home-anon.html')
    return render_template('home.html', username=g.user.username)
@app.route('/about')
def about_page():
    """Render the 'about' page (login required)."""
    if g.user:
        return render_template('about.html')
    flash("Access unauthorized.", "danger")
    return redirect("/")
@app.route('/faq')
def faq_page():
    """Render the FAQ page (login required)."""
    if g.user:
        return render_template('faq.html')
    flash("Access unauthorized.", "danger")
    return redirect("/")
##############################################################################
# Turn off all caching in Flask
# (useful for dev; in production, this kind of stuff is typically
# handled elsewhere)
#
# https://stackoverflow.com/questions/34066804/disabling-caching-in-flask
@app.after_request
def add_header(response):
    """Disable client/proxy caching on every response.

    The handler receives the outgoing response object (not the request)
    and must return it for Flask to send.
    """
    # Set Cache-Control once: the previous version assigned it twice, so
    # 'public, max-age=0' silently clobbered 'no-cache, no-store,
    # must-revalidate'. One combined value keeps all directives.
    response.headers["Cache-Control"] = (
        "no-cache, no-store, must-revalidate, public, max-age=0")
    response.headers["Pragma"] = "no-cache"  # HTTP/1.0 caches
    response.headers["Expires"] = "0"        # legacy proxies
    return response
|
{"/test_quiz_views.py": ["/models.py", "/app.py"], "/tools.py": ["/models.py"], "/test_question_views.py": ["/models.py", "/app.py"], "/app.py": ["/tools.py", "/models.py", "/forms.py"]}
|
37,196
|
chris8447/talos
|
refs/heads/master
|
/talos/scan/Scan.py
|
from collections import OrderedDict
from .scan_prepare import scan_prepare
from .scan_run import scan_run
class Scan:
    """Hyperparameter scanning and optimization

    USE: ta.Scan(x=x, y=y, params=params_dict, model=model)

    Takes in a Keras model, and a dictionary with the parameter
    boundaries for the experiment.

    p = {
        'epochs' : [50, 100, 200],
        'activation' : ['relu'],
        'dropout': (0, 0.1, 5)
    }

    Accepted input formats are [1] single value in a list, [0.1, 0.2]
    multiple values in a list, and (0, 0.1, 5) a range of 5 values
    from 0 to 0.1.

    Here is an example of the input model:

    def model():
        # any Keras model
        return out, model

    You must replace the parameters in the model with references to
    the dictionary, for example:

    model.fit(epochs=params['epochs'])

    To learn more, start from the examples and documentation
    available here: https://github.com/autonomio/talos

    PARAMETERS
    ----------
    x : ndarray
        1d or 2d array consisting of the training data. `x` should have the
        shape (m, n), where m is the number of training examples and n is the
        number of features. Extra dimensions can be added to account for the
        channels entry in convolutional neural networks.
    y : ndarray
        The labels corresponding to the training data. `y` should have the
        shape (m, c) where c is the number of classes. A binary classification
        problem will have c=1.
    params : python dictionary
        Lists all permutations of hyperparameters, a subset of which will be
        selected at random for training and evaluation.
    model : keras model
        Any Keras model with relevant declarations like params['first_neuron']
    dataset_name : str
        References the name of the experiment. The dataset_name and
        experiment_no will be concatenated to produce the file name for the
        results saved in the local directory.
    experiment_no : str
        Indexes the user's choice of experiment number.
    experiment_name : str
        Optional explicit name for the experiment.
    x_val : ndarray
        User specified cross-validation data. (Default is None).
    y_val : ndarray
        User specified cross-validation labels. (Default is None).
    val_split : float, optional
        The proportion of the input `x` which is set aside as the
        validation data. (Default is 0.3).
    shuffle : bool, optional
        If True, shuffle the data in x and y before splitting into the train
        and cross-validation datasets. (Default is True).
    random_method : uniform, stratified, lhs, lhs_sudoku
        Determines the way in which the grid_downsample is applied. The
        default setting is 'uniform_mersenne'.
    seed : int
        Sets numpy random seed.
    search_method : {None, 'random', 'linear', 'reverse'}
        Determines the random sampling of the dictionary. `random` picks one
        hyperparameter point at random and removes it from the list, then
        samples again. `linear` starts from the start of the grid and moves
        forward, and `reverse` starts at the end of the grid and moves
        backwards.
    time_limit : None or str
        Allows setting a time when the experiment will be completed. Use the
        format "%Y-%m-%d %H:%M" here.
    permutation_filter : lambda function
        Use it to filter permutations based on previous knowledge.
        USE: permutation_filter=lambda p: p['batch_size'] < 150
        This example removes any permutation where batch_size is below 150
    reduction_method : {None, 'correlation'}
        Method for honing in on the optimal hyperparameter subspace. (Default
        is None).
    reduction_interval : int
        The number of reduction method rounds that will be performed. (Default
        is 50).
    reduction_window : int
        The number of rounds of the reduction method before observing the
        results. (Default is 20).
    grid_downsample : float
        The fraction of `params` that will be tested (Default is 1.0).
    round_limit : int
        Limits the number of rounds (permutations) in the experiment.
    reduction_metric : {'val_acc'}
        Metric used to tune the reductions.
    last_epoch_value : bool
        Set to True if the last epoch metric values are logged as opposed
        to the default which is peak epoch values for each round.
    disable_progress_bar : bool
        Disable TQDM live progress bar.
    print_params : bool
        Print params for each round on screen (useful when using TrainingLog
        callback for visualization)
    debug : bool
        Implements debugging feedback. (Default is False).

    """

    def __init__(self, x, y, params, model,
                 dataset_name=None,
                 experiment_no=None,
                 experiment_name=None,
                 x_val=None,
                 y_val=None,
                 val_split=.3,
                 shuffle=True,
                 round_limit=None,
                 time_limit=None,
                 grid_downsample=1.0,
                 random_method='uniform_mersenne',
                 seed=None,
                 search_method='random',
                 permutation_filter=None,
                 reduction_method=None,
                 reduction_interval=50,
                 reduction_window=20,
                 reduction_threshold=0.2,
                 reduction_metric='val_acc',
                 reduce_loss=False,
                 last_epoch_value=False,
                 clear_tf_session=True,
                 disable_progress_bar=False,
                 print_params=False,
                 debug=False):

        # NOTE: these need to follow the order from __init__
        # and all parameters need to be included here and only here.
        self.x = x
        self.y = y
        self.params = OrderedDict(params)
        self.model = model
        self.dataset_name = dataset_name
        self.experiment_no = experiment_no
        self.experiment_name = experiment_name
        self.x_val = x_val
        self.y_val = y_val
        self.val_split = val_split
        self.shuffle = shuffle
        self.random_method = random_method
        self.search_method = search_method
        self.round_limit = round_limit
        self.time_limit = time_limit
        self.permutation_filter = permutation_filter
        self.reduction_method = reduction_method
        self.reduction_interval = reduction_interval
        self.reduction_window = reduction_window
        self.grid_downsample = grid_downsample
        self.reduction_threshold = reduction_threshold
        self.reduction_metric = reduction_metric
        self.reduce_loss = reduce_loss
        self.debug = debug
        self.seed = seed
        self.clear_tf_session = clear_tf_session
        self.disable_progress_bar = disable_progress_bar
        self.last_epoch_value = last_epoch_value
        self.print_params = print_params
        # input parameters section ends

        # runtime() returns None; the experiment runs as a side effect
        # of construction and _null is just a throwaway binding.
        self._null = self.runtime()

    def runtime(self):
        # scan_prepare() and scan_run() mutate and return the Scan object;
        # rebinding the local name keeps that chaining explicit.
        self = scan_prepare(self)
        self = scan_run(self)
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,197
|
chris8447/talos
|
refs/heads/master
|
/test/core_tests/test_templates.py
|
def test_templates():
    """Smoke-test the bundled template datasets, models, params and pipelines."""

    import talos as ta

    # Run a tiny 2-round Scan over each bundled dataset/model/params trio,
    # trimming the data to 50 rows to keep the test fast.
    for name in ('titanic', 'iris', 'cervical_cancer', 'breast_cancer'):
        x, y = getattr(ta.templates.datasets, name)()
        x = x[:50]
        y = y[:50]
        model = getattr(ta.templates.models, name)
        p = getattr(ta.templates.params, name)()
        ta.Scan(x, y, p, model, round_limit=2)

    # icu_mortality has no model/params template; just exercise the loader.
    x, y = ta.templates.datasets.icu_mortality(50)

    # Exercise each pipeline once, each with a distinct random method.
    ta.templates.pipelines.breast_cancer(random_method='quantum')
    ta.templates.pipelines.cervical_cancer(random_method='sobol')
    ta.templates.pipelines.iris(random_method='uniform_crypto')
    ta.templates.pipelines.titanic(random_method='korobov_matrix')
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,198
|
chris8447/talos
|
refs/heads/master
|
/test_script.py
|
#!/usr/bin/env python
import time
import talos as ta
from test.core_tests.test_scan_object import test_scan_object
from test.core_tests.test_reporting_object import test_reporting_object
from test.core_tests.test_random_methods import test_random_methods
from test.core_tests.test_params_object import test_params_object
from test.core_tests.test_auto_scan import test_auto_scan
from test.core_tests.test_templates import test_templates
from talos.utils.generator import generator
from talos.utils.gpu_utils import force_cpu
if __name__ == '__main__':
    '''NOTE: test/core_tests/test_scan.py needs to be edited as well!'''
    # testing different model types
    from test.core_tests.test_scan import BinaryTest, MultiLabelTest
    BinaryTest().values_single_test()
    BinaryTest().values_list_test()
    BinaryTest().values_range_test()
    MultiLabelTest().values_single_test()
    MultiLabelTest().values_list_test()
    MultiLabelTest().values_range_test()
    # reporting specific testing
    from test.core_tests.test_scan import ReportingTest, DatasetTest
    # NOTE: these classes run their checks in __init__, hence the bare calls.
    ReportingTest()
    DatasetTest()
    # MOVE TO command specific tests
    # Scan() object tests
    scan_object = test_scan_object()
    # reporting tests
    test_reporting_object(scan_object)
    test_params_object()
    test_auto_scan()
    test_templates()
    # create a string for name of deploy file
    start_time = str(time.strftime("%s"))
    # command-object tests all reuse the scan_object produced above
    p = ta.Predict(scan_object)
    p.predict(scan_object.x)
    p.predict_classes(scan_object.x)
    ta.Autom8(scan_object, scan_object.x, scan_object.y)
    ta.Evaluate(scan_object)
    ta.Deploy(scan_object, start_time)
    # NOTE(review): presumably Deploy() writes <start_time>.zip which
    # Restore() then loads — verify against the Deploy implementation.
    ta.Restore(start_time + '.zip')
    test_random_methods()
    # exercise the data generator and CPU-forcing utilities
    fit_generator = ta.utils.generator(scan_object.x, scan_object.y, 20)
    force_cpu()
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,199
|
chris8447/talos
|
refs/heads/master
|
/talos/__init__.py
|
# import commands
from .scan.Scan import Scan
from .commands.reporting import Reporting
from .commands.predict import Predict
from .commands.deploy import Deploy
from .commands.evaluate import Evaluate
from .commands.restore import Restore
from .commands.autom8 import Autom8
from .commands.params import Params
from .commands.kerasmodel import KerasModel
from . import utils
from . import examples as templates
# the purpose of everything below is to keep the namespace completely clean
# Internal helper names that should not leak through talos.utils.
del_from_utils = ['best_model', 'connection_check', 'detector',
                  'exceptions', 'last_neuron', 'load_model', 'validation_split',
                  'pred_class', 'results', 'string_cols_to_numeric']
for key in del_from_utils:
    if not key.startswith('__'):
        delattr(utils, key)

# Within each template submodule, keep only the named example entries.
template_sub = [templates.datasets,
                templates.models,
                templates.params,
                templates.pipelines]
keep_from_templates = ['iris', 'cervical_cancer', 'titanic', 'breast_cancer',
                       'icu_mortality']
for sub in template_sub:
    for key in list(sub.__dict__):
        if not key.startswith('__') and key not in keep_from_templates:
            delattr(sub, key)

# Finally drop the scaffolding names themselves (including the loop
# variables `key` and `sub`) so dir(talos) stays clean.
del commands, parameters, scan, reducers, model, metrics, key, del_from_utils
del examples, sub, keep_from_templates, template_sub
__version__ = "0.5.0"
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,200
|
chris8447/talos
|
refs/heads/master
|
/talos/parameters/ParamGrid.py
|
import numpy as np
from ..reducers.sample_reducer import sample_reducer
from ..reducers.permutation_filter import permutation_filter
class ParamGrid:
    '''Suite for handling parameters internally within Talos
    Takes as input the parameter dictionary from the user, and
    returns a class object which can then be used to pick parameters
    for each round together with other parameter related operations.
    '''
    def __init__(self, main_self):
        # main_self is the Scan() object; its settings (grid_downsample,
        # round_limit, shuffle, permutation_filter, ...) drive grid creation.
        self.main_self = main_self
        # creates a reference dictionary for column number to label
        self.param_reference = {}
        for i, col in enumerate(self.main_self.params.keys()):
            self.param_reference[col] = i
        # convert the input to useful format
        self._p = self._param_input_conversion()
        # create a list of lists, each list being a parameter sequence
        ls = [list(self._p[key]) for key in self._p.keys()]
        # get the number of total dimensions / permutations
        virtual_grid_size = 1
        for l in ls:
            virtual_grid_size *= len(l)
        final_grid_size = virtual_grid_size
        # calculate the size of the downsample
        if self.main_self.grid_downsample is not None:
            final_grid_size = int(virtual_grid_size * self.main_self.grid_downsample)
        # take round_limit into account
        if self.main_self.round_limit is not None:
            final_grid_size = min(final_grid_size, self.main_self.round_limit)
        # create the params grid
        self.param_grid = self._create_param_grid(ls,
                                                  final_grid_size,
                                                  virtual_grid_size)
        # handle the case where permutation filter is provided
        # NOTE: permutation_filter() mutates and returns this same object,
        # hence the self-rebinding below.
        if self.main_self.permutation_filter is not None:
            self = permutation_filter(self,
                                      ls,
                                      final_grid_size,
                                      virtual_grid_size)
        # initialize with random shuffle if needed
        if self.main_self.shuffle:
            np.random.shuffle(self.param_grid)
        # create a index for logging purpose
        self.param_log = list(range(len(self.param_grid)))
        # add the log index to param grid (appended as the last column)
        self.param_grid = np.column_stack((self.param_grid, self.param_log))
    def _create_param_grid(self, ls, final_grid_size, virtual_grid_size):
        '''Build the permutation grid, downsampling indices when requested.'''
        # select permutations according to downsample
        if final_grid_size < virtual_grid_size:
            out = sample_reducer(self, final_grid_size, virtual_grid_size)
        else:
            out = range(0, final_grid_size)
        # build the parameter permutation grid
        param_grid = self._create_param_permutations(ls, out)
        return param_grid
    def _create_param_permutations(self, ls, permutation_index):
        '''Expand params dictionary to permutations
        Takes the input params dictionary and expands it to
        actual parameter permutations for the experiment.
        '''
        final_grid = []
        for i in permutation_index:
            p = []
            # decode the flat permutation index into one value per parameter
            # (mixed-radix decomposition; the last parameter varies fastest)
            for l in reversed(ls):
                i, s = divmod(int(i), len(l))
                p.insert(0, l[s])
            final_grid.append(tuple(p))
        # dtype='object' keeps heterogeneous values (strings, floats,
        # callables) intact in one array
        _param_grid_out = np.array(final_grid, dtype='object')
        return _param_grid_out
    def _param_input_conversion(self):
        '''DETECT PARAM FORMAT
        Checks if the hyperparameter input format is list
        or tuple in the params dictionary and expands accordingly.
        '''
        out = {}
        for param in self.main_self.params.keys():
            # for range/step style input
            if isinstance(self.main_self.params[param], tuple):
                out[param] = self._param_range(self.main_self.params[param][0],
                                               self.main_self.params[param][1],
                                               self.main_self.params[param][2])
            # all other input styles
            else:
                out[param] = self.main_self.params[param]
        return out
    def _param_range(self, start, end, n):
        '''Deal with ranged inputs in params dictionary
        A helper function to handle the cases where params
        dictionary input is in the format (start, end, steps)
        and is called internally through ParamGrid().
        '''
        try:
            out = np.arange(start, end, (end - start) / n, dtype=float)
        # this is for python2
        except ZeroDivisionError:
            out = np.arange(start, end, (end - start) / float(n), dtype=float)
        # integer endpoints produce an integer-valued grid
        if type(start) == int and type(end) == int:
            out = out.astype(int)
        # np.unique also sorts; duplicates can appear after the int cast
        out = np.unique(out)
        return out
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,201
|
chris8447/talos
|
refs/heads/master
|
/test/core_tests/test_scan.py
|
#!/usr/bin/env python
from __future__ import print_function
from keras.losses import binary_crossentropy, sparse_categorical_crossentropy
from keras.losses import categorical_crossentropy, mean_squared_error
from keras.optimizers import SGD, Adam, Adadelta, Adagrad
from keras.optimizers import Adamax, RMSprop, Nadam
from keras.activations import relu, sigmoid
from sklearn.model_selection import train_test_split as splt
from talos.scan.Scan import Scan
from talos.commands.reporting import Reporting
import talos as ta
# single values
def values_single_params():
    """Params dict where (almost) every hyperparameter has a single value."""
    params = {
        'lr': [1],
        'first_neuron': [4],
        'hidden_layers': [2],
        'batch_size': [100],
        'epochs': [2],
        'dropout': [0],
        'shapes': ['brick'],
        'optimizer': [Adam],
        'losses': [binary_crossentropy,
                   sparse_categorical_crossentropy,
                   categorical_crossentropy,
                   mean_squared_error],
        'activation': ['relu'],
        'last_activation': ['softmax'],
    }
    return params
# lists of values
def values_list_params():
    """Params dict exercising multi-value lists for every hyperparameter."""
    params = {
        'lr': [1, 2],
        'first_neuron': [4, 4],
        'hidden_layers': [2, 2],
        'batch_size': [100, 200],
        'epochs': [1, 2],
        'dropout': [0, 0.1],
        'shapes': ['brick', 'funnel', 'triangle', 0.2],
        'optimizer': [Adam, Adagrad, Adamax, RMSprop, Adadelta, Nadam, SGD],
        'losses': ['binary_crossentropy',
                   'sparse_categorical_crossentropy',
                   'categorical_crossentropy',
                   'mean_squared_error'],
        'activation': ['relu', 'elu'],
        'last_activation': ['softmax'],
    }
    return params
# range of values
def values_range_params():
    """Params dict exercising (start, end, steps) range tuples."""
    params = {
        'lr': (0.5, 5, 10),
        'first_neuron': (4, 100, 5),
        'hidden_layers': (0, 5, 5),
        'batch_size': (200, 300, 10),
        'epochs': (1, 5, 4),
        'dropout': (0, 0.5, 5),
        'shapes': ['funnel'],
        'optimizer': [Nadam],
        'losses': [binary_crossentropy,
                   sparse_categorical_crossentropy,
                   categorical_crossentropy,
                   mean_squared_error],
        'activation': [relu],
        'last_activation': [sigmoid],
    }
    return params
"""
The tests below have to serve several purpose:
- test possible input methods to params dict
- test binary, multi class, multi label and continuous problems
- test all Scan arguments
Each problem type is presented as a Class, and contains three
experiments using single, list, or range inputs. There is an
effort to test as many scenarios as possible here, so be
inventive / experiment! Doing well with this part of the testing,
there is a healthy base for a more serious approach to ensuring
procedural integrity.
"""
def get_params(task):
    """
    Helper that allows the tests to feed from same
    params dictionaries, narrowed to one loss per prediction task.

    USE: values_single, values_list, values_range = get_params(0)
    0 = binary
    1 = 1d multi class
    2 = 2d multi label
    3 = continuous / regression

    Returns the tuple (values_single, values_list, values_range).
    """
    # first create the params dicts
    values_single = values_single_params()
    values_list = values_list_params()
    values_range = values_range_params()
    # then limit the losses according to the prediction task.
    # Index into the dicts just built instead of re-invoking the
    # builder functions a second time (same result, no duplicate work).
    for params in (values_single, values_list, values_range):
        params['losses'] = [params['losses'][task]]
    return values_single, values_list, values_range
class BinaryTest:
    """Binary-classification Scan() smoke tests (cervical cancer data)."""

    def __init__(self):
        # params dicts with the binary loss selected
        self.values_single, self.values_list, self.values_range = get_params(0)
        # a small slice of the dataset keeps the tests quick
        self.x, self.y = ta.templates.datasets.cervical_cancer()
        self.x = self.x[:300]
        self.y = self.y[:300]
        self.model = ta.templates.models.cervical_cancer
        # hold out 20% as explicit validation data
        self.x_train, self.x_val, self.y_train, self.y_val = splt(
            self.x, self.y, test_size=0.2)

    def values_single_test(self):
        print("BinaryTest : Running values_single_test...")
        Scan(self.x, self.y,
             params=self.values_single,
             model=ta.templates.models.cervical_cancer)

    def values_list_test(self):
        print("BinaryTest : Running values_list_test...")
        kwargs = dict(x_val=self.x_val,
                      y_val=self.y_val,
                      params=self.values_list,
                      round_limit=5,
                      dataset_name='BinaryTest',
                      experiment_no='000',
                      model=ta.templates.models.cervical_cancer,
                      random_method='crypto_uniform',
                      seed=2423,
                      search_method='linear',
                      reduction_method='correlation',
                      reduction_interval=2,
                      reduction_window=2,
                      reduction_threshold=0.2,
                      reduction_metric='val_loss',
                      reduce_loss=True,
                      last_epoch_value=True,
                      clear_tf_session=False,
                      disable_progress_bar=True,
                      debug=True)
        Scan(self.x_train, self.y_train, **kwargs)

    # comprehensive
    def values_range_test(self):
        print("BinaryTest : Running values_range_test...")
        kwargs = dict(params=self.values_range,
                      model=ta.templates.models.cervical_cancer,
                      grid_downsample=0.0001,
                      permutation_filter=lambda p: p['first_neuron'] * p['hidden_layers'] < 220,
                      random_method='sobol',
                      reduction_method='correlation',
                      reduction_interval=2,
                      reduction_window=2,
                      reduction_threshold=0.2,
                      reduction_metric='val_acc',
                      reduce_loss=False,
                      debug=True)
        Scan(self.x_train, self.y_train, **kwargs)
class MultiLabelTest:
    """Multi-label Scan() smoke tests (iris data)."""

    def __init__(self):
        # params dicts with the 2d multi-label loss selected
        self.values_single, self.values_list, self.values_range = get_params(2)
        self.x, self.y = ta.templates.datasets.iris()
        # hold out 20% as explicit validation data
        self.x_train, self.x_val, self.y_train, self.y_val = splt(
            self.x, self.y, test_size=0.2)

    def values_single_test(self):
        print("MultiLabelTest : Running values_single_test...")
        Scan(self.x, self.y,
             params=self.values_single,
             model=ta.templates.models.iris)

    def values_list_test(self):
        print("MultiLabelTest : Running values_list_test...")
        kwargs = dict(x_val=self.x_val,
                      y_val=self.y_val,
                      params=self.values_list,
                      round_limit=5,
                      dataset_name='MultiLabelTest',
                      experiment_no='000',
                      model=ta.templates.models.iris,
                      random_method='crypto_uniform',
                      seed=2423,
                      search_method='linear',
                      permutation_filter=lambda p: p['first_neuron'] * p['hidden_layers'] < 9,
                      reduction_method='correlation',
                      reduction_interval=2,
                      reduction_window=2,
                      reduction_threshold=0.2,
                      reduction_metric='val_loss',
                      reduce_loss=True,
                      last_epoch_value=True,
                      clear_tf_session=False,
                      disable_progress_bar=True,
                      debug=True)
        Scan(self.x, self.y, **kwargs)

    # comprehensive
    def values_range_test(self):
        print("MultiLabelTest : Running values_range_test...")
        kwargs = dict(params=self.values_range,
                      model=ta.templates.models.iris,
                      grid_downsample=0.0001,
                      random_method='sobol',
                      reduction_method='correlation',
                      reduction_interval=2,
                      reduction_window=2,
                      reduction_threshold=0.2,
                      reduction_metric='val_acc',
                      reduce_loss=False,
                      debug=True)
        Scan(self.x, self.y, **kwargs)
class ReportingTest:
    """Exercise every Reporting() accessor against both result files."""

    def __init__(self):
        for label, filename in (('Binary', 'BinaryTest_000.csv'),
                                ('MultiLabel', 'MultiLabelTest_000.csv')):
            print("ReportingTest : Running %s test..." % label)
            r = Reporting(filename)
            x = r.data
            # call every analysis/plot accessor once
            for method in (r.correlate, r.high, r.low, r.rounds,
                           r.rounds2high, r.best_params, r.plot_corr,
                           r.plot_hist, r.plot_line):
                x = method()
        del x
class DatasetTest:
    """Call every bundled dataset and params loader once."""

    def __init__(self):
        print("DatasetTest : Running tests...")
        # icu_mortality takes an optional sample count
        x = ta.templates.datasets.icu_mortality()
        x = ta.templates.datasets.icu_mortality(100)
        for loader in (ta.templates.datasets.titanic,
                       ta.templates.datasets.iris,
                       ta.templates.datasets.cervical_cancer,
                       ta.templates.datasets.breast_cancer,
                       ta.templates.params.iris,
                       ta.templates.params.breast_cancer):
            x = loader()
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,202
|
chris8447/talos
|
refs/heads/master
|
/talos/reducers/permutation_filter.py
|
def permutation_filter(self, ls, final_grid_size, virtual_grid_size):
    '''Handles the filtering for ta.Scan(... permutation_filter= ...)'''
    from ..parameters.round_params import create_params_dict

    def _passes(index):
        # apply the user-supplied predicate to the params dict of one row
        return self.main_self.permutation_filter(create_params_dict(self, index))

    # first pass: filter the grid that was already built
    kept = list(filter(_passes, range(len(self.param_grid))))
    self.param_grid = self.param_grid[kept]
    # if the filter removed too much, keep doubling the candidate grid
    # (capped at the full virtual grid) and re-filter until enough rows
    # survive or the whole grid has been tried
    expanded_size = final_grid_size
    while len(self.param_grid) < final_grid_size and expanded_size < virtual_grid_size:
        expanded_size *= 2
        if expanded_size > virtual_grid_size:
            expanded_size = virtual_grid_size
        self.param_grid = self._create_param_grid(ls,
                                                  expanded_size,
                                                  virtual_grid_size)
        kept = list(filter(_passes, range(len(self.param_grid))))
        self.param_grid = self.param_grid[kept]
    # trim any surplus from the final expansion
    self.param_grid = self.param_grid[:final_grid_size]
    return self
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,203
|
chris8447/talos
|
refs/heads/master
|
/test/core_tests/test_params_object.py
|
import talos as ta
def test_params_object():
    '''Tests the object from Params()'''
    print('Start testing Params object...')
    # default object: call every generator without arguments
    p = ta.Params()
    for fn in (p.activations, p.batch_size, p.dropout, p.epochs,
               p.kernel_initializers, p.layers, p.neurons, p.lr,
               p.optimizers, p.shapes, p.shapes_slope, p.automated):
        fn()
    # non-replacing object: exercise the explicit-argument paths
    p = ta.Params(replace=False)
    p.activations()
    p.batch_size(10, 100, 5)
    p.dropout()
    p.epochs(10, 100, 5)
    p.kernel_initializers()
    p.layers(12)
    p.neurons(10, 100, 5)
    p.lr()
    p.optimizers('multi_label')
    p.shapes()
    p.shapes_slope()
    p.automated('sloped')
    return "Finished testing Params object!"
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,204
|
chris8447/talos
|
refs/heads/master
|
/test/core_tests/test_auto_scan.py
|
import talos as ta
def test_auto_scan():
    '''Tests Scan() fed from the automated Params()/KerasModel() pipeline'''
    print('Start auto Scan()...')
    x, y = ta.templates.datasets.breast_cancer()
    x = x[:50]
    y = y[:50]
    # collapse every parameter list to its first value for a single round
    p = ta.Params().params
    for key in p:
        p[key] = [p[key][0]]
    ta.Scan(x, y, p, ta.KerasModel().model,
            permutation_filter=lambda p: p['batch_size'] < 150,)
    return "Finished testing auto Scan()"
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,205
|
chris8447/talos
|
refs/heads/master
|
/talos/commands/params.py
|
import numpy as np
from keras.optimizers import Adam, Nadam, Adadelta, SGD
# Loss functions appropriate for each prediction task type.
loss = {'binary': ['binary_crossentropy', 'logcosh'],
        'multi_class': ['sparse_categorical_crossentropy'],
        'multi_label': ['categorical_crossentropy'],
        'continuous': ['mae']}
# Output-layer activations matching each task type (None = linear output).
last_activation = {'binary': ['sigmoid'],
                   'multi_class': ['softmax'],
                   'multi_label': ['softmax'],
                   'continuous': [None]}
class Params:

    def __init__(self,
                 params=None,
                 task='binary',
                 replace=True,
                 auto=True,
                 network=True):
        '''A facility for generating or appending a params dictionary.

        params : dict or None
            An existing params dictionary to append to, or None to start
            from an empty one.
        task : str
            'binary', 'multi_class', 'multi_label', or 'continuous'
        replace : bool
            Replace current dictionary entries with new ones.
        auto : bool
            Automatically generate or append params dictionary with
            all available parameters.
        network : bool
            Adds several network architectures as parameters. This is to be
            used as an input together with KerasModel(). If False then only
            'dense' will be added.
        '''
        self.task = task
        self.replace = replace
        self.network = network
        self.params = {} if params is None else params
        if auto:
            self.automated()

    def automated(self, shapes='fixed'):
        '''Automatically generate a comprehensive
        parameter dict to be used in Scan().

        shapes : string
            Either 'fixed' or 'sloped'
        '''
        if shapes == 'fixed':
            self.shapes()
        else:
            self.shapes_slope()
        self.layers()
        self.dropout()
        self.optimizers()
        self.activations()
        self.neurons()
        self.losses()
        self.batch_size()
        self.epochs()
        self.kernel_initializers()
        self.lr()
        if self.network:
            self.networks()
        else:
            # NOTE: a plain string, not a list — treated as a constant.
            self.params['network'] = 'dense'
        self.last_activations()

    def shapes(self):
        '''Uses triangle, funnel, and brick shapes.'''
        self._append_params('shapes', ['triangle', 'funnel', 'brick'])

    def shapes_slope(self):
        '''Uses a single decimal float (0.0 through 0.5) to
        reduce the width of the following layer.'''
        self._append_params('shapes', np.arange(0, .6, 0.1).tolist())

    def layers(self, max_layers=6):
        '''Hidden layer counts 0 through max_layers - 1.'''
        self._append_params('hidden_layers', list(range(max_layers)))

    def dropout(self):
        '''Dropout from 0.0 to 0.8 in steps of 0.1.
        (Docstring previously said 0.75, which did not match the values.)'''
        self._append_params('dropout', np.round(np.arange(0, .85, 0.1), 2).tolist())

    def optimizers(self, task='binary'):
        '''Adam, Nadam, Adadelta, and SGD.

        NOTE: `task` is accepted for backwards compatibility but unused.
        '''
        self._append_params('optimizer', [Adam, Nadam, Adadelta, SGD])

    def activations(self):
        '''Hidden-layer activations.'''
        self._append_params('activation', ['relu', 'elu'])

    def losses(self):
        '''Loss functions appropriate for the configured task.'''
        self._append_params('losses', loss[self.task])

    def neurons(self, bottom_value=8, max_value=None, steps=None):
        '''First-layer widths. max_value and steps have to be either None
        or integer values at the same time; when None, powers of two
        8..1024 are used.'''
        if max_value is None and steps is None:
            values = [int(np.exp2(i)) for i in range(3, 11)]
        else:
            values = range(bottom_value, max_value, steps)
        self._append_params('first_neuron', values)

    def batch_size(self, bottom_value=8, max_value=None, steps=None):
        '''Batch sizes. max_value and steps have to be either None or
        integer values at the same time; when None, half-powers of two
        are used.'''
        if max_value is None and steps is None:
            values = [int(np.exp2(i/2)) for i in range(3, 15)]
        else:
            values = range(bottom_value, max_value, steps)
        self._append_params('batch_size', values)

    def epochs(self, bottom_value=50, max_value=None, steps=None):
        '''Epoch counts. max_value and steps have to be either None or
        integer values at the same time; when None, half-powers of two
        offset by 50 are used.'''
        if max_value is None and steps is None:
            values = [int(np.exp2(i/2))+50 for i in range(3, 15)]
        else:
            values = range(bottom_value, max_value, steps)
        self._append_params('epochs', values)

    def kernel_initializers(self):
        '''Common Keras weight initializers.'''
        self._append_params('kernel_initializer',
                            ['glorot_uniform', 'glorot_normal',
                             'random_uniform', 'random_normal'])

    def lr(self):
        '''Learning-rate candidates on three scales (fine, coarse, integer);
        values are normalized per-optimizer by lr_normalizer at model time.'''
        a = np.round(np.arange(0.01, 0.2, 0.02), 3).tolist()
        b = np.round(np.arange(0, 1, 0.2), 2).tolist()
        c = list(range(0, 11))
        self._append_params('lr', a + b + c)

    def networks(self):
        '''Adds five different network architectures as parameters:
        dense, simplernn, lstm, bidirectional_lstm, conv1d.'''
        self._append_params('network', ['dense',
                                        'simplernn',
                                        'lstm',
                                        'bidirectional_lstm',
                                        'conv1d'])

    def last_activations(self):
        '''Output activation appropriate for the configured task.'''
        self._append_params('last_activation', last_activation[self.task])

    def _append_params(self, label, values):
        '''Set `label` to `values`, unless replace is False and the key
        already exists (in which case the existing entry is kept).'''
        if self.replace or label not in self.params:
            self.params[label] = values
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,206
|
chris8447/talos
|
refs/heads/master
|
/talos/commands/kerasmodel.py
|
import numpy as np
from talos.model.layers import hidden_layers
from talos.model.normalizers import lr_normalizer
from keras.models import Sequential
from keras.layers import Dropout, Flatten
from keras.layers import LSTM, Conv1D, SimpleRNN, Dense, Bidirectional
try:
from wrangle.reshape_to_conv1d import reshape_to_conv1d as array_reshape_conv1d
except ImportError:
from wrangle import array_reshape_conv1d
class KerasModel:

    def __init__(self):
        '''An input model for Scan(). Optimized for being used together with
        Params(). For example:

        Scan(x=x, y=y, params=Params().params, model=KerasModel().model)

        NOTE: the grid from Params() is very large, so grid_downsample or
        round_limit accordingly in Scan().
        '''
        self.model = self._create_input_model

    def _create_input_model(self, x_train, y_train, x_val, y_val, params):
        '''Build, compile, and fit a model as dictated by `params`.

        Returns the (history, model) pair that Scan() expects.
        '''
        model = Sequential()

        # Recurrent/convolutional first layers need 3D input.
        if params['network'] != 'dense':
            x_train = array_reshape_conv1d(x_train)
            x_val = array_reshape_conv1d(x_val)

        # First layer by architecture. The original used two separate
        # if/elif chains, which made the 'bidirectional_lstm' branch
        # unreachable-or-misleading; the options are mutually exclusive,
        # so a single chain covers them all.
        if params['network'] == 'conv1d':
            model.add(Conv1D(params['first_neuron'], x_train.shape[1]))
            model.add(Flatten())
        elif params['network'] == 'lstm':
            model.add(LSTM(params['first_neuron']))
        elif params['network'] == 'bidirectional_lstm':
            model.add(Bidirectional(LSTM(params['first_neuron'])))
        elif params['network'] == 'simplernn':
            model.add(SimpleRNN(params['first_neuron']))
        elif params['network'] == 'dense':
            model.add(Dense(params['first_neuron'],
                            input_dim=x_train.shape[1],
                            activation='relu'))

        model.add(Dropout(params['dropout']))

        # add hidden layers to the model
        hidden_layers(model, params, 1)

        # Output layer width: columns of y when 2D, otherwise the number
        # of classes (1 for binary).
        try:
            last_neuron = y_train.shape[1]
        except IndexError:
            n_classes = len(np.unique(y_train))
            last_neuron = 1 if n_classes == 2 else n_classes

        model.add(Dense(last_neuron,
                        activation=params['last_activation']))

        # bundle the optimizer with learning rate changes
        optimizer = params['optimizer'](lr=lr_normalizer(params['lr'],
                                                         params['optimizer']))

        # compile the model
        model.compile(optimizer=optimizer,
                      loss=params['losses'],
                      metrics=['acc'])

        # fit the model
        out = model.fit(x_train, y_train,
                        batch_size=params['batch_size'],
                        epochs=params['epochs'],
                        verbose=0,
                        validation_data=[x_val, y_val])

        # pass the output to Talos
        return out, model
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,207
|
chris8447/talos
|
refs/heads/master
|
/talos/scan/scan_run.py
|
from tqdm import tqdm
from datetime import datetime
from ..utils.results import result_todf, peak_epochs_todf
from .scan_round import scan_round
from .scan_finish import scan_finish
def scan_run(self):
    '''The high-level management of the scan procedures
    onwards from preparation. Manages round_run().'''
    # Progress bar sized to the remaining permutation log.
    self.pbar = tqdm(total=len(self.param_log),
                     disable=self.disable_progress_bar)

    # Run rounds until the permutation log is exhausted or time runs out.
    while len(self.param_log) > 0:
        self = scan_round(self)
        self.pbar.update(1)
        out_of_time = (self.time_limit is not None
                       and datetime.now() > self._stoptime)
        if out_of_time:
            print("Time limit reached, experiment finished")
            break

    self.pbar.close()

    # Consolidate and persist the results.
    self = result_todf(self)
    self.peak_epochs_df = peak_epochs_todf(self)
    self = scan_finish(self)
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,208
|
chris8447/talos
|
refs/heads/master
|
/talos/utils/__init__.py
|
# In this init we load everything under utils in the Talos namespace
try:
from kerasplotlib import TrainingLog as live
except ImportError:
print('Matplotlib backend loading failed')
from ..model.normalizers import lr_normalizer
from ..model.layers import hidden_layers
from ..model.early_stopper import early_stopper
from .generator import generator
from . import gpu_utils
import talos.metrics.keras_metrics as metrics
|
{"/talos/scan/Scan.py": ["/talos/scan/scan_run.py"], "/test/core_tests/test_templates.py": ["/talos/__init__.py"], "/test_script.py": ["/talos/__init__.py", "/test/core_tests/test_params_object.py", "/test/core_tests/test_auto_scan.py", "/test/core_tests/test_templates.py", "/test/core_tests/test_scan.py"], "/talos/__init__.py": ["/talos/scan/Scan.py", "/talos/commands/params.py", "/talos/commands/kerasmodel.py"], "/talos/parameters/ParamGrid.py": ["/talos/reducers/permutation_filter.py"], "/test/core_tests/test_scan.py": ["/talos/scan/Scan.py", "/talos/__init__.py"], "/test/core_tests/test_params_object.py": ["/talos/__init__.py"], "/test/core_tests/test_auto_scan.py": ["/talos/__init__.py"]}
|
37,209
|
elecro/antlerinator
|
refs/heads/master
|
/tests/test_install.py
|
# Copyright (c) 2017 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import pytest
import subprocess
import sys
import antlerinator
def run_antlr():
    """Launch the downloaded ANTLR jar and expect a zero exit status."""
    command = ['java', '-jar', antlerinator.antlr_jar_path]
    process = subprocess.Popen(command)
    process.communicate()
    assert process.returncode == 0
def run_install(args, exp_ok):
    """Run ``python -m antlerinator.install`` with the given extra args.

    :param tuple args: extra command-line arguments for the installer.
    :param bool exp_ok: whether a zero exit status is expected.
    """
    cmd = (sys.executable, '-m', 'antlerinator.install') + args
    # subprocess.run waits for completion; no need for Popen+communicate.
    proc = subprocess.run(cmd)
    if exp_ok:
        assert proc.returncode == 0
    else:
        assert proc.returncode != 0
def test_cli():
    """Exercise the installer CLI: force and lazy succeed, plain fails,
    and the resulting jar is runnable."""
    cases = (
        (('--force', ), True),
        (('--lazy', ), True),
        ((), False),
    )
    for extra_args, expected_ok in cases:
        run_install(args=extra_args, exp_ok=expected_ok)
    run_antlr()
def test_api():
    """Exercise the programmatic install API: force and lazy succeed, a
    plain re-install raises, and the downloaded jar is runnable."""
    antlerinator.install(force=True)
    antlerinator.install(lazy=True)
    # The jar exists now, so a non-forced, non-lazy install must fail.
    with pytest.raises(FileExistsError):
        antlerinator.install()
    run_antlr()
|
{"/tests/test_install.py": ["/antlerinator/__init__.py"], "/antlerinator/__init__.py": ["/antlerinator/install.py"]}
|
37,210
|
elecro/antlerinator
|
refs/heads/master
|
/antlerinator/install.py
|
# Copyright (c) 2017 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import errno
import json
import pkgutil
import urllib.request
from argparse import ArgumentParser
from os import makedirs
from os.path import dirname, exists, expanduser, join
config = json.loads(pkgutil.get_data(__package__, 'config.json').decode('ascii'))
__version__ = config['version']
antlr_jar_path = join(expanduser('~'), '.antlerinator', config['tool_name'])
def install(*, force=False, lazy=False):
    """
    Download the ANTLR v4 tool jar. (Raises :exception:`FileExistsError` if jar
    is already available, unless ``lazy`` is ``True``.)

    :param bool force: Force download even if local jar already exists.
    :param bool lazy: Don't report an error if local jar already exists and
        don't try to download it either.
    """
    # Decide what to do when the jar is already on disk.
    if exists(antlr_jar_path):
        if lazy:
            return
        if not force:
            raise FileExistsError(errno.EEXIST, 'file already exists', antlr_jar_path)

    # Fetch the jar bytes, then write them under ~/.antlerinator.
    with urllib.request.urlopen(config['tool_url']) as response:
        jar_bytes = response.read()

    makedirs(dirname(antlr_jar_path), exist_ok=True)
    with open(antlr_jar_path, mode='wb') as jar_file:
        jar_file.write(jar_bytes)
def execute():
    """
    Entry point of the install helper tool to ease the download of the right
    version of the ANTLR v4 tool jar.
    """
    parser = ArgumentParser(description='Install helper tool to download the right version of the ANTLR v4 tool jar.')
    parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__))
    # --force and --lazy are mutually exclusive install modes.
    modes = parser.add_mutually_exclusive_group()
    modes.add_argument('-f', '--force', action='store_true', default=False,
                       help='force download even if local antlr4.jar already exists')
    modes.add_argument('-l', '--lazy', action='store_true', default=False,
                       help='don\'t report an error if local antlr4.jar already exists and don\'t try to download it either')
    options = parser.parse_args()
    install(force=options.force, lazy=options.lazy)
if __name__ == '__main__':
execute()
|
{"/tests/test_install.py": ["/antlerinator/__init__.py"], "/antlerinator/__init__.py": ["/antlerinator/install.py"]}
|
37,211
|
elecro/antlerinator
|
refs/heads/master
|
/antlerinator/__init__.py
|
# Copyright (c) 2017 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from .install import __version__, antlr_jar_path, install
__all__ = [
'__version__',
'antlr_jar_path',
'install',
]
|
{"/tests/test_install.py": ["/antlerinator/__init__.py"], "/antlerinator/__init__.py": ["/antlerinator/install.py"]}
|
37,212
|
elecro/antlerinator
|
refs/heads/master
|
/setup.py
|
# Copyright (c) 2017-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import json
from os.path import dirname, join
from setuptools import setup, find_packages
# Version and runtime requirement live in the package's config.json so that
# setup.py and the installed package cannot drift apart.
with open(join(dirname(__file__), 'antlerinator', 'config.json'), 'r') as f:
    config = json.load(f)
runtime_req = config['runtime_req']
version = config['version']
setup(
    name='antlerinator',
    version=version,
    packages=find_packages(),
    url='https://github.com/renatahodovan/antlerinator',
    license='BSD',
    author='Renata Hodovan, Akos Kiss',
    author_email='hodovan@inf.u-szeged.hu, akiss@inf.u-szeged.hu',
    description='ANTLeRinator',
    long_description=open('README.rst').read(),
    install_requires=[runtime_req, 'typing; python_version<"3.5"'],
    zip_safe=False,
    include_package_data=True,
    # Console entry point mirrors `python -m antlerinator.install`.
    entry_points={
        'console_scripts': [
            'antlerinator-install = antlerinator.install:execute'
        ]
    },
)
|
{"/tests/test_install.py": ["/antlerinator/__init__.py"], "/antlerinator/__init__.py": ["/antlerinator/install.py"]}
|
37,216
|
outbreak-info/covid_imperial_college
|
refs/heads/main
|
/__init__.py
|
from .dump import ImperialDumper
from .upload import ImperialCollegeUploader
|
{"/__init__.py": ["/dump.py"]}
|
37,217
|
outbreak-info/covid_imperial_college
|
refs/heads/main
|
/dump.py
|
import os
import biothings, config
biothings.config_for_app(config)
from config import DATA_ARCHIVE_ROOT
import biothings.hub.dataload.dumper
class ImperialDumper(biothings.hub.dataload.dumper.DummyDumper):
    """Dummy dumper for the Imperial College COVID-19 reports source.

    DummyDumper performs no actual download; this class only declares the
    source name, provenance metadata, and the weekly dump schedule.
    """
    SRC_NAME = "covid_imperial_college"
    # Provenance metadata surfaced by the BioThings hub.
    __metadata__ = {
        "src_meta": {
            "author":{
                "name": "Ginger Tsueng",
                "url": "https://github.com/gtsueng"
            },
            "code":{
                "branch": "master",
                "repo": "https://github.com/gtsueng/covid_imperial_college.git"
            },
            "url": "https://www.imperial.ac.uk/mrc-global-infectious-disease-analysis/covid-19/",
            "license": "https://www.imperial.ac.uk/research-and-innovation/support-for-staff/scholarly-communication/open-access/oa-policy/"
        }
    }
    # override in subclass accordingly
    SRC_ROOT_FOLDER = os.path.join(DATA_ARCHIVE_ROOT, SRC_NAME)
    SCHEDULE = "15 14 * * 1" # mondays at 14:15UTC/7:15PT
|
{"/__init__.py": ["/dump.py"]}
|
37,218
|
outbreak-info/covid_imperial_college
|
refs/heads/main
|
/parser.py
|
import requests
from bs4 import BeautifulSoup
import json
from datetime import datetime
import re
def create_curationObject():
    """Build the ``curatedBy`` provenance dict, stamped with today's date."""
    today = datetime.now().strftime("%Y-%m-%d")
    return {
        "@type": "Organization",
        "identifier": "imperialcollege",
        "url": "http://www.imperial.ac.uk/mrc-global-infectious-disease-analysis/covid-19/covid-19-reports/",
        "name": "MRC Centre for Global Infectious Disease Analysis",
        "affiliation": [{"name":"Imperial College London"}],
        "curationDate": today
    }
def get_report_links(reports_url):
    """Scrape the Spiral search-results page and return the absolute URL
    of every report record listed in its first table."""
    spiralbase = "https://spiral.imperial.ac.uk"
    listing_page = BeautifulSoup(requests.get(reports_url).text, "html.parser")
    # Record links live inside the first table on the page.
    anchors = listing_page.findAll("table")[0].findAll("a")
    return [spiralbase + anchor.get("href") for anchor in anchors]
def get_meta_content(metacontentfield):
    """Extract the ``content`` attribute(s) from a list of meta tags.

    Returns a single value when exactly one tag is given, otherwise a
    list of values (empty list for empty input). Callers must be prepared
    for either shape.
    """
    if len(metacontentfield) == 1:
        return metacontentfield[0].get("content")
    return [tag.get("content") for tag in metacontentfield]
def transform_pub_meta(soupobject):
    """Map a parsed Spiral record page to an outbreak.info Publication dict.

    soupobject : BeautifulSoup document for one report's landing page.
    The 'identifier'/'_id' prefer an "icl_<doi-suffix>" form when a DOI is
    found among the DC.identifier tags, falling back to the DCTERMS.URI value.
    """
    # NOTE(review): get_meta_content returns a scalar for a single tag and a
    # list otherwise, so these fields may be str or list depending on page.
    urlfield = soupobject.findAll("meta", {"name":"citation_pdf_url"})
    url = get_meta_content(urlfield)
    titlefield = soupobject.findAll("meta", {"name":"citation_title"})
    title = get_meta_content(titlefield)
    datePublishedfield = soupobject.findAll("meta", {"name":"citation_date"})
    datePublished = get_meta_content(datePublishedfield)
    abstractfield = soupobject.findAll("meta", {"name":"DCTERMS.abstract"})
    abstract = get_meta_content(abstractfield)
    defaultidurlfield = soupobject.findAll("meta", {"scheme":"DCTERMS.URI"})
    defaultid = get_meta_content(defaultidurlfield)
    tmpdict = {
        "@context": {
            "schema": "http://schema.org/",
            "outbreak": "https://discovery.biothings.io/view/outbreak/"
        },
        "@type": "Publication",
        "journalName": "Imperial College London",
        "journalNameAbbreviation": "imperialcollege",
        "publicationType": "Report",
        "abstract":abstract,
        "name":title,
        "datePublished":datePublished,
        "url":url,
        "identifier":defaultid
    }
    # Optional fields: only attached when present in the page metadata.
    keywordsfield = soupobject.findAll("meta", {"name":"DC.subject"})
    if len(keywordsfield)>0:
        keywordsobject = get_meta_content(keywordsfield)
        tmpdict["keywords"] = keywordsobject
    licensefield = soupobject.findAll("meta", {"name":"DC.rights"})
    if len(licensefield)>0:
        license = get_meta_content(licensefield)
        tmpdict["license"] = license
    # A DOI, when found, overrides the default identifier set above; later
    # DC.identifier tags win over earlier ones.
    identifiersfield = soupobject.findAll("meta", {"name":"DC.identifier"})
    for eachitem in identifiersfield:
        eachitemcontent = eachitem.get("content")
        if "doi" in eachitemcontent:
            doi = eachitemcontent.replace("https://doi.org/","")
            tmpdict["identifier"] = "icl_"+doi.split('/', 1)[-1]
            tmpdict["doi"] = doi
        elif "10." in eachitemcontent:
            doi = eachitemcontent
            tmpdict["identifier"] = "icl_"+doi.split('/', 1)[-1]
            tmpdict["doi"] = doi
    tmpdict['_id'] = tmpdict["identifier"]
    return(tmpdict)
def get_authors(soupobject):
    """Build the list of author dicts from citation_author meta tags.

    BUG FIX: get_meta_content returns a plain string when exactly one
    citation_author tag exists; iterating that string looped over single
    characters. Wrap a scalar result in a list before iterating.

    Authors written as "Family, X" (suffix shorter than 3 chars) also get
    an explicit familyName.
    """
    authorsfield = soupobject.findAll("meta", {"name":"citation_author"})
    authors = get_meta_content(authorsfield)
    if isinstance(authors, str):
        authors = [authors]
    authorlist = []
    for eachauthor in authors:
        authparse = eachauthor.split(",")
        if (len(authparse) == 2) and len(authparse[1])<3:
            authdict = {'@type': 'outbreak:Person', 'affiliation': [], 'name': eachauthor,
                        'familyName':authparse[0]}
        else:
            authdict = {'@type': 'outbreak:Person', 'affiliation': [], 'name': eachauthor}
        authorlist.append(authdict)
    return(authorlist)
def generate_funding_dict(funder, identifier=None):
    """Build a schema.org MonetaryGrant dict for one funder.

    funder : str
        Funder organization name.
    identifier : str or None
        Grant identifier; omitted from the dict when None.
    """
    fundict = {"@type": "MonetaryGrant",
               "funder": {"name": funder},
               "name": ""
               }
    # Idiom fix: identity comparison with None, not equality.
    if identifier is not None:
        fundict["identifier"] = identifier
    return fundict
def get_funding(soupobject):
    """Extract funding information from DC.contributor / DC.identifier tags.

    Returns (fundlist, fundflag): a list of MonetaryGrant dicts and a bool
    saying whether any funders were found at all.
    """
    fundersfield = soupobject.findAll("meta", {"name":"DC.contributor"})
    funders = get_meta_content(fundersfield)
    fundercheck = len(fundersfield)
    if fundercheck > 0:
        # Non-URL DC.identifier values are treated as grant identifiers.
        identifiersfield = soupobject.findAll("meta", {"name":"DC.identifier"})
        fundidlist = []
        for eachitem in identifiersfield:
            eachitemcontent = eachitem.get("content")
            if ("https:" in eachitemcontent) or ("http:" in eachitemcontent):
                # NOTE(review): miscurls is overwritten each time and never
                # read — dead store kept for fidelity; confirm before removal.
                miscurls = eachitemcontent
            else:
                fundingid = eachitemcontent
                fundidlist.append(fundingid)
        fundlist = []
        i=0
        if len(funders)==len(fundidlist): ## There are the same amount of funders as ids
            # Lockstep pairing assumes the page lists funders and ids in
            # the same order — TODO confirm against real record pages.
            while i < len(funders):
                fundict = generate_funding_dict(funders[i],fundidlist[i])
                fundlist.append(fundict)
                i=i+1
        elif len(funders)>len(fundidlist): ## There are more funders than ids, map the MR ones, then ignore ids
            # NOTE(review): assumes len(mrids) >= len(mrfunds); an excess of
            # "MRC" funders over "MR" ids would raise IndexError here.
            mrfunds = [x for x in funders if "MRC" in x]
            mrids = [x for x in fundidlist if "MR" in x]
            while i < len(mrfunds):
                fundict = generate_funding_dict(mrfunds[i],mrids[i])
                fundlist.append(fundict)
                i=i+1
            remaining_funders = [x for x in funders if x not in mrfunds]
            remaining_fundids = [x for x in fundidlist if x not in mrids]
            j=0
            if (len(remaining_fundids)==0) and (len(remaining_funders)>0):
                # Remaining funders have no ids: emit id-less grants.
                while j<len(remaining_funders):
                    fundict = generate_funding_dict(remaining_funders[j])
                    fundlist.append(fundict)
                    j=j+1
        else: ##There are more ids than funders, and it will be impossible to map them
            while i < len(funders):
                fundict = generate_funding_dict(funders[i])
                fundlist.append(fundict)
                i=i+1
        fundflag = True
    else:
        fundlist = []
        fundflag = False
    return(fundlist, fundflag)
def create_id(description_text):
    """Derive an ``icl_``-prefixed acronym identifier from a description:
    the alphanumeric first characters of each lower-cased word."""
    initials = (word[0] for word in description_text.lower().split())
    return "icl_" + "".join(ch for ch in initials if ch.isalnum())
def transform_resource_meta(metaobject):
    """Transform one scraped resource listing block into a metadata dict.

    metaobject : bs4.Tag for a single resource "media-item" div.
    Returns a dict typed as Dataset, SoftwareSourceCode, or Protocol
    depending on the link-label text in the block.
    """
    baseurl = "http://www.imperial.ac.uk"
    tmpdict = {
        "@context": {
            "schema": "http://schema.org/",
            "outbreak": "https://discovery.biothings.io/view/outbreak/"
        },
        "author": [{
            "@type": "Organization",
            "name": 'Imperial College COVID-19 Response Team',
            "affiliation": [{"name":"MRC Centre for Global Infectious Disease Analysis"},
                            {"name":"Imperial College London"}]
        }]
    }
    tmpdict['name'] = metaobject.find("h3",{"class":"title"}).get_text()
    tmpdict['description'] = metaobject.find("p").get_text()
    tmpdict['identifier'] = create_id(tmpdict['description'])
    tmpdict['_id'] = tmpdict['identifier']
    basetype = metaobject.find("span",{"class":"link primary"}).get_text()
    try:
        tmpurl = metaobject.find("a").get("href")
        url = tmpurl if "http" in tmpurl else baseurl + tmpurl
    except AttributeError:
        # No anchor (or no href attribute) in the block.
        url = None
    try:
        # Dates are embedded in the description as "(dd-mm-yyyy)".
        basedate = re.findall(r"\(\d{2}\-\d{2}\-\d{4}\)", tmpdict['description'])[0].strip("(").strip(")")
        datetime_object = datetime.strptime(basedate, '%d-%m-%Y')
        datePublished = datetime_object.strftime("%Y-%m-%d")
    except (IndexError, ValueError):
        # IndexError: no date found; ValueError: unparsable date.
        datePublished = "Not Available"
    if "data" in basetype:
        tmpdict['@type'] = "Dataset"
        tmpdict['datePublished'] = datePublished
        if url:
            tmpdict['distribution'] = {
                "contentUrl": url,
                "dateModified": datePublished
            }
        # BUG FIX: these two lines previously used ':' (bare annotation
        # statements) instead of '=', so the keys were never assigned.
        tmpdict['species'] = "Homo sapiens"
        tmpdict['infectiousAgent'] = "SARS-CoV-2"
    elif "code" in basetype:
        tmpdict['@type'] = "SoftwareSourceCode"
        if url:
            tmpdict['downloadUrl'] = url
        tmpdict['datePublished'] = datePublished
    elif "survey" in basetype:
        tmpdict['@type'] = "Protocol"
        if url:
            tmpdict['url'] = url
        tmpdict['datePublished'] = datePublished
        tmpdict['protocolSetting'] = "public"
        tmpdict["protocolCategory"] = "protocol"
    # Link the resource back to the report it supports, when cited.
    if "for \"Report" in tmpdict['description']:
        report_check = tmpdict['description'].replace("for \"Report","for|Report").split("|")
        citedByTitle = report_check[1].replace('"','')
        tmpdict['citedBy'] = {"name": citedByTitle,
                              "type": "Publication"}
    return(tmpdict)
def get_reports():
    """Yield one Publication metadata dict per ICL COVID-19 report."""
    reports_url = 'https://spiral.imperial.ac.uk/handle/10044/1/78555/simple-search?location=10044%2F1%2F78555&query=&filter_field_1=type&filter_type_1=equals&filter_value_1=Report&rpp=100&sort_by=dc.date.issued_dt&order=DESC&etal=0&submit_search=Update'
    curatedBy = create_curationObject()
    for report_url in get_report_links(reports_url):
        parsed_record = BeautifulSoup(requests.get(report_url).text, "html.parser")
        record = transform_pub_meta(parsed_record)
        record["curatedBy"] = curatedBy
        record["author"] = get_authors(parsed_record)
        fund_list, fund_flag = get_funding(parsed_record)
        # Funding is only attached when the page declared any funders.
        if fund_flag:
            record["funding"] = fund_list
        yield record
def get_resources():
    """Yield one metadata dict per resource on the ICL scientific-resources
    page. (Removed a stray, never-used accumulator list — this function is
    a generator and yields records directly.)"""
    curatedBy = create_curationObject()
    url = 'http://www.imperial.ac.uk/mrc-global-infectious-disease-analysis/covid-19/covid-19-scientific-resources/'
    response = requests.get(url)
    parsedlisting = BeautifulSoup(response.text, "html.parser")
    resourceclass = parsedlisting.findAll("div", {"class": "media-item full light-secondary reverse equal-height"})
    for eachblock in resourceclass:
        tmpdict = transform_resource_meta(eachblock)
        tmpdict["curatedBy"] = curatedBy
        yield(tmpdict)
def get_analyses():
    """Yield one Analysis metadata dict per tool on the ICL planning-tools page."""
    baseurl = 'http://www.imperial.ac.uk'
    curatedBy = create_curationObject()
    analysislisturl = 'http://www.imperial.ac.uk/mrc-global-infectious-disease-analysis/covid-19/covid-19-planning-tools/'
    listing_soup = BeautifulSoup(requests.get(analysislisturl).text, "html.parser")
    tool_blocks = listing_soup.findAll("div", {"class": "media-item full light-secondary reverse equal-height"})
    for block in tool_blocks:
        record = {
            "@context": {
                "schema": "http://schema.org/",
                "outbreak": "https://discovery.biothings.io/view/outbreak/"
            },
            "author": [{
                "@type": "Organization",
                "name": 'Imperial College COVID-19 Response Team',
                "affiliation": [{"name":"MRC Centre for Global Infectious Disease Analysis"},
                                {"name":"Imperial College London"}]
            }]
        }
        record['name'] = block.find("h3",{"class":"title"}).get_text()
        record['@type'] = 'Analysis'
        href = block.find("a").get("href")
        record['species'] = "Homo sapiens"
        record['infectiousAgent'] = "SARS-CoV-2"
        record['infectiousDisease'] = "COVID-19"
        record['description'] = block.find("p").get_text()
        record['identifier'] = create_id(record['description'])
        record['_id'] = record['identifier']
        record["curatedBy"] = curatedBy
        # Relative links are resolved against the Imperial College site.
        record['url'] = href if "http" in href else baseurl + href
        # No publication date is exposed for the tools; keep the placeholder.
        record['datePublished'] = '0000-00-00'
        yield record
def load_annotations():
    """Yield every document for upload: reports, then resources, then analyses."""
    yield from get_reports()
    yield from get_resources()
    yield from get_analyses()
|
{"/__init__.py": ["/dump.py"]}
|
37,219
|
HALINA9000/ai.exp.0001.cat_detector
|
refs/heads/master
|
/data.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 10 15:20:07 2018
@author: tom.s (at) halina9000.com
"""
import numpy as np
import h5py
import os
def load_data(data_path='datasets', reshape=True, normalize=True):
    """
    Load, reshape and normalize train and test data.

    Parameters
    ----------
    data_path : str, optional
        Path where dataset files are located.
    reshape : bool, optional
        If True data will be reshaped to form
        (m, height * width * number of channels).
    normalize : bool, optional
        If True data will be normalized in range (0, 1).

    Returns
    -------
    train_x : np.array(float)
        Training `x` set (training features).
    train_y : np.array(int)
        Training `y` set (training labels).
    test_x : np.array(float)
        Test `x` set (test features).
    test_y : np.array(int)
        Test `y` set (test labels).
    """
    def _read_split(filename, x_key, y_key):
        # One HDF5 file holds both features and labels for a split; the
        # context manager guarantees the file handle is closed.
        with h5py.File(os.path.join(data_path, filename), 'r') as dataset:
            return np.array(dataset[x_key][:]), np.array(dataset[y_key][:])

    train_x, train_y = _read_split('train_catvnoncat.h5',
                                   'train_set_x', 'train_set_y')
    test_x, test_y = _read_split('test_catvnoncat.h5',
                                 'test_set_x', 'test_set_y')

    if reshape:
        # Flatten each image into a single feature row.
        train_x = train_x.reshape(train_x.shape[0], -1)
        test_x = test_x.reshape(test_x.shape[0], -1)
    if normalize:
        # Pixel values 0..255 -> 0..1.
        train_x = train_x / 255.
        test_x = test_x / 255.
    return train_x, train_y, test_x, test_y
def show_data_stats(set_y):
    """
    Compute basic dataset statistics.

    Total amount of images, amount of images with cat and percent
    of cat images in dataset.

    Parameters
    ----------
    set_y : np.array(int)
        Training or test `y` set (labels); 1 marks a cat image.

    Returns
    -------
    cat_percent : int
        Percent of cat images in dataset.
    cat_amount : int
        Amount of cat images in dataset.
    set_size : int
        Total amount of images in dataset.

    (The previous docstring listed the return values in the wrong order.)
    """
    set_size = set_y.shape[0]
    cat_amount = np.sum(set_y)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int performs the same truncation.
    cat_percent = int(cat_amount / set_size * 100)
    return cat_percent, cat_amount, set_size
|
{"/catDetector.py": ["/data.py", "/model.py", "/presentation.py"]}
|
37,220
|
HALINA9000/ai.exp.0001.cat_detector
|
refs/heads/master
|
/model.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 10 15:14:47 2018
@author: tom.s (at) halina9000.com
"""
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.initializers import RandomUniform
from keras.callbacks import Callback
from keras.optimizers import TFOptimizer, Adam
from keras import regularizers
from time import time
import os
import h5py
class BestAccs(Callback):
    """Saves weights after epoch if accuracies are better than previously.

    Criterion `better` is defined as follows:
    - both accuracies are >= min_accs,
    - difference between accuracies is <= diff,
    - current minimum accuracy (training or test) is better than previous one.

    # Arguments
        filepath (str): filepath (file name and directory) for saving
            weights.
        min_accs (float): minimum value of both acc and val_acc.
            Default: 0.0.
        diff (float): maximum difference between acc and val_acc.
            Default: 1.0
    """
    def __init__(self, filepath, min_accs=0.0, diff=1.0):
        super(BestAccs, self).__init__()
        self.filepath = filepath
        self.min_accs = min_accs
        self.diff = diff

    def on_train_begin(self, logs=None):
        # BUG FIX: mutable default `logs={}` replaced with None (Keras
        # always supplies logs; the default is only a safety net).
        self.best_acc = 0.0

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        acc = logs.get('acc')
        val_acc = logs.get('val_acc')
        # The epoch's score is the weaker of the two accuracies.
        epoch_acc = np.minimum(acc, val_acc)
        # (np.max on the scalar best_acc was a no-op; compare directly.)
        if epoch_acc > self.best_acc:
            if np.absolute(acc - val_acc) <= self.diff:
                self.best_acc = epoch_acc
                if epoch_acc >= self.min_accs:
                    filepath = self.filepath.format(epoch_acc, **logs)
                    self.model.save_weights(filepath, overwrite=True)
def best_batch_size(train_x, train_y, epochs=200):
    """
    Determine optimal batch size.

    Fits the same single-neuron model once per candidate batch size
    (powers of two from 32 up to the dataset size) and times each fit.

    Parameters
    ----------
    train_x : np.array(float)
        Training `x` set (training features).
    train_y : np.array(int)
        Training `y` set (training labels).
    epochs : int, optional
        Number of epochs.

    Returns
    -------
    batch_size : int
        Size of most efficient (fastest) batch size.
    batches_exe_time : list
        List of [execution time, batch size] pairs, sorted fastest first.
    """
    lr = 0.005
    # NOTE(review): tf.train.GradientDescentOptimizer is TF1-era API —
    # confirm the pinned TensorFlow version before reuse.
    optimizer = TFOptimizer(tf.train.GradientDescentOptimizer(lr))
    # Candidate sizes: 2**5 .. 2**ceil(log2(m)).
    batch_size_limit = int(np.log2(train_x.shape[0])) + 1
    batch_size_set = [2**x for x in range(5, batch_size_limit + 1)]
    model = Sequential()
    model.add(Dense(1,
                    kernel_initializer='zeros',
                    bias_initializer='zeros',
                    input_dim=train_x.shape[1],
                    activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    # Measure execution time for each batch_size.
    # NOTE(review): the same model instance is re-fit for every candidate,
    # so later fits resume from trained weights — confirm this is intended.
    batches_exe_time = []
    for batch_size in batch_size_set:
        time_start = time()
        model.fit(train_x, train_y,
                  epochs=epochs,
                  batch_size=batch_size,
                  verbose=0)
        time_end = time() - time_start
        batches_exe_time.append([time_end, batch_size])
    # Sorting by elapsed time puts the fastest batch size first.
    batches_exe_time.sort()
    batch_size = batches_exe_time[0][1]
    return batch_size, batches_exe_time
def course_assignment(train_x, train_y, test_x, test_y,
                      file_output_path,
                      initializer='zeros', batch_size=256):
    """
    First Coursera Deep Learning course programming assignment in Keras.
    Training of single neuron with sigmoid activation function and set
    of images. Our goal is to teach that neuron to recognize images with cat.
    Parameters
    ----------
    train_x : np.array(float)
        Training `x` set (training features).
    train_y : np.array(int)
        Training `y` set (training labels).
    test_x : np.array(float)
        Test `x` set (test features).
    test_y : np.array(int)
        Test `y` set (test labels).
    file_output_path : str
        Path to store h5 files generated by custom BestAccs callback.
    initializer : str, optional
        Type of kernel and bias initializer.
    batch_size : int, optional
        Size of batch (amount of samples) for model fitting.
    Returns
    -------
    history : keras.callbacks.History object
        History of loss, accuracy, validation loss and validation
        accuracy during model fitting.
    """
    if not os.path.exists(file_output_path):
        os.makedirs(file_output_path)
    # Define form of h5 file name prefix; the three placeholders are
    # filled by BestAccs with min(acc, val_acc), acc and val_acc.
    metrics = '{:.2f}-{acc:.2f}-{val_acc:.2f}'
    filename = metrics + '-' + initializer + '.h5'
    path_and_filename = os.path.join(file_output_path, '')
    path_and_filename += filename # two-step join kept (author's workaround of an os.path.join issue)
    best_accs = BestAccs(path_and_filename, min_accs=0.7, diff=0.02)
    lr = 0.005
    optimizer = TFOptimizer(tf.train.GradientDescentOptimizer(lr))
    # Single sigmoid neuron over the flattened image features.
    model = Sequential()
    model.add(Dense(1,
                    kernel_initializer=initializer,
                    bias_initializer=initializer,
                    input_dim=train_x.shape[1],
                    activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    history = model.fit(train_x, train_y,
                        epochs=2000,
                        callbacks=[best_accs],
                        validation_data=(test_x, test_y),
                        batch_size=batch_size,
                        verbose=0)
    return history
def compare_kernels(files, file_output_path):
    """
    Compare two kernels saved as h5 files.
    Load kernels from h5 files and then calculate norm of kernels, norm
    of vector difference between kernels and the angle between kernels.
    Parameters
    ----------
    files : list(str)
        List of two kernel h5 files to compare.  (The original docstring
        documented a non-existent ``kernel_files`` parameter.)
    file_output_path : str
        Path where kernel h5 files were saved.
    Returns
    -------
    norms : list(float)
        List with norms of both compared kernels and norm of vector
        difference between them.
    angle : float
        Angle (radians) between two compared kernels.
    """
    kernels = []
    # Load kernel from both files.
    for file in files:
        path_and_filename = os.path.join(file_output_path, file)
        h5f = h5py.File(path_and_filename, 'r')
        list_of_names = []
        h5f.visit(list_of_names.append)
        # NOTE(review): index 3 assumes the fixed layout Keras writes for
        # this one-layer model -- confirm if the model ever changes.
        # Dataset.value is deprecated in h5py >= 2.9; kept to avoid
        # changing the runtime h5py requirement.
        kernel = h5f[list_of_names[3]].value
        h5f.close()
        kernels.append(kernel)
    # Norms of kernels and norm of difference between kernels.
    norms = []
    norms.append(np.linalg.norm(kernels[0]))
    norms.append(np.linalg.norm(kernels[1]))
    norms.append(np.linalg.norm(kernels[0] - kernels[1]))
    # Angle between kernels.
    norms_product = np.linalg.norm(kernels[0]) * np.linalg.norm(kernels[1])
    angle_cos = np.dot(kernels[0].T, kernels[1]) / norms_product
    # Float rounding can push the cosine slightly outside [-1, 1]; the
    # original clamped only the +1 side, so near-opposite kernels could
    # feed arccos a value < -1 and yield NaN. Clamp both sides.
    angle_cos = np.clip(angle_cos, -1.0, 1.0)
    angle = float(np.arccos(angle_cos))
    return norms, angle
def best_learning_rate(train_x, train_y, test_x, test_y, lr, batch_size=256):
    """
    Fit 10 randomly initialized copies of the model at one learning rate.
    Each run builds a fresh single-neuron model with random_uniform
    initialization, fits it for 1000 epochs and records its History.
    Parameters
    ----------
    train_x : np.array(float)
        Training `x` set (training features).
    train_y : np.array(int)
        Training `y` set (training labels).
    test_x : np.array(float)
        Test `x` set (test features).
    test_y : np.array(int)
        Test `y` set (test labels).
    lr : float
        Learning rate.
    batch_size : int, optional
        Size of batch (amount of samples) for model fitting.
    Returns
    -------
    history_set : list(keras.callbacks.History object)
        One History (loss/acc/val_loss/val_acc) per run.
    """
    gd = TFOptimizer(tf.train.GradientDescentOptimizer(lr))
    histories = []
    for _ in range(10):
        # Fresh weights each run so the runs sample different starts.
        rand_init = RandomUniform(minval=-1.0, maxval=1.0, seed=None)
        net = Sequential()
        net.add(Dense(1,
                      kernel_initializer=rand_init,
                      bias_initializer=rand_init,
                      input_dim=train_x.shape[1],
                      activation='sigmoid'))
        net.compile(loss='binary_crossentropy',
                    optimizer=gd,
                    metrics=['accuracy'])
        histories.append(net.fit(train_x, train_y,
                                 epochs=1000,
                                 validation_data=(test_x, test_y),
                                 batch_size=batch_size,
                                 verbose=0))
    return histories
def sampling_hypersurface(train_x, train_y, test_x, test_y,
                          file_output_path,
                          suffix = '',
                          batch_size=256,
                          iterations=1000,
                          verbose=1):
    """
    Sampling hypersurface by different random initialization.
    Parameters
    ----------
    train_x : np.array(float)
        Training `x` set (training features).
    train_y : np.array(int)
        Training `y` set (training labels).
    test_x : np.array(float)
        Test `x` set (test features).
    test_y : np.array(int)
        Test `y` set (test labels).
    file_output_path : str
        Path to store h5 files generated by custom BestAccs callback.
    suffix: str, optional
        File name suffix.
    batch_size : int, optional
        Size of batch (amount of samples) for model fitting.
    iterations : int, optional
        Defines how many times defining, compilation and fitting
        procedure has to be executed.
    verbose : int, optional
        Verbosity level: 0 or 1. 0 means quiet run, 1 means progress
        will be shown.
    """
    if not os.path.exists(file_output_path):
        os.makedirs(file_output_path)
    # Define form of h5 file name prefix (3-decimal accuracies here,
    # vs. 2 decimals in course_assignment).
    metrics = '{:.3f}-{acc:.3f}-{val_acc:.3f}'
    # NOTE(review): one initializer object is shared by all iterations;
    # presumably Keras draws new random values per model -- confirm.
    initializer = RandomUniform(minval=-1.0, maxval=1.0, seed=None)
    lr = 0.1
    optimizer = TFOptimizer(tf.train.GradientDescentOptimizer(lr))
    for iteration in range(iterations):
        filename = metrics + '-iteration-' + str(iteration) + suffix + '.h5'
        path_and_filename = os.path.join(file_output_path, '')
        path_and_filename += filename # workaround of os.path.join issue
        best_accs = BestAccs(path_and_filename, min_accs=0.88, diff=0.04)
        model = Sequential()
        model.add(Dense(1,
                        kernel_initializer=initializer,
                        bias_initializer=initializer,
                        input_dim=train_x.shape[1],
                        activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
        model.fit(train_x, train_y,
                  epochs=1000,
                  callbacks=[best_accs],
                  validation_data=(test_x, test_y),
                  batch_size=batch_size,
                  verbose=0)
        # Progress bar: '.' per run, '|' every 10th, newline every 100th.
        if verbose == 1:
            if (iteration + 1) % 100 == 0:
                end = '\n'
            else:
                end = ''
            if (iteration + 1) % 10 == 0:
                print('|', end = end)
            else:
                print('.', end = end)
#%%
def best_model_evaluate(train_x, train_y, test_x, test_y,
                        file_output_path, file_name,
                        batch_size=256):
    """
    Evaluate accuracy on training and test set.
    Rebuilds the single-neuron model, loads the weights stored in
    ``file_name`` under ``file_output_path`` and evaluates both sets.
    Parameters
    ----------
    train_x : np.array(float)
        Training `x` set (training features).
    train_y : np.array(int)
        Training `y` set (training labels).
    test_x : np.array(float)
        Test `x` set (test features).
    test_y : np.array(int)
        Test `y` set (test labels).
    file_output_path : str
        Path where the weight h5 files are stored.
    file_name : str
        Name of file that contains weights.
    batch_size : int, optional
        Size of batch (amount of samples) for model evaluation.
    Returns
    -------
    acc_train : float
        Accuracy on training set.
    acc_test : float
        Accuracy on test set.
    """
    initializer = 'zeros'
    lr = 0.01
    optimizer = TFOptimizer(tf.train.GradientDescentOptimizer(lr))
    model = Sequential()
    model.add(Dense(1,
                    kernel_initializer=initializer,
                    bias_initializer=initializer,
                    input_dim=train_x.shape[1],
                    activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    # BUG FIX: the original re-assigned file_output_path to the hard-coded
    # 'samplingHypersurface' here, silently ignoring the caller's argument.
    model.load_weights(os.path.join(file_output_path, file_name))
    # BUG FIX: evaluation now honours the batch_size parameter instead of
    # a hard-coded 256.
    metrics_train = model.evaluate(train_x, train_y,
                                   batch_size=batch_size, verbose=0)
    acc_train = metrics_train[1]
    metrics_test = model.evaluate(test_x, test_y,
                                  batch_size=batch_size, verbose=0)
    acc_test = metrics_test[1]
    return acc_train, acc_test
|
{"/catDetector.py": ["/data.py", "/model.py", "/presentation.py"]}
|
37,221
|
HALINA9000/ai.exp.0001.cat_detector
|
refs/heads/master
|
/catDetector.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 10 13:02:16 2018
@author: tom.s (at) halina9000.com
Caviar strategy in single neuron training - analysis of Coursera cat detector.
Usage of caviar strategy with Early Stopping.
Wider description in my blog entry:
http://www.halina9000.com/blog-0002-Results-of-caviar-strategy-in-single-neuron-training-further-analysis-of-Coursera-cat-detector.html
"""
#%%
"""Load libraries."""
import os
from itertools import combinations
import numpy as np
from data import load_data, show_data_stats
from model import best_batch_size, course_assignment, compare_kernels
from model import best_learning_rate, sampling_hypersurface
from model import best_model_evaluate
from presentation import history_plot
#%%
"""Load datasets"""
train_x, train_y, test_x, test_y = load_data()
#%%
"""Show basic statistics for both training and test datasets."""
cat_percent, cat_amount, set_size = show_data_stats(train_y)
stat_output = 'Cat images in training set: %2d%% (%2d/%2d).'
print(stat_output % (cat_percent, cat_amount, set_size))
cat_percent, cat_amount, set_size = show_data_stats(test_y)
stat_output = 'Cat images in test set: %2d%% (%2d/%2d).'
print(stat_output % (cat_percent, cat_amount, set_size))
#%%
"""Determine best batch size."""
# best_batch_size also returns the per-size timings reported below.
batch_size, batches_exe_time = best_batch_size(train_x, train_y)
print('%5s %8s' % ('size:', 'time:'))
print(5 * '-', 8 * '-')
for exe_time, size in batches_exe_time:
    print('%5d %8.4f' % (size, exe_time))
print('Most efficient batch size is:', batch_size)
#%%
"""Recreate in Keras original course assignment with its results."""
file_output_path = 'originalAssignment'
history = course_assignment(train_x, train_y,
                            test_x, test_y,
                            file_output_path,
                            batch_size=batch_size)
chart_title = 'Original course assignment results'
history_plot([history], file_output_path, chart_title)
#%%
"""Course assignment with random initialization."""
history = course_assignment(train_x, train_y,
                            test_x, test_y,
                            file_output_path,
                            initializer='random_uniform',
                            batch_size=batch_size)
chart_title = 'Modified course assignment results'
history_plot([history], file_output_path, chart_title)
#%%
"""Find best weights for zero and random initialization saved by BestAccs."""
# File names carry a 15-char accuracy prefix '0.00-0.00-0.00-', so f[15:]
# is the initializer suffix.
# NOTE(review): taking [-1] as "best" relies on os.listdir() returning
# names sorted ascending, which is not guaranteed -- verify/sort.
suffix = 'zeros.h5'
weight_zero = [f for f in os.listdir(file_output_path) if f[15:] == suffix]
best_weight_zero = weight_zero[-1]
print('Best weight with zero initialization in', best_weight_zero, 'file.')
suffix = 'random_uniform.h5'
weight_random = [f for f in os.listdir(file_output_path) if f[15:] == suffix]
best_weight_random = weight_random[-1]
print('Best weight with random initialization in', best_weight_random, 'file.')
#%%
"""Quick review of the result: zero vs. random initialization."""
norms, angle = compare_kernels([best_weight_zero, best_weight_random],
                               file_output_path)
print('Norm of kernel with zeros initialization: %.4f' % norms[0])
print('Norm of kernel with random initialization: %.4f' % norms[1])
print('Norm of vector difference between them: %.4f' % norms[2])
print('Angle between kernels (rad): %.4f' % angle)
#%%
"""Finding learning rate that gives most unstable charts."""
lr_set = [0.1, 0.01, 0.001, 0.0001]
for lr in lr_set:
    history_set = best_learning_rate(train_x, train_y,
                                     test_x, test_y,
                                     lr=lr,
                                     batch_size=batch_size)
    file_output_path = 'learningRateTuning'
    chart_title = 'Learning rate: ' + str(lr)
    history_plot(history_set, file_output_path, chart_title)
#%%
"""Sampling hypersurface with random initialization (uniform)."""
file_output_path = 'samplingHypersurface'
sampling_hypersurface(train_x, train_y, test_x, test_y,
                      file_output_path,
                      batch_size=batch_size,
                      iterations=1000)
#%%
"""File with best weights."""
# Names start with the accuracy prefix, so a reverse sort puts the
# highest-accuracy file first.
files = [f for f in os.listdir(file_output_path)]
files.sort(reverse=True)
print(files[0])
#%%
"""Final results of best file."""
acc_train, acc_test = best_model_evaluate(train_x, train_y, test_x, test_y,
                                          file_output_path, files[0],
                                          batch_size=batch_size)
print('Accuracy on training set: %.3f' % acc_train)
print('Accuracy on test set: %.3f' % acc_test)
#%%
"""Analysis of best weights"""
# Files with accuracy greater equal to 90%
prefixes = ['0.9', '1.0']
files_90 = [f for f in files if f[:3] in prefixes]
# Iteration has to be unique - given iteration sometimes gives multiple files
# (f[25:] is the '-iteration-N...' tail past the accuracy prefix).
iterations = []
files_90_unique = []
for file in files_90:
    if file[25:] not in iterations:
        iterations.append(file[25:])
        files_90_unique.append(file)
# Pairwise geometry of all remaining kernels.
angles = []
norms = []
diffs = []
for file_1, file_2 in combinations(files_90_unique, 2):
    [norm_1, norm_2, diff], angle = compare_kernels([file_1, file_2],
                                                    file_output_path)
    angles.append(angle)
    norms.append([norm_1, norm_2])
    diffs.append(diff)
print('Angles between vectors')
print('Minimum: %.4f' % np.min(angles))
print('Maximum: %.4f' % np.max(angles), end='\n\n')
print('Norm of vectors')
print('Minimum: %.4f' % np.min(norms))
print('Maximum: %.4f' % np.max(norms), end='\n\n')
print('Norm of difference between vectors')
print('Minimum: %.4f' % np.min(diffs))
print('Maximum: %.4f' % np.max(diffs))
|
{"/catDetector.py": ["/data.py", "/model.py", "/presentation.py"]}
|
37,222
|
HALINA9000/ai.exp.0001.cat_detector
|
refs/heads/master
|
/presentation.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 10 15:14:47 2018
@author: tom.s (at) halina9000.com
"""
import matplotlib.pyplot as plt
import numpy as np
import os
def history_plot(history_set,
                 file_output_path, chart_title,
                 y_axis_min=0.0, y_axis_max=1.0,
                 acc=True, val_acc=True,
                 alpha_acc=0.7, alpha_val_acc=0.7):
    """
    Plot accuracy history of one or more models and save it as SVG.
    Parameters
    ----------
    history_set : list
        Model History objects drawn on a single chart.
    file_output_path : str
        Directory the chart is written to (created when missing).
    chart_title : str
        Chart subtitle; a sanitized copy also becomes the file name.
    y_axis_min : float, optional
        Lower limit of y axis.
    y_axis_max : float, optional
        Upper limit of y axis.
    acc : bool, optional
        Whether to draw the training-set accuracy curves.
    val_acc : bool, optional
        Whether to draw the test-set accuracy curves.
    alpha_acc : float, optional
        Alpha (0.0 - 1.0) for the training curves.
    alpha_val_acc : float
        Alpha (0.0 - 1.0) for the test curves.
    """
    if not os.path.exists(file_output_path):
        os.makedirs(file_output_path)
    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))
    ax.xaxis.grid(linestyle='dotted', color='#000000')
    ax.yaxis.grid(linestyle='dotted', color='#000000')
    tick_step = (y_axis_max - y_axis_min) / 10.
    plt.yticks(np.arange(y_axis_min, y_axis_max, step=tick_step))
    ax.set_ylim(y_axis_min, y_axis_max)
    plt.title('Model accuracy' + '\n' + chart_title)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    # Training curves in blue, test curves in orange.
    for history in history_set:
        if acc:
            plt.plot(history.history['acc'],
                     c='#1976D2',
                     linewidth=1,
                     alpha=alpha_acc)
        if val_acc:
            plt.plot(history.history['val_acc'],
                     c='#FF9800',
                     linewidth=1,
                     alpha=alpha_val_acc)
    # Derive a file-system friendly name from the title: strip '-' and
    # ':', turn spaces into underscores.
    safe_name = chart_title.translate(str.maketrans({'-': None,
                                                     ':': None,
                                                     ' ': '_'}))
    plt.savefig(os.path.join(file_output_path, safe_name + '.svg'))
    plt.close()
|
{"/catDetector.py": ["/data.py", "/model.py", "/presentation.py"]}
|
37,233
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/help/token.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/17 2:03 PM'
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import JSONWebSignatureSerializer
from functools import wraps
from .http_error_code import retrun_error
import traceback
from flask import current_app
from global_reference import get_env
def get_token(message, expires=None):
    '''
    Serialize *message* into a signed, time-limited token.
    :param message: payload the token is generated from; it can be
        recovered later by deserializing the token
    :param expires: lifetime in seconds (itsdangerous defaults to 3600
        when None)
    :return: the signed token as a str
    '''
    s = Serializer(secret_key=get_env("SECRET_KEY"), salt=get_env("SALT"), expires_in=expires)
    return s.dumps(message).decode()
class TokenAuth(Serializer):
    '''Decorator that validates a signed token before calling the view.

    :param token_func: callable receiving the token name and returning
        the token string; a non-callable is used as the token itself
    :param token_name: request field holding the token
        (default "access_token")
    :param bind: when True, pass the decoded payload to the wrapped
        view as the ``payload`` keyword argument
    '''
    def __init__(self, token_func=None, token_name=None, bind=True):
        super(TokenAuth, self).__init__(secret_key=get_env("SECRET_KEY"), salt=get_env("SALT"))
        self.token_func = token_func
        self.token_name = token_name or "access_token"
        self.bind = bind
    def loads_token(self, token):
        '''Deserialize *token*, validate its 'exp' header and return the
        payload; raises via retrun_error on any problem.'''
        try:
            payload, header = JSONWebSignatureSerializer.loads(self, token, return_header=True)
        except Exception:
            # Deserialization/signature failure only. The original used a
            # bare `except:` wrapped around the expiry checks below as
            # well, so the HttpError raised for codes 4001/4002/4003 was
            # itself caught and re-reported as 4004.
            traceback.print_exc()
            retrun_error(4004)
        if 'exp' not in header:
            retrun_error(4001)
        if not (isinstance(header['exp'], int) and header['exp'] > 0):
            retrun_error(4002)
        if header['exp'] < self.now():
            retrun_error(4003)
        return payload
    def __call__(self, func):
        @wraps(func)
        def call(*args, **kwargs):
            # Fetch the token, either via the accessor or as a literal.
            if callable(self.token_func):
                token = self.token_func(self.token_name)
            else:
                token = self.token_func
            if not token:
                return retrun_error(4005)
            payload = self.loads_token(token)
            if payload is None:
                retrun_error(4005)
            if self.bind:
                return func(*args, payload=payload, **kwargs)
            else:
                return func(*args, **kwargs)
        return call
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,234
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/start.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/14 3:58 PM'
from global_reference import app,db
if __name__ == '__main__':
    # Create any missing tables before serving (no-op when they exist).
    db.create_all()
    app.run()
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,235
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/base_route/user/__init__.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/14 4:46 PM'
from flask import Blueprint
from base.base_api import BaseApi
from global_reference import app
# Blueprint for all /user routes; BaseApi wires the REST resources onto
# it. NOTE: "buleprint" is a long-standing misspelling kept because the
# sibling `user` module imports it under this name.
user_buleprint = Blueprint("user",__name__)
user_api = BaseApi(user_buleprint)
# Imported for its side effect of registering resources on user_api.
from . import user
app.register_blueprint(user_buleprint,url_prefix="/user")
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,236
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/base_model/model_user/__init__.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/11/9 3:57 PM'
from ..model_base import TableBase
from global_reference import db
class User(db.Model, TableBase):
    """Application user account (name/password/email plus activation flag)."""
    # Presumably disables per-user change logging in TableBase -- the flag
    # is consumed elsewhere; TODO confirm against TableBase.
    _log_user = False
    # user name
    name = db.Column(db.String(32), comment="用户名称", nullable=True, index=True)
    # MD5 of the password
    passwd = db.Column(db.String(32), comment="密码MD5", nullable=True)
    # avatar image URL/path
    head = db.Column(db.String(64), comment="头像图片地址")
    # registration e-mail
    email = db.Column(db.String(128), comment="注册邮箱", nullable=True, index=True)
    # whether the account is activated
    is_use = db.Column(db.Boolean, comment="是否激活")
    def __init__(self, name, passwd, email, *args, **kwargs):
        TableBase.__init__(self, *args, **kwargs)
        self.name = name
        self.passwd = passwd
        self.email = email
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,237
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/help/http_error_code.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/17 9:24 AM'
from global_reference import HttpError
# Application error codes -> user-facing (Chinese) messages, raised as
# HttpError by retrun_error below.
error_code = {
    200: "请求成功",
    405: "未定义的路由请求方式",
    # 1xxx: login errors
    1001: "登录异常,用户名为空",
    1002: "登录异常,密码错误",
    1003: "您还没有注册,请先登记一下吧!",
    1004: "您的账号尚未激活,请联系管理员",
    # 2xxx: request-parameter validation errors
    2001: "参数不完整,缺少必填参数",
    2002: "参数不合法,检测失败",
    2003: "参数不允许为空",
    # 3xxx: registration conflicts
    3001: "两次输入的密码不一致",
    3002: "用户名已存在",
    3003: "邮箱已存在",
    3004: "账号已存在",
    # 4xxx: token/credential errors (see base.help.token)
    4001: "登录凭证异常,超时时间丢失,请重新登录",
    4002: "登录凭证异常,非法的超时时间,请重新登录",
    4003: "登录凭证异常,超时,请重新登录",
    4004: "登录凭证异常,解析错误,请重新登录",
    4005: "登录凭证异常,访问拒绝,请登录",
}
def retrun_error(code, message=None):
    # Raise HttpError for *code*, defaulting the message from error_code;
    # never returns. NOTE: the name is a long-standing misspelling of
    # `return_error`, kept because every module imports it as-is.
    message = message or error_code.get(code, "未知错误")
    raise HttpError(code=code, message=message)
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,238
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/base_route/admin/admin.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/24 9:12 AM'
from . import admin_api
from flask_restful import Resource, request
from flask_migrate import Migrate, migrate, init, upgrade
from global_reference import db, app
import os
from base.help.token import TokenAuth
# Attach Flask-Migrate (Alembic) to the app/db pair.
Migrate(app, db)
@admin_api.resource("/dbUpgrade")
class AdminDbUpgrade(Resource):
    """Token-protected endpoint running a migrate+upgrade cycle."""
    # Token is read from the query string (?access_token=...).
    @TokenAuth(token_func=lambda x: request.args.get(x))
    def get(self, payload):
        # First run: create the migration repository in ./db_back.
        if not os.path.exists("db_back"):
            init(directory="db_back")
        migrate(directory="db_back")
        upgrade(directory="db_back")
        return "database {} update success!".format(db.engine.url.database)
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,239
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/default_config.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/22 8:10 PM'
# SECURITY(review): live DB credentials, the signing secret and the salt
# are committed in source; move them to environment variables or an
# untracked config file and rotate the values.
SQLALCHEMY_DATABASE_URI = "mysql://root:root@127.0.0.1:3306/test" # DB connection URI
SECRET_KEY = "cd17cb5e-fa95-45bd-98b8-fd6a8dd45957"
SALT = "GooDoss"
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,240
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/help/flask_params.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/24 1:18 PM'
from flask import request
class ArgumentError(Exception):
    def __init__(self, flag, comment, field):
        '''Parameter-validation error handed to `error_func` so the caller
        decides how to report it.
        :param flag: error tag -- "not_fund" (missing parameter),
            "type_error" (failed a rule check) or "is_None" (None not
            allowed); "not_fund" spelling matches what Rule emits
        :type flag str
        :param comment: human-readable description
        :type comment str
        :param field: name of the offending request field
        '''
        self.flag = flag
        self.comment = comment
        self.field = field
class Rule(object):
    def __init__(self, field, rule_funcs, location=("json", "form"), error_func=None, call_back=None, require=False,
                 nullable=True):
        '''Validation rule for a single flask request parameter.
        :param field: field name
        :type field str
        :param rule_funcs: check functions; each returns True when the
            value passes and False otherwise
        :type rule_funcs [function,]
        :param error_func: callback invoked with an ArgumentError when a
            check fails
        :type error_func function
        :param call_back: callback invoked after the checks with
            (ok, field_value)
        :type call_back: function
        :param require: True -> the field must be present and rule_funcs
            run; False -> a missing field skips the rule_funcs checks,
            but call_back is still invoked
        :type require: bool
        :param nullable whether None is an accepted value
        :type nullable bool
        :param location flask.request scopes searched for the field,
            e.g. json, form, values
        :type location (str,) ("json","values")
        '''
        self.field = field
        self.rule_funcs = rule_funcs
        self.location = location
        self.error_func = error_func
        self.call_back = call_back
        self.require = require
        self.nullable = nullable
        # Set by get_field_value: True when the field is in no scope.
        self.not_find = False
    def set_default(self, **kwargs):
        # Fill unset options from Rules-level defaults.
        # NOTE(review): `or` keeps the instance value only when truthy, so
        # nullable (default True) can never be lowered to False here and an
        # explicit require=False is overridden by kwargs. Currently dead
        # code: Rules.__init__ has the call commented out.
        self.error_func = self.error_func or kwargs.get("error_func")
        self.call_back = self.call_back or kwargs.get("call_back")
        self.require = self.require or kwargs.get("require")
        self.nullable = self.nullable or kwargs.get("nullable")
    def _run(self):
        # Core check; returns (passed, value).
        # NOTE(review): assumes error_func is callable -- a failing check
        # with error_func=None raises TypeError.
        field_value = self.get_field_value()
        if self.require and self.not_find:
            self.error_func(ArgumentError("not_fund", "field{0}缺失".format(self.field), self.field))
            return False, field_value
        if field_value is None and not self.nullable:
            self.error_func(ArgumentError("is_None", "field{0}不允许为None".format(self.field), self.field))
            return False, field_value
        for func in self.rule_funcs:
            if not callable(func):
                continue
            if not func(field_value):
                self.error_func(ArgumentError("type_error", "field{0},检查失败,不允许的规则".format(self.field), self.field))
                return False, field_value
        return True, field_value
    def run(self):
        # Run the checks, then hand (ok, value) to call_back when present;
        # otherwise return the raw value.
        _ok, field_value = self._run()
        if callable(self.call_back):
            return self.call_back(_ok, field_value)
        return field_value
    def get_field_value(self):
        # Search each request scope in order and return the first hit.
        # Falls through (implicit None) with not_find=True when absent.
        self.not_find = False
        for scope in self.location:
            if not hasattr(request, scope):
                continue
            local = getattr(request, scope)
            if local is None:
                continue
            if self.field not in local:
                continue
            return local.get(self.field)
        self.not_find = True
class Rules(object):
    """Aggregate several Rule objects and run them as one batch."""

    def __init__(self, *rules, **kwargs):
        # kwargs formerly fed Rule.set_default (call commented out in the
        # original); accepted and ignored for backward compatibility.
        self.rules = rules

    def run(self):
        """Run every rule and map each field name to its checked value."""
        return {rule.field: rule.run() for rule in self.rules}
def phone(field_value):
    # TODO: unimplemented stub -- always returns None (falsy), so any Rule
    # using it as a rule_func will reject the field.
    pass
import re
def email(field_value):
    '''Validate a .com/.cn/.net e-mail address.
    Returns the re.Match object (truthy) on success, None otherwise.
    Fixes vs. the original pattern:
    - ``[com,cn,net]{1,3}`` was a character *class* (any 1-3 chars drawn
      from c/o/m/,/n/e/t), so e.g. "foo@bar.ten" validated; replaced by
      the alternation ``(?:com|cn|net)``.
    - the local part allowed zero characters (``{0,19}``); now requires
      at least one (same 19-char upper bound).
    '''
    return re.match(r'^[0-9a-zA-Z_]{1,19}@[0-9a-zA-Z]{1,13}\.(?:com|cn|net)$', field_value)
def rule_len(n=-1, m=-1):
    '''Build a length validator: the value's length must be >= n (when
    n >= 0) and <= m (when m >= 0); a negative bound is ignored.'''
    def check(field_value):
        size = len(field_value)
        if 0 <= n and size < n:
            return False
        if 0 <= m and size > m:
            return False
        return True
    return check
def type_of(type):
    '''Build a validator checking isinstance(field_value, type).
    The original inner function had a bare ``return`` (always None, i.e.
    falsy), so every Rule using it rejected all values; it now performs
    the isinstance check. The parameter name shadows the builtin but is
    kept for interface compatibility.'''
    expected = type
    def _type(field_value):
        return isinstance(field_value, expected)
    return _type
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,241
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/lib/__init__.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/11/9 4:40 PM'
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,242
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/global_reference.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description 在此实例化全局引用的内容,例如全局的数据库连接配置'''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/11/9 4:02 PM'
import os
from flask import Flask, request
import json
# Instantiate the global Flask application object shared by every module
app = Flask(__name__)
@app.before_request
def before_request():
    '''Pre-request hook: point Jinja's template search path at the blueprint
    handling the current request.

    For a blueprint request whose blueprint ships its own templates, those
    directories are prepended so they win over the app defaults; otherwise
    the search path is trimmed back to its last entry (the app default).

    NOTE(review): this mutates the shared ``app.jinja_loader.searchpath`` on
    every request — presumably single-threaded serving is assumed; under
    concurrent requests paths from different blueprints could interleave.
    '''
    if request.blueprint is not None:
        bp = app.blueprints[request.blueprint]
        if bp.jinja_loader is not None:
            # blueprint templates first, then the app's own search path
            newsearchpath = bp.jinja_loader.searchpath + app.jinja_loader.searchpath
            app.jinja_loader.searchpath = newsearchpath
        else:
            # blueprint has no templates: reset to the app's base entry only
            app.jinja_loader.searchpath = app.jinja_loader.searchpath[-1:]
    else:
        # non-blueprint request: reset to the app's base entry only
        app.jinja_loader.searchpath = app.jinja_loader.searchpath[-1:]
# Directory containing this file; used as the anchor for derived paths.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# Baseline configuration; PROJECT_ROOT in the mapping is one level above
# this file's directory (the repository root), unlike the module constant.
app.config.from_mapping({
    "APP_NAME": "main",
    "PROJECT": "app",
    "PROJECT_ROOT": os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
    "LOG_FOLDER": os.path.join(PROJECT_ROOT, 'log'),
    "SQLALCHEMY_TRACK_MODIFICATIONS": False
})
# Load user configuration (overrides the defaults above)
app.config.from_pyfile("default_config.py")
def get_env(name, default=None):
    """Look *name* up in the process environment first, then in the Flask
    app config; return *default* when neither defines it."""
    if name in os.environ:
        return os.environ[name]
    if name in app.config:
        return app.config[name]
    return default
# Enable cross-origin access for all routes (credentials/cookies allowed)
from flask_cors import CORS
CORS(app, supports_credentials=True)
# Initialise the database layer
from flask_sqlalchemy import SQLAlchemy
# Register pymysql as the MySQLdb driver (pure-Python MySQL client)
from pymysql import install_as_MySQLdb
install_as_MySQLdb()
db = SQLAlchemy(app, session_options={"autocommit": False, "autoflush": False, })
# Project-wide custom error type, caught by BaseApi.handle_error.
class HttpError(Exception):
    """Carries an application error code plus a human-readable message."""

    def __init__(self, code, message):
        self.code = code
        self.message = message

    def __str__(self):
        return f"{self.code}-{self.message}"
import traceback
def module_load():
    """Import every top-level package under ``base`` so that route/model
    modules register themselves (blueprints, tables) as an import side
    effect."""
    for user_module in _get_modules_path():
        try:
            __import__(user_module)
        except ImportError:
            # best-effort: a broken module is reported but startup continues
            traceback.print_exc()
    return
def _get_modules_path():
    # Immediate sub-directories of ./base only (no files, no dotted names).
    # NOTE(review): relies on the process CWD being the project root —
    # confirm against how start.py launches the app.
    return ["base." + path for path in os.listdir("base") if
            os.path.isdir("base/" + path) and ("." not in path)]
module_load()
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,243
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/base_route/user/user.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/14 4:42 PM'
from . import user_api
from flask_restful import Resource
from base.base_model.model_user import User
from flask import request
from base.help.http_error_code import retrun_error
from global_reference import db
import hashlib
from sqlalchemy import or_
from base.help import token
from base.help.flask_params import Rules, Rule, rule_len, email
from functools import partial
@user_api.resource("/login")
class UserLogin(Resource):
    """POST /user/login — verify credentials and return an access token."""
    def post(self):
        user_name = request.form.get("user_name", "")
        passwd = request.form.get("passwd", "")
        if not user_name:
            # NOTE(review): this *raises* retrun_error's result while every
            # other branch *returns* it — confirm retrun_error yields an
            # HttpError instance (BaseApi.handle_error only catches HttpError).
            raise retrun_error(1001)
        user_recode = db.session.query(User.name, User.passwd,User.is_use).filter(User.name == user_name).one_or_none()
        if user_recode is None:  # user does not exist
            return retrun_error(1003)
        if not user_recode.is_use:  # user not activated
            return retrun_error(1004)
        # NOTE(review): unsalted MD5 is a weak password hash — consider
        # hashlib.pbkdf2_hmac (with the registration side kept in sync).
        passwd_md5 = hashlib.new("md5", passwd.encode()).hexdigest()
        if passwd_md5 != user_recode.passwd:  # password mismatch
            return retrun_error(1002)
        return {"access_token": token.get_token({"name": user_name})}
@user_api.resource("/register")
class UserRegister(Resource):
    """POST /user/register — validate form/json parameters and create a user."""
    # Validation rule factories; each is instantiated fresh per request in
    # post() so no request state leaks between calls.
    nickname_rule = partial(Rule, field="nickname", location=("form", "json"), rule_funcs=[rule_len(6, 20)],
                            error_func=user_api.error_func, require=True, nullable=False)
    email_rule = partial(Rule, field="email", location=("form", "json"), rule_funcs=[email],
                         error_func=user_api.error_func, require=True, nullable=False)
    password_rule = partial(Rule, field="password", location=("form", "json"), rule_funcs=[rule_len(6, 12)],
                            error_func=user_api.error_func, require=True, nullable=False)
    repass_rule = partial(Rule, field="repass", location=("form", "json"), rule_funcs=[rule_len(6, 12)],
                          error_func=user_api.error_func, require=True, nullable=False)
    def post(self):
        # Run all validations; params maps field name -> validated value.
        params = Rules(self.nickname_rule(), self.email_rule(), self.password_rule(), self.repass_rule()).run()
        if params["password"] != params["repass"]:
            return retrun_error(3001)
        # NOTE(review): unsalted MD5 is a weak password hash — keep in sync
        # with UserLogin if this is ever upgraded.
        pwd_md5 = hashlib.new("md5", params["password"].encode()).hexdigest()
        already_user = db.session.query(User.name, User.email).filter(
            or_(User.name == params["nickname"], User.email == params["email"])).one_or_none()
        if already_user is not None:
            if already_user.name == params["nickname"]:
                return retrun_error(3002,message="用户{0}已注册".format(params["nickname"]))
            if already_user.email == params["email"]:
                return retrun_error(3003, message="邮箱{0}已注册".format(params["email"]))
            # NOTE(review): reachable only if the or_ filter matched a row yet
            # neither equality above held (e.g. collation/case differences) —
            # presumably defensive; confirm intent.
            return retrun_error(3004)
        user = User(name=params["nickname"], passwd=pwd_md5, email=params["email"])
        db.session.add(user)
        db.session.commit()
        return "用户{0}创建完成".format(params["nickname"])
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,244
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/base_api.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/22 8:48 PM'
from flask_restful import Api
from flask import jsonify
from global_reference import HttpError, db
from base.help.http_error_code import retrun_error
from flask.app import MethodNotAllowed
from base.help.flask_params import ArgumentError
import traceback
class BaseApi(Api):
    """flask_restful Api subclass with a unified {code, message, data} JSON
    envelope and guaranteed DB-session cleanup on every response path."""
    def handle_error(self, e):
        """Convert any exception raised by a resource into the standard
        error envelope; always roll back and close the scoped session."""
        try:
            if isinstance(e, HttpError):
                # application-level error: pass its code/message through
                return self.error_response(code=e.code, message=e.message)
            elif isinstance(e, MethodNotAllowed):
                return self.error_response(code=405, message="未经允许的访问方式")
            # unknown failure: log the traceback, surface it as a 500
            traceback.print_exc()
            return self.error_response(code=500, message=str(e))
        finally:
            db.session.rollback()
            db.session.close()
    def error_response(self, code, message):
        # Error envelope mirrors the success envelope from make_response.
        return jsonify({"code": code, "message": message, "data": None})
    def make_response(self, data, *args, **kwargs):
        """Wrap successful payloads in the code-200 envelope and release the
        scoped DB session once the response has been built."""
        try:
            default = {"code": 200, "message": "请求成功", "data": data}
            resp = super(BaseApi, self).make_response(default, *args, **kwargs)
            return resp
        finally:
            db.session.rollback()
            db.session.close()
    @staticmethod
    def error_func(e: ArgumentError):
        """Map a flask_params ArgumentError onto project error codes.

        NOTE(review): "not_fund" intentionally matches the (misspelled) flag
        set in flask_params.Rule._run — keep the two literals in sync.
        """
        if e.flag == "not_fund":
            return retrun_error(2001, message="缺少必填参数{0}".format(e.field))
        elif e.flag == "type_error":
            return retrun_error(2002, message="参数{0}不合法,检测失败".format(e.field))
        else:
            return retrun_error(2003, message="参数{0}不允许为空".format(e.field))
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,245
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/base_model/model_base/__init__.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description 数据库模型基础类'''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/11/12 8:40 AM'
from global_reference import db
from sqlalchemy.ext.declarative import AbstractConcreteBase, declared_attr, declarative_base
Base = declarative_base()
class TableBase(AbstractConcreteBase, Base):
    """Abstract declarative base: integer PK plus optional audit columns.

    Subclasses toggle the audit columns with the two class flags below.
    """
    _log_access = True  # add create_time / update_time columns
    _log_user = True  # add the create_user foreign-key column
    @declared_attr
    def id(cls):
        return db.Column(db.Integer, primary_key=True, autoincrement=True, comment="The primary key of table")
    @declared_attr
    def create_time(cls):
        if cls._log_access:
            return db.Column(db.DateTime, comment="Create on datetime")
        return None
    @declared_attr
    def update_time(cls):
        if cls._log_access:
            return db.Column(db.DateTime, comment="Update on datetime")
        return None
    @declared_attr
    def create_user(cls):
        if cls._log_user:
            return db.Column(db.Integer, db.ForeignKey("user.id"), comment="create by user")
        return None
    @declared_attr
    def __tablename__(cls):
        # table name is the lower-cased class name
        return cls.__name__.lower()
    @declared_attr
    def __mapper_args__(cls):
        return {'polymorphic_identity': cls.__name__, 'concrete': True} if cls.__name__ != "TableBase" else {}
    def __init__(self, *args, **kwargs):
        """Accept column kwargs like the declarative default constructor,
        plus the extra ``user_id`` kwarg recorded into ``create_user``.

        Bug fixed: the original never called the declarative constructor,
        so column kwargs (e.g. ``User(name=..., passwd=..., email=...)`` in
        user.py) were silently dropped. ``user_id`` is popped first because
        it is not a mapped column and would make the superclass raise.
        """
        user_id = kwargs.pop("user_id", None)
        super(TableBase, self).__init__(*args, **kwargs)
        # preserve original behaviour: create_user is assigned even when
        # user_id was not supplied (None)
        self.create_user = user_id
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,246
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/help/__init__.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/17 9:21 AM'
from .http_error_code import retrun_error
from .token import get_token, TokenAuth
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,247
|
wjshan/base_package
|
refs/heads/dev/dandan
|
/base/base_route/admin/__init__.py
|
# -*- coding: utf-8 -*-
# (C) shan weijia, 2018
# All rights reserved
'''Description '''
__author__ = 'shan weijia <shanweijia@jiaaocap.com>'
__time__ = '2018/12/24 9:12 AM'
from flask import Blueprint
from base.base_api import BaseApi
from global_reference import app
# NOTE(review): "buleprint" is a typo for "blueprint", but the name is this
# package's public interface (imported by sibling modules) — do not rename
# without updating every importer.
admin_buleprint = Blueprint("admin",__name__)
admin_api = BaseApi(admin_buleprint)
# importing .admin registers its resources on admin_api (import side effect)
from . import admin
app.register_blueprint(admin_buleprint,url_prefix="/admin")
|
{"/base/help/token.py": ["/base/help/http_error_code.py", "/global_reference.py"], "/start.py": ["/global_reference.py"], "/base/base_route/user/__init__.py": ["/base/base_api.py", "/global_reference.py"], "/base/base_model/model_user/__init__.py": ["/base/base_model/model_base/__init__.py", "/global_reference.py"], "/base/help/http_error_code.py": ["/global_reference.py"], "/base/base_route/admin/admin.py": ["/base/base_route/admin/__init__.py", "/global_reference.py", "/base/help/token.py"], "/base/base_route/user/user.py": ["/base/base_route/user/__init__.py", "/base/base_model/model_user/__init__.py", "/base/help/http_error_code.py", "/global_reference.py", "/base/help/__init__.py", "/base/help/flask_params.py"], "/base/base_api.py": ["/global_reference.py", "/base/help/http_error_code.py", "/base/help/flask_params.py"], "/base/base_model/model_base/__init__.py": ["/global_reference.py"], "/base/help/__init__.py": ["/base/help/http_error_code.py", "/base/help/token.py"], "/base/base_route/admin/__init__.py": ["/base/base_api.py", "/global_reference.py"]}
|
37,250
|
halliganbs/candida_project
|
refs/heads/main
|
/clean.py
|
import pandas as pd
import numpy as np
from progress.bar import Bar
from os import listdir
from os.path import isfile, join
from search import get_name
from join import join
from find import find_missing
PATH_TO_META = 'data/meta/'  # directory of input plate metadata CSVs
PATH_OUT = 'out/'  # directory the cleaned CSVs are written to
# df.loc[df.ID == 103, 'FirstName'] = "Matt"
# creates list of files
# meta = listdir(PATH_TO_META)
# # Clean Data
# df = pd.DataFrame()
# print('Finding Names:')
# for plate in meta:
# # create a dataframe and list rows missing compound names
# df, missing = find_missing(PATH_TO_META+plate)
# # get CATALOG id number
# cat_num = df.loc[missing, 'CATALOG']
# bar = Bar(plate, max=len(cat_num))
# for c in cat_num:
# df.loc[df.CATALOG == c, 'COMPOUND_NAME'] = get_name(c)
# bar.next()
# bar.finish()
# df.to_csv(PATH_OUT+plate)
def get_meta_data(plate):
    """Fill missing COMPOUND_NAME values for one plate metadata file and
    write the cleaned frame (without the index) under PATH_OUT."""
    frame, missing_rows = find_missing(PATH_TO_META + plate)
    # catalogue numbers of the rows whose compound name is absent
    catalog_ids = frame.loc[missing_rows, 'CATALOG']
    progress = Bar('Finding Loss data', max=len(catalog_ids))
    for catalog_id in catalog_ids:
        # look the name up online and write it back to every matching row
        frame.loc[frame.CATALOG == catalog_id, 'COMPOUND_NAME'] = get_name(catalog_id)
        progress.next()
    progress.finish()
    frame.to_csv(PATH_OUT + plate, index=False)
get_meta_data('Stock_Plate70012.csv')
|
{"/clean.py": ["/search.py", "/join.py", "/find.py"], "/data_explorer.py": ["/find.py", "/join.py"], "/find.py": ["/search.py"]}
|
37,251
|
halliganbs/candida_project
|
refs/heads/main
|
/search.py
|
'''
search sellcheck for compounds
'''
import pandas as pd
import numpy as np
import re # REEEEEE
import requests
from time import sleep # kill me
REGEX = '(?:<a\ href="/products/)(.*)(?:\.html">)'  # captures the product slug from a search-result link
LINK = 'https://www.selleckchem.com/search.html?searchDTO.searchParam='  # search endpoint; catalogue number is appended
OK = 200  # HTTP success
TIMEOUT = 429  # HTTP too-many-requests (rate limited)
FORTYFIVE_SECONDS = 45  # back-off before retrying after a 429
'''
cat_num : Catalouge number
reg : regex for displayed html
link : website search link
'''
def get_name(cat_num, reg=REGEX, link=LINK):
    """Look up a compound name on selleckchem by catalogue number.

    cat_num: catalogue number appended to the search URL
    reg: regex whose group(1) is the product name in the result page
    link: search endpoint prefix

    Returns the extracted name, or "MISSING" when the page has no match or
    the request fails. On HTTP 429 the call sleeps and retries.

    Fixed: (1) the 429-retry originally called ``get_name(cat_num=cat_num)``
    and silently dropped caller-supplied ``reg``/``link``; (2) an OK page
    with no regex match raised AttributeError on ``.group`` — it now
    returns "MISSING" like the other failure paths.
    """
    response = requests.get(link + cat_num)
    code = response.status_code
    if code == OK:
        match = re.search(reg, response.text)
        return match.group(1) if match else "MISSING"
    if code == TIMEOUT:
        sleep(FORTYFIVE_SECONDS)
        # retry with the SAME reg/link the caller provided
        return get_name(cat_num, reg=reg, link=link)
    print(f'STATUS CODE: {code}')
    return "MISSING"
|
{"/clean.py": ["/search.py", "/join.py", "/find.py"], "/data_explorer.py": ["/find.py", "/join.py"], "/find.py": ["/search.py"]}
|
37,252
|
halliganbs/candida_project
|
refs/heads/main
|
/data_explorer.py
|
import numpy as np
import pandas as pd
from find import find_missing
from join import join
# df= pd.read_csv('joined/70003.csv')
# Load the instrument results (TSV) and the well -> condition metadata (CSV),
# then print their schemas for eyeballing before the join.
df = pd.read_csv('data/Candida2ndBatch_allResults.tsv', sep='\t')
dt = pd.read_csv('data/Well_vs_Cond.csv')
print(df.columns)
print(df.shape)
print()
print(dt.columns)
print(dt.shape)
# NOTE: Stock_Plate70012.csv S3768 is not in selleckchem database
# Stock_plate70011.csv S2023 missing
# Candid2nd wellLocation
# Well v Cond LC_WELLID
# temp = join(meta='data/Well_vs_Cond.csv',instrument='data/Candida2ndBatch_allResults.tsv',
#             metaID='LC_WELLID',instID='wellLocation')
# NOTE(review): the note above says the metadata column is LC_WELLID but the
# join below uses 'LC_WellID' — pandas column names are case-sensitive, so
# confirm the actual header spelling in Well_vs_Cond.csv.
temp = dt.join(df.set_index('wellLocation'), on='LC_WellID')
temp.to_csv('test.csv')
print(temp.columns)
print(temp.shape)
|
{"/clean.py": ["/search.py", "/join.py", "/find.py"], "/data_explorer.py": ["/find.py", "/join.py"], "/find.py": ["/search.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.