index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
31,812
|
Turbo-wang/tmw6102
|
refs/heads/master
|
/utils/process_sentence.py
|
from __future__ import print_function
from nltk.tokenize import word_tokenize
import train_utils
import math
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from nltk.tokenize import word_tokenize
# import matplotlib.pyplot as plt
def tokenize_text(text, length = 1000):
    """Lower-case and word-tokenize *text*, keeping at most *length* tokens
    that contain at least one ASCII letter.

    Args:
        text: the raw document text.
        length: maximum number of tokens to keep (default 1000).
    Returns:
        list of filtered tokens.
    """
    # BUG FIX: the original referenced `re_obj`, which is never defined or
    # imported in this module (it exists only in lib/model_cnn.py, whose
    # globals are not visible here) and raised NameError at runtime.
    import re  # local import: this module has no top-level `import re`
    letter_re = re.compile('[a-zA-Z]')
    tokens = word_tokenize(text.lower())[:length]
    return [word for word in tokens if letter_re.search(word)]
def statistic_web():
    """For every URL pair in ../data/train.pairs, write the whitespace token
    counts of the English and French documents to ../data/length_list.out
    (tab-separated, one pair per line).
    """
    # BUG FIX: get_doc_by_url() takes (url, dict_url_text) — the original
    # called it with one argument and raised TypeError.  The url->text map
    # comes from extract_text() (the commented-out line hinted at this).
    dict_url_text, _, _ = train_utils.extract_text()
    with open('../data/length_list.out', 'w') as thefile:
        with open('../data/train.pairs') as train_file:
            for line in train_file:
                print(line)
                pairs = line.strip().split()
                en_url = pairs[0]
                fr_url = pairs[1]
                en_text = train_utils.get_doc_by_url(en_url, dict_url_text)
                fr_text = train_utils.get_doc_by_url(fr_url, dict_url_text)
                # skip pairs with a missing document instead of crashing
                if en_text is None or fr_text is None:
                    continue
                thefile.write(str(len(en_text.split())))
                thefile.write('\t')
                thefile.write(str(len(fr_text.split())))
                thefile.write('\n')
def analysis():
    """Read ../data/length_list.out and report url pairs whose absolute
    length difference exceeds alpha times the mean of the two lengths.

    Prints each outlier pair's lengths and, at the end, the outlier count.
    """
    en_lengths = []
    fr_lengths = []
    with open('../data/length_list.out') as length_file:
        for row in length_file:
            fields = row.split()
            en_lengths.append(fields[0])
            fr_lengths.append(fields[1])
    x = list(range(len(en_lengths)))
    gaps = []
    outliers = 0
    alpha = 0.4  # tolerance factor on the mean length
    for en_raw, fr_raw in zip(en_lengths, fr_lengths):
        en_val = int(en_raw.strip())
        fr_val = int(fr_raw.strip())
        gap = abs(en_val - fr_val)
        threshold = (en_val * alpha + fr_val * alpha) / 2
        if gap > threshold:
            print(en_val, fr_val)
            outliers += 1
        gaps.append(gap)
    print(outliers)
# Script entry point: run only the analysis step (statistic_web must have
# been run previously to produce ../data/length_list.out).
if __name__ == "__main__":
    analysis()
|
{"/utils/train_utils.py": ["/configs/config.py"]}
|
31,813
|
Turbo-wang/tmw6102
|
refs/heads/master
|
/data/decodefile.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import gzip
import sys
from collections import namedtuple
Page = namedtuple("Page", "url, html, text, mime_type, encoding, lang")
def read_lett_iter(f, decode = True):
    # Stub: intended to iterate records of an already-open .lett handle;
    # not implemented — decode_file below does the actual parsing.
    pass
def decode_file(file):
    """Yield one Page per record of *file* (.lett, optionally gzipped).

    Each line holds six tab-separated fields: lang, mime type, encoding,
    url, base64(html), base64(text).  html/text are base64-decoded and
    then utf-8 decoded.
    """
    if file.endswith('gz'):
        print('ok')  # parenthesized: valid under both Python 2 and 3
        f = gzip.open(file)
    else:
        f = open(file)
    # BUG FIX: the original never closed the file handle; try/finally
    # closes it even when the caller abandons the generator early or a
    # malformed line raises inside the loop.
    try:
        for line in f:
            lang, mime, enc, url, html, text = line.split("\t")
            html = base64.b64decode(html).decode("utf-8")
            text = base64.b64decode(text).decode("utf-8")
            yield Page(url, html, text, mime, enc, lang)
    finally:
        f.close()
# Script entry point (Python 2): dump the en/fr pages of one .lett.gz
# archive into a readable tab-separated text file for manual inspection.
if __name__ == "__main__":
    # write_file = "bugadacargnel.com.lett.decode"
    write_file = "schackportalen.nu.lett.decode"
    wf = open(write_file, 'w')
    count = 1  # NOTE(review): starts at 1, so the printed total is off by one
    for i in decode_file("schackportalen.nu.lett.gz"):
        if i.lang == 'fr' or i.lang == 'en':
            count += 1
            # one record per line: url, html, delimited text, mime, enc, lang
            wf.write(i.url.encode('utf-8'))
            wf.write("\t")
            wf.write(i.html.encode('utf-8'))
            wf.write("\t")
            wf.write("-----------text------------")
            wf.write(i.text.encode('utf-8'))
            wf.write("-----------text------------")
            wf.write("\t")
            wf.write(i.mime_type.encode('utf-8'))
            wf.write("\t")
            wf.write(i.encoding.encode('utf-8'))
            wf.write("\t")
            wf.write(i.lang.encode('utf-8'))
            wf.write("\n")
            wf.write("--------------devide---------------")
            wf.write("\n")
            # print i.url, i.html, i.text, i.mime_type, i.encoding, i.lang
    wf.close()
    print count
|
{"/utils/train_utils.py": ["/configs/config.py"]}
|
31,814
|
Turbo-wang/tmw6102
|
refs/heads/master
|
/configs/config.py
|
import os
# Relative data locations shared by the pipeline; all live under ../data/.
_DATA_DIR = '../data/'
CORPORA_DIR = _DATA_DIR
CORPUS_ENG = _DATA_DIR + 'eng_text.out'
CORPUS_FR = _DATA_DIR + 'fr_text.out'
TEST_DIR = _DATA_DIR + 'test/lett.test'
TEST_DIR_UNZIP = _DATA_DIR + 'test'
|
{"/utils/train_utils.py": ["/configs/config.py"]}
|
31,815
|
Turbo-wang/tmw6102
|
refs/heads/master
|
/lib/model_cnn.py
|
# from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Lambda
from keras.layers.embeddings import Embedding
from keras.layers.convolutional import Convolution1D
from keras.layers import Merge
from keras import backend as K
import sys
import os
# import pickle
import re
import json
import time
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import wordVec
import utils.train_utils
import utils.process_sentence as process_sentence
# Hyper-parameters of the two-branch CNN document matcher.
max_features = 5000        # vocabulary cap for the Embedding layers
word_len = 1000            # tokens kept per document side
maxlen = 1000              # maximum padded sequence length
batch_size = 32
embedding_dims = 200       # must match the 200-dim word2vec vectors used below
nb_filter = 250            # number of 1-D convolution filters
filter_length = 2          # convolution window (bigrams)
hidden_dims = 250
nb_epoch = 1
re_obj = re.compile('[a-zA-Z]')  # keeps only tokens containing a letter
def _url_to_token_list(dict_url_text, url):
    """Fetch the document text stored for *url* and tokenize it; [] if unknown."""
    text = dict_url_text.setdefault(url, None)
    if text is None:
        return []
    return process_sentence.tokenize_text(text.replace('\n', '\t'))


def _append_word_vectors(x_, tokens, vec_dict, pad_to):
    """Append up to 1000 word vectors for *tokens* to x_, then zero-pad x_
    out to *pad_to* entries (each vector is 200-dim)."""
    count = 0
    for word in tokens:
        if re_obj.search(word) and count < 1000:
            x_.append(vec_dict.setdefault(word, [0] * 200))
            count += 1
    while len(x_) < pad_to:
        x_.append([0] * 200)


def prapare_train():
    """Build the train/test matrices from url pairs and dump them as JSON.

    Each example is 2000 rows of 200-dim vectors: English tokens padded to
    1000 rows followed by French tokens padded to another 1000 rows.
    Writes ../data/train_matrix.out and ../data/test_matrix.out.
    """
    en_vec_dict = wordVec.load_wordVec_mem('../data/envec2.txt')
    fr_vec_dict = wordVec.load_wordVec_mem('../data/frvec2.txt')
    dict_url_text, dict_url_en, dict_url_fr = utils.train_utils.extract_text()
    train_file = open('../data/train_matrix.out', 'w')
    X_train = []
    Y_train = []
    X_test = []
    Y_test = []
    count_num = 0
    with open('../data/train_data.pairs') as train_lines:
        for line in train_lines:
            # BUG FIX: the original discarded the result of line.split(),
            # so line[0]/line[1]/line[2] indexed single characters of the
            # raw line instead of the label and the two urls.
            fields = line.split()
            Y_train.append(fields[0])
            x_ = []
            _append_word_vectors(
                x_, _url_to_token_list(dict_url_text, fields[1]),
                en_vec_dict, 1000)
            # BUG FIX: French words were previously looked up in the
            # English vector dict, and the padding amount was computed from
            # the token list instead of from x_.
            _append_word_vectors(
                x_, _url_to_token_list(dict_url_text, fields[2]),
                fr_vec_dict, 2000)
            if count_num > 3:
                break
            count_num += 1
            X_train.append(x_)
    json.dump(X_train, train_file, encoding='utf-8')
    print('json train ok')
    with open('../data/dev_data.pairs') as dev_lines:
        for line in dev_lines:
            fields = line.split()
            Y_test.append(fields[0])
            x_ = []
            _append_word_vectors(
                x_, _url_to_token_list(dict_url_text, fields[1]),
                en_vec_dict, 1000)
            _append_word_vectors(
                x_, _url_to_token_list(dict_url_text, fields[2]),
                fr_vec_dict, 2000)
            # count_num deliberately carries over from the loop above,
            # capping the total number of examples processed
            if count_num > 6:
                break
            count_num += 1
            X_test.append(x_)
    print('start json')
    print(time.localtime(time.time()))
    json_test_file = open('../data/test_matrix.out', 'w')
    json.dump(X_test, json_test_file, encoding='utf-8')
    print(time.localtime(time.time()))
    train_file.close()
    print('json test ok')
def train_model():
    """Train the two-branch CNN on the matrices built by prapare_train().

    Loads ../data/train_matrix.out, splits every example into its English
    half and French half, and fits the merged model with a 20% validation
    split.
    """
    X_ = []
    with open('../data/train_matrix.out') as train_file:
        X_train = json.load(train_file)
        for x in X_train:
            a = len(x)
            # BUG FIX (py3 compat): a/2 is a float in Python 3 and cannot
            # be used as a slice index; use floor division.
            half = a // 2
            print(half)
            # first half = English rows, second half = French rows
            X_.append([x[:half], x[half:]])
    Y_train = [1, 0, 0] * 3
    # the dead `model = Sequential()` assignment was removed: the model
    # is built entirely by get_nn_model()
    model = get_nn_model()
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(X_, Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              validation_split=0.2)
    print('ok')
def get_nn_model():
    """Build the two-branch CNN matcher (legacy Keras 1.x API).

    Each branch: Embedding -> Convolution1D -> global max pooling (via a
    Lambda) -> Dense/Dropout/ReLU.  The two branches are concatenated with
    Merge and classified by a 2-way softmax.
    """
    print('Build model...')
    # English branch
    model_en = Sequential()
    model_en.add(Embedding(max_features,
                           embedding_dims,
                           input_length=word_len,
                           dropout=0.2))
    model_en.add(Convolution1D(nb_filter=nb_filter,
                               filter_length=filter_length,
                               border_mode='valid',
                               activation='relu',
                               subsample_length=1))

    def max_1d(X):
        # global max pooling over the time axis
        return K.max(X, axis=1)

    model_en.add(Lambda(max_1d, output_shape=(nb_filter,)))
    # We add a vanilla hidden layer:
    model_en.add(Dense(hidden_dims))
    model_en.add(Dropout(0.2))
    model_en.add(Activation('relu'))
    # French branch: same architecture as the English branch
    model_fr = Sequential()
    model_fr.add(Embedding(max_features,
                           embedding_dims,
                           input_length=word_len,
                           dropout=0.2))
    model_fr.add(Convolution1D(nb_filter=nb_filter,
                               filter_length=filter_length,
                               border_mode='valid',
                               activation='relu',
                               subsample_length=1))
    model_fr.add(Lambda(max_1d, output_shape=(nb_filter,)))
    model_fr.add(Dense(hidden_dims))
    model_fr.add(Dropout(0.2))
    model_fr.add(Activation('relu'))
    # concatenate both branches and classify
    merged = Merge([model_en, model_fr], mode='concat')
    final_model = Sequential()
    final_model.add(merged)
    final_model.add(Dense(2, activation='softmax'))
    return final_model
# Script entry point: assumes prapare_train() has already produced
# ../data/train_matrix.out (uncomment to regenerate it first).
if __name__ == '__main__':
    # prapare_train()
    train_model()
|
{"/utils/train_utils.py": ["/configs/config.py"]}
|
31,816
|
Turbo-wang/tmw6102
|
refs/heads/master
|
/utils/train_utils.py
|
from __future__ import print_function
from os import listdir
from os.path import isfile, join
import base64
import gzip
import sys
# reload(sys)
# sys.setdefaultencoding('utf8')
import numpy
import os
import random
import re
import time
import numpy as np
from scipy.spatial.distance import cosine
re_obj = re.compile('[a-zA-Z]')
# sys.path.append("../")
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import configs.config
# import lib.model_cnn
from lib import wordVec
import process_sentence
from nltk.translate.bleu_score import corpus_bleu, sentence_bleu
from collections import namedtuple
# One record of a .lett corpus file (see decode_file below).
Page = namedtuple("Page", "url, html, text, mime_type, encoding, lang")
# Corpus locations shared via configs/config.py.
corpora_dir = configs.config.CORPORA_DIR
file_eng = configs.config.CORPUS_ENG
file_fr = configs.config.CORPUS_FR
def extract_domain(file):
    """Decode one .lett(.gz) corpus file and index its pages by url.

    Returns (dict_url_text, dict_url_en, dict_url_fr): url -> page text,
    plus the English and French url lists.  Python 2 only: relies on
    reload(sys)/setdefaultencoding and the `unicode` type.
    """
    reload(sys)
    sys.setdefaultencoding('utf-8')
    dict_url_text = {}
    dict_url_en = []
    dict_url_fr = []
    for line in decode_file(join(corpora_dir, file)):
        if line.lang == 'en':
            if isinstance(line.text, unicode):
                dict_url_text[line.url] = line.text
                dict_url_en.append(line.url)
            else:
                # non-unicode text: store utf-8 encoded byte strings instead
                print(line.text)
                dict_url_text[line.url.encode('utf-8')] = line.text.encode('utf-8')
                dict_url_en.append(line.url.encode('utf-8'))
        elif line.lang == 'fr':
            if isinstance(line.text, unicode):
                dict_url_text[line.url] = line.text
                dict_url_fr.append(line.url)
            else:
                print(line.text)
                dict_url_text[line.url.encode('utf-8')] = line.text.encode('utf-8')
                dict_url_fr.append(line.url.encode('utf-8'))
        else:
            # pages in other languages are ignored
            continue
    return dict_url_text, dict_url_en, dict_url_fr
def text_from_translate(domain):
    # Stub: intended to return translated text for *domain*; not implemented.
    pass
def get_translation_for_url(src='../data/translations.train/url2text.en',
                            dst='../data/en_train_trans.out'):
    """Group translated sentences by URL.

    The input has one `url<space>text...` record per line, grouped by
    consecutive url.  The output alternates a url line and a tab-joined
    line of all texts for that url.  Paths are parameterized (defaults
    keep the original behavior).

    BUG FIXES vs. the original: the first text of every new url group was
    dropped, the final group was never flushed, and a hard-coded bogus
    initial url was emitted as an empty first record.
    """
    with open(dst, 'w') as en_text_trans:
        with open(src) as en_lines:
            url_last = None
            text_list = []
            for line in en_lines:
                content = line.split()
                url_new = content[0]
                text = '\t'.join(content[1:])
                if url_last is not None and url_last != url_new:
                    # flush the finished group before starting the next one
                    en_text_trans.write(url_last)
                    en_text_trans.write('\n')
                    en_text_trans.write('\t'.join(text_list))
                    en_text_trans.write('\n')
                    text_list = []
                url_last = url_new
                text_list.append(text)
            if url_last is not None:
                # flush the last group (previously lost)
                en_text_trans.write(url_last)
                en_text_trans.write('\n')
                en_text_trans.write('\t'.join(text_list))
                en_text_trans.write('\n')
def load_translation(file_name, trans_path='../data/en_train_trans.out'):
    """Return {url: translated_text} for urls of the domain in *file_name*.

    Args:
        file_name: corpus file name ending in '.lett.gz'; the leading part
            is the domain to match.
        trans_path: translation dump written by get_translation_for_url()
            (alternating url line / tab-joined text line).
    """
    url_dict = {}
    domain = file_name[:-8]  # strip the trailing '.lett.gz'
    print(domain)
    with open(trans_path, 'r') as en_train_trans:
        lines = en_train_trans.readlines()
        # the file alternates url line and text line
        for url, text in zip(lines[::2], lines[1::2]):
            url = url.strip()
            text = text.strip()
            # BUG FIX: the domain was used as a raw regex, so every '.'
            # matched any character; escape it to match literally.
            if re.search(re.escape(domain), url) is not None:
                url_dict[url] = text
    return url_dict
def test():
    """Ad-hoc BLEU sanity check for a few hand-picked urls.

    NOTE(review): enurl1/enurl2/enurl3 are not defined anywhere in this
    module, so calling this function raises NameError as written — the
    intended English urls need to be restored.
    """
    en_url = []
    en_url.append(enurl1)
    en_url.append(enurl2)
    en_url.append(enurl3)
    frurl = 'http://www.krn.org/fr/106.aspx'
    file_name = 'www.krn.org.lett.gz'
    url_text_trans = load_translation(file_name)
    dict_url_text, dict_url_en, dict_url_fr = extract_domain(file_name)
    fr_text = url_text_trans[frurl]
    # fr_text = process_sentence.tokenize_text(fr_text)
    for enurl in en_url:
        en_text = dict_url_text[enurl]
        en_text = process_sentence.tokenize_text(en_text)
        print(sentence_bleu(en_text, fr_text))
def vector_test():
    """Match French pages to English pages by mean word-vector cosine.

    For the hard-coded krn.org domain: for every French url, compute the
    mean 200-dim word vector of every candidate English page (after a
    length filter), write each surviving pair with its cosine distance to
    ../data/predict_unlimit.pairs, and count how often the closest English
    page belongs to the gold training pairs.
    """
    en_vector_dict = wordVec.load_wordVec_mem('../data/envec2.txt')
    fr_vector_dict = wordVec.load_wordVec_mem('../data/frvec2.txt')
    files_list = [f for f in listdir(corpora_dir) if isfile(join(corpora_dir, f)) and (f.endswith('lett') or f.endswith('gz'))]
    match_url = []
    train_pair = []
    count = 0
    with open('../data/train.pairs', 'r') as pairs:
        for pair in pairs:
            train_pair.append(pair.strip())
            match_url += pair.strip().split()
    predict_file = open('../data/predict_unlimit.pairs', 'w')
    url_set = set(match_url)
    alpha = 0.35  # relative length tolerance between candidate pages
    # fallback vectors for out-of-vocabulary words
    unk_vec_en = en_vector_dict['unknown']
    unk_vec_fr = fr_vector_dict['(unk)']
    count = 0
    for file_name in files_list[:1]:
        file_name = 'www.krn.org.lett.gz'  # NOTE(review): overrides the loop variable
        dict_url_text, dict_url_en, dict_url_fr = extract_domain(file_name)
        for fr_url in dict_url_fr:
            distance_list = []
            for en_url in dict_url_en:
                en_text = dict_url_text[en_url]
                en_length = len(en_text.strip().split())
                fr_text = dict_url_text[fr_url]
                fr_length = len(fr_text.strip().split())
                dis = abs(en_length - fr_length)
                dis_for = fr_length * alpha
                if (dis_for < dis):
                    # skip pairs whose lengths differ too much
                    continue
                en_text = process_sentence.tokenize_text(en_text)
                fr_text = process_sentence.tokenize_text(fr_text)
                # mean English word vector
                en_web_vec = np.zeros(200)
                en_web_vec_length = len(en_text)
                for text in en_text:
                    vec = en_vector_dict.setdefault(text, unk_vec_en)
                    vec = np.asarray(vec, dtype='float32')
                    en_web_vec += vec
                en_web_vec = np.divide(en_web_vec, float(en_web_vec_length))
                # mean French word vector
                fr_web_vec = np.zeros(200)
                fr_web_vec_length = len(fr_text)
                for text in fr_text:
                    vec = fr_vector_dict.setdefault(text, unk_vec_fr)
                    vec = np.asarray(vec, dtype='float32')
                    fr_web_vec += vec
                fr_web_vec = np.divide(fr_web_vec, float(fr_web_vec_length))
                distance = cosine(en_web_vec, fr_web_vec)
                print(distance)
                tmp = []
                url_pair = en_url + ' ' + fr_url
                tmp.append(url_pair)
                tmp.append(distance)
                distance_list.append(tmp)
                predict_file.write(url_pair)
                predict_file.write('\t')
                predict_file.write(str(distance))
                predict_file.write('\n')
            # the closest English page is the prediction for this fr_url
            # NOTE(review): raises IndexError when every candidate was
            # filtered out (distance_list empty) — confirm intended
            distance_list = sorted(distance_list, key=lambda d: d[1])
            pre = distance_list[0][0]
            if pre in match_url:
                count += 1
            print(distance_list)
def get_sentence_vec(text):
    # Stub: intended to return a sentence embedding for *text*; not implemented.
    pass
def bleu_test():
    """Predict en/fr page pairs per domain by BLEU score.

    For every French url that has a machine translation, score each
    length-compatible English page with sentence_bleu and append the five
    best-scoring url pairs to ../data/predict_unlimit.pairs.
    """
    files_list = [f for f in listdir(corpora_dir)
                  if isfile(join(corpora_dir, f)) and (f.endswith('lett') or f.endswith('gz'))]
    match_url = []
    train_pair = []
    count = 0
    with open('../data/train.pairs', 'r') as pairs:
        for pair in pairs:
            train_pair.append(pair.strip())
            match_url += pair.strip().split()
    predict_file = open('../data/predict_unlimit.pairs', 'w')
    url_set = set(match_url)
    alpha = 0.35  # relative length tolerance between candidate pages
    for file_name in files_list[:1]:
        file_name = 'www.krn.org.lett.gz'  # NOTE(review): hard-coded test domain
        url_text_trans = load_translation(file_name)
        dict_url_text, dict_url_en, dict_url_fr = extract_domain(file_name)
        en_text_list = []
        print('extract ok')
        reference_list = []
        for url in dict_url_fr:
            time_start = time.time()
            pos = -1
            score_list = []
            text = url_text_trans.setdefault(url, None)
            # BUG FIX: the original dereferenced `text` (len/strip/split)
            # before its `text == None` check, so a missing translation
            # raised AttributeError; skip those urls up front instead.
            if text is None:
                continue
            fr_length = len(text.strip().split())
            text = process_sentence.tokenize_text(text)
            for en_url in dict_url_en:
                en_text = dict_url_text[en_url]
                en_length = len(en_text.strip().split())
                dis = abs(en_length - fr_length)
                dis_for = fr_length * alpha
                if dis_for < dis:
                    # lengths differ too much to be a translation pair
                    continue
                en_text = process_sentence.tokenize_text(en_text)
                if len(en_text) != 0 and len(text) != 0:
                    url_pair = en_url + ' ' + url
                    print('computing')
                    score = sentence_bleu(text, en_text)
                    tmp = []
                    tmp.append(url_pair)
                    tmp.append(score)
                    score_list.append(tmp)
            if len(score_list) != 0:
                score_list = sorted(score_list, key=lambda d: d[1], reverse=True)
                print(score_list)
                # keep the five best-scoring candidate pairs
                for score in score_list[:5]:
                    pre = score[0]
                    score = score[1]
                    print(pre, '\tbleu', score)
                    predict_file.write(pre)
                    predict_file.write('\n')
            time_end = time.time()
            print((time_end - time_start), 'for', url, '\t', count)
    predict_file.close()
    print(count)
def cal(pre_path='../data/predict_unlimit.pairs', ans_path='../data/train.pairs'):
    """Count predicted (en, fr) url pairs that appear in the answer file.

    Args:
        pre_path: predictions, one 'en_url fr_url' pair per line.
        ans_path: gold pairs in the same format.
    Returns:
        the number of matching pairs (also printed, as before).
    """
    # BUG FIX: the original iterated the answer file *inside* the
    # prediction loop, exhausting its iterator after the first prediction
    # so later predictions were never compared; load the answers once.
    answers = set()
    with open(ans_path) as ans:
        for ans_urls in ans:
            fields = ans_urls.split()
            answers.add((fields[0], fields[1]))
    count = 0
    with open(pre_path) as pre:
        for urls in pre:
            fields = urls.split()
            if (fields[0], fields[1]) in answers:
                count += 1
    print(count)
    return count
def extract_text(write_file = 0):
    """Decode every .lett/.gz corpus file under corpora_dir.

    Args:
        write_file: when 1, additionally append all English/French page
            texts to the corpus files from configs/config.py.
    Returns:
        (dict_url_text, dict_url_en, dict_url_fr): url -> page text plus
        the English and French url lists.
    Python 2 only (reload(sys)/setdefaultencoding, `unicode`).
    """
    reload(sys)
    sys.setdefaultencoding('utf-8')
    dict_url_text = {}
    dict_url_en = []
    dict_url_fr = []
    files_list = [f for f in listdir(corpora_dir)
                  if isfile(join(corpora_dir, f)) and (f.endswith('.lett') or f.endswith('.gz'))]
    if write_file == 1:
        wf_eng = open(join(corpora_dir, file_eng), 'w')
        wf_fr = open(join(corpora_dir, file_fr), 'w')
    for file in files_list:
        print(file)
        for line in decode_file(join(corpora_dir, file)):
            if line.lang == 'fr':
                if isinstance(line.text, unicode):
                    dict_url_text[line.url] = line.text
                    # BUG FIX: French urls were appended to dict_url_en
                    dict_url_fr.append(line.url)
                else:
                    dict_url_text[line.url.encode('utf-8')] = line.text.encode('utf-8')
                    dict_url_fr.append(line.url.encode('utf-8'))
                if write_file == 1:
                    wf_fr.write(line.text.encode('utf-8'))
                    wf_fr.write('\n')
            elif line.lang == 'en':
                if isinstance(line.text, unicode):
                    dict_url_text[line.url] = line.text
                    dict_url_en.append(line.url)
                else:
                    dict_url_text[line.url.encode('utf-8')] = line.text.encode('utf-8')
                    # BUG FIX: encoded English urls went to dict_url_fr
                    dict_url_en.append(line.url.encode('utf-8'))
                if write_file == 1:
                    wf_eng.write(line.text.encode('utf-8'))
                    wf_eng.write('\n')
            else:
                # pages in other languages are ignored
                continue
    if write_file == 1:
        wf_eng.close()
        wf_fr.close()
    print('extract all file ok')
    return dict_url_text, dict_url_en, dict_url_fr
def get_doc_by_url(url, dict_url_text):
    """Return the text stored for *url* in *dict_url_text*, or None.

    Prints the url when it is missing so absent documents show up in logs.
    """
    # BUG FIX: the original used dict.setdefault(url, None), which
    # inserted a None entry into the caller's dict on every miss,
    # silently mutating shared state; .get() is side-effect free.
    text = dict_url_text.get(url)
    if text is None:
        print(url)
    return text
def get_para_text():
    """Copy the parallel translated sentences into plain training files,
    dropping the leading url column from every line."""
    with open('../data/para_for_train.en', 'w') as par_en, \
         open('../data/para_for_train.fr', 'w') as par_fr:
        with open('../data/test/translations.test/url2text.en') as en_file, \
             open('../data/test/translations.test/url2text.fr') as fr_file:
            for en_line, fr_line in zip(en_file, fr_file):
                en_fields = en_line.split()
                fr_fields = fr_line.split()
                # field 0 is the url; the rest is the sentence text
                par_en.write('\t'.join(en_fields[1:]))
                par_en.write('\n')
                par_fr.write('\t'.join(fr_fields[1:]))
                par_fr.write('\n')
def decode_file(file):
    """Yield one Page per record of *file* (.lett, optionally gzipped).

    Each line carries six tab-separated fields: lang, mime type, encoding,
    url, base64(html), base64(text); html/text are base64-decoded and then
    utf-8 decoded.
    """
    if file.endswith('gz'):
        f = gzip.open(file)
    else:
        f = open(file)
    # BUG FIX: the original's f.close() after the loop never runs when the
    # caller abandons the generator early or a malformed record raises;
    # try/finally closes the handle in every case.
    try:
        for line in f:
            lang, mime, enc, url, html, text = line.split("\t")
            html = base64.b64decode(html).decode("utf-8")
            text = base64.b64decode(text).decode("utf-8")
            yield Page(url, html, text, mime, enc, lang)
    finally:
        f.close()
def compose_train_data():
    """Write ../data/train_data.pairs: for each gold (en, fr) url pair, one
    positive example and two randomly drawn negative examples.

    Output format per line: label<TAB>url1<TAB>url2 (label 1 = gold pair,
    0 = negative sample from the same domain).
    """
    train_data = open('../data/train_data.pairs','w')
    # captures the domain part of an http:// url
    re_obj = re.compile('((?<=http://)(\w+-?\w\\.?)+?(?=/))')
    with open('../data/train.pairs') as train_file:
        lines = train_file.readlines()
        for line in lines[:1300]:
            line = line.strip().split()
            print(line)
            domain = re_obj.findall(line[0])
            print(domain[0][0])
            domainfile = str(domain[0][0]) + '.lett.gz'
            dict_domain, dict_url_domain_en, dict_url_domain_fr = extract_domain(domainfile)
            # positive example: the gold pair itself
            train_data.write('1')
            train_data.write('\t')
            train_data.write(line[0])
            train_data.write('\t')
            train_data.write(line[1])
            train_data.write('\n')
            # negative example 1: gold English url + a random other url
            # NOTE(review): the random url is drawn from the *English*
            # list, so both sides are English — confirm it should not
            # come from dict_url_domain_fr instead
            train_data.write('0')
            train_data.write('\t')
            train_data.write(line[0])
            train_data.write('\t')
            error_url = line[0]
            url_num = len(dict_url_domain_en)
            while True:
                index = random.randint(0,url_num-1)
                error_url = dict_url_domain_en[index]
                if error_url != line[0]:
                    break
            train_data.write(error_url)
            train_data.write('\n')
            # negative example 2: a random French url + the gold French url
            # NOTE(review): both sides are French here — confirm intent
            train_data.write('0')
            train_data.write('\t')
            error_url = line[1]
            url_num = len(dict_url_domain_fr)
            while True:
                index = random.randint(0,url_num-1)
                error_url = dict_url_domain_fr[index]
                if error_url != line[1]:
                    break
            train_data.write(error_url)
            train_data.write('\t')
            train_data.write(line[1])
            train_data.write('\n')
    # (a commented-out variant that wrote lines[1300:] to dev_data.pairs
    # with the same sampling scheme was removed here for readability)
def calculate_vector_text(text):
    """Incomplete: average the word vectors of *text* (work in progress)."""
    # BUG FIX: `lib` is not a name in scope here; the loader lives in
    # lib.wordVec, imported above as `from lib import wordVec`.
    eng_vector_dict = wordVec.load_wordVec_mem('../data/envec.txt')
    fr_vector_dict = wordVec.load_wordVec_mem('../data/frvec.txt')
    # BUG FIX: np.zeros() requires a shape argument; use the 200-dim
    # vector size used throughout this module.
    vector = np.zeros(200)
    # for word in text:
    # dict_url_text, dict_url_en, dict_url_fr = extract_text()
    #
# Script entry point: run the word-vector matcher, then score its
# predictions against the gold pairs in ../data/train.pairs.
if __name__ == '__main__':
    vector_test()
    cal()
    # bleu_test()
    # text_url_dict = extract_text()
    # with open('../data/train.pairs') as file:
    #     pairs = file.readlines()
    #     for pair in pairs:
    #         eng_url , fr_url = pair.split('\t')
    #         eng_text = text_url_dict[eng_url]
    #         fr_text = text_url_dict[fr_url]
    #
|
{"/utils/train_utils.py": ["/configs/config.py"]}
|
31,834
|
kfinn/elizabeth-pipeline
|
refs/heads/master
|
/scripts/generate_maximum_projection.py
|
import cli.log
class GenerateMaximumProjectionJob:
    """Parameters for one maximum-projection task.

    The processing in start() is not implemented yet.
    """

    def __init__(self, source_image_prefix, destination):
        self.source_image_prefix = source_image_prefix
        self.destination = destination

    def start(self):
        # Planned steps:
        #   1. read all the images
        #   2. maximally project them
        #   3. compute the z-axis distance distribution
        #   4. serialize it all out, probably via
        #      numpy.save(Path(self.destination) / ("%_maximal_projection.npy" % self.source_image_prefix))
        pass
def generate_maximum_projection_cli_str(source_image_prefix, destination):
    """Build the shell command line that runs this script for one image prefix."""
    # TODO: not sure this file will be in the path in swarm. might need to configure the swarm env?
    return "pipenv run python {} {} {}".format(__file__, source_image_prefix, destination)
@cli.log.LoggingApp
def generate_maximum_projection_cli(app):
    """CLI entry point: run one maximum-projection job for the given params."""
    # BUG FIX: the original called generate_all_maximum_projections(),
    # which is neither defined nor imported in this module (NameError);
    # run the single-projection job this module defines instead.
    GenerateMaximumProjectionJob(
        app.params.source_image_prefix,
        app.params.destination
    ).start()

generate_maximum_projection_cli.add_param("source_image_prefix")
generate_maximum_projection_cli.add_param("destination")
# Script entry point: parse CLI params and run one projection job.
if __name__ == "__main__":
    generate_maximum_projection_cli.run()
|
{"/scripts/generate_all_maximum_projections.py": ["/generate_maximum_projection.py"], "/generate_all_maximum_projections.py": ["/generate_maximum_projection.py", "/models/image_filename.py", "/models/image_filename_glob.py", "/models/paths.py", "/models/swarm_job.py"], "/generate_all_spot_positions.py": ["/generate_spot_positions.py", "/models/paths.py", "/models/swarm_job.py", "/models/image_filename.py", "/models/image_filename_glob.py"], "/generate_all_cropped_cell_images.py": ["/generate_cropped_cell_image.py", "/models/image_filename.py", "/models/image_filename_glob.py", "/models/paths.py", "/models/swarm_job.py"], "/generate_all_nuclear_masks.py": ["/generate_nuclear_masks.py", "/models/paths.py", "/models/swarm_job.py"], "/generate_maximum_projection.py": ["/models/paths.py", "/models/z_sliced_image.py"], "/generate_spot_positions.py": ["/models/generate_spot_positions_config.py", "/models/image_filename.py", "/models/paths.py"], "/generate_cropped_cell_image.py": ["/models/image_filename.py", "/models/nuclear_mask.py", "/models/paths.py"], "/generate_all_distance_transforms.py": ["/generate_distance_transform.py", "/models/paths.py", "/models/swarm_job.py"], "/generate_distance_transform.py": ["/models/paths.py"], "/generate_all_nuclear_segmentations.py": ["/generate_nuclear_segmentation.py", "/models/paths.py", "/models/swarm_job.py", "/models/image_filename_glob.py"], "/models/z_sliced_image.py": ["/models/image_filename.py"], "/models/image_filename.py": ["/models/image_name_dictionaries/image_filename_CV.py", "/models/image_name_dictionaries/image_filename_LSM.py"], "/generate_spot_result_line.py": ["/models/image_filename.py", "/models/paths.py"], "/generate_nuclear_segmentation.py": ["/models/image_filename.py", "/models/paths.py"], "/generate_nuclear_masks.py": ["/models/nuclear_mask.py", "/models/paths.py"], "/models/image_filename_glob.py": ["/models/image_name_dictionaries/image_filename_glob_CV.py", 
"/models/image_name_dictionaries/image_filename_glob_LSM.py"], "/generate_all_spot_result_lines.py": ["/generate_spot_result_line.py", "/models/paths.py", "/models/image_filename.py", "/models/image_filename_glob.py", "/models/swarm_job.py"], "/generate_spot_results_file.py": ["/models/image_filename.py", "/models/image_filename_glob.py", "/models/paths.py"]}
|
31,835
|
kfinn/elizabeth-pipeline
|
refs/heads/master
|
/scripts/generate_all_maximum_projections.py
|
import traceback
from datetime import datetime
from pathlib import Path
import cli.log
import logging
from generate_maximum_projection import generate_maximum_projection_cli_str
import subprocess
from time import sleep
class GenerateAllMaximumProjectionsJob:
    """Fans one maximum-projection command per source image out to swarm
    and polls until the swarm job completes.

    source: directory containing *.c01.tiff stacks (must exist).
    destination: output directory (created on first access); also holds
    the generated .swarm file.
    """

    def __init__(self, source, destination):
        self.source = source
        self.destination = destination
        # timestamped job name so reruns don't collide in the queue
        self.job_name = "generate_all_maximum_projections_%s" % datetime.now().strftime("%Y%m%d%H%M%S")
        self.logger = logging.getLogger()

    def start(self):
        """Generate the swarm file, submit it, and wait for completion."""
        self.generate_swarm_file()
        self.submit_swarm_job()
        while not self.is_swarm_job_complete():
            sleep(5)

    def generate_swarm_file(self):
        """Write one projection command per source image into the swarm file."""
        with self.swarm_file_path.open("w") as swarm_file:
            for image_file in self.source_path.glob("*.c01.tiff"):
                swarm_file.write("%s\n" % generate_maximum_projection_cli_str(image_file, self.destination))

    def submit_swarm_job(self):
        """Submit the generated swarm file; raise if swarm exits non-zero."""
        # BUG FIX: subprocess.run with a single string and shell=False
        # tries to exec a program literally named "swarm -f ..."; pass an
        # argv list instead.
        subprocess.run([
            "swarm",
            "-f", str(self.swarm_file_path),
            "--job-name", self.job_name,
        ]).check_returncode()

    def is_swarm_job_complete(self):
        """Return True when squeue reports the job as COMPLETED."""
        # BUG FIX: argv list, plus capture_output/text=True — the original
        # never captured output, so sjobs_result.stdout was None and
        # .find("COMPLETED") raised AttributeError.
        sjobs_result = subprocess.run(
            ["squeue", "-n", self.job_name, "-o", "%T"],
            capture_output=True,
            text=True,
        )
        sjobs_result.check_returncode()
        # heavy assumptions here:
        # - squeue will only include our one job line, no others
        # - squeue will return our line when it's completed
        return sjobs_result.stdout.find("COMPLETED") != -1

    @property
    def swarm_file_path(self):
        """Path of the generated .swarm file (inside destination)."""
        if not hasattr(self, "_swarm_file_path"):
            self._swarm_file_path = self.destination_path / "generate_all_maximum_projections.swarm"
        return self._swarm_file_path

    @property
    def source_path(self):
        """Source directory as a Path; must already exist."""
        if not hasattr(self, "_source_path"):
            self._source_path = Path(self.source)
            if not self._source_path.is_dir():
                raise Exception("source does not exist")
        return self._source_path

    @property
    def destination_path(self):
        """Destination directory as a Path; created on first access."""
        if not hasattr(self, "_destination_path"):
            self._destination_path = Path(self.destination)
            if not self._destination_path.exists():
                Path.mkdir(self._destination_path, parents=True)
            elif not self._destination_path.is_dir():
                raise Exception("destination already exists, but is not a directory")
        return self._destination_path
@cli.log.LoggingApp
def generate_all_maximum_projections_cli(app):
    """CLI entry point: run the fan-out job for the source/destination params."""
    try:
        GenerateAllMaximumProjectionsJob(
            app.params.source,
            app.params.destination
        ).start()
    except Exception:
        # deliberately best-effort: report the failure without re-raising
        # so the LoggingApp wrapper exits cleanly (removed the unused
        # `as exception` binding)
        traceback.print_exc()

generate_all_maximum_projections_cli.add_param("source")
generate_all_maximum_projections_cli.add_param("destination")
# Script entry point: parse CLI params and run the fan-out job.
if __name__ == "__main__":
    generate_all_maximum_projections_cli.run()
|
{"/scripts/generate_all_maximum_projections.py": ["/generate_maximum_projection.py"], "/generate_all_maximum_projections.py": ["/generate_maximum_projection.py", "/models/image_filename.py", "/models/image_filename_glob.py", "/models/paths.py", "/models/swarm_job.py"], "/generate_all_spot_positions.py": ["/generate_spot_positions.py", "/models/paths.py", "/models/swarm_job.py", "/models/image_filename.py", "/models/image_filename_glob.py"], "/generate_all_cropped_cell_images.py": ["/generate_cropped_cell_image.py", "/models/image_filename.py", "/models/image_filename_glob.py", "/models/paths.py", "/models/swarm_job.py"], "/generate_all_nuclear_masks.py": ["/generate_nuclear_masks.py", "/models/paths.py", "/models/swarm_job.py"], "/generate_maximum_projection.py": ["/models/paths.py", "/models/z_sliced_image.py"], "/generate_spot_positions.py": ["/models/generate_spot_positions_config.py", "/models/image_filename.py", "/models/paths.py"], "/generate_cropped_cell_image.py": ["/models/image_filename.py", "/models/nuclear_mask.py", "/models/paths.py"], "/generate_all_distance_transforms.py": ["/generate_distance_transform.py", "/models/paths.py", "/models/swarm_job.py"], "/generate_distance_transform.py": ["/models/paths.py"], "/generate_all_nuclear_segmentations.py": ["/generate_nuclear_segmentation.py", "/models/paths.py", "/models/swarm_job.py", "/models/image_filename_glob.py"], "/models/z_sliced_image.py": ["/models/image_filename.py"], "/models/image_filename.py": ["/models/image_name_dictionaries/image_filename_CV.py", "/models/image_name_dictionaries/image_filename_LSM.py"], "/generate_spot_result_line.py": ["/models/image_filename.py", "/models/paths.py"], "/generate_nuclear_segmentation.py": ["/models/image_filename.py", "/models/paths.py"], "/generate_nuclear_masks.py": ["/models/nuclear_mask.py", "/models/paths.py"], "/models/image_filename_glob.py": ["/models/image_name_dictionaries/image_filename_glob_CV.py", 
"/models/image_name_dictionaries/image_filename_glob_LSM.py"], "/generate_all_spot_result_lines.py": ["/generate_spot_result_line.py", "/models/paths.py", "/models/image_filename.py", "/models/image_filename_glob.py", "/models/swarm_job.py"], "/generate_spot_results_file.py": ["/models/image_filename.py", "/models/image_filename_glob.py", "/models/paths.py"]}
|
31,874
|
marcocajeao/walllet_python
|
refs/heads/master
|
/utils/mongo.py
|
# coding=utf_8
import pymongo
import utils.config as config

# NOTE: importing this module connects to MongoDB immediately, using the
# host/port supplied by utils.config.
client = pymongo.MongoClient(config.get_database_server_url(), config.get_database_server_port())
db = client['wallet']          # the wallet database
transaction = db.transaction   # the transactions collection
|
{"/wallet/route.py": ["/wallet/crud_service.py", "/wallet/find_service.py"], "/wallet/find_service.py": ["/utils/mongo.py"], "/wallet/crud_service.py": ["/utils/mongo.py", "/wallet/transaction_schema.py"]}
|
31,875
|
marcocajeao/walllet_python
|
refs/heads/master
|
/wallet/route.py
|
# coding=utf_8
import flask
import wallet.crud_service as crud
import wallet.find_service as find
import utils.json_serializer as json
import utils.errors as errors
import utils.security as security
def init(app):
    """
    Register the Wallet routes on *app* (Flask).
    Every endpoint requires an admin Authorization token.
    """
    @app.route('/v1/wallet/<user_id>/deposit', methods=['POST'])
    def deposit(user_id):
        # Credit funds into the user's wallet.
        try:
            security.validateAdminRole(flask.request.headers.get("Authorization"))
            body = json.body_to_dic(flask.request.data)
            return json.dic_to_json(crud.addDeposit(body, user_id))
        except Exception as err:
            return errors.handleError(err)
    @app.route('/v1/wallet/<user_id>/withdraw', methods=['POST'])
    def withdraw(user_id):
        # Debit funds from the user's wallet.
        try:
            security.validateAdminRole(flask.request.headers.get("Authorization"))
            body = json.body_to_dic(flask.request.data)
            return json.dic_to_json(crud.addWithdraw(body, user_id))
        except Exception as err:
            return errors.handleError(err)
    @app.route('/v1/wallet/<user_id>', methods=['GET'])
    def getFunds(user_id):
        # Report the current balance of the wallet.
        try:
            security.validateAdminRole(flask.request.headers.get("Authorization"))
            balance = find.getFundsWallet(user_id)
            return json.dic_to_json({
                "user_id": user_id,
                "balance": balance
            })
        except Exception as err:
            return errors.handleError(err)
    @app.route('/v1/wallet/<user_id>/send', methods=['POST'])
    def sendFunds(user_id):
        # Transfer funds from this user to another.
        try:
            security.validateAdminRole(flask.request.headers.get("Authorization"))
            body = json.body_to_dic(flask.request.data)
            return json.dic_to_json(crud.addSend(body, user_id))
        except Exception as err:
            return errors.handleError(err)
    @app.route('/v1/wallet/<user_id>/history', methods=['POST','GET'])
    def getHistory(user_id):
        # List the wallet's transactions.
        try:
            security.validateAdminRole(flask.request.headers.get("Authorization"))
            body = json.body_to_dic(flask.request.data)
            return json.dic_to_json(find.getHistoryWallet(body, user_id))
        except Exception as err:
            return errors.handleError(err)
|
{"/wallet/route.py": ["/wallet/crud_service.py", "/wallet/find_service.py"], "/wallet/find_service.py": ["/utils/mongo.py"], "/wallet/crud_service.py": ["/utils/mongo.py", "/wallet/transaction_schema.py"]}
|
31,876
|
marcocajeao/walllet_python
|
refs/heads/master
|
/wallet/find_service.py
|
# coding=utf_8
import utils.mongo as db
import utils.errors as error
def getFundsWallet(user_id):
    """
    Return the available balance of a user's wallet:
    sum(amount received) - sum(amount sent).\n
    user_id string usuario a buscar
    """
    """
    @api {get} /v1/wallet/:user_id/ Consultar fondos
    @apiName Consultar fondos
    @apiGroup Wallet
    @apiDescription Consulta los fondos de una billetera de usuario
    @apiSuccessExample {json} Respuesta
        HTTP/1.1 200 OK
        {
            "user_id": "{id de usuario}"
            "balance": "{cantidad de dinero}"
        }
    @apiUse Errors
    """
    try:
        # Local import: this module's header never imported bson, so the
        # previous code raised NameError on every call (masked by the
        # blanket except below).
        import bson.objectid as bson

        def _sum_amounts(field):
            # Total "amount" over transactions whose <field> matches the user.
            # aggregate() takes a *list* of pipeline stages (the old code
            # passed two separate dicts, which is not a valid call).
            pipeline = [
                {'$match': {field: bson.ObjectId(user_id)}},
                {'$group': {'_id': None, 'sum': {'$sum': '$amount'}}},
            ]
            total = 0
            # The $group stage yields at most one document; iterating the
            # cursor handles the "no matching transactions" case as 0.
            for doc in db.transaction.aggregate(pipeline):
                total += doc['sum']
            return total

        # balance = money received - money sent
        return _sum_amounts('to_user_id') - _sum_amounts('from_user_id')
    except Exception:
        raise error.InvalidRequest("Invalid criteria")
def getHistoryWallet(params, user_id):
    """
    Return the wallet transactions.\n
    NOTE: the user/date-range filter is not implemented yet — currently
    ALL transactions are returned (see the commented query below).\n
    user_id string usuario a buscar
    """
    """
    @api {get} /v1/wallet/:user_id/history Consultar transacciones en rango de fechas
    @apiName Consultar transacciones
    @apiGroup Wallet
    @apiDescription Consulta de las transacciones de una billetera de usuario en un rango de fechas
    @apiSuccessExample {json} Respuesta
        HTTP/1.1 200 OK
        {
            "_id": "{id de Transaccion}"
            "amount": "{importe}",
            "datetime": "{fecha y hora de creacion}",
            "observation": "{observacion de la transaccion}",
            "type": "{tipo de transaccion}",
            "to_user_id": {Para cual usuario},
            "from_user_id": {Desde cual usuario}
        }
    @apiUse Errors
    """
    try:
        results = []
        # TODO: apply this filter (by user and params date range) instead of
        # returning every transaction in the collection.
        """
        cursor = db.transaction.find({
            "$or": [{
                "from_user_id": bson.ObjectId(user_id)
            }, {
                "to_user_id": bson.ObjectId(user_id)
            }],
            "datetime": {
                "$gte": ISODate(params["date_since"]),
                "$lte": ISODate(params["date_until"])
            }
        })
        """
        cursor = db.transaction.find({})
        for doc in cursor:
            results.append(doc)
        # FIX: the original fell off the end of the function and returned None.
        return results
    except Exception:
        raise error.InvalidRequest("Invalid criteria")
|
{"/wallet/route.py": ["/wallet/crud_service.py", "/wallet/find_service.py"], "/wallet/find_service.py": ["/utils/mongo.py"], "/wallet/crud_service.py": ["/utils/mongo.py", "/wallet/transaction_schema.py"]}
|
31,877
|
marcocajeao/walllet_python
|
refs/heads/master
|
/wallet/crud_service.py
|
# coding=utf_8
import utils.mongo as db
import utils.errors as error
import bson.objectid as bson
import datetime
import wallet.transaction_schema as schema
def addDeposit(params, user_id):
    """
    Create a deposit transaction for *user_id*.\n
    params: dict<propiedad, valor> Transaccion\n
    return dict<propiedad, valor> Transaccion
    """
    """
    @api {post} /v1/wallet Crear un deposito
    @apiName Crear Transaccion
    @apiGroup Wallet
    @apiUse AuthHeader
    @apiExample {json} Body
        {
            "amount": "{importe}",
            "observation": "{observacion de la transaccion}",
            "type": "{tipo de transaccion}",
            "to_user_id": {Para cual usuario},
            "from_user_id": {Desde cual usuario}
        }
    @apiSuccessExample {json} Respuesta
        HTTP/1.1 200 OK
        {
            "_id": "{id de Transaccion}"
            "amount": "{importe}",
            "datetime": "{fecha y hora de creacion}",
            "observation": "{observacion de la transaccion}",
            "type": "{tipo de transaccion}",
            "to_user_id": {Para cual usuario},
            "from_user_id": {Desde cual usuario}
        }
    @apiUse Errors
    """
    # Start from a blank deposit credited to user_id, overlay the
    # caller-supplied fields, validate, then persist.
    doc = schema.newDeposit(user_id)
    doc.update(params)
    schema.validateSchema(doc)
    insert_result = db.transaction.insert_one(doc)
    doc["_id"] = insert_result.inserted_id
    return doc
def addWithdraw(params, user_id):
    """
    Create a withdrawal transaction for *user_id*.\n
    params: dict<propiedad, valor> Transaccion\n
    return dict<propiedad, valor> Transaccion
    """
    """
    @api {post} /v1/wallet Crear un retiro
    @apiName Crear Transaccion
    @apiGroup Wallet
    @apiUse AuthHeader
    @apiExample {json} Body
        {
            "amount": "{importe}",
            "observation": "{observacion de la transaccion}",
            "type": "{tipo de transaccion}",
            "to_user_id": {Para cual usuario},
            "from_user_id": {Desde cual usuario}
        }
    @apiSuccessExample {json} Respuesta
        HTTP/1.1 200 OK
        {
            "_id": "{id de Transaccion}"
            "amount": "{importe}",
            "datetime": "{fecha y hora de creacion}",
            "observation": "{observacion de la transaccion}",
            "type": "{tipo de transaccion}",
            "to_user_id": {Para cual usuario},
            "from_user_id": {Desde cual usuario}
        }
    @apiUse Errors
    """
    # Blank withdrawal debited from user_id + caller fields, then validate
    # and persist.
    doc = schema.newWithdraw(user_id)
    doc.update(params)
    schema.validateSchema(doc)
    insert_result = db.transaction.insert_one(doc)
    doc["_id"] = insert_result.inserted_id
    return doc
def addSend(params, user_id):
    """
    Create a funds-transfer transaction originating from *user_id*.\n
    params: dict<propiedad, valor> Transaccion\n
    return dict<propiedad, valor> Transaccion
    """
    """
    @api {post} /v1/wallet Crear un Envio de fondos
    @apiName Crear Transaccion
    @apiGroup Wallet
    @apiUse AuthHeader
    @apiExample {json} Body
        {
            "amount": "{importe}",
            "observation": "{observacion de la transaccion}",
            "type": "{tipo de transaccion}",
            "to_user_id": {Para cual usuario},
            "from_user_id": {Desde cual usuario}
        }
    @apiSuccessExample {json} Respuesta
        HTTP/1.1 200 OK
        {
            "_id": "{id de Transaccion}"
            "amount": "{importe}",
            "datetime": "{fecha y hora de creacion}",
            "observation": "{observacion de la transaccion}",
            "type": "{tipo de transaccion}",
            "to_user_id": {Para cual usuario},
            "from_user_id": {Desde cual usuario}
        }
    @apiUse Errors
    """
    # Blank transfer from user_id + caller fields, then validate and persist.
    doc = schema.newSend(user_id)
    doc.update(params)
    schema.validateSchema(doc)
    insert_result = db.transaction.insert_one(doc)
    doc["_id"] = insert_result.inserted_id
    return doc
|
{"/wallet/route.py": ["/wallet/crud_service.py", "/wallet/find_service.py"], "/wallet/find_service.py": ["/utils/mongo.py"], "/wallet/crud_service.py": ["/utils/mongo.py", "/wallet/transaction_schema.py"]}
|
31,878
|
marcocajeao/walllet_python
|
refs/heads/master
|
/wallet/transaction_schema.py
|
# coding=utf_8
import numbers
import datetime
import utils.schema_validator as validator
import utils.errors as errors
# General schema validation: only the fields a caller may set are validated.
# NOTE(review): documents built by newDeposit/newWithdraw/newSend carry an
# "observation" field, but this schema validates "description" instead, so
# "observation" is never length-checked — confirm which field name is intended.
TRANSACTION_DB_SCHEMA = {
    "amount": {
        "required": True,
        "type": numbers.Real,
        "min": 0
    },
    "description": {
        "required": False,
        "type": str,
        "maxLen": 2048
    },
    "type": {
        "required": True,
        "type": str,
        "minLen": 1,
        "maxLen": 12
    },
    "to_user_id": {
        "required": False,
        "type": str,
        "minLen": 22,
        "maxLen": 26
    },
    "from_user_id": {
        "required": False,
        "type": str,
        "minLen": 22,
        "maxLen": 26
    }
}
def newDeposit(user_id):
    """
    Build a blank deposit transaction credited to *user_id*.\n
    return dict<propiedad, valor> Wallet
    """
    deposit = {
        "amount": 0.0,
        "datetime": datetime.datetime.utcnow(),
        "observation": "",
        "type": "",
    }
    # A deposit only has a receiving side.
    deposit["to_user_id"] = user_id
    return deposit
def newWithdraw(user_id):
    """
    Build a blank withdrawal transaction debited from *user_id*.\n
    return dict<propiedad, valor> Wallet
    """
    withdrawal = {
        "amount": 0.0,
        "datetime": datetime.datetime.utcnow(),
        "observation": "",
        "type": "",
    }
    # A withdrawal only has a sending side.
    withdrawal["from_user_id"] = user_id
    return withdrawal
def newSend(user_id):
    """
    Build a blank funds-transfer transaction originating from *user_id*
    (the original docstring wrongly said "retiro"/withdrawal).\n
    return dict<propiedad, valor> Wallet
    """
    transfer = {
        "amount": 0.0,
        "datetime": datetime.datetime.utcnow(),
        "observation": "",
        "type": "",
    }
    # Destination is filled in later from the request payload.
    transfer["to_user_id"] = ""
    transfer["from_user_id"] = user_id
    return transfer
def validateSchema(document):
    """Validate *document* against TRANSACTION_DB_SCHEMA; raise if any
    field violates the schema."""
    issues = validator.validateSchema(TRANSACTION_DB_SCHEMA, document)
    if issues:
        raise errors.MultipleArgumentException(issues)
|
{"/wallet/route.py": ["/wallet/crud_service.py", "/wallet/find_service.py"], "/wallet/find_service.py": ["/utils/mongo.py"], "/wallet/crud_service.py": ["/utils/mongo.py", "/wallet/transaction_schema.py"]}
|
31,913
|
MarieBonifacio/Projet-Simplon
|
refs/heads/master
|
/snippets/views.py
|
from django.shortcuts import render
from django.views import View
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from snippets.models import Profile
from snippets.serializers import ProfileSerializer
from .forms import ProfileForm
@csrf_exempt
def profiles(request):
    """
    List all profiles (GET) or create a new profile (POST).
    """
    if request.method == 'GET':
        serializer = ProfileSerializer(Profile.objects.all(), many=True)
        return JsonResponse(serializer.data, safe=False)
    if request.method == 'POST':
        serializer = ProfileSerializer(data=JSONParser().parse(request))
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=400)
        serializer.save()
        return JsonResponse(serializer.data, status=201)
@csrf_exempt
def profile(request, pk):
    """
    Retrieve (GET), update (PUT) or delete (DELETE) a single profile.
    """
    try:
        instance = Profile.objects.get(pk=pk)
    except Profile.DoesNotExist:
        return HttpResponse(status=404)
    if request.method == 'GET':
        return JsonResponse(ProfileSerializer(instance).data)
    if request.method == 'PUT':
        serializer = ProfileSerializer(instance, data=JSONParser().parse(request))
        if not serializer.is_valid():
            return JsonResponse(serializer.errors, status=400)
        serializer.save()
        return JsonResponse(serializer.data)
    if request.method == 'DELETE':
        instance.delete()
        return HttpResponse(status=204)
def get_profile(request):
    """Render the profile form (GET) or create a Profile from it (POST).

    On a valid POST the cleaned form data is pushed through
    ProfileSerializer and persisted (201 with the data, or 400 with the
    serializer errors).  An invalid form re-renders the bound form so the
    field errors are displayed.
    """
    if request.method == 'POST':
        # Bind the submitted data to the form.
        form = ProfileForm(request.POST)
        if form.is_valid():
            serializer = ProfileSerializer(data=form.cleaned_data)
            if serializer.is_valid():
                serializer.save()
                return JsonResponse(serializer.data, status=201)
            return JsonResponse(serializer.errors, status=400)
        # Invalid form: fall through and re-render it with its errors.
        # (The old code returned HttpResponseRedirect('/thanks/') here,
        # which was both unreachable and used a name that was never
        # imported, so an invalid POST previously returned None.)
    else:
        form = ProfileForm()
    return render(request, 'profile.html', {'form': form})
# @csrf_exempt
# def profiles(request):
# """
# List all profile.
# """
# if request.method == 'GET':
# profiles = Profile.objects.all()
# serializer = ProfileSerializer(profiles, many=True)
# return JsonResponse(serializer.data, safe=False)
# @csrf_exempt
# def profile(request, pk):
# try:
# profile = Profile.objects.get(pk=pk)
# except Profile.DoesNotExist:
# return HttpResponse(status=404)
# serializer = ProfileSerializer(profile)
# return JsonResponse(serializer.data)
|
{"/snippets/views.py": ["/snippets/serializers.py"]}
|
31,914
|
MarieBonifacio/Projet-Simplon
|
refs/heads/master
|
/snippets/urls.py
|
from django.urls import path
from snippets import views
urlpatterns = [
    # NOTE(review): no routes are registered; the commented examples refer to
    # views.profile_list / views.profile_detail, but views.py defines
    # profiles / profile — update the names if these are re-enabled.
    # # path('snippets/', views.profile_list),
    # # path('snippets/<int:pk>/', views.profile_detail),
]
|
{"/snippets/views.py": ["/snippets/serializers.py"]}
|
31,915
|
MarieBonifacio/Projet-Simplon
|
refs/heads/master
|
/snippets/serializers.py
|
from rest_framework import serializers
from snippets.models import Profile, LANGUAGE_CHOICES, STYLE_CHOICES
class ProfileSerializer(serializers.Serializer):
    """Plain DRF Serializer exposing the writable fields of a Profile."""
    type = serializers.CharField(required=False, allow_blank=True, max_length=100)
    name = serializers.CharField(required=False, allow_blank=True, max_length=100)
    description = serializers.CharField(required=False, allow_blank=True, max_length=100)

    def create(self, validated_data):
        """Create and return a new Profile instance from the validated data."""
        return Profile.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Copy any provided fields onto *instance*, save it, and return it."""
        for field in ('type', 'name', 'description'):
            setattr(instance, field, validated_data.get(field, getattr(instance, field)))
        instance.save()
        return instance
|
{"/snippets/views.py": ["/snippets/serializers.py"]}
|
31,916
|
MarieBonifacio/Projet-Simplon
|
refs/heads/master
|
/snippets/test/test_models.py
|
from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from snippets import models
from snippets.models import Profile
class ModelTests(TestCase):
    def test_create_profile(self):
        """Creating a Profile stores the given field values verbatim."""
        # Renamed locals: the original bound a variable named `type`,
        # shadowing the builtin.
        expected = {
            'type': 'A',
            'name': 'idealiste',
            'description': 'lorem ipsum',
        }
        created = Profile.objects.create(**expected)
        for field, value in expected.items():
            self.assertEqual(getattr(created, field), value)
|
{"/snippets/views.py": ["/snippets/serializers.py"]}
|
31,917
|
captainGeech42/daas
|
refs/heads/main
|
/daas/models.py
|
import enum
from sqlalchemy import Column, Integer, DateTime, String, Boolean, Enum
from sqlalchemy.sql import func
from .exts import db
class User(db.Model):
    # An API consumer, identified solely by an API key (no username/password).
    id = Column(Integer, primary_key=True)
    ts = Column(DateTime, server_default=func.now())  # row creation time (DB clock)
    apikey = Column(String(64), unique=True)  # bearer token checked by auth_required
    desc = Column(String(100))  # free-text description of the key
class DecompilationStatus(enum.Enum):
    # Lifecycle of a submitted binary (stored via a SQLAlchemy Enum column).
    queued = 1      # accepted; decompilation thread started
    completed = 2   # output file present with real content
    failed = 3      # decompilation errored or timed out
    removed = 4     # results fetched once and deleted from disk
class Binary(db.Model):
    # One submitted binary and the state of its decompilation job.
    id = Column(Integer, primary_key=True)
    ts = Column(DateTime, server_default=func.now())  # submission time (DB clock)
    requestor = Column(String(50))  # caller-supplied identifier
    status = Column(Enum(DecompilationStatus))
    output_dir = Column(String(20))  # per-request temp dir created by _gen_dir()
|
{"/daas/decompile.py": ["/daas/models.py", "/daas/auth.py"], "/daas/auth.py": ["/daas/models.py"], "/daas/__init__.py": ["/daas/cmds.py", "/daas/auth.py", "/daas/decompile.py"]}
|
31,918
|
captainGeech42/daas
|
refs/heads/main
|
/daas/decompile.py
|
import base64
import os
import random
import shutil
import string
import subprocess
import _thread
from flask import Blueprint, request
from .exts import db
from .models import Binary, DecompilationStatus
from .auth import auth_required
# On-disk layout inside each per-request temp directory.
BINARY = "binary"  # the uploaded binary blob
DECOMP_OUTPUT = "output.c"  # file the IDA decompile script writes
decompile = Blueprint("decompile", __name__)
def _decompile_binary(id, bindir):
    """Run IDA headless against <bindir>/binary, writing <bindir>/output.c.

    Executed on a worker thread (see _spawn_decompilation_thread).  The DB
    status updates are deliberately commented out until the session layer
    is thread-safe, per the note below.
    """
    # ATTN: DB CODE SHOULD BE MADE MULTITHREADED
    # CURRENTLY IT IS NOT BECAUSE I AM LAZY
    # WHEN IT BECOMES MT COMPATIBLE, UNCOMMENT THIS STUFF
    #row = Binary.query.filter_by(id=id).first()
    #if row is None:
    #    return False
    try:
        input_path = os.path.join(bindir, BINARY)
        output_path = os.path.join(bindir, DECOMP_OUTPUT)
        # decompile with a 5 minute timeout
        #subprocess.run(["/ida/idat64", "-A", '-S"/decompile.py \\"--output\\" \\"{output_path}\\""', input_path], env={"TERM":"XTERM"}, timeout=300)
        #subprocess.run(["/ida/idat64", "-A", f'-S"/decompile.py \\"--output\\" \\"{output_path}\\""', input_path], timeout=300)
        # NOTE(review): bindir comes from _gen_dir() (server-generated), so the
        # shell string below is not directly attacker-controlled.
        os.system(f'/ida/idat64 -A -S"/decompile.py \\"--output\\" \\"{output_path}\\"" {input_path}')
        # decompliation succeeded
        #row.status = DecompilationStatus.completed
    # NOTE(review): os.system never raises TimeoutExpired — this handler only
    # applies to the commented-out subprocess.run variant above.
    except subprocess.TimeoutExpired:
        # decompilation timed out
        #row.status = DecompilationStatus.failed
        pass
    # save row update
    #db.session.commit()
def _spawn_decompilation_thread(id, bindir):
    """Start _decompile_binary(id, bindir) on a background thread.

    Returns True if the thread was started, False otherwise.
    """
    try:
        _thread.start_new_thread(_decompile_binary, (id, bindir))
    except Exception:
        # FIX: was a bare `except:`, which would also swallow
        # SystemExit/KeyboardInterrupt.
        return False
    return True
def _gen_dir():
keyspace = string.ascii_letters
dirname = f"/tmp/" + "".join([random.choice(keyspace) for _ in range(10)])
if not os.path.isdir(dirname):
os.mkdir(dirname)
return dirname
else:
return _gen_dir()
@decompile.route("/request_decomp", methods=["POST"])
@auth_required
def request_decomp():
    """Accept a base64-encoded binary, queue it for decompilation, and
    return the job id.

    Expects JSON body: {"binary": <base64>, "requestor": <string>}.
    """
    body = request.json
    if body is None:
        return {"status": "error", "msg": "need to specify binary and requestor"}, 400
    if "binary" not in body.keys():
        return {"status": "error", "msg": "need to specify binary"}, 400
    if "requestor" not in body.keys():
        # FIX: this branch previously returned the wrong message
        # ("need to specify binary").
        return {"status": "error", "msg": "need to specify requestor"}, 400
    binary = base64.b64decode(body["binary"])
    # Save the binary into a fresh per-request directory.
    dirname = _gen_dir()
    with open(os.path.join(dirname, BINARY), "wb") as f:
        f.write(binary)
    # Record the job before starting work so the id can be returned.
    rec = Binary(requestor=body["requestor"], status=DecompilationStatus.queued, output_dir=dirname)
    db.session.add(rec)
    db.session.commit()
    # Decompilation runs on a background thread; the client polls /status.
    if _spawn_decompilation_thread(rec.id, dirname):
        return {"status": "ok", "msg": "started analysis", "id": rec.id}
    else:
        return {"status": "err", "msg": "failed to start analysis"}, 500
@decompile.route("/status/<id>")
@auth_required
def status(id=0):
    """Report the decompilation status for binary <id>, promoting it to
    `completed` once the output file exists and has real content."""
    record = Binary.query.filter_by(id=id).first()
    if not record:
        return {"status": "err", "msg": f"failed to find binary {id}"}, 404
    out_path = os.path.join(record.output_dir, DECOMP_OUTPUT)
    if os.path.isfile(out_path):
        # Treat >10 whitespace-separated tokens as "real" decompiler output.
        with open(out_path, "r") as fh:
            contents = fh.read()
        if len(contents.split()) > 10:
            record.status = DecompilationStatus.completed
            db.session.commit()
    # Enum member name, e.g. "queued" / "completed".
    return {"status": "ok", "analysis_status": record.status.name}
@decompile.route("/get_decompilation/<id>")
@auth_required
def get_decompilation(id=0):
    """Return the decompiled output for binary <id> (base64), exactly once.

    After a successful fetch the on-disk results are deleted and the job is
    marked `removed`; subsequent fetches get a 400.
    """
    binary = Binary.query.filter_by(id=id).first()
    if not binary:
        return {"status": "err", "msg": f"failed to find binary {id}"}, 404
    # FIX: the `removed` check must run before the `completed` check — the
    # original tested `!= completed` first, which returned early for every
    # non-completed status, making the `removed` branch unreachable.
    if binary.status == DecompilationStatus.removed:
        return {"status": "err", "msg": "decompilation was already returned, please re-request"}, 400
    if binary.status != DecompilationStatus.completed:
        return {"status": "err", "msg": "decompilation not finished, did you check the status?"}, 400
    try:
        with open(os.path.join(binary.output_dir, DECOMP_OUTPUT), "r") as f:
            decomp = f.read()
    except FileNotFoundError:
        return {"status": "err", "msg": "decompilation not found"}, 500
    # delete binary to prevent disk from filling up
    shutil.rmtree(binary.output_dir)
    binary.status = DecompilationStatus.removed
    db.session.commit()
    return {"status": "ok", "output": base64.b64encode(decomp.encode()).decode()}
|
{"/daas/decompile.py": ["/daas/models.py", "/daas/auth.py"], "/daas/auth.py": ["/daas/models.py"], "/daas/__init__.py": ["/daas/cmds.py", "/daas/auth.py", "/daas/decompile.py"]}
|
31,919
|
captainGeech42/daas
|
refs/heads/main
|
/daas/auth.py
|
import base64
from functools import wraps
import json
from flask import abort, Blueprint, request
from .exts import db
from .models import User
auth = Blueprint("auth", __name__)
def _generate_api_key():
with open("/dev/urandom", "rb") as f:
r = f.read(32)
key = base64.b64encode(r).decode()
return key
# Decorator: reject the request with 403 unless the Authorization header
# carries a Bearer token matching a known User apikey.
def auth_required(f):
    @wraps(f)
    def check_auth(*args, **kwargs):
        denied = ({"status": "error", "msg": "bad auth"}, 403)
        header = request.headers.get("Authorization")
        if header is None:
            return denied
        parts = header.split("Bearer ")
        if len(parts) < 2:
            # No "Bearer " prefix in the header.
            return denied
        if User.query.filter_by(apikey=parts[1]).first() is None:
            return denied
        return f(*args, **kwargs)
    return check_auth
# Bootstrap endpoint: create the very first account.
# Refuses (403) as soon as any User row exists.
@auth.route("/auth/setup_acc", methods=["POST"])
def setup_acc():
    if db.session.query(User).count() > 0:
        abort(403)
    key = _generate_api_key()
    # NOTE(review): the desc still references the old route name
    # "/auth/gen_first_acc" — confirm whether it should say setup_acc.
    first_user = User(apikey=key, desc="account from /auth/gen_first_acc")
    db.session.add(first_user)
    db.session.commit()
    return {"status": "ok", "apikey": key}
# Create an additional API user; requires an authenticated caller and a
# JSON body containing "desc".
@auth.route("/auth/register", methods=["POST"])
@auth_required
def register():
    body = request.json
    if body is None or "desc" not in body:
        return {"status": "error", "msg": "need to specify desc"}, 400
    # Enforce the column length declared on the model.
    max_len = User.desc.property.columns[0].type.length
    if len(body["desc"]) > max_len:
        return {"status": "error", "msg": "desc too long"}, 400
    key = _generate_api_key()
    db.session.add(User(apikey=key, desc=body["desc"]))
    db.session.commit()
    return {"status": "ok", "apikey": key}
|
{"/daas/decompile.py": ["/daas/models.py", "/daas/auth.py"], "/daas/auth.py": ["/daas/models.py"], "/daas/__init__.py": ["/daas/cmds.py", "/daas/auth.py", "/daas/decompile.py"]}
|
31,920
|
captainGeech42/daas
|
refs/heads/main
|
/decompile.py
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# Author : <github.com/tintinweb>
"""
This script was written by tintinweb, and is from https://github.com/tintinweb/ida-batch_decompile
"""
"""
IdaBatchDecompile Plugin and Script adds annotation and batch decompilation functionality to IDA Pro
* requires hexrays decompiler plugin
Usage:
* as idascript in ida gui mode: IDA Pro -> File/Script file... -> IdaDecompileBatch ...
* as idascript in ida cmdline mode: ida(w|w64) -B -M -S"<path_to_this_script> \"--option1\" \"--option2\"", "<target>"
* see --help for options
* as Plugin: follow ida documentation on how to add python plugins
"""
import sys
import json
import glob
import subprocess
import shutil
import os
import tempfile
from optparse import OptionParser
import idaapi
import idautils
from idc import *
if idaapi.IDA_SDK_VERSION >= 700:
import ida_idaapi
import ida_kernwin
from idaapi import *
from idc import *
import logging
logger = logging.getLogger(__name__)
class IdaLocation(object):
""" Wrap idautils Function
"""
def __init__(self, location):
self.at = location
# self.name = GetFunctionName(location)
self.name = GetFuncOffset(location)
self.start = 0
self.end = 0
self.func_offset = 0
try:
_func = idaapi.get_func(location)
self.start = _func.startEA
self.end = _func.endEA # ==FindFuncEnd(location)
self.func_offset = self.start - self.at
except Exception, e:
logger.exception(e)
if not self.name:
self.indirect = True
else:
self.indirect = False
def __repr__(self, *args, **kwargs):
return "<Function %r at 0x%x (0x%x::0x%x)>" % (self.name, self.at,
self.start, self.end)
def get_xrefs(self):
return (IdaLocation(x.frm) for x in idautils.XrefsTo(self.at))
def get_coderefs(self):
return (IdaLocation(frm) for frm in idautils.CodeRefsTo(self.at, 0))
def as_dict(self):
return {'at': self.at, 'name': self.name}
def decompile(self):
""" decompile function
"""
try:
return idaapi.decompile(self.at)
except idaapi.DecompilationFailure, e:
return repr(str(e))
text = str(idaapi.decompile(self.at)).strip()
'''
sprintf:
Python>for w in idaapi.decompile(0x00001578 ).lvars: print w.name
s
format
result
'''
# decompile.arguments
# for w in idaapi.decompile(0x00001EF0 ).lvars: print w.name
if not grep:
return text.split('\n')
# return all lines
return [line.strip() for line in text.split('\n') if grep in line]
def get_function_args(self):
# find the stack frame
stack = GetFrame(self.start)
stack_size = GetStrucSize(stack)
# figure out all of the variable names
# base is either ' s' ... saved register or ' r' ... return address
base = GetMemberOffset(stack, ' s')
if base == -1:
base = GetMemberOffset(stack, ' r')
if base == -1:
# no ' s' no ' r' assume zero
base == 0
stack_vars = []
for memberoffset in xrange(stack_size):
previous = stack_vars[-1] if len(stack_vars) else None
var_name = GetMemberName(stack, memberoffset)
if not var_name or (previous and var_name == previous.get("name")):
# skip that entry, already processed
continue
offset = GetMemberOffset(stack, var_name) - base
size = GetMemberSize(stack, memberoffset)
if previous:
diff = offset - previous['offset']
previous['diff_size'] = diff
stack_vars.append({'name': var_name,
'offset': offset,
'offset_text': '[bp%Xh]' % offset if offset < 0 else '[bp+%Xh]' % offset,
'size': size,
'diff_size': size})
return stack_size, stack_vars
class IdaHelper(object):
""" Namespace for ida helper functions
"""
@staticmethod
def get_functions():
return (IdaLocation(f) for f in idautils.Functions())
@staticmethod
def get_imports():
for i in xrange(0, idaapi.get_import_module_qty()):
name = idaapi.get_import_module_name(i)
if name:
yield name
@staticmethod
def decompile_full(outfile):
return idaapi.decompile_many(outfile, None, 0)
@staticmethod
def annotate_xrefs():
stats = {'annotated_functions': 0, 'errors': 0}
for f in IdaHelper.get_functions():
try:
function_comment = GetFunctionCmt(f.start, 0)
if '**** XREFS ****' in function_comment:
logger.debug("[i] skipping function %r, already annotated." % f.name)
continue
xrefs = [x.name for x in f.get_coderefs()]
comment = []
if function_comment:
comment.append(function_comment)
comment.append("***** XREFS *****")
comment.append("* # %d" % len(xrefs))
comment.append(', '.join(xrefs))
comment.append("*******************")
SetFunctionCmt(f.start, '\n'.join(comment), 0)
stats['annotated_functions'] += 1
except Exception as e:
print ("Annotate XRefs: %r"%e)
stats['errors'] += 1
print "[+] stats: %r" % stats
print "[+] Done!"
@staticmethod
def annotate_functions_with_local_var_size():
stats = {'annotated_functions': 0, 'errors': 0}
for f in IdaHelper.get_functions():
try:
function_comment = GetFunctionCmt(f.start, 0)
if '**** Variables ****' in function_comment:
logger.debug("[i] skipping function %r, already annotated." % f.name)
continue
size, stack_vars = f.get_function_args()
comment = []
if function_comment:
comment.append(function_comment)
comment.append("**** Variables ****")
comment.append("* stack size: %s" % size)
for s in stack_vars:
comment.append(json.dumps(s))
comment.append("*******************")
SetFunctionCmt(f.start, '\n'.join(comment), 0)
stats['annotated_functions'] += 1
except Exception, e:
print ("Annotate Funcs: %r" % e)
stats['errors'] += 1
print "[+] stats: %r" % stats
print "[+] Done!"
class IdaDecompileBatchController(object):
    def __init__(self):
        """Collect environment info, wait for IDA auto-analysis to finish,
        and ensure the hexrays decompiler plugin is loaded (raises if it
        cannot be loaded)."""
        self.is_windows = sys.platform.startswith('win')
        self.is_ida64 = GetIdbPath().endswith(".i64") # hackhackhack - check if we're ida64 or ida32
        logger.debug("[+] is_windows: %r" % self.is_windows)
        logger.debug("[+] is_ida64: %r" % self.is_ida64)
        self.my_path = os.path.abspath(__file__)
        self.temp_path = None
        self._init_target()
        # settings (form)
        # todo: load from configfile if available.
        self.output_path = None
        self.chk_annotate_stackvar_size = False
        self.chk_annotate_xrefs = False
        self.chk_decompile_imports = False
        self.chk_decompile_imports_recursive = False
        self.chk_decompile_alternative = False
        # self.ida_home = idaapi.idadir(".")
        self.ida_home = GetIdaDirectory()
        # wait for ida analysis to finish
        self.wait_for_analysis_to_finish()
        if not idaapi.init_hexrays_plugin():
            logger.warning("forcing hexrays to load...")
            self.load_plugin_decompiler()
        if not idaapi.init_hexrays_plugin():
            raise Exception("hexrays decompiler is not available :(")
    def _init_target(self):
        """Cache the current idb's input file path/name/directory."""
        self.target_path = idc.GetInputFilePath()
        self.target_file = idc.GetInputFile()
        self.target_dir = os.path.split(self.target_path)[0]
        logger.debug("reinitializing target: %r" % self.target_file)
    def init_tempdir(self):
        """Create (once) a scratch directory for batch runs."""
        self.temp_path = self.temp_path or tempfile.mkdtemp(prefix="idbc_")
        logger.debug("[i] using tempdir: %r" % self.temp_path)
    def remove_tempdir(self):
        """Delete the scratch directory, if one was created."""
        if not self.temp_path:
            return
        logger.debug("[i] removing tempdir: %r" % self.temp_path)
        shutil.rmtree(self.temp_path)
        self.temp_path = None
    def wait_for_analysis_to_finish(self):
        """Block until IDA's auto-analysis queue is empty."""
        logger.debug("[+] waiting for analysis to finish...")
        idaapi.autoWait()
        idc.Wait()
        logger.debug("[+] analysis finished.")
    def load_plugin_decompiler(self):
        # load decompiler plugins (32 and 64 bits, just let it fail)
        logger.debug("[+] trying to load decompiler plugins")
        if self.is_ida64:
            # 64bit plugins
            idc.RunPlugin("hexx64", 0)
        else:
            # 32bit plugins
            idc.RunPlugin("hexrays", 0)
            idc.RunPlugin("hexarm", 0)
        logger.debug("[+] decompiler plugins loaded.")
def run(self):
files_decompiled = []
self._init_target()
if self.chk_decompile_imports:
self.init_tempdir()
if self.chk_decompile_imports_recursive:
pass
for image_type, image_name, image_path in self.enumerate_import_images():
try:
self.exec_ida_batch_decompile(target = image_path, output = self.output_path,
annotate_stackvar_size = self.chk_annotate_stackvar_size,
annotate_xrefs = self.chk_annotate_xrefs,
imports = self.chk_decompile_imports,
recursive = self.chk_decompile_imports_recursive,
experimental_decomile_cgraph = self.chk_decompile_alternative)
files_decompiled.append(image_path)
except subprocess.CalledProcessError, cpe:
logger.warning("[!] failed to decompile %r - %r" % (image_path, cpe))
self.remove_tempdir()
if self.chk_annotate_stackvar_size:
self.annotate_stack_variable_size()
if self.chk_annotate_xrefs:
self.annotate_xrefs()
if self.chk_decompile_alternative:
raise NotImplemented("Not yet implemented")
pass
else:
self.decompile_all(self.output_path)
files_decompiled.append(self.target_file)
logger.info("[+] finished decompiling: %r" % files_decompiled)
logger.info(" output dir: %s"%self.output_path if self.output_path else self.target_dir)
return files_decompiled
    def annotate_stack_variable_size(self):
        """Annotate every function comment with its stack-frame layout."""
        logger.debug("[+] annotating function stack variables")
        IdaHelper.annotate_functions_with_local_var_size()
        logger.debug("[+] done.")
    def annotate_xrefs(self):
        """Annotate every function comment with its incoming code xrefs."""
        logger.debug("[+] annotating function xrefs")
        IdaHelper.annotate_xrefs()
        logger.debug("[+] done.")
    def file_is_decompilable(self, path):
        """Return 'pe/dos' / 'elf' if the file's magic matches, else None.

        NOTE(review): the magic comparison is str-vs-str — valid under
        Python 2 where read() yields str; would need bytes literals on
        Python 3.
        """
        with open(path, 'rb') as ftest:
            magic = ftest.read(4)
            if magic == 'MZ\x90\x00':
                return 'pe/dos'
            elif magic == "\x7fELF":
                return 'elf'
        return None
    def enumerate_import_images(self):
        """Yield (type, filename, path) for each import whose backing image
        can be found next to the target binary."""
        for import_name in IdaHelper.get_imports():
            logger.debug("[i] trying to find image for %r" % import_name)
            for image_path in glob.glob(os.path.join(self.target_dir, import_name) + '*'):
                image_type = self.file_is_decompilable(image_path)
                if image_type:
                    logger.debug("[i] got image %r as %r" % (image_path, image_type))
                    yield image_type, os.path.split(image_path)[1], image_path
                    # I do not think there's any need to check other files with the same name ?!
                    break
    def enumerate_files(self, recursive=False):
        """Walk the target directory and yield (type, name, path) for every
        decompilable file found."""
        for root, dirs, files in os.walk(self.target_dir):
            for name in files:
                fpath = os.path.join(root, name)
                logger.debug("[+] checking %r" % fpath)
                try:
                    ftype = self.file_is_decompilable(fpath)
                    if ftype:
                        logger.debug("[+] is candidate %r" % [fpath, ftype])
                        yield ftype, name, fpath
                except IOError: pass
def decompile_all(self, outfile=None):
outfile = self._get_suggested_output_filename(outfile or self.target_path)
logger.warning(outfile)
logger.debug("[+] trying to decompile %r as %r" % (self.target_file,
os.path.split(outfile)[1]))
IdaHelper.decompile_full(outfile)
logger.debug("[+] finished decompiling %r as %r" % (self.target_file,
os.path.split(outfile)[1]))
def _get_suggested_output_filename(self, target):
# /a/b/c/d/e/bin.ext
# target is a directory
if os.path.isdir(target):
fname, fext = os.path.splitext(self.target_file)
return '%s.c' % os.path.join(target, fname)
# target is not a directory
root, fname = os.path.split(target)
if fname:
fname, fext = os.path.splitext(fname) # bin,ext
else:
fname, fext = os.path.splitext(self.target_file)
# obsolete
# suggested_outpath = '%s.c'%os.path.join(root,fname)
# if not os.path.exists(suggested_outpath):
# return suggested_outpath
return '%s.c' % os.path.join(root, fname)
def exec_ida_batch_decompile(self, target, output, annotate_stackvar_size, annotate_xrefs, imports, recursive,
experimental_decomile_cgraph):
logger.debug("[+] batch decompile %r" % target)
# todo: pass commandlines,
# todo parse commandline
script_args = ['--output=%s' % output]
if annotate_stackvar_size:
script_args.append("--annotate-stackvar-size")
if annotate_xrefs:
script_args.append("--annotate-xrefs")
if imports:
script_args.append("--imports")
if recursive:
script_args.append("--recursive")
if experimental_decomile_cgraph:
script_args.append("--experimental-decompile-cgraph")
script_args = ['\\"%s\\"' % a for a in script_args]
command = "%s %s" % (self.my_path, ' '.join(script_args))
self._exec_ida_batch(target, command)
def _exec_ida_batch(self, target, command):
# build exe path
if self.is_windows:
ida_exe = os.path.join(self.ida_home, 'idaw64.exe' if self.is_ida64 else 'idaw.exe')
else:
ida_exe = os.path.join(self.ida_home, 'idal64' if self.is_ida64 else 'idal')
'''
https://www.hex-rays.com/products/ida/support/idadoc/417.shtml
-B .. Batch mode
-M .. disable mouse
-c .. create new database
-o .. database output path
-S .. execute script
'''
#temp_path = os.path.join(self.temp_path, os.path.splitext(os.path.split(target)[1])[0] + '.idb')
cmd = [ida_exe, '-B', '-M', '-c', '-o"%s"'%self.temp_path if self.temp_path else '', '-S"%s"' % command, '"' + target + '"']
logger.debug(' '.join(cmd))
logger.debug('[+] executing: %r' % cmd)
#return 0
# TODO: INSECURE!
return subprocess.check_call(' '.join(cmd), shell=True)
class TestEmbeddedChooserClass(Choose, Choose2):
    """
    A simple chooser to be used as an embedded chooser
    """
    def __init__(self, title, nb=5, flags=0):
        # initialize both chooser base classes with the same 3-column layout
        Choose.__init__(self,
                        title,
                        [["Type", 10], ["Name", 10], ["Path", 30]],
                        flags=flags)
        Choose2.__init__(self,
                         title,
                         [["Type", 10], ["Name", 10], ["Path", 30]],
                         embedded=True, width=50, height=10, flags=flags)
        self.n = 0
        self.items = []          # rows: [type, name, path]
        self.icon = 5
        self.selcount = 0
        self.selected = []       # last selection reported by the chooser

    def OnClose(self):
        pass

    def OnGetLine(self, n):
        return self.items[n]

    def OnGetSize(self):
        return len(self.items)

    def OnRefresh(self, n):
        print("refresh %s" % n)

    def OnSelectionChange(self, sel_list):
        self.selected = sel_list

    def getSelected(self):
        """Yield the item rows for the current selection (indices are 1-based)."""
        for idx in self.selected:
            yield self.items[idx - 1]

    def addItem(self, e):
        """Append row *e* unless an identical row is already present."""
        if e not in self.items:
            self.items.append(e)
class DecompileBatchForm(Form):
    """
    IDA Form that gathers batch-decompilation settings (target, output path,
    annotation/import options), shows decompilation candidates in an embedded
    chooser, and triggers the batch run.
    """
    def __init__(self, idbctrl, enumerate_imports=True, enumerate_other=False):
        # controller that performs the actual decompilation work
        self.idbctrl = idbctrl
        # multi-select chooser listing [type, name, path] candidates
        self.EChooser = TestEmbeddedChooserClass("Batch Decompile", flags=Choose2.CH_MULTI)
        self.propagateItems(enumerate_imports=enumerate_imports, enumerate_other=enumerate_other)
        # the template string maps {placeholders} to the controls dict below
        Form.__init__(self,
                      r"""Ida Batch Decompile ...
{FormChangeCb}
<##Target    :{target}>
<##OutputPath:{outputPath}>
<##Annotate StackVar Size:{chkAnnotateStackVars}>
<##Annotate Func XRefs   :{chkAnnotateXrefs}>
<##Process Imports       :{chkDecompileImports}>
<##Cgraph (experimental) :{chkDecompileAlternative}>{cGroup1}>
<##Scan Target Directory:{btnLoad}> <##Recursive:{chkDecompileImportsRecursive}>{cGroup2}>
<##Decompile!:{btnProcessFiles}>
<Please select items to decompile:{cEChooser}>
""", {
            'target': Form.FileInput(swidth=50, open=True, value=idbctrl.target_path),
            'outputPath': Form.DirInput(swidth=50, value=idbctrl.output_path),
            'cGroup1': Form.ChkGroupControl(("chkAnnotateStackVars", "chkAnnotateXrefs",
                                             "chkDecompileImports",
                                             "chkDecompileAlternative")),
            'cGroup2': Form.ChkGroupControl(("chkDecompileImportsRecursive", )),
            'FormChangeCb': Form.FormChangeCb(self.OnFormChange),
            'btnLoad': Form.ButtonInput(self.OnButtonLoad),
            'btnProcessFiles': Form.ButtonInput(self.OnButtonProcess),
            'cEChooser': Form.EmbeddedChooserControl(self.EChooser),
        })
        self.Compile()

    def propagateItems(self, enumerate_imports=False, enumerate_other=False):
        """Fill the chooser with the current target and, optionally, import
        images and other decompilable files from the target directory."""
        self.EChooser.addItem([self.idbctrl.file_is_decompilable(self.idbctrl.target_path),
                               os.path.split(self.idbctrl.target_path)[1],
                               self.idbctrl.target_path])
        if enumerate_imports:
            for candidate in self.idbctrl.enumerate_import_images():
                self.EChooser.addItem(list(candidate))
        if enumerate_other:
            for candidate in self.idbctrl.enumerate_files(recursive=self.chkDecompileImportsRecursive.checked):
                self.EChooser.addItem(list(candidate))

    def OnButtonProcess(self, code=0):
        """Copy the form state into the controller, then decompile every
        selected item (the current binary last, in-process)."""
        ### process selected files
        if not len(list(self.EChooser.getSelected())):
            logger.warning("[!] Aborting. Please select at least one item from the list!")
            return
        self.idbctrl.target = self.target.value
        outputPath = self.GetControlValue(self.outputPath)
        # only accept an output path that exists (empty means "next to target")
        if outputPath == '' or os.path.exists(outputPath):
            self.idbctrl.output_path = outputPath
        else:
            logger.warning("[!!] output path not valid! %r" % outputPath)
            self.idbctrl.output_path = None
        self.idbctrl.chk_annotate_stackvar_size = self.chkAnnotateStackVars.checked
        self.idbctrl.chk_decompile_imports = self.chkDecompileImports.checked
        self.idbctrl.chk_decompile_imports_recursive = self.chkDecompileImportsRecursive.checked
        self.idbctrl.chk_annotate_xrefs = self.chkAnnotateXrefs.checked
        self.idbctrl.chk_decompile_alternative = self.chkDecompileAlternative.checked
        logger.debug("[+] config updated")
        files_decompiled = []
        decompile_main_binary = False
        self.idbctrl.init_tempdir()
        for _type, name, image_path in self.EChooser.getSelected():
            # the currently-loaded binary is handled in-process below, not
            # via a spawned batch IDA instance
            if image_path is self.idbctrl.target_path:
                decompile_main_binary = True
                continue
            try:
                self.idbctrl.exec_ida_batch_decompile(target=image_path, output=outputPath,
                                                      annotate_stackvar_size=self.idbctrl.chk_annotate_stackvar_size,
                                                      annotate_xrefs=self.idbctrl.chk_annotate_xrefs,
                                                      imports=self.idbctrl.chk_decompile_imports,
                                                      recursive=self.idbctrl.chk_decompile_imports_recursive,
                                                      experimental_decomile_cgraph=self.idbctrl.chk_decompile_alternative)
                files_decompiled.append(image_path)
            except subprocess.CalledProcessError, cpe:
                logger.warning("[!] failed to decompile %r - %r" % (image_path, cpe))
        self.idbctrl.remove_tempdir()
        ## process current file
        if decompile_main_binary:
            # well, loop here even though we know it can only
            logger.debug("[+] decompiling current file...")
            files_decompiled += self.idbctrl.run() # decompile main binary
        logger.info("[+] finished decompiling: %r" % files_decompiled)
        # NOTE(review): the %-format binds tighter than the conditional, so when
        # output_path is falsy this logs target_dir *without* the prefix —
        # likely wants parentheses around the conditional expression
        logger.info("    output dir: %s" % self.idbctrl.output_path if self.idbctrl.output_path else self.idbctrl.target_dir)

    def OnButtonLoad(self, code=0):
        """Rescan the target directory and reopen the form with new items."""
        self.Close(0)
        self.propagateItems(enumerate_other=True, enumerate_imports=True)
        self.Execute()

    def OnFormChange(self, fid):
        """Form event callback: initializes field state and mirrors checkbox
        toggles into the control objects."""
        # Set initial state
        INIT = -1
        BTN_OK = -2
        if fid == INIT:
            self.EnableField(self.target, False)
            self.EnableField(self.outputPath, True)
            # experimental cgraph mode is not selectable from the UI
            self.EnableField(self.chkDecompileAlternative, False)
        elif fid == BTN_OK:
            # just return
            return True
        # Toggle backup checkbox
        elif fid == self.chkAnnotateStackVars.id:
            self.chkAnnotateStackVars.checked = not self.chkAnnotateStackVars.checked
        elif fid == self.chkDecompileImports.id:
            self.chkDecompileImports.checked = not self.chkDecompileImports.checked
        elif fid == self.chkDecompileImportsRecursive.id:
            self.chkDecompileImportsRecursive.checked = not self.chkDecompileImportsRecursive.checked
        elif fid == self.chkDecompileAlternative.id:
            self.chkDecompileAlternative.checked = not self.chkDecompileAlternative.checked
        elif fid == self.chkAnnotateXrefs.id:
            self.chkAnnotateXrefs.checked = not self.chkAnnotateXrefs.checked
        return False
if idaapi.IDA_SDK_VERSION >= 700:
    class IdaDecompileUiActionHandler(idaapi.action_handler_t):
        """Menu action handler for the IDA >= 7.0 action API; opens the
        batch-decompile configuration form when the menu entry is activated."""
        def __init__(self, caller):
            idaapi.action_handler_t.__init__(self)
            # object providing menu_config() (the plugin instance)
            self.caller = caller

        def activate(self, ctx):
            self.caller.menu_config()
            return 1

        # BUGFIX: `update` was defined twice with identical bodies; the second
        # definition silently shadowed the first. One definition suffices.
        def update(self, ctx):
            # keep the menu action permanently enabled
            return idaapi.AST_ENABLE_ALWAYS
class IdaDecompileBatchPlugin(idaapi.plugin_t):
    """IDA plugin wrapper: registers the 'Ida Batch Decompile' menu entry and
    forwards menu activation to the controller's configuration form."""
    flags = idaapi.PLUGIN_FIX
    comment = "Batch Decompile"
    help = "github.com/tintinweb"
    wanted_name = "Ida Batch Decompile"
    wanted_hotkey = ""
    # tuple of (menu path, entry label); joined for the >=7.0 API, passed as
    # two separate arguments to the legacy add_menu_item API
    wanted_menu = "File/Produce file/", "{} ...".format(wanted_name)
    wanted_menu_id = 'tintinweb:batchdecompile'
    def init(self):
        """Plugin init: wire up the menu entry for the running IDA version."""
        NO_HOTKEY = ""
        SETMENU_INS = 0
        NO_ARGS = tuple()
        logger.debug("[+] %s.init()" % self.__class__.__name__)
        self.menuitems = []
        logger.debug("[+] setting up menus for ida version %s" % idaapi.IDA_SDK_VERSION)
        if idaapi.IDA_SDK_VERSION >= 700:
            # >= 700
            action_desc = idaapi.action_desc_t("tintinweb:batchdecompile:load", self.wanted_name, IdaDecompileUiActionHandler(self))
            idaapi.register_action(action_desc)
            idaapi.attach_action_to_menu(''.join(self.wanted_menu), "tintinweb:batchdecompile:load", idaapi.SETMENU_APP)
        else:
            # legacy (pre-7.0) menu API; keep the handle so term() can remove it
            menu = idaapi.add_menu_item(self.wanted_menu[0],
                                        self.wanted_menu[1],
                                        NO_HOTKEY,
                                        SETMENU_INS,
                                        self.menu_config,
                                        NO_ARGS)
            self.menuitems.append(menu)
        return idaapi.PLUGIN_KEEP
    def run(self, arg=None):
        # no-op: all work is triggered from the menu entry
        logger.debug("[+] %s.run()" % self.__class__.__name__)
    def term(self):
        """Plugin teardown: remove legacy menu items (pre-7.0 API only)."""
        logger.debug("[+] %s.term()" % self.__class__.__name__)
        if idaapi.IDA_SDK_VERSION < 700:
            for menu in self.menuitems:
                idaapi.del_menu_item(menu)
    def menu_config(self):
        """Menu callback: refresh the controller's target and show the form."""
        logger.debug("[+] %s.menu_config()" % self.__class__.__name__)
        self.idbctrl._init_target() # force target reinit
        DecompileBatchForm(self.idbctrl).Execute()
    def set_ctrl(self, idbctrl):
        """Attach the controller instance used by menu_config()."""
        logger.debug("[+] %s.set_ctrl(%r)" % (self.__class__.__name__, idbctrl))
        self.idbctrl = idbctrl
def PLUGIN_ENTRY(mode=None):
    """ check execution mode:
        a) as Plugin, return plugin object
        b) as script as part of a batch execution, do not spawn plugin object

    When invoked as __main__ with commandline args (idc.ARGV), runs one batch
    decompilation and exits IDA via idc.Exit(0); otherwise returns a plugin
    instance wired to a fresh controller.
    """
    # configure root logging once for both plugin and batch mode
    logging.basicConfig(level=logging.DEBUG,
                        format="[%(name)s/%(process)s][%(levelname)-10s] [%(module)s.%(funcName)-14s] %(message)s")
    logger.setLevel(logging.DEBUG)
    # always wait for analysis to finish
    logger.debug("[+] initializing IdaDecompileBatchPlugin")
    # create our controller interface
    idbctrl = IdaDecompileBatchController()
    # parse cmdline
    if mode == '__main__':
        # cmdline mode
        if len(idc.ARGV) > 1:
            # cmdline batch mode
            logger.debug("[+] Mode: commandline")
            parser = OptionParser()
            parser.add_option("-o", "--output", dest="output",
                              help="output path")
            parser.add_option("-S", "--annotate-stackvar-size",
                              action="store_true", default=False,
                              help="Generate stack variable size annotations")
            parser.add_option("-X", "--annotate-xrefs",
                              action="store_true", default=False,
                              help="Generate xref annotations")
            parser.add_option("-I", "--imports",
                              action="store_true", default=False,
                              help="try to decompile files referenced in IAT")
            parser.add_option("-R", "--recursive",
                              action="store_true", default=False,
                              help="Recursive decompile files/imports")
            parser.add_option("-Z", "--experimental-decompile-cgraph",
                              action="store_true", default=False,
                              help="[experimental] decompile funcs referenced in calltree manually")
            # idc.ARGV[0] is the script path itself
            options, args = parser.parse_args(idc.ARGV[1:])
            # set options
            idbctrl.output_path = options.output
            idbctrl.chk_annotate_stackvar_size = options.annotate_stackvar_size
            idbctrl.chk_annotate_xrefs = options.annotate_xrefs
            idbctrl.chk_decompile_imports = options.imports
            idbctrl.chk_decompile_imports_recursive = options.recursive
            idbctrl.chk_decompile_alternative = options.experimental_decompile_cgraph
            # set all the idbctrl checkboxes and files
            idbctrl.run()
            # terminate the batch IDA instance; nothing after this runs
            idc.Exit(0)
        # return
        logger.debug("[+] Mode: commandline w/o args")
        # PluginMode
        plugin = IdaDecompileBatchPlugin()
        plugin.set_ctrl(idbctrl=idbctrl)
        # manual init: IDA only calls init() automatically for real plugins
        plugin.init()
        logger.info("[i] %s loaded, see Menu: %s" % (IdaDecompileBatchPlugin.wanted_name,
                                                     IdaDecompileBatchPlugin.wanted_menu))
        #plugin.menu_config()
        return plugin
    else:
        logger.debug("[+] Mode: plugin")
        # PluginMode
        plugin = IdaDecompileBatchPlugin()
        plugin.set_ctrl(idbctrl=idbctrl)
        return plugin
# script entry: pass __name__ so PLUGIN_ENTRY can detect commandline/batch mode
if __name__ == '__main__':
    PLUGIN_ENTRY(mode=__name__)
|
{"/daas/decompile.py": ["/daas/models.py", "/daas/auth.py"], "/daas/auth.py": ["/daas/models.py"], "/daas/__init__.py": ["/daas/cmds.py", "/daas/auth.py", "/daas/decompile.py"]}
|
31,921
|
captainGeech42/daas
|
refs/heads/main
|
/daas/__init__.py
|
import os
from flask import Flask
from .exts import db
from .cmds import db_init
def create_app():
    """Application factory: configure Flask + SQLAlchemy, register the CLI
    command and the auth/decompile blueprints, and return the app."""
    app = Flask(__name__)
    # secrets and connection strings come from the environment, with dev defaults
    app.config["SECRET_KEY"] = os.getenv("FLASK_SECRET_KEY", "goodsecretkeyhere")
    app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DB_URI", "sqlite:///daas.db")
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    db.init_app(app)
    app.cli.add_command(db_init)
    # blueprints are imported here (not at module top) to avoid circular imports
    from .auth import auth as auth_blueprint
    from .decompile import decompile as decompile_blueprint
    app.register_blueprint(auth_blueprint)
    app.register_blueprint(decompile_blueprint)
    return app
|
{"/daas/decompile.py": ["/daas/models.py", "/daas/auth.py"], "/daas/auth.py": ["/daas/models.py"], "/daas/__init__.py": ["/daas/cmds.py", "/daas/auth.py", "/daas/decompile.py"]}
|
31,922
|
captainGeech42/daas
|
refs/heads/main
|
/daas/cmds.py
|
import base64
from flask import current_app
from flask.cli import with_appcontext
import click
from .exts import db
@click.command("db-init")
@with_appcontext
def db_init():
    """Flask CLI command: create all SQLAlchemy tables for the current app."""
    db.create_all(app=current_app)
    click.echo("Initialized SQLite database")
|
{"/daas/decompile.py": ["/daas/models.py", "/daas/auth.py"], "/daas/auth.py": ["/daas/models.py"], "/daas/__init__.py": ["/daas/cmds.py", "/daas/auth.py", "/daas/decompile.py"]}
|
31,923
|
TurbulenceLabs/CexAMM
|
refs/heads/master
|
/cex_model.py
|
import os
import hmac
import time
import hashlib
import requests
from retrying import retry
from datetime import datetime
def retry_if_not_interrupt(exception):
    """Retry predicate for `retrying`: retry on any exception except a
    KeyboardInterrupt (so Ctrl-C still stops the bot)."""
    is_interrupt = isinstance(exception, KeyboardInterrupt)
    return not is_interrupt
class AMM_Model(object):
    """Base model for the CEX AMM bot.

    Wraps the HBTC "openapi" REST endpoints (requests signed with
    HMAC-SHA256), and provides broker/depth/order helpers plus colored
    console logging used by subclasses.
    """

    def __init__(self, api_key, secret_key) -> None:
        super().__init__()
        # account Information
        self.api_key = api_key
        self.secret_key = secret_key
        # headers sent with every authenticated request
        self.headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'X-BH-APIKEY': self.api_key,
        }
        # host & urls
        host = 'https://api.hbtc.com/openapi'
        self.urls = {
            'account': os.path.join(host, 'v1/account'),
            'brokerInfo': os.path.join(host, 'v1/brokerInfo'),
            'bookTicker': os.path.join(host, 'quote/v1/ticker/bookTicker'),
            'depth': os.path.join(host, 'quote/v1/depth'),
            'historyOrders': os.path.join(host, 'v1/historyOrders'),
            'order': os.path.join(host, 'v1/order'),
            'openOrders': os.path.join(host, 'v1/openOrders'),
            'price': os.path.join(host, 'quote/v1/ticker/price'),
            'withdrawalOrders': os.path.join(host, 'v1/withdrawalOrders'),
        }
        # print parameters (ANSI color codes)
        # BUGFIX: was `self.self.text_colors = ...`, which raises AttributeError
        # during construction; the print_* methods read `self.text_colors`.
        self.text_colors = {
            'logs': '\033[34m',  # 033 is the escape code and 34 is the color code
            'info': '\033[32m',
            'warning': '\033[33m',
            'error': '\033[31m',
            'bold': '\033[1m',
            'end_color': '\033[0m'
        }

    @retry(retry_on_exception=retry_if_not_interrupt)
    def _hbtc_delete_func(self, url, headers={}, params={}):
        """HTTP DELETE -> parsed JSON; retried on any error except Ctrl-C."""
        req = requests.delete(url, headers=headers, params=params).json()
        return req

    @retry(retry_on_exception=retry_if_not_interrupt)
    def _hbtc_get_func(self, url, headers={}, params={}):
        """HTTP GET -> parsed JSON; retried on any error except Ctrl-C."""
        req = requests.get(url, headers=headers, params=params).json()
        return req

    @retry(retry_on_exception=retry_if_not_interrupt)
    def _hbtc_post_func(self, url, headers={}, params={}):
        """HTTP POST -> parsed JSON; retried on any error except Ctrl-C."""
        # avoid error code -1121
        req = requests.post(url, headers=headers, params=params).json()
        return req

    def _get_signature_sha256(self, params: dict):
        """HMAC-SHA256 hex signature over 'k=v' pairs joined with '&'."""
        data = [f'{item}={str(params[item])}' for item in params.keys()]
        signature = hmac.new(self.secret_key.encode('UTF8'), ('&'.join(data)).encode('UTF8'),
                             digestmod=hashlib.sha256).hexdigest()
        return signature

    def _get_order_depth(self):
        """Fetch the order book depth for the configured symbol."""
        params = {'symbol': self.symbol}
        depth = self._hbtc_get_func(self.urls['depth'], params=params)
        return depth

    def _normalize_shares(self, shares: dict):
        """Normalize share weights in place so they sum to 1.0; returns the dict.

        BUGFIX: previously iterated `sums` (a float) instead of the dict's
        items, which raised TypeError before any normalization happened.
        """
        total = sum(shares.values())
        for key, value in shares.items():
            shares[key] = value / total
        return shares

    def _get_price(self, pair: str):
        """Return the last price of *pair*, or -1 when the API reports an error."""
        info = self._hbtc_get_func(self.urls['price'], params={'symbol': pair.upper()})
        return float(info['price']) if info.get('code') is None else -1

    def _check_pair(self, pair):
        """Assert that every exchange pair in *pair* (str/list/dict) exists."""
        if isinstance(pair, str):
            assert self._get_price(pair) != -1, f'Can\'t find {pair} exchange pair'
            return pair
        elif isinstance(pair, list):
            for item in pair:
                assert self._get_price(item) != -1, f'Can\'t find {item} exchange pair'
            return pair
        elif isinstance(pair, dict):
            for item in pair.values():
                assert self._get_price(item) != -1, f'Can\'t find {item} exchange pair'
            return pair
        else:
            raise ValueError('Wrong type of pair')

    def _query_broker(self, token_name):
        """Return price/quantity limits and precisions for *token_name*;
        empty dict when the symbol is unknown to the broker."""
        symbol = dict()
        token_name = token_name.upper()
        for item in (self._hbtc_get_func(self.urls['brokerInfo'], params={'type': 'token'})['symbols']):
            if item['symbol'] == token_name:
                symbol['minPrice'] = float(item['filters'][0]['minPrice'])
                symbol['maxPrice'] = float(item['filters'][0]['maxPrice'])
                # precision = number of decimals in the tick/step size
                symbol['pricePrecision'] = len(item['filters'][0]['tickSize'].split('.')[1])
                symbol['minQty'] = float(item['filters'][1]['minQty'])
                symbol['maxQty'] = float(item['filters'][1]['maxQty'])
                symbol['quantityPrecision'] = len(item['filters'][1]['stepSize'].split('.')[1])
                break
        return symbol

    def _check_token(self, token_name):
        """Assert that each token (str or list of str) is known to the broker."""
        token_names = [token_name] if isinstance(token_name, str) else token_name
        for item in token_names:
            assert len(self._query_broker(item)) != 0, f'Can\'t find {item}'
        return token_name

    def _order_temp(self, symbol, side, price, quantity):
        """Place a signed GTC LIMIT order, rounding to broker precisions."""
        params = self._get_params({
            'side': side,
            'type': 'LIMIT',
            'symbol': symbol,
            'timeInForce': 'GTC',
            'price': round(price, self.symbol_info['pricePrecision']),
            'quantity': round(quantity, self.symbol_info['quantityPrecision']),
        })
        req = self._hbtc_post_func(self.urls['order'], self.headers, params)
        return req

    def _get_params(self, params={}):
        """Stamp *params* with timestamp and signature (signature must be
        computed last, over all other fields)."""
        params['timestamp'] = self.timestamp
        params['signature'] = self._get_signature_sha256(params)
        return params

    def _show_order(self, orders):
        """Print each order dict on its own line."""
        for order in orders:
            print(order)

    def _make_order(self, orders):
        """Drain the (symbol, side, price, quantity) queue, placing each order."""
        while orders.qsize():
            info = orders.get()
            req = self._order_temp(symbol=info[0], side=info[1], price=info[2], quantity=info[3])
            print(req)

    def _get_steps(self, num, step) -> list:
        """
        During step 1, make order as a certain step
        :param num: total amounts need to be balance
        :param step: int type
        :return: a list of sub-money
        """
        a, b = divmod(num, step)
        c = [round(step, self.symbol_info['quantityPrecision']) for _ in range(int(a))]
        b = round(b, self.symbol_info['quantityPrecision'])
        # append the remainder only if it is tradeable, capped at maxQty
        if b > self.symbol_info['minQty']:
            c += [b if b <= self.symbol_info['maxQty'] else self.symbol_info['maxQty']]
        return c

    @property
    def timestamp(self):
        # epoch time in milliseconds, as required by the API
        return str(int(time.time() * 1000))  # millisecond -> microsecond

    @property
    def now(self):
        # local wall-clock time as 'HHMMSS'
        return datetime.now().strftime('%H%M%S')

    def query_history_order(self):
        """Print the account's historical orders."""
        orders = self._hbtc_get_func(self.urls['historyOrders'], self.headers, self._get_params())
        self._show_order(orders)

    def query_withdraw_orders(self):
        """Print the account's withdrawal orders."""
        orders = self._hbtc_get_func(self.urls['withdrawalOrders'], self.headers, self._get_params())
        self._show_order(orders)

    def query_now_orders(self):
        """Return the list of currently open orders."""
        orders = self._hbtc_get_func(self.urls['openOrders'], self.headers, self._get_params())
        return orders

    def delete_orders(self, orders):
        """Cancel every order in *orders* by orderId."""
        for order in orders:
            params = {'orderId': order['orderId']}
            req = self._hbtc_delete_func(self.urls['order'], self.headers, self._get_params(params))
            print(req)

    # -------------------- print functions --------------------
    def _get_curr_time_stamp(self):
        """Current local time formatted for log lines."""
        return time.strftime("%Y-%m-%d %H:%M:%S")

    def print_error_message(self, message):
        """Print a red ERROR line and terminate the process with exit code -1."""
        time_stamp = self._get_curr_time_stamp()
        # BUGFIX: was `self.self.text_colors` (AttributeError at runtime)
        error_str = self.text_colors['error'] + self.text_colors['bold'] + 'ERROR ' + self.text_colors[
            'end_color']
        print('{} - {} - {}'.format(time_stamp, error_str, message))
        print('{} - {} - {}'.format(time_stamp, error_str, 'Exiting!!!'))
        exit(-1)

    def print_log_message(self, message):
        """Print a blue LOGS line."""
        time_stamp = self._get_curr_time_stamp()
        log_str = self.text_colors['logs'] + self.text_colors['bold'] + 'LOGS ' + self.text_colors['end_color']
        print('{} - {} - {}'.format(time_stamp, log_str, message))

    def print_warning_message(self, message):
        """Print a yellow WARNING line."""
        time_stamp = self._get_curr_time_stamp()
        warn_str = self.text_colors['warning'] + self.text_colors['bold'] + 'WARNING' + self.text_colors['end_color']
        print('{} - {} - {}'.format(time_stamp, warn_str, message))

    def print_info_message(self, message):
        """Print a green INFO line."""
        time_stamp = self._get_curr_time_stamp()
        info_str = self.text_colors['info'] + self.text_colors['bold'] + 'INFO ' + self.text_colors['end_color']
        print('{} - {} - {}'.format(time_stamp, info_str, message))
|
{"/cex_bivar.py": ["/cex_model.py"]}
|
31,924
|
TurbulenceLabs/CexAMM
|
refs/heads/master
|
/cex_bivar.py
|
import sys
import time
from queue import Queue
from cex_model import AMM_Model
class Bivar(AMM_Model):
    """Two-asset (token/USDT) AMM strategy on top of AMM_Model.

    Step 1 rebalances holdings toward the target share ratio; step 2
    maintains a symmetric grid of limit orders around a base price.
    """

    def __init__(self, api_key, secret_key,
                 shares,
                 first_step=0.01, second_step=0.0,
                 second_order_depth=5,
                 symbol_name='GRIN',
                 ) -> None:
        super().__init__(api_key, secret_key)
        # validate token / pair and fetch broker precisions
        self.symbol_name = self._check_token(symbol_name)
        self.symbol = self._check_pair(symbol_name + 'USDT')
        self.symbol_info = self._query_broker(self.symbol)
        self.account = self.check_account()
        if len(self.account) != 2:
            self.print_error_message('This procedure only support 2 assets')
        self.ratio = 0.0
        # target value ratio token:USDT, normalized to sum to 1
        self.shares = self._normalize_shares(shares)
        self.ratio_ab = self.shares[self.symbol_name] / self.shares['USDT']
        self.order_book_queue = Queue()  # (symbol, side, price, quantity)
        self.first_step = first_step
        self.second_step = second_step
        self.second_order_depth = second_order_depth
        self._second_lambda_build()
        self.second_orders = list()  # symbol, side, price, quantity
        self.second_total_orders = 0
        self.second_fresh_base()

    @property
    def total_assets(self):
        """Total account value in USDT."""
        return sum([float(p['total_usdt_price']) for p in self.account])

    @property
    def free_assets(self):
        """Unlocked account value in USDT."""
        return sum([float(p['free_usdt_price']) for p in self.account])

    @property
    def locked_assets(self):
        """Order-locked account value in USDT."""
        return sum([float(p['locked_usdt_price']) for p in self.account])

    def check_account(self):
        """Return [token_balance, usdt_balance] dicts, each augmented with
        total/free/locked value expressed in USDT."""
        assets = [dict() for _ in range(2)]
        for item in self._hbtc_get_func(self.urls['account'], self.headers, self._get_params())['balances']:
            if item['assetName'] == self.symbol_name:
                assets[0] = item
            if item['assetName'] == 'USDT':
                assets[1] = item
        # account endpoint omits USDT entirely when the balance is zero
        if len(assets[1]) == 1:
            assets[1] = {'asset': 'USDT', 'assetId': 'USDT', 'assetName': 'USDT',
                         'total': '0', 'free': '0', 'locked': '0'}
        for asset in assets:
            asset['total_usdt_price'] = str(float(asset['total']) * self._get_price_usdt(asset['assetName']))
            asset['free_usdt_price'] = str(float(asset['free']) * self._get_price_usdt(asset['assetName']))
            asset['locked_usdt_price'] = str(float(asset['locked']) * self._get_price_usdt(asset['assetName']))
        return assets

    def update_ratio(self):
        """Refresh balances and return token-value / USDT-value ratio."""
        self.account = self.check_account()
        ratio = float(self.account[0]['total_usdt_price']) / float(self.account[1]['total_usdt_price'])
        return ratio

    def _get_book_price_usdt(self, symbol: str):
        """Best bid/ask of <symbol>USDT; identity prices for USDT itself."""
        book_price_info = {'bidPrice': 1.0, 'askPrice': 1.0}
        symbol = symbol.upper()
        if symbol != 'USDT':
            book_price_info = self._hbtc_get_func(self.urls['bookTicker'], params={'symbol': f'{symbol}USDT'})
        return book_price_info

    def _get_price_usdt(self, symbol: str):
        """Last price of <symbol>USDT (-1 when the pair is unknown)."""
        return self._get_price(f'{symbol}USDT')

    def is_best_price(self, order):
        """True when *order* still sits at the top of its book side and is not
        the only liquidity at that level."""
        order_price, order_quantity = float(order['price']), float(order['origQty'])
        order_book = self._get_order_depth()
        if order['side'] == 'BUY':
            new_price, quantity = float(order_book['bids'][0][0]), float(order_book['bids'][0][1])
        else:
            new_price, quantity = float(order_book['asks'][0][0]), float(order_book['asks'][0][1])
        return (new_price == order_price) and (order_quantity != quantity)

    def first_balance_symbol2usdt(self):
        """Step 1: place one limit order nudging holdings toward ratio_ab."""
        side = 'BUY' if self.ratio < self.ratio_ab else 'SELL'
        order_step = self.total_assets * self.first_step
        symbol_book_info = self._get_book_price_usdt(self.account[0]['assetName'])
        symbol_quantity, usdt = float(self.account[0]['total']), float(self.account[1]['total'])
        price = float(symbol_book_info['bidPrice']) if side == 'BUY' else float(symbol_book_info['askPrice'])
        # quantity that would bring the value ratio back to ratio_ab
        delta_qty = abs((self.ratio_ab * usdt - price * symbol_quantity) / (price * (1 + self.ratio_ab)))
        # only submit the first chunk per invocation
        operation_assets = self._get_steps(price * delta_qty, order_step)[0:1]
        for item in operation_assets:
            self.order_book_queue.put((self.symbol, side, price, item / price))
        self.print_log_message(f'Step 1: ${self.total_assets}, ratio: {self.ratio}, order: {(len(operation_assets))}')
        self._make_order(self.order_book_queue)

    def _second_delete_orders(self, price_idxes):
        """Cancel open orders whose price matches any grid index in *price_idxes*."""
        orders = list()
        _, prices, _ = self._second_price_idx2info(price_idxes)
        for history_order in self.query_now_orders():
            for price in prices:
                if float(history_order['price']) == float(round(price, self.symbol_info['pricePrecision'])):
                    orders.append(history_order)
                    break
        self.delete_orders(orders)

    def _second_lambda_build(self):
        """Build the grid price/quantity helper lambdas."""
        # bp: base price, bq: base_quantity
        self.new_price = lambda bp, j, step: round(bp * pow(1 + step, j), self.symbol_info['pricePrecision'])
        # rate: a token / (a token + USDT)
        self.delta_qty = lambda bq, step, ratio_ab, j: round(abs(pow(1 + step, j) - 1) * bq / (1 + ratio_ab),
                                                             self.symbol_info['quantityPrecision'])

    def _second_make_orders(self, price_idxes):
        """Queue and place grid orders pairwise from the outside in."""
        sides, prices, delta_qties = self._second_price_idx2info(price_idxes)
        for idx1, idx2 in ([[i, -i - 1] for i in range(len(sides) // 2)][::-1]):
            self.order_book_queue.put((self.symbol, sides[idx1], prices[idx1], delta_qties[idx1]))
            self.order_book_queue.put((self.symbol, sides[idx2], prices[idx2], delta_qties[idx2]))
        self._make_order(self.order_book_queue)

    def _second_price_idx2info(self, price_idxes):
        """Map grid indices to (sides, prices, delta quantities)."""
        sides, prices, delta_qtys = list(), list(), list()
        for price_idx in price_idxes:
            # indices above the grid midpoint are sells, below are buys
            side = 'SELL' if price_idx > (sum(self.second_idx_list) / len(self.second_idx_list)) else 'BUY'
            price = self.new_price(self.second_base_price, price_idx, self.second_step)
            delta_qty = self.delta_qty(self.second_base_qty, self.second_step, self.ratio_ab, price_idx)
            sides.append(side)
            prices.append(price)
            delta_qtys.append(delta_qty)
        return sides, prices, delta_qtys

    def second_fresh_base(self):
        """Reset the grid base price/quantity from the current mid price."""
        self.second_total_orders = 0
        order_book = self._get_order_depth()
        self.second_base_price = (float(order_book['bids'][0][0]) + float(order_book['asks'][0][0])) * 0.5
        self.second_base_qty = float(self.account[0]['total'])
        # symmetric indices, e.g. depth 5 -> [5..1, -1..-5]
        self.second_idx_list = list(range(self.second_order_depth, 0, -1)) + list(
            range(-1, -self.second_order_depth - 1, -1))

    def second_get_now_order_idxes(self):
        """Return grid indices whose orders are no longer open (i.e. filled)."""
        history_orders = self.query_now_orders()
        if len(history_orders) == len(self.second_idx_list):
            return list()
        history_prices = sorted([float(order['price']) for order in history_orders])
        complete_order_idxes = list()
        for idx, prc_idx in enumerate(self.second_idx_list):
            flag = True
            for his_prc in history_prices:
                _tem = round(self.new_price(self.second_base_price, prc_idx, self.second_step),
                             self.symbol_info['pricePrecision'])
                if his_prc == _tem:
                    flag = False
                    break
            if flag:
                complete_order_idxes.append(prc_idx)
        return complete_order_idxes

    def second_fresh_idx_list(self, complete_order_idxes: list):
        """Shift the grid after fills: re-center around the outermost filled
        index, cancel stale orders and place the newly-needed ones."""
        if len(complete_order_idxes) == 0:
            return
        elif len(complete_order_idxes) == 2 * self.second_order_depth:
            # every grid order filled: re-issue the full (unchanged) grid
            new_order_idxes = self.second_idx_list
            delete_order_idxes = list()
            # BUGFIX: `order_idxes` was unbound in this branch, so the
            # `self.second_idx_list = order_idxes` assignment below raised
            # NameError; the grid stays the same here.
            order_idxes = self.second_idx_list
        else:
            if sum(complete_order_idxes) > (sum(self.second_idx_list) / len(self.second_idx_list)):
                # sells filled: shift the grid window upward
                standard_index = complete_order_idxes[0]
                order_idxes = list(range(standard_index + self.second_order_depth, standard_index, -1))
                order_idxes += list(range(standard_index - 1, standard_index - 1 - self.second_order_depth, -1))
            elif sum(complete_order_idxes) < (sum(self.second_idx_list) / len(self.second_idx_list)):
                # buys filled: shift the grid window downward
                standard_index = complete_order_idxes[-1]
                order_idxes = list(range(standard_index + self.second_order_depth, standard_index, -1))
                order_idxes += list(range(standard_index - 1, standard_index - self.second_order_depth - 1, -1))
            else:
                order_idxes = self.second_idx_list
            cur_orders = set(self.second_idx_list) - set(complete_order_idxes)
            new_order_idxes = sorted(list(set(order_idxes) - cur_orders), reverse=True)
            delete_order_idxes = sorted(list(cur_orders - set(order_idxes)), reverse=True)
        self.second_idx_list = order_idxes
        self.print_log_message(f'Step 2: ${self.total_assets}, ratio: {self.ratio} order: {len(self.second_idx_list)}')
        self._second_delete_orders(delete_order_idxes)
        self._second_make_orders(new_order_idxes)
        self.second_total_orders += len(new_order_idxes)
# live trading loop: alternates between step-1 rebalancing and step-2 grid
# maintenance against the exchange; runs until interrupted
if __name__ == '__main__':
    # params
    api_key, secret_key = sys.argv[1], sys.argv[2]
    # ------------------------- Manual Parameters ---------------------------
    # token information
    symbol_name = 'GRIN'
    shares = {'GRIN': 7, 'USDT': 3}
    # hyper parameters
    balance_ratio_condition = 0.20
    first_step, second_step = 0.005, 0.01  # total asset as USDT ratio
    second_order_depth = 5
    second_total_orders_threshold = 100
    # NOTE(review): compared against bivar.now[:5] ('HHMMS'), so this matches
    # any second in 03:00:00-03:00:09 — confirm intended restart window
    second_restart_time = '03000'
    # -----------------------------------------------------------------------
    # Initiate monitor
    bivar = Bivar(api_key=api_key, secret_key=secret_key,
                  shares=shares,
                  first_step=first_step, second_step=second_step,
                  second_order_depth=second_order_depth,
                  symbol_name=symbol_name,
                  )
    bivar.print_info_message(f'CexAMM is standing by!!!!!!!!')
    # AMM condition: refuse to run with less than $500 total
    if bivar.total_assets < 500:
        bivar.print_error_message("Charge some USDT! BABY!!")
    bivar.ratio = bivar.update_ratio()
    orders = bivar.query_now_orders()
    bivar.print_log_message(f'CexAMM is completed! ${bivar.total_assets}, ratio: {bivar.ratio}, order: {len(orders)}')
    # restart step2: ratio close enough to target -> clear any stale orders
    if abs(bivar.ratio_ab - bivar.ratio) < balance_ratio_condition:
        bivar.delete_orders(orders)
    # Main Procedure
    while True:
        # Condition 1: ratio too far off target -> step-1 rebalancing
        if abs(bivar.ratio_ab - bivar.ratio) >= balance_ratio_condition:
            if len(orders) == 0:
                bivar.first_balance_symbol2usdt()
            elif len(orders) > 1 or (not bivar.is_best_price(orders[0])):
                # stale/multiple rebalance orders: cancel and re-place
                bivar.delete_orders(orders)
                bivar.first_balance_symbol2usdt()
        # Condition 2: ratio on target -> step-2 grid maintenance
        else:
            # re-base the grid when it is (nearly) empty, at the daily restart
            # time, or after too many placed orders
            if (len(orders) <= 1) or (bivar.now[:5] == second_restart_time) or (
                    bivar.second_total_orders >= second_total_orders_threshold):
                if bivar.now[:5] == second_restart_time:
                    time.sleep(10)
                bivar.delete_orders(orders)
                bivar.second_fresh_base()
                idxes = bivar.second_idx_list
            else:
                idxes = bivar.second_get_now_order_idxes()
                bivar.second_fresh_idx_list(idxes)
        # restart information
        time.sleep(1)
        bivar.ratio = bivar.update_ratio()
        orders = bivar.query_now_orders()
|
{"/cex_bivar.py": ["/cex_model.py"]}
|
31,925
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/User/admin.py
|
# -*- encoding: utf-8 -*-
from django.contrib import admin
from django.contrib.auth.models import Group
from User.models import UserData
admin.site.unregister(Group)
@admin.register(UserData)
class CustomUserAdmin(admin.ModelAdmin):
    """Admin configuration for UserData: read-mostly presentation with
    deletion (single and bulk) disabled."""
    # columns shown on the changelist page
    list_display = (
        'id',
        'first_name',
        'last_name',
        'email',
        'document_number',
    )
    search_fields = ('first_name', 'last_name', 'email', 'document_number')
    # layout for the "add" form (credentials section currently disabled)
    add_fieldsets = (
        ('Información básica', {
            'fields': (
                'first_name',
                'last_name',
                'document_type',
                'document_number',
            ),
        }),
        # ('Información de acceso', {
        #     'fields': (
        #         'email',
        #         'password1',
        #         'password2',
        #     ),
        # }),
    )
    # layout for the "change" form
    fieldsets = (
        ('Información de acceso', {
            'fields': (
                'email',
                # 'password',
            ),
        }),
        ('Información básica', {
            'fields': (
                'first_name',
                'last_name',
                'document_type',
                'document_number',
                'document_expedition',
            ),
        }),
        ('Información personal', {
            'fields': (
                'genre',
                'nationality',
                'birth_date',
                'department',
                'city',
                'address',
                'mobile_phone',
            ),
        })
    )
    ordering = ('first_name', 'last_name')

    def get_actions(self, request):
        """Remove the bulk 'delete selected' action from the changelist."""
        actions = super(CustomUserAdmin, self).get_actions(request)
        if 'delete_selected' in actions:
            del actions['delete_selected']
        return actions

    def get_readonly_fields(self, request, obj=None):
        """Always expose last_login/date_joined as read-only fields."""
        read_only_fields = super(CustomUserAdmin, self).get_readonly_fields(
            request, obj
        )
        return read_only_fields + ('last_login', 'date_joined')

    def has_delete_permission(self, request, obj=None):
        # object deletion is disabled for everyone in the admin
        return False
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,926
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/User/models.py
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
# from django.contrib.auth.models import AbstractBaseUser
# from django.contrib.auth.models import BaseUserManager
# from django.contrib.auth.models import PermissionsMixin
# from django.core.mail import send_mail
from django.db import models
class UserData(models.Model):
    """Application user profile (plain model, not a Django auth user).

    Identity is the unique ``email`` plus the unique
    (``document_type``, ``document_number``) pair.
    """
    username = models.CharField(
        unique=True,
        verbose_name='Nombre de usuario',
        max_length=100,
    )
    # NOTE(review): password stored in a plain unique CharField — no
    # hashing is visible here; confirm this is intentional.
    user_password = models.CharField(
        unique=True,
        verbose_name='Password de usuario',
        max_length=100,
    )
    # class UserDataComplete(models.Model):
    # NOTE(review): REQUIRED_FIELDS is an AbstractBaseUser convention;
    # plain models.Model does not consume it.
    REQUIRED_FIELDS = [
        'first_name',
        'last_name',
        'document_type',
        'document_number',
    ]
    # Choice sets for the two PositiveSmallIntegerField fields below.
    DOCUMENT_CHOICES = (
        (10, 'Cedula de ciudadanía'),
        (20, 'Cedula de extranjería'),
        (30, 'Pasaporte'),
    )
    GENRE_CHOICES = (
        (10, 'Masculino'),
        (20, 'Femenino'),
        (30, 'Otro'),
    )
    email = models.EmailField(
        unique=True,
        verbose_name='Correo electrónico',
    )
    first_name = models.CharField(
        blank=True,
        null=True,
        max_length=128,
        verbose_name='Nombres',
    )
    last_name = models.CharField(
        blank=True,
        null=True,
        max_length=128,
        verbose_name='Apellidos',
    )
    document_type = models.PositiveSmallIntegerField(
        choices=DOCUMENT_CHOICES,
        verbose_name='Tipo de documento',
    )
    document_number = models.CharField(
        max_length=64,
        verbose_name='Número de documento',
    )
    document_expedition = models.CharField(
        blank=True,
        null=True,
        max_length=128,
        verbose_name='Lugar de expedición',
    )
    nationality = models.CharField(
        blank=True,
        null=True,
        max_length=128,
        verbose_name='Nacionalidad',
    )
    birth_date = models.DateField(
        blank=True,
        null=True,
        verbose_name='Fecha de nacimiento',
    )
    department = models.CharField(
        blank=True,
        null=True,
        max_length=128,
        verbose_name='Departamento',
    )
    city = models.CharField(
        blank=True,
        null=True,
        max_length=128,
        verbose_name='Ciudad',
    )
    address = models.CharField(
        blank=True,
        null=True,
        max_length=128,
        verbose_name='Dirección',
    )
    mobile_phone = models.CharField(
        blank=True,
        null=True,
        max_length=64,
        verbose_name='Teléfono Celular',
    )
    genre = models.PositiveSmallIntegerField(
        blank=True,
        null=True,
        choices=GENRE_CHOICES,
        verbose_name='Genero',
    )
    # Set once at creation time.
    date_joined = models.DateTimeField(
        auto_now_add=True,
        verbose_name='Fecha de registro',
    )

    def get_short_name(self):
        """Return the user's first name."""
        return self.first_name

    def get_full_name(self):
        """Return 'first_name last_name'."""
        return '{0} {1}'.format(self.first_name, self.last_name)

    # def email_user(self, subject, message, from_email=None, **kwargs):
    #     send_mail(subject, message, from_email, [self.email], **kwargs)

    class Meta:
        verbose_name = 'usuario'
        verbose_name_plural = 'usuarios'
        unique_together = ('document_type', 'document_number')
        ordering = ('first_name', 'last_name')
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,927
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/bin/django-admin.py
|
#!/Users/ivanramirez/Documents/ivan/Corrosive-the-punk-rooster-/bin/python
"""Virtualenv entry point that dispatches to Django's management CLI."""
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,928
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/models.py
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from adminsortable.models import SortableMixin
class Team(SortableMixin):
    """A sports team, drag-sortable in the admin via SortableMixin."""
    name = models.CharField(
        blank=True,
        max_length=40,
        verbose_name='Nombre del equipo',
    )
    # Closed set of institution categories.
    CATEGORY_CHOICES = (
        (10, 'A'),
        (20, 'B'),
    )
    category = models.PositiveSmallIntegerField(
        choices=CATEGORY_CHOICES,
        verbose_name='Categoría de la institución',
    )
    logo = models.ImageField(
        blank=True,
        max_length=255,
        verbose_name='Fotografía',
    )
    description = models.CharField(
        max_length=400,
        blank=True,
        verbose_name="Descripción del equipo",
    )

    def __str__(self):
        """Display the team by its name."""
        return self.name

    class Meta:
        ordering = ['category']
        verbose_name = 'Equipo'
        verbose_name_plural = 'Equipos'
# Create your models here.
class Picture(models.Model):
    """Gallery image attached to a :class:`Team`."""
    name = models.CharField(
        blank=True,
        max_length=255,
        verbose_name='Nombre del equipo',
    )
    image = models.ImageField(
        blank=True,
        max_length=255,
        verbose_name="Imagenes",
    )
    # Fix: on_delete made explicit. It was omitted (legal on Django 1.x,
    # where CASCADE was the default — and CASCADE is what migration
    # 0002_auto_20160318_1641 recorded), but it is a required argument on
    # Django >= 2.0, so the implicit form breaks on upgrade.
    team = models.ForeignKey(
        Team,
        on_delete=models.CASCADE,
    )

    def __str__(self):
        """Display the picture by its name."""
        return self.name

    class Meta:
        ordering = ['-id']
        verbose_name = 'Imagen'
        verbose_name_plural = 'Imagenes'
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,929
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/User/migrations/0006_auto_20160425_1950.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-04-25 19:50
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.4, 2016-04-25).

    Reverses migration 0005: deletes the short-lived ``UserDataComplete``
    model and re-adds all of its profile fields directly to ``UserData``,
    restoring the unique (document_type, document_number) constraint.
    Do not hand-edit field definitions here.
    """
    dependencies = [
        ('User', '0005_auto_20160424_2045'),
    ]
    operations = [
        migrations.DeleteModel(
            name='UserDataComplete',
        ),
        migrations.AlterModelOptions(
            name='userdata',
            options={'ordering': ('first_name', 'last_name'), 'verbose_name': 'usuario', 'verbose_name_plural': 'usuarios'},
        ),
        migrations.AddField(
            model_name='userdata',
            name='address',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Direcci\xf3n'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='birth_date',
            field=models.DateField(blank=True, null=True, verbose_name='Fecha de nacimiento'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='city',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Ciudad'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='date_joined',
            field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 25, 19, 49, 42, 131357, tzinfo=utc), verbose_name='Fecha de registro'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userdata',
            name='department',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Departamento'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='document_expedition',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Lugar de expedici\xf3n'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='document_number',
            field=models.CharField(default=1212, max_length=64, verbose_name='N\xfamero de documento'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userdata',
            name='document_type',
            field=models.PositiveSmallIntegerField(choices=[(10, 'Cedula de ciudadan\xeda'), (20, 'Cedula de extranjer\xeda'), (30, 'Pasaporte')], default=10, verbose_name='Tipo de documento'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userdata',
            name='email',
            field=models.EmailField(default='hola@hola.com', max_length=254, unique=True, verbose_name='Correo electr\xf3nico'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='userdata',
            name='first_name',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Nombres'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='genre',
            field=models.PositiveSmallIntegerField(blank=True, choices=[(10, 'Masculino'), (20, 'Femenino'), (30, 'Otro')], null=True, verbose_name='Genero'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='last_name',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Apellidos'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='mobile_phone',
            field=models.CharField(blank=True, max_length=64, null=True, verbose_name='Tel\xe9fono Celular'),
        ),
        migrations.AddField(
            model_name='userdata',
            name='nationality',
            field=models.CharField(blank=True, max_length=128, null=True, verbose_name='Nacionalidad'),
        ),
        migrations.AlterUniqueTogether(
            name='userdata',
            unique_together=set([('document_type', 'document_number')]),
        ),
    ]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,930
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/admin.py
|
from django.contrib import admin
from cms.models import Team
from cms.models import Picture
# from cms.forms import TeamUseradminForm
from adminsortable.admin import SortableAdmin
class Pictureadmin(admin.TabularInline):
    """Inline editor for Picture rows inside the Team change form."""
    model = Picture
    extra = 1  # one blank extra row for adding a new picture
    fields = (
        'name',
        'image',
    )
    ordering = (
        'id',
    )
@admin.register(Team)
class TeamUseradmin(SortableAdmin):
    """Admin for Team (drag-sortable) with inline picture editing."""
    model = Team
    inlines = [Pictureadmin]
    list_display = (
        'name',
        'logo',
        'description',
    )
    search_fields = (
        'name',
        'logo',
        'description',
    )
    list_filter = (
        'name',
    )
    ordering = (
        'id',
    )
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,931
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-14 20:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 1.9.2, 2016-03-14).

    Creates the ``Team`` table with only an id and a logo; later
    migrations add the remaining fields. Do not hand-edit.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Team',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('logo', models.ImageField(blank=True, max_length=255, null=True, upload_to=b'', verbose_name='Fotograf\xeda')),
            ],
            options={
                'verbose_name': 'Team',
                'verbose_name_plural': 'Teams',
            },
        ),
    ]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,932
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/forms.py
|
from django import forms
from cms.models import Team
class TeamUseradminForm(forms.ModelForm):
    """ModelForm for Team (name, logo, description).

    Both overrides currently just call through to the parent; the saved
    ``self.args`` is kept for later inspection.
    """
    def __init__(self, *args, **kwargs):
        super(TeamUseradminForm, self).__init__(*args, **kwargs)
        # Keep the positional args around (no other use visible here).
        self.args = args

    def clean(self):
        """Pass-through validation hook; no extra rules yet."""
        cleaned_data = super(TeamUseradminForm, self).clean()
        return cleaned_data

    class Meta:
        model = Team
        fields = [
            'name',
            'logo',
            'description',
        ]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,933
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/views.py
|
# -*- encoding: utf-8 -*-
from cms.models import Team
from django.views.generic import ListView
# Create your views here.
#
#
class TeamListView(ListView):
    """List every Team.

    Bug fix: the original overrode ``get_query_set`` — a method name
    Django removed in 1.6 — so the override was dead code and ListView
    silently fell back to the default queryset. The hook Django actually
    calls is ``get_queryset``.
    """
    model = Team

    def get_queryset(self):
        """Return all teams (Meta.ordering applies: by category)."""
        return Team.objects.all()

    # Backward-compatible alias for any caller using the old name.
    get_query_set = get_queryset
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,934
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/urls.py
|
# -*- encoding: utf-8 -*-
from django.conf.urls import url
from cms.views import TeamListView
from django.conf.urls.static import static
from django.conf import settings
# Team list page, plus media-file serving for uploaded images
# (static() is a no-op when DEBUG is False).
urlpatterns = [
    url(r'^teams/$', TeamListView.as_view(), name='team_list'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,935
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/migrations/0002_auto_20160318_1641.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-18 16:41
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.4, 2016-03-18).

    Creates ``Picture`` (FK to Team, CASCADE), adds name/description to
    ``Team`` and makes its logo non-null. Do not hand-edit.
    """
    dependencies = [
        ('cms', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Picture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=50, verbose_name='Nombre del equipo')),
                ('image', models.ImageField(blank=True, max_length=255, upload_to=b'', verbose_name='Imagenes')),
            ],
            options={
                'ordering': ['id'],
                'verbose_name': 'Imagen',
                'verbose_name_plural': 'Imagenes',
            },
        ),
        migrations.AlterModelOptions(
            name='team',
            options={'ordering': ['id'], 'verbose_name': 'Equipo', 'verbose_name_plural': 'Equipos'},
        ),
        migrations.AddField(
            model_name='team',
            name='description',
            field=models.CharField(blank=True, max_length=400, verbose_name='Descripci\xf3n del equipo'),
        ),
        migrations.AddField(
            model_name='team',
            name='name',
            field=models.CharField(blank=True, max_length=40, verbose_name='Nombre del equipo'),
        ),
        migrations.AlterField(
            model_name='team',
            name='logo',
            field=models.ImageField(blank=True, default='', max_length=255, upload_to=b'', verbose_name='Fotograf\xeda'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='picture',
            name='team',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cms.Team'),
        ),
    ]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,936
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/migrations/0003_auto_20160318_1717.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-18 17:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.4, 2016-03-18).

    Adds the ``category`` choice field to Team, widens Picture.name to
    255 chars, and switches both models to name ordering. Do not
    hand-edit.
    """
    dependencies = [
        ('cms', '0002_auto_20160318_1641'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='picture',
            options={'ordering': ['name'], 'verbose_name': 'Imagen', 'verbose_name_plural': 'Imagenes'},
        ),
        migrations.AlterModelOptions(
            name='team',
            options={'ordering': ['name'], 'verbose_name': 'Equipo', 'verbose_name_plural': 'Equipos'},
        ),
        migrations.AddField(
            model_name='team',
            name='category',
            field=models.PositiveSmallIntegerField(choices=[(10, 'A'), (20, 'B')], default=20, verbose_name='Categor\xeda de la instituci\xf3n'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='picture',
            name='name',
            field=models.CharField(blank=True, max_length=255, verbose_name='Nombre del equipo'),
        ),
    ]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,937
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/User/migrations/0005_auto_20160424_2045.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-25 01:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.5, 2016-04-25).

    Splits the profile data out of ``UserData`` into a new
    ``UserDataComplete`` model, stripping every profile/company field
    from ``UserData`` (migration 0006 later reverses this). Do not
    hand-edit.
    """
    dependencies = [
        ('User', '0004_auto_20160423_0025'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserDataComplete',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='Correo electr\xf3nico')),
                ('first_name', models.CharField(blank=True, max_length=128, null=True, verbose_name='Nombres')),
                ('last_name', models.CharField(blank=True, max_length=128, null=True, verbose_name='Apellidos')),
                ('document_type', models.PositiveSmallIntegerField(choices=[(10, 'Cedula de ciudadan\xeda'), (20, 'Cedula de extranjer\xeda'), (30, 'Pasaporte')], verbose_name='Tipo de documento')),
                ('document_number', models.CharField(max_length=64, verbose_name='N\xfamero de documento')),
                ('document_expedition', models.CharField(blank=True, max_length=128, null=True, verbose_name='Lugar de expedici\xf3n')),
                ('nationality', models.CharField(blank=True, max_length=128, null=True, verbose_name='Nacionalidad')),
                ('birth_date', models.DateField(blank=True, null=True, verbose_name='Fecha de nacimiento')),
                ('department', models.CharField(blank=True, max_length=128, null=True, verbose_name='Departamento')),
                ('city', models.CharField(blank=True, max_length=128, null=True, verbose_name='Ciudad')),
                ('address', models.CharField(blank=True, max_length=128, null=True, verbose_name='Direcci\xf3n')),
                ('mobile_phone', models.CharField(blank=True, max_length=64, null=True, verbose_name='Tel\xe9fono Celular')),
                ('genre', models.PositiveSmallIntegerField(blank=True, choices=[(10, 'Masculino'), (20, 'Femenino'), (30, 'Otro')], null=True, verbose_name='Genero')),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de registro')),
            ],
            options={
                'ordering': ('first_name', 'last_name'),
                'verbose_name': 'usuario',
                'verbose_name_plural': 'usuarios',
            },
        ),
        migrations.AlterModelOptions(
            name='userdata',
            options={},
        ),
        migrations.AlterUniqueTogether(
            name='userdata',
            unique_together=set([]),
        ),
        migrations.AlterUniqueTogether(
            name='userdatacomplete',
            unique_together=set([('document_type', 'document_number')]),
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='address',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='birth_date',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='city',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='company_address',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='company_city',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='company_department',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='company_name',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='company_position',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='date_joined',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='department',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='document_expedition',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='document_number',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='document_type',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='email',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='first_name',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='genre',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='home_phone',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='is_active',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='is_staff',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='last_name',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='mobile_phone',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='nationality',
        ),
        migrations.RemoveField(
            model_name='userdata',
            name='work_phone',
        ),
    ]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,938
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/cms/migrations/0004_auto_20160321_2147.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-22 02:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9.4, 2016-03-22).

    Final cms ordering tweaks (pictures newest-first, teams by category)
    and normalizes the ImageField upload_to values to text strings. Do
    not hand-edit.
    """
    dependencies = [
        ('cms', '0003_auto_20160318_1717'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='picture',
            options={'ordering': ['-id'], 'verbose_name': 'Imagen', 'verbose_name_plural': 'Imagenes'},
        ),
        migrations.AlterModelOptions(
            name='team',
            options={'ordering': ['category'], 'verbose_name': 'Equipo', 'verbose_name_plural': 'Equipos'},
        ),
        migrations.AlterField(
            model_name='picture',
            name='image',
            field=models.ImageField(blank=True, max_length=255, upload_to='', verbose_name='Imagenes'),
        ),
        migrations.AlterField(
            model_name='team',
            name='logo',
            field=models.ImageField(blank=True, max_length=255, upload_to='', verbose_name='Fotografía'),
        ),
    ]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,939
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/User/urls.py
|
# -*- encoding: utf-8 -*-
from django.conf.urls import url
from User.views import UserPageView, SuccessLoginView, SuccessSignUpView, SuccessLogoutView, HomePageView
# from django.contrib.auth import
from . import views
# URL routes for the User app.
# NOTE(review): most patterns lack a trailing '$' anchor, so e.g.
# r'^users' also matches '/users-anything' — confirm this is intended.
urlpatterns = [
    url(r'^$', HomePageView.as_view(), name='home_view'),
    url(r'^users', UserPageView.as_view(), name='user_list'),
    url(r'^success-signup', SuccessSignUpView.as_view(), name='success-signup'),
    url(r'^success-loguin', SuccessLoginView.as_view(), name='success-loguin'),
    url(r'^success-logout', SuccessLogoutView.as_view(), name='success-logout'),
    url(r'^login/$', views.authentication, name='authentication'),
    url(r'^create', views.create_data, name='create_data'),
    url(r'^signup', UserPageView.as_view(), name='signup'),
    url(r'^home', HomePageView.as_view(), name='home_view'),
]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,940
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/User/forms.py
|
from django import forms
from User.models import UserData
class RegistrationForm(forms.ModelForm):
    """Registration form backed directly by the UserData model.

    NOTE(review): ``fields = "__all__"`` also exposes the plain-text
    ``user_password`` field — confirm this is intended and consider an
    explicit field list.
    """
    class Meta:
        model = UserData
        fields = "__all__"
    # def __init__(self, *args, **kwargs):
    #     super(RegistrationForm, self).__init__(*args, **kwargs)
    #     cleaned_data = super(RegistrationForm, self).clean()
    #     return cleaned_data
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,941
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/User/views.py
|
from django.shortcuts import render, redirect
from django.views.generic import ListView, TemplateView
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth import logout
from User.forms import RegistrationForm
from User.models import UserData
from .mixins import LoginRequiredMixin
def authentication(request):
    """Handle signup / login / logout posted from the login page.

    Expected POST parameters:
        action   -- one of 'signup', 'login', 'logout'
        username -- credentials for signup/login
        password -- credentials for signup/login

    Returns a redirect to the matching success page on success;
    otherwise (GET, unknown action, or failed login) re-renders
    ``login.html``.

    Fixes over the original:
    * removed the Python-2-only debug statement ``print 'hellow mtf'``,
      which is a SyntaxError on Python 3;
    * ``authenticate()`` returns ``None`` for bad credentials — the
      original passed that straight to ``login()``, raising
      AttributeError; failed logins now fall through to the login page.
    """
    if request.method == 'POST':
        action = request.POST.get('action', None)
        username = request.POST.get('username', None)
        password = request.POST.get('password', None)
        if action == 'signup':
            user = User.objects.create_user(username=username, password=password)
            user.save()
            return redirect('/success-signup')
        elif action == 'login':
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return redirect('/success-loguin')
        elif action == 'logout':
            logout(request)
            return redirect('/success-logout')
    return render(request, 'login.html', {})
def create_data(request):
    """Create a UserData record from the registration form.

    POST: validate and save the form, then redirect to the success page
    (an invalid form falls through and is re-rendered with errors).
    GET: render an empty form.
    """
    if request.method == 'POST':
        myForm = RegistrationForm(request.POST)
        if myForm.is_valid():
            myForm.save()
            return redirect('/success-loguin')
    else:
        myForm = RegistrationForm()
    return render(request, 'user_form.html', {'myForm': myForm})
class UserPageView(ListView):
    """List page for users.

    NOTE(review): ``model`` is UserData but the queryset returns Django
    auth ``User`` objects — confirm which one the template expects.
    """
    model = UserData
    template_name = "user_list.html"

    def get_queryset(self):
        return User.objects.all()
class HomePageView(ListView):
    """Landing page; lists all UserData records via the default queryset."""
    model = UserData
    template_name = "home.html"
class SuccessLoginView(LoginRequiredMixin, TemplateView):
    """Post-login page, only reachable when authenticated.

    NOTE(review): TemplateView ignores ``model``/``get_queryset`` — these
    look like ListView leftovers; confirm they are needed.
    """
    model = UserData
    template_name = "success_login.html"

    def get_queryset(self):
        return UserData.objects.all()
class SuccessSignUpView(TemplateView):
    """Static confirmation page shown after a successful signup."""
    template_name = "success_sign_up.html"
class SuccessLogoutView(TemplateView):
    """Static confirmation page shown after logging out."""
    template_name = "success_logout.html"
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,942
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/User/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-03 13:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 1.9.2, 2016-03-03).

    Creates the original ``User`` model with auth, profile, and company
    fields plus the unique (document_type, document_number) pair. Do not
    hand-edit.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='Correo electr\xf3nico')),
                ('first_name', models.CharField(blank=True, max_length=128, null=True, verbose_name='Nombres')),
                ('last_name', models.CharField(blank=True, max_length=128, null=True, verbose_name='Apellidos')),
                ('document_type', models.PositiveSmallIntegerField(choices=[(10, 'Cedula de ciudadan\xeda'), (20, 'Cedula de extranjer\xeda'), (30, 'Pasaporte')], verbose_name='Tipo de documento')),
                ('document_number', models.CharField(max_length=64, verbose_name='N\xfamero de documento')),
                ('document_expedition', models.CharField(blank=True, max_length=128, null=True, verbose_name='Lugar de expedici\xf3n')),
                ('nationality', models.CharField(blank=True, max_length=128, null=True, verbose_name='Nacionalidad')),
                ('birth_date', models.DateField(blank=True, null=True, verbose_name='Fecha de nacimiento')),
                ('department', models.CharField(blank=True, max_length=128, null=True, verbose_name='Departamento')),
                ('city', models.CharField(blank=True, max_length=128, null=True, verbose_name='Ciudad')),
                ('address', models.CharField(blank=True, max_length=128, null=True, verbose_name='Direcci\xf3n')),
                ('mobile_phone', models.CharField(blank=True, max_length=64, null=True, verbose_name='Tel\xe9fono Celular')),
                ('home_phone', models.CharField(blank=True, max_length=64, null=True, verbose_name='Tel\xe9fono Fijo')),
                ('genre', models.PositiveSmallIntegerField(blank=True, choices=[(10, 'Masculino'), (20, 'Femenino'), (30, 'Otro')], null=True, verbose_name='Genero')),
                ('company_name', models.CharField(blank=True, max_length=128, null=True, verbose_name='Empresa')),
                ('company_department', models.CharField(blank=True, max_length=128, null=True, verbose_name='Departamento')),
                ('company_city', models.CharField(blank=True, max_length=128, null=True, verbose_name='Ciudad')),
                ('company_address', models.CharField(blank=True, max_length=128, null=True, verbose_name='Direcci\xf3n')),
                ('company_position', models.CharField(blank=True, max_length=128, null=True, verbose_name='Cargo')),
                ('work_phone', models.CharField(blank=True, max_length=64, null=True, verbose_name='Tel\xe9fono')),
                ('is_staff', models.BooleanField(default=False, help_text='Indica si puede entrar al sitio de administraci\xf3n.', verbose_name='Administrador')),
                ('is_active', models.BooleanField(default=False, help_text='Indica si el usuario puede ser tratado como activo.', verbose_name='Activo')),
                ('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='Fecha de registro')),
            ],
            options={
                'ordering': ('first_name', 'last_name'),
                'verbose_name': 'usuario',
                'verbose_name_plural': 'usuarios',
            },
        ),
        migrations.AlterUniqueTogether(
            name='user',
            unique_together=set([('document_type', 'document_number')]),
        ),
    ]
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,943
|
ivancarrancho/Corrosive-the-punk-rooster-
|
refs/heads/master
|
/app/urls.py
|
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls import include
# Root URL configuration: admin plus the two app URLconfs mounted at ''.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'', include('User.urls', namespace='user')),
    url(r'', include('cms.urls', namespace='cms')),
]
# Would serve uploaded media in development if enabled:
# + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
{"/User/admin.py": ["/User/models.py"], "/cms/admin.py": ["/cms/models.py"], "/cms/forms.py": ["/cms/models.py"], "/cms/views.py": ["/cms/models.py"], "/cms/urls.py": ["/cms/views.py"], "/User/urls.py": ["/User/views.py"], "/User/forms.py": ["/User/models.py"]}
|
31,948
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/movies/models.py
|
from django.db import models
from django.db.models.fields import CharField
from actors.models import Actor
from django.contrib.auth.models import User
class Genre(models.Model):
    """
    A movie genre, e.g.:
    Action
    ScienceFiction
    Horror
    Musical
    """
    name = CharField(max_length=20)

    def __str__(self) -> str:
        return self.name
class Movie(models.Model):
    """A film with its genre, cast, and the user who created the record.

    Example mapping: "Blade Runner" -> ScienceFiction.
    (The example was a stray no-op string expression in the class body in
    the original; it is folded into this docstring.)
    """
    name = models.CharField(max_length=100)
    year = models.DateField(blank=True)
    genre = models.ForeignKey(Genre, on_delete=models.CASCADE)
    actors = models.ManyToManyField(Actor, related_name="movies")
    created_at = models.DateTimeField(blank=False, null=False)
    # SET_NULL keeps the movie row when its creating user is deleted.
    user = models.ForeignKey(User, blank=True, null=True, on_delete=models.SET_NULL)

    def __str__(self) -> str:
        return f"{self.name} - {self.year}"
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,949
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/actors/apps.py
|
from django.apps import AppConfig
class ActorsConfig(AppConfig):
    """Django app configuration for the ``actors`` app."""

    default_auto_field = 'django.db.models.BigAutoField'
    name = 'actors'
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,950
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/movies/migrations/0002_alter_movie_actors.py
|
# Generated by Django 3.2.4 on 2021-06-09 18:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds related_name='movies' to Movie.actors (enables actor.movies reverse lookups)."""

    dependencies = [
        ('actors', '0002_alter_actor_birthday'),
        ('movies', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='movie',
            name='actors',
            field=models.ManyToManyField(related_name='movies', to='actors.Actor'),
        ),
    ]
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,951
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/movies/views.py
|
from django.forms import models
from django.http.response import HttpResponse
from django.shortcuts import render, redirect
from .models import Movie, Genre
from datetime import date
from .forms import AddMovieForm, FeedbackForm
from django.contrib.auth.decorators import login_required
from django.views.generic import ListView
from actors.models import Actor
from django.utils import timezone
# Create your views here.
class MovieListView(ListView):
    """Class-based list view exposing all movies under the name 'movies'."""

    model = Movie
    context_object_name = 'movies'
def movies_list(request):
    """Render the full movie catalogue."""
    all_movies = Movie.objects.all()
    return render(request, 'movies/movies_list.html', {'movies': all_movies})
def feedback_view(request):
    """Show the feedback form; on POST, validate it and acknowledge or echo errors."""
    if request.method != "POST":
        # First visit: render an empty form.
        return render(request, 'movies/feedback.html', {'form': FeedbackForm()})
    form = FeedbackForm(request.POST)
    if not form.is_valid():
        return HttpResponse(form.errors)
    return HttpResponse("<p>Thx you for your feedback</p>")
def index(request):
    """Render the movies app landing page."""
    return render(request, 'movies/index.html')
@login_required
def movie_new(request):
    """Create a Movie from the submitted form (authenticated users only).

    GET renders the creation form plus the current catalogue; POST saves
    the movie with the current timestamp and requesting user attached.
    """
    if request.method == "POST":
        form = AddMovieForm(request.POST)
        if not form.is_valid():
            print(form.errors)
            return HttpResponse("<h3>Error</h3>")
        # Defer the save so we can attach the server-side fields first.
        movie = form.save(commit=False)
        movie.created_at = timezone.now()
        movie.user = request.user
        movie.save()
        return HttpResponse("<h3>Movie was saved</h3>")
    context = {'form': AddMovieForm(), 'objects': Movie.objects.all()}
    return render(request, 'movies/create_movie_form.html', context)
@login_required
def movie_added_me(request):
    """List only the movies created by the logged-in user.

    Fix: removed a large commented-out duplicate of movie_new that had been
    pasted into this function's body; `.all().filter(...)` simplified to the
    equivalent `.filter(...)`.
    """
    my_movies = Movie.objects.filter(user=request.user)
    return render(request, 'movies/movies_list.html', {'movies': my_movies})
def add_actor(request):
    """Demo view: create a hard-coded movie and attach the actor named "Mell".

    Fix: removed `mell.movies.all()` whose result was discarded — a dead
    database query with no effect.
    """
    mell = Actor.objects.get(first_name="Mell")
    new_movie = Movie(name="Lalaland", year=date(2019, 2, 4), genre=Genre.objects.get(pk=1))
    # The movie needs a primary key before M2M rows can reference it.
    new_movie.save()
    new_movie.actors.add(mell)
    new_movie.save()  # NOTE: redundant — .add() persists immediately; kept for safety
    return HttpResponse("Mell was added")
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,952
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/movies/migrations/0001_initial.py
|
# Generated by Django 3.2.4 on 2021-06-09 17:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial movies schema: Genre and Movie (before related_name/created_at/user were added)."""

    initial = True

    dependencies = [
        ('actors', '0002_alter_actor_birthday'),
    ]

    operations = [
        migrations.CreateModel(
            name='Genre',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='Movie',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('year', models.DateField(blank=True)),
                ('actors', models.ManyToManyField(to='actors.Actor')),
                ('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='movies.genre')),
            ],
        ),
    ]
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,953
|
SergMagpie/my_project
|
refs/heads/master
|
/my_site/itstep/models.py
|
from django.db import models
from django.urls import reverse
# Create your models here.
class Exercises(models.Model):
    """An exercise/article with a photo, belonging to one Category."""

    title = models.CharField(max_length=255, verbose_name='title')
    content = models.TextField(blank=True, verbose_name='content')
    photo = models.ImageField(upload_to="photos/", verbose_name='photo')
    time_create = models.DateTimeField(auto_now_add=True)  # set once on insert
    time_update = models.DateTimeField(auto_now=True)      # refreshed on every save
    is_published = models.BooleanField(default=True)
    # PROTECT: a Category that still has exercises cannot be deleted.
    cat = models.ForeignKey('Category',
                            on_delete=models.PROTECT)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # URL pattern named 'post' must accept a post_id kwarg.
        return reverse('post', kwargs={'post_id': self.pk})

    class Meta:
        verbose_name = 'My exercises'
        verbose_name_plural = 'My exercises'
        ordering = ['id']
class Category(models.Model):
    """A category grouping Exercises entries."""

    name = models.CharField(max_length=100, db_index=True)  # indexed for lookups

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # URL pattern named 'category' must accept a cat_id kwarg.
        return reverse('category', kwargs={'cat_id': self.pk})

    class Meta:
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'
        ordering = ['id']
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,954
|
SergMagpie/my_project
|
refs/heads/master
|
/my_site/itstep/apps.py
|
from django.apps import AppConfig
class ItstepConfig(AppConfig):
    """Django app configuration for the ``itstep`` app."""

    default_auto_field = 'django.db.models.BigAutoField'
    name = 'itstep'
    verbose_name = 'It Step'
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,955
|
SergMagpie/my_project
|
refs/heads/master
|
/my_site/itstep/migrations/0003_rename_cat_id_exercises_cat.py
|
# Generated by Django 3.2.4 on 2021-06-13 18:57
from django.db import migrations
class Migration(migrations.Migration):
    """Renames Exercises.cat_id to cat (Django appends _id to FK columns itself)."""

    dependencies = [
        ('itstep', '0002_auto_20210613_1534'),
    ]

    operations = [
        migrations.RenameField(
            model_name='exercises',
            old_name='cat_id',
            new_name='cat',
        ),
    ]
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,956
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/actors/admin.py
|
from django.contrib import admin
from django.db import models
from .models import Actor
# Register your models here.
@admin.register(Actor)
class ActorAdmin(admin.ModelAdmin):
    """Default admin interface for Actor."""
    pass
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,957
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/movies/urls.py
|
from django.urls import path
from .views import add_actor, movie_new, feedback_view, index, MovieListView, movies_list, movie_added_me
# Route table for the movies app.
# NOTE(review): add_actor is imported above but not routed here — confirm intent.
urlpatterns = [
    path("", index),
    path("add-movie", movie_new, name="add-movie"),
    path("movie-list", movies_list, name="movie-list"),
    path("feedback", feedback_view),  # unnamed: cannot be reversed by name
    path("movie-added-me", movie_added_me, name="movie-added-me"),
]
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,958
|
SergMagpie/my_project
|
refs/heads/master
|
/my_site/itstep/migrations/0004_alter_exercises_cat.py
|
# Generated by Django 3.2.4 on 2021-06-13 19:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Makes Exercises.cat non-nullable, keeping on_delete=PROTECT."""

    dependencies = [
        ('itstep', '0003_rename_cat_id_exercises_cat'),
    ]

    operations = [
        migrations.AlterField(
            model_name='exercises',
            name='cat',
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='itstep.category'),
        ),
    ]
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,959
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/actors/migrations/0002_alter_actor_birthday.py
|
# Generated by Django 3.2.4 on 2021-06-09 16:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Makes Actor.birthday a plain required DateField."""

    dependencies = [
        ('actors', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='actor',
            name='birthday',
            field=models.DateField(),
        ),
    ]
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,960
|
SergMagpie/my_project
|
refs/heads/master
|
/my_site/itstep/templatetags/itstep_tags.py
|
from django import template
from itstep.models import *
register = template.Library()
@register.simple_tag(name='getcats')
def get_categories():
    """Template tag {% getcats %}: return every Category."""
    return Category.objects.all()
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,961
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/movies/admin.py
|
from django.contrib import admin
from django.db.models import fields
from django.forms.models import ModelForm
from .models import Genre, Movie
from django import forms
# Register your models here.
@admin.register(Genre)
class GenreAdmin(admin.ModelAdmin):
    """Default admin interface for Genre."""
    pass
class MovieAdminForm(forms.ModelForm):
    """Admin form for Movie that hides the ``year`` field.

    Fix: removed a commented-out ``clean_name`` validator (dead code that
    rejected all-uppercase names with a misleading "Empty field" message).
    """

    class Meta:
        model = Movie
        fields = "__all__"
        # exclude overrides fields: every field except year is shown.
        exclude = ["year"]
@admin.register(Movie)
class MovieAdmin(admin.ModelAdmin):
    """Movie admin with year filter, name search and a computed cast size."""

    list_display = ("name", "year", "genre", "actors_count")
    list_filter = ("year",)
    search_fields = ("name",)
    form = MovieAdminForm

    def actors_count(self, obj):
        """Number of actors linked to *obj*."""
        # .count() issues a single COUNT(*) query; the previous
        # .all().count() spelling was an equivalent but noisier form.
        return obj.actors.count()
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,962
|
SergMagpie/my_project
|
refs/heads/master
|
/my_site/itstep/views.py
|
from django.http.response import Http404
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseNotFound
from .models import *
# Create your views here.
# Navigation menu shared by all pages; url_name values must match urls.py names.
# Fix: user-facing typo "Add articl" -> "Add article".
menu = [
    {'title': "About the site", 'url_name': 'about'},
    {'title': "Add article", 'url_name': 'add_page'},
    {'title': "Feedback", 'url_name': 'contact'},
    {'title': "Login", 'url_name': 'login'},
]
def index(request):
    """Home page: every exercise plus the shared menu, no category selected."""
    context = {
        'posts': Exercises.objects.all(),
        'menu': menu,
        'title': 'Main page',
        'cat_selected': 0,
    }
    return render(request, 'itstep/index.html', context=context)
def about(request):
    """Static 'About the site' page with the shared menu."""
    return render(request, 'itstep/about.html', {'menu': menu, 'title': 'About the site'})
def add_page(request):
    """Placeholder page for adding an article."""
    # Fix: the literals below were f-strings with no placeholders.
    return HttpResponse('add_page')


def contact(request):
    """Placeholder feedback/contact page."""
    return HttpResponse('contact')


def login(request):
    """Placeholder login page."""
    return HttpResponse('login')
def show_post(request, post_id):
    """Placeholder single-post page; echoes the requested id."""
    return HttpResponse(f'Archive by {post_id}')
def show_category(request, cat_id):
    """Home page filtered to one category; 404 when it has no posts.

    Idiom fix: ``len(posts) == 0`` replaced with the equivalent truthiness
    test ``not posts``.
    """
    posts = Exercises.objects.filter(cat_id=cat_id)
    if not posts:
        raise Http404()
    context = {
        'posts': posts,
        'menu': menu,
        'title': 'Main page',
        'cat_selected': cat_id,
    }
    return render(request, 'itstep/index.html', context=context)
def pageNotFound(request, exception):
    """Custom 404 handler (presumably wired up as handler404 — confirm in urls.py)."""
    return HttpResponseNotFound('<h1>Page not found :(</h1>')
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,963
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/movies/forms.py
|
from django import forms
from django.db.models import fields
from .models import Movie
class AddMovieForm(forms.ModelForm):
    """ModelForm for Movie; created_at and user are attached by the view, not the form."""

    class Meta:
        model = Movie
        fields = "__all__"
        exclude = ["created_at", "user"]
class FeedbackForm(forms.Form):
    """Free-text feedback form (the view only validates it; nothing is stored)."""

    # Fix: user-facing label typo "You feedback here:" -> "Your feedback here:".
    text = forms.CharField(label="Your feedback here:", help_text="HELP TEXT", max_length=30)
    name = forms.CharField(label="Your name", max_length=40)
    email = forms.EmailField(label="Enter your email")
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,964
|
SergMagpie/my_project
|
refs/heads/master
|
/hollywood/actors/models.py
|
from django.db import models
class Actor(models.Model):
    """An actor with name, birthday and height."""

    first_name = models.CharField(max_length=30)
    last_name = models.CharField(max_length=40)
    birthday = models.DateField()
    height = models.PositiveIntegerField()  # presumably centimetres — TODO confirm

    def __str__(self) -> str:
        return f"{self.first_name} {self.last_name}"

# Study notes on ORM usage:
# CRUD
# C - create
# R - read
# U - update
# D - delete
# Field lookups:
# lt,gt, lte, gte - less than, greater than,
# 185 to 195 inclusive (translated from Russian)
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,965
|
SergMagpie/my_project
|
refs/heads/master
|
/my_site/itstep/migrations/0002_auto_20210613_1534.py
|
# Generated by Django 3.2.4 on 2021-06-13 12:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds the Category model and a nullable cat_id FK on Exercises."""

    dependencies = [
        ('itstep', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(db_index=True, max_length=100)),
            ],
        ),
        migrations.AddField(
            model_name='exercises',
            name='cat_id',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='itstep.category'),
        ),
    ]
|
{"/hollywood/movies/views.py": ["/hollywood/movies/models.py", "/hollywood/movies/forms.py"], "/hollywood/actors/admin.py": ["/hollywood/actors/models.py"], "/hollywood/movies/urls.py": ["/hollywood/movies/views.py"], "/hollywood/movies/admin.py": ["/hollywood/movies/models.py"], "/my_site/itstep/views.py": ["/my_site/itstep/models.py"], "/hollywood/movies/forms.py": ["/hollywood/movies/models.py"]}
|
31,971
|
s045pd/detector
|
refs/heads/main
|
/task.py
|
import sqlite3
import urllib3
import yagmail
from celery import Celery, platforms
from requests_html import requests
from config import Config
from log import success, warning
# Silence TLS-verification warnings (requests are made with verify disabled elsewhere).
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
platforms.C_FORCE_ROOT = True  # allow the celery worker to run as root
celery_db = f"{Config.name}_worker.db"
sqlite3.connect(celery_db)  # touch the sqlite file so the sqlalchemy broker can open it
app_common = Celery(Config.name, broker=f"sqla+sqlite:///{celery_db}")
@app_common.task(shared=False)
def feishu(text: str, msg_type: str = "text", api_url: str = Config.feishu_API) -> None:
    """Celery task: post *text* to the Feishu webhook as a message of *msg_type*."""
    requests.post(api_url, json={"msg_type": msg_type, "content": {"text": text}})
@app_common.task(shared=False)
def mail_to(
    content: str,
    subject: str = "",
    usr: str = Config.mail_usr,
    pwd: str = Config.mail_pwd,
    host: str = Config.mail_host,
    port: int = Config.mail_port,
    targets: list = None,
) -> None:
    """Celery task: send *content* by SMTP via yagmail.

    Bug fix: ``targets`` used to be unconditionally overwritten with
    ``Config.mail_targets``, so the parameter was silently ignored. It is now
    honoured when provided. Fallback order:
    explicit targets -> Config.mail_targets -> [usr].
    """
    if targets is None:
        targets = Config.mail_targets
    if not targets:
        targets = [usr]
    warning(
        f"收到邮件发送任务 发件人: [{usr}] 主机: [{host}:{port}] 目标: [{targets}] 内容: [{len(content)}]"
    )
    with yagmail.SMTP(
        user=usr,
        password=pwd,
        port=port,
        smtp_ssl=False,
        # Skip SMTP login entirely when no password is configured.
        smtp_skip_login=not pwd,
        soft_email_validation=False,
        # Derive the SMTP host from the sender's domain when not configured.
        host=host if host else "smtp." + usr.split("@")[-1],
    ) as client:
        client.send(to=targets, subject=subject, contents=content)
    success(
        f"成功发送 发件人: [{usr}] 主机: [{host}:{port}] 目标: [{targets}] 内容: [{len(content)}]"
    )
|
{"/task.py": ["/config.py", "/log.py"], "/log.py": ["/config.py"], "/common.py": ["/config.py"], "/main.py": ["/config.py", "/common.py", "/log.py", "/task.py"], "/scheduler.py": ["/config.py", "/main.py"]}
|
31,972
|
s045pd/detector
|
refs/heads/main
|
/log.py
|
import logging
from termcolor import colored
from config import Config
# Root logger: timestamped messages at INFO level.
logging.basicConfig(format="[%(asctime)s] %(message)s", level=logging.INFO)
loger = logging.getLogger(Config.name)  # name kept as-is ("loger"): other modules use it


def base_msg(txt, color, tag="[-]"):
    """Log *txt* colored via termcolor, prefixed with a status tag."""
    loger.info(f"{tag}{colored(txt, color)}")


def info(txt):
    """Informational message: blue, tagged [*]."""
    base_msg(txt, "blue", "[*]")


def success(txt):
    """Success message: green, tagged [+]."""
    base_msg(txt, "green", "[+]")


def warning(txt):
    """Warning message: yellow, tagged [=]."""
    base_msg(txt, "yellow", "[=]")


def error(txt):
    """Error message: red, tagged [x]."""
    base_msg(txt, "red", "[x]")


if __name__ == "__main__":
    # Visual smoke test: one message per level/color.
    info("blue")
    success("green")
    warning("yellow")
    error("red")
|
{"/task.py": ["/config.py", "/log.py"], "/log.py": ["/config.py"], "/common.py": ["/config.py"], "/main.py": ["/config.py", "/common.py", "/log.py", "/task.py"], "/scheduler.py": ["/config.py", "/main.py"]}
|
31,973
|
s045pd/detector
|
refs/heads/main
|
/common.py
|
import hashlib
from contextlib import contextmanager
from dataclasses import dataclass
# Prefer the Replit-hosted key/value store; fall back to a local pickledb file.
try:
    from replit import db
except Exception as e:
    import pickledb
    from config import Config
    db = pickledb.load(f"{Config.name}_datas.db", True)  # True: auto-dump on write
@contextmanager
def db_action():
    """Yield a non-auto-dumping pickledb handle; persist it on exit.

    NOTE(review): ``pickledb`` and ``Config`` are only imported when the
    ``replit`` import above fails — if replit is available this raises
    NameError. Confirm intended deployment environment.
    """
    db = pickledb.load(f"{Config.name}_datas.db", False)
    try:
        yield db
    finally:
        db.dump()
def hash_code(strs):
    """Return the hexadecimal MD5 digest of the given string."""
    digest = hashlib.md5(strs.encode())
    return digest.hexdigest()
@dataclass
class DB:
    """Thin wrapper over the module-level ``db`` store, keyed by URL hash."""

    def add(self, data: dict):
        """
        Store *data* under the MD5 hash of its "url" field.

        Returns True when the key was not present before (new entry).
        """
        key = hash_code(data["url"])
        last_data = db.get(key)
        db[key] = data
        return not bool(last_data)

    def delete(self, key: str):
        """
        Remove *key* from the store; failures are silently ignored.
        """
        try:
            del db[key]
        # NOTE(review): bare except — probably also masks backends that do not
        # support item deletion, not just missing keys. Confirm before narrowing.
        except:
            pass
|
{"/task.py": ["/config.py", "/log.py"], "/log.py": ["/config.py"], "/common.py": ["/config.py"], "/main.py": ["/config.py", "/common.py", "/log.py", "/task.py"], "/scheduler.py": ["/config.py", "/main.py"]}
|
31,974
|
s045pd/detector
|
refs/heads/main
|
/main.py
|
import datetime
import json
import logging
import sys
from dataclasses import dataclass, field
from urllib.parse import urljoin
import urllib3
import yagmail
from celery import Celery
from requests_html import HTMLSession
from tqdm import tqdm
import config
from common import DB
from log import info, success, warning
from task import feishu, mail_to
# "python main.py debug" forces notifications for every entry, new or not.
DEBUG = len(sys.argv) == 2 and sys.argv[1] == "debug"
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if sys.version_info[0] == 2:
    # Python 2 relic: setdefaultencoding needs reload(sys) to exist, and the
    # attribute access above it is a no-op. This branch is dead on Python 3.
    sys.version_info.major
    sys.setdefaultencoding("utf-8")
# now_time = datetime.datetime.today().strftime('Y%.m%.%d')
@dataclass
class Detector:
    """Scrapes the Aliyun security-notice list and raises alerts for new entries."""

    # Mobile version of the notice list — simpler markup to parse.
    report_url: str = "https://m.aliyun.com/doc/notice_list/9213612.html"
    # email_from = '信息安全管理部<xxx@xxx.com>'
    # mail_tmplate = '<br>安全漏洞预警</br><a href={url}><br>{title}</br>{body}'

    def __post_init__(self):
        """
        Initialize the HTTP session (translated from: 初始化Session).
        """
        self.session = HTMLSession()
        # Mobile user agent so the site serves the mobile pages.
        self.session.headers = {
            "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Mobile Safari/537.36"
        }
        self.session.verify = False  # TLS verification off (warnings silenced above)
        self.tb = DB()

    def get_data(self):
        """
        Fetch the notice list; store and possibly notify each entry
        (translated from: 获得基础数据).
        """
        resp = self.session.get(self.report_url)
        for tag_a in tqdm(list(resp.html.pq("div.xs-content > a").items())):
            url = urljoin(resp.url, tag_a.attr("href"))
            # Each anchor holds two <div>s: date first, then title.
            tag_date, tag_title = tag_a("div")
            self.save_data(
                {
                    "date": tag_date.text,
                    "title": tag_title.text,
                    "url": url,
                    "content": self.get_content(url),
                }
            )

    def get_content(self, url):
        """
        Fetch one notice page and return a {section-heading: text} dict
        (translated from: 获得详情).
        """
        resp = self.session.get(url)
        datas = {}
        last_key = None
        # A <p> with a <strong> child starts a new section; following non-empty
        # paragraphs are appended to it; an empty paragraph closes the section.
        for tag_p in resp.html.pq("div#se-knowledge > p").items():
            key = tag_p("strong").text()
            text = tag_p.text().strip()
            if key:
                last_key = key
            elif text and last_key:
                if last_key not in datas:
                    datas[last_key] = ""
                datas[last_key] += f"{text}\n"
            elif not text:
                last_key = None
        return datas

    def save_data(self, data):
        """
        Persist *data*; notify only when it is a new entry (always in DEBUG)
        (translated from: 存储数据并判断是否已存在).
        """
        if self.tb.add(data) or DEBUG:
            self.notice(data)

    def notice(self, data):
        """
        Send Feishu + mail alerts for today's entries
        (translated from: 发起预警).
        """
        # Assumes the scraped "date" field is formatted "%m-%d" — TODO confirm.
        if datetime.datetime.today().strftime("%m-%d") != data["date"] and not DEBUG:
            return
        title, body = self.format_msg(data)
        success(f"notice {title}")
        feishu.delay(f"{title}\n\n{body}")
        mail_to.delay(body, title)
        # mail_to(body, title)

    def format_msg(self, data):
        """
        Build the (title, body) message pair
        (translated from: 整理数据成消息).
        """
        # body = "body"
        title = f'{data["title"]}\n'
        body = f"漏洞等级: {data['content'].get('漏洞评级','未知').strip().split()[-1]}\n参考链接: <a href='{data['url']}' target='_blank'></a> {data['url']}"
        return title, body
if __name__ == "__main__":
    # Manual one-shot run (the scheduled path goes through scheduler.run_check).
    Detector().get_data()
|
{"/task.py": ["/config.py", "/log.py"], "/log.py": ["/config.py"], "/common.py": ["/config.py"], "/main.py": ["/config.py", "/common.py", "/log.py", "/task.py"], "/scheduler.py": ["/config.py", "/main.py"]}
|
31,975
|
s045pd/detector
|
refs/heads/main
|
/config.py
|
class Config:
    """Central configuration; "xxxxx" placeholders must be filled before deployment."""

    name = "CVEMonitor"  # used for the celery app name, logger name and db filenames
    feishu_API = "xxxxx"  # Feishu webhook URL
    mail_usr = "xxxxx"  # SMTP user / sender address
    mail_pwd = "xxxxxx"  # SMTP password; empty skips login
    mail_host = "xxxxx"  # SMTP host; empty derives it from the sender domain
    mail_port = 25
    mail_targets = [
        "xxxxxxx",
        "xxxxxxxx",
    ]
|
{"/task.py": ["/config.py", "/log.py"], "/log.py": ["/config.py"], "/common.py": ["/config.py"], "/main.py": ["/config.py", "/common.py", "/log.py", "/task.py"], "/scheduler.py": ["/config.py", "/main.py"]}
|
31,976
|
s045pd/detector
|
refs/heads/main
|
/scheduler.py
|
import sqlite3
from datetime import timedelta
from celery import Celery, platforms
from celery.schedules import crontab
from config import Config
from main import Detector
platforms.C_FORCE_ROOT = True  # allow the beat/worker to run as root
celery_db = f"{Config.name}_scheduler.db"
sqlite3.connect(celery_db)  # touch the sqlite file so the sqlalchemy broker can open it
app_run = Celery(Config.name, broker=f"sqla+sqlite:///{celery_db}")

app_run.conf.update(
    beat_schedule={
        "定时检测": {
            "task": "scheduler.run_check",
            # "schedule": crontab(hour=9, minute=00),
            "schedule": timedelta(seconds=60),  # poll every minute
        }
    }
)


@app_run.task()
def run_check():
    """Periodic beat task: run one full detection pass."""
    Detector().get_data()
|
{"/task.py": ["/config.py", "/log.py"], "/log.py": ["/config.py"], "/common.py": ["/config.py"], "/main.py": ["/config.py", "/common.py", "/log.py", "/task.py"], "/scheduler.py": ["/config.py", "/main.py"]}
|
32,036
|
changyaochen/rbo
|
refs/heads/master
|
/tests/test.py
|
"""
This module contains some test cases.
"""
import string
import numpy as np
import pytest
from rbo.rbo import RankingSimilarity
# Parametrized cases: (list_1, list_2, expected average overlap at p=1.0).
TESTS = [
    # Sanity checks
    (string.ascii_lowercase, string.ascii_lowercase, 1.0),
    (string.ascii_lowercase, string.ascii_lowercase[:7], 1.0),
    ("abcde", "fghij", 0.0),
    # RBO Paper Figure 5
    ("abcdefg", "zcavwxy", 0.312),
    # Source: https://ragrawal.wordpress.com/2013/01/18/comparing-ranked-list/
    ("abcde", "bacde", 0.8),
    ("abcde", "abced", 0.95),
    # One-Element lists
    ("a", "a", 1.0),
    ("a", "b", 0),
    # Empty lists
    ("", "", 1),
    ("a", "", 0),
    ("", "a", 0),
]
@pytest.mark.parametrize("list_1, list_2, expected", TESTS)
def test_rbo(list_1: list, list_2: list, expected: float):
    """
    Check the average-overlap result and smoke-test the extrapolated variants.

    Args:
        list_1: List 1.
        list_2: List 2.
        expected: Expected RBO.
    Returns:
        None
    """
    p = 0.95  # pylint: disable=invalid-name
    list_1, list_2 = list(list_1), list(list_2)
    print("List 1 is: {}".format(list_1))
    print("List 2 is: {}".format(list_2))
    rs_object = RankingSimilarity(list_1, list_2, verbose=True)
    # p=1.0 degenerates RBO to plain average overlap, which TESTS encodes.
    rbo = rs_object.rbo(p=1.0)
    print("The implemented Average Overlap is: {:6.3f}".format(rbo))
    print("The correct answer is: {:6.3f}".format(expected))
    assert np.round(rbo, decimals=3) == expected
    # Extrapolated variants are exercised for crash-freedom only, not asserted.
    print("The implemented rbo_ext 1 is: {:6.3f}".format(
        rs_object.rbo(p=p, k=3, ext=True)))
    print("The implemented rbo_ext 2 is: {:6.3f}".format(
        rs_object.rbo_ext(p=p)))
|
{"/tests/test.py": ["/rbo/rbo.py"], "/rbo/__init__.py": ["/rbo/rbo.py"]}
|
32,037
|
changyaochen/rbo
|
refs/heads/master
|
/rbo/rbo.py
|
# pylint: disable=C0103, R0914, R0201
"""Main module for rbo."""
from typing import List, Optional, Union
import numpy as np
from tqdm import tqdm
class RankingSimilarity:
"""
This class will include some similarity measures between two different
ranked lists.
"""
    def __init__(
        self,
        S: Union[List, np.ndarray],
        T: Union[List, np.ndarray],
        verbose: bool = False,
    ) -> None:
        """
        Initialize the object with the required lists.
        Examples of lists:
        S = ["a", "b", "c", "d", "e"]
        T = ["b", "a", 1, "d"]
        Both lists reflect the ranking of the items of interest, for example,
        list S tells us that item "a" is ranked first, "b" is ranked second,
        etc.
        Args:
            S, T (list or numpy array): lists with alphanumeric elements. They
                could be of different lengths. Both of the them should be
                ranked, i.e., each element"s position reflects its respective
                ranking in the list. Also we will require that there is no
                duplicate element in each list.
            verbose: If True, print out intermediate results. Default to False.
        """
        assert type(S) in [list, np.ndarray]
        assert type(T) in [list, np.ndarray]
        # Ranked lists must be duplicate-free for the overlap math to hold.
        assert len(S) == len(set(S))
        assert len(T) == len(set(T))
        self.S, self.T = S, T
        self.N_S, self.N_T = len(S), len(T)  # cached lengths
        self.verbose = verbose
        self.p = 0.5  # just a place holder
def assert_p(self, p: float) -> None:
"""Make sure p is between (0, 1), if so, assign it to self.p.
Args:
p (float): The value p.
"""
assert 0.0 < p < 1.0, "p must be between (0, 1)"
self.p = p
def _bound_range(self, value: float) -> float:
"""Bounds the value to [0.0, 1.0]."""
try:
assert (0 <= value <= 1 or np.isclose(1, value))
return value
except AssertionError:
print("Value out of [0, 1] bound, will bound it.")
larger_than_zero = max(0.0, value)
less_than_one = min(1.0, larger_than_zero)
return less_than_one
def rbo(
self,
k: Optional[float] = None,
p: float = 1.0,
ext: bool = False,
) -> float:
"""
This the weighted non-conjoint measures, namely, rank-biased overlap.
Unlike Kendall tau which is correlation based, this is intersection
based.
The implementation if from Eq. (4) or Eq. (7) (for p != 1) from the
RBO paper: http://www.williamwebber.com/research/papers/wmz10_tois.pdf
If p = 1, it returns to the un-bounded set-intersection overlap,
according to Fagin et al.
https://researcher.watson.ibm.com/researcher/files/us-fagin/topk.pdf
The fig. 5 in that RBO paper can be used as test case.
Note there the choice of p is of great importance, since it
essentially control the "top-weightness". Simply put, to an extreme,
a small p value will only consider first few items, whereas a larger p
value will consider more items. See Eq. (21) for quantitative measure.
Args:
k: The depth of evaluation.
p: Weight of each agreement at depth d:
p**(d-1). When set to 1.0, there is no weight, the rbo returns
to average overlap.
ext: If True, we will extrapolate the rbo, as in Eq. (23).
Returns:
The rbo at depth k (or extrapolated beyond).
"""
if not self.N_S and not self.N_T:
return 1 # both lists are empty
if not self.N_S or not self.N_T:
return 0 # one list empty, one non-empty
if k is None:
k = float("inf")
k = min(self.N_S, self.N_T, k)
# initialize the agreement and average overlap arrays
A, AO = [0] * k, [0] * k
if p == 1.0:
weights = [1.0 for _ in range(k)]
else:
self.assert_p(p)
weights = [1.0 * (1 - p) * p**d for d in range(k)]
# using dict for O(1) look up
S_running, T_running = {self.S[0]: True}, {self.T[0]: True}
A[0] = 1 if self.S[0] == self.T[0] else 0
AO[0] = weights[0] if self.S[0] == self.T[0] else 0
for d in tqdm(range(1, k), disable=~self.verbose):
tmp = 0
# if the new item from S is in T already
if self.S[d] in T_running:
tmp += 1
# if the new item from T is in S already
if self.T[d] in S_running:
tmp += 1
# if the new items are the same, which also means the previous
# two cases did not happen
if self.S[d] == self.T[d]:
tmp += 1
# update the agreement array
A[d] = 1.0 * ((A[d - 1] * d) + tmp) / (d + 1)
# update the average overlap array
if p == 1.0:
AO[d] = ((AO[d - 1] * d) + A[d]) / (d + 1)
else: # weighted average
AO[d] = AO[d - 1] + weights[d] * A[d]
# add the new item to the running set (dict)
S_running[self.S[d]] = True
T_running[self.T[d]] = True
if ext and p < 1:
return self._bound_range(AO[-1] + A[-1] * p**k)
return self._bound_range(AO[-1])
def rbo_ext(self, p=0.98):
    """
    This is the ultimate implementation of the rbo, namely, the
    extrapolated version. The corresponding formula is Eq. (32) in the rbo
    paper.

    Args:
        p (float): Weight parameter in (0, 1); agreement at depth d is
            weighted by p**d, so smaller p is more top-weighted.

    Returns:
        A float in [0, 1]: the extrapolated rank-biased overlap of the
        two member lists S and T (possibly of unequal length).
    """
    self.assert_p(p)
    if not self.N_S and not self.N_T:
        return 1  # both lists are empty
    if not self.N_S or not self.N_T:
        return 0  # one list empty, one non-empty
    # since we are dealing with un-even lists, we need to figure out the
    # long (L) and short (S) list first. The name S might be confusing
    # but in this function, S refers to short list, L refers to long list
    if len(self.S) > len(self.T):
        L, S = self.S, self.T
    else:
        S, L = self.S, self.T
    s, l = len(S), len(L)  # noqa
    # initialize the overlap and rbo arrays
    # the agreement can be simply calculated from the overlap
    X, A, rbo = [0] * l, [0] * l, [0] * l
    # first item
    S_running, L_running = {S[0]}, {L[0]}  # for O(1) look up
    X[0] = 1 if S[0] == L[0] else 0
    A[0] = X[0]
    rbo[0] = 1.0 * (1 - p) * A[0]
    # start the calculation
    disjoint = 0
    ext_term = A[0] * p
    # NOTE(review): `~self.verbose` is a *bitwise* not; on a plain Python
    # bool both ~True (-2) and ~False (-1) are truthy, which would always
    # disable the progress bar. Works as logical-not only for numpy bools
    # -- confirm the declared type of self.verbose.
    for d in tqdm(range(1, l), disable=~self.verbose):
        if d < s:  # still overlapping in length
            S_running.add(S[d])
            L_running.add(L[d])
            # again I will revoke the DP-like step
            overlap_incr = 0  # overlap increment at step d
            # if the new items are the same
            if S[d] == L[d]:
                overlap_incr += 1
            else:
                # if the new item from S is in L already
                if S[d] in L_running:
                    overlap_incr += 1
                # if the new item from L is in S already
                if L[d] in S_running:
                    overlap_incr += 1
            X[d] = X[d - 1] + overlap_incr
            # Eq. (28) that handles the tie. len() is O(1)
            A[d] = 2.0 * X[d] / (len(S_running) + len(L_running))
            rbo[d] = rbo[d - 1] + 1.0 * (1 - p) * (p**d) * A[d]
            ext_term = 1.0 * A[d] * p**(d + 1)  # the extrapolate term
        else:  # the short list has fallen off the cliff
            L_running.add(L[d])  # we still have the long list
            # now there is one case
            overlap_incr = 1 if L[d] in S_running else 0
            X[d] = X[d - 1] + overlap_incr
            A[d] = 1.0 * X[d] / (d + 1)
            rbo[d] = rbo[d - 1] + 1.0 * (1 - p) * (p**d) * A[d]
            X_s = X[s - 1]  # this the last common overlap
            # second term in first parenthesis of Eq. (32)
            disjoint += 1.0 * (1 - p) * (p**d) * \
                (X_s * (d + 1 - s) / (d + 1) / s)
            ext_term = 1.0 * ((X[d] - X_s) / (d + 1) + X[s - 1] / s) * \
                p**(d + 1)  # last term in Eq. (32)
    return self._bound_range(rbo[-1] + disjoint + ext_term)
def top_weightness(
        self,
        p: Optional[float] = None,
        d: Optional[int] = None):
    """
    Evaluate how top-weighted the rbo evaluation is; this implements
    Eq. (21) of the rbo paper.

    As a sanity check (per the rbo paper),
    top_weightness(p=0.9, d=10) should be 86%
    top_weightness(p=0.98, d=50) should be 86% too

    Args:
        p (float), default None: A value between zero and one.
        d (int), default None: Evaluation depth of the list.

    Returns:
        A float between [0, 1], that indicates the top-weightness.
    """
    # sanity check
    self.assert_p(p)
    # clamp the evaluation depth to the shorter of the two lists
    depth = min(self.N_S, self.N_T) if d is None else \
        min(self.N_S, self.N_T, int(d))
    if depth == 0:
        top_w = 1
    elif depth == 1:
        # Eq. (21) with d == 1: the leading (1 - p**0) term vanishes
        top_w = 1.0 * (1 - p) / p * (np.log(1.0 / (1 - p)))
    else:
        # partial sum of p**i / i over i = 1 .. depth - 1
        partial = sum(1.0 * p**i / i for i in range(1, depth))
        last = depth - 1
        top_w = 1 - p**last + 1.0 * (1 - p) / p * depth * \
            (np.log(1.0 / (1 - p)) - partial)
    if self.verbose:
        print("The first {} ranks have {:6.3%} of the weight of "
              "the evaluation.".format(depth, top_w))
    return self._bound_range(top_w)
|
{"/tests/test.py": ["/rbo/rbo.py"], "/rbo/__init__.py": ["/rbo/rbo.py"]}
|
32,038
|
changyaochen/rbo
|
refs/heads/master
|
/rbo/__init__.py
|
"""Main module for rbo."""
from .rbo import RankingSimilarity
|
{"/tests/test.py": ["/rbo/rbo.py"], "/rbo/__init__.py": ["/rbo/rbo.py"]}
|
32,039
|
methadoom/PetFriendsPytest
|
refs/heads/main
|
/settings.py
|
# PetFriends API test-account credentials used by the test suite.
# NOTE(review): credentials are committed to the repository in plain text --
# consider loading them from environment variables or an ignored local file.
valid_email = "vlad15.08@bk.ru"
valid_password = "qwe123"
|
{"/tests/PetFriendsTest.py": ["/settings.py"]}
|
32,040
|
methadoom/PetFriendsPytest
|
refs/heads/main
|
/tests/PetFriendsTest.py
|
from api import PetFriends
from settings import valid_email, valid_password
import os
pf = PetFriends()
def test_get_api_key_for_valid_user(email=valid_email, password=valid_password):
    """A valid login must return HTTP 200 and a body containing 'key'."""
    http_status, body = pf.get_api_key(email, password)
    assert http_status == 200
    assert 'key' in body
def test_get_all_pets_with_valid_key(filter=''):
    """Listing pets with a valid auth key returns 200 and a non-empty list."""
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    http_status, body = pf.get_list_of_pets(auth_key, filter)
    assert http_status == 200
    assert len(body['pets']) > 0
def test_add_new_pet_with_valid_data(name='Кот', animal_type='Кот', age='1', pet_photo='images/cat1.jpg'):
    """Adding a pet with valid data returns 200 and echoes the pet's name.

    Bug fix: the default photo path used a backslash ('images\\cat1.jpg'),
    where '\\c' is an invalid escape sequence (SyntaxWarning on modern
    Python) and the separator breaks on non-Windows systems. A forward
    slash is portable and works with os.path.join on all platforms.
    """
    # resolve the photo path relative to this test file, not the CWD
    pet_photo = os.path.join(os.path.dirname(__file__), pet_photo)
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    status, result = pf.add_new_pet(auth_key, name, animal_type, age, pet_photo)
    assert status == 200
    assert result['name'] == name
# NOTE(review): a second function with this exact name is defined later in
# this file; under pytest collection the later definition shadows this one,
# so this test never actually runs. The later copy should be renamed.
def test_successful_update_self_pet_info(name='Пёс', animal_type='Кошка', age=5):
    """Updating the first of the user's own pets should succeed (HTTP 200)."""
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    _, my_pets = pf.get_list_of_pets(auth_key, "my_pets")
    if len(my_pets['pets']) > 0:
        status, result = pf.update_pet_info(auth_key, my_pets['pets'][0]['id'], name, animal_type, age)
        assert status == 200
        assert result['name'] == name
    else:
        # no pets to update -- fail loudly rather than silently pass
        raise Exception("There is no my pets")
def test_successful_delete_self_pet():
    """Deleting the user's first pet succeeds and removes it from the list.

    Bug fixes:
    - 'images\\cat1.jpg' contained the invalid escape '\\c' and a
      Windows-only separator; replaced with a portable, file-relative path
      (matching the other tests in this module).
    - The original assertion ``pet_id not in my_pets.values()`` was vacuous:
      ``my_pets`` maps 'pets' to a *list* of pet dicts, so an id string can
      never be a direct dict value and the check always passed. We now
      compare against the ids of the remaining pets.
    """
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    _, my_pets = pf.get_list_of_pets(auth_key, "my_pets")
    if len(my_pets['pets']) == 0:
        photo = os.path.join(os.path.dirname(__file__), "images/cat1.jpg")
        pf.add_new_pet(auth_key, "Пёс", "Кошка", "5", photo)
        _, my_pets = pf.get_list_of_pets(auth_key, "my_pets")
    pet_id = my_pets['pets'][0]['id']
    status, _ = pf.delete_pet(auth_key, pet_id)
    _, my_pets = pf.get_list_of_pets(auth_key, "my_pets")
    assert status == 200
    assert pet_id not in [pet['id'] for pet in my_pets['pets']]
# 1 удаление несуществующего животного
def test_delete_non_existent_self_pet():
    """Delete the user's second pet and verify it is gone.

    Bug fixes:
    - 'images\\P1040103.jpg' used the invalid escape '\\P' and a
      Windows-only separator; replaced with a portable, file-relative path.
    - The original assertion ``pet_id not in my_pets.values()`` was vacuous
      (values() holds the pet *list*, never an id); we now check the ids of
      the remaining pets.

    NOTE(review): despite the name, this deletes ``pets[1]`` -- a pet that
    *does* exist (and raises IndexError when fewer than two pets exist).
    A true "delete non-existent" test should use a fabricated id; confirm
    the intended behavior before restructuring.
    """
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    _, my_pets = pf.get_list_of_pets(auth_key, "my_pets")
    if len(my_pets['pets']) == 0:
        photo = os.path.join(os.path.dirname(__file__), "images/P1040103.jpg")
        pf.add_new_pet(auth_key, "Кот", "Кот", "1", photo)
        _, my_pets = pf.get_list_of_pets(auth_key, "my_pets")
    pet_id = my_pets['pets'][1]['id']
    status, _ = pf.delete_pet(auth_key, pet_id)
    _, my_pets = pf.get_list_of_pets(auth_key, "my_pets")
    assert status == 200
    assert pet_id not in [pet['id'] for pet in my_pets['pets']]
# 2 загрузить видео вместо фото питомца
def test_add_new_pet_with_video_image(name='Вася', animal_type='Кот', age='3', pet_photo='images/кот.mp4'):
    """Uploading a video file instead of a photo is expected to fail (500).

    Bug fix: the default path 'images\\кот.mp4' contained a backslash
    before a non-ASCII character (an invalid escape sequence) and a
    Windows-only separator; replaced with a portable forward slash.
    """
    pet_photo = os.path.join(os.path.dirname(__file__), pet_photo)
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    status, result = pf.add_new_pet(auth_key, name, animal_type, age, pet_photo)
    # NOTE(review): 500 is what the API currently returns for a bad media
    # type; a 4xx would be more correct -- confirm against the API spec.
    assert status == 500
# 3 назвать питомца спец символами
def test_add_new_pet_with_invalid_name(name='!"№;%:?*', animal_type='Кот', age='1', pet_photo='images/cat1.jpg'):
    """A name made of special characters should be rejected (HTTP 400).

    Bug fix: 'images\\cat1.jpg' contained the invalid escape '\\c' and a
    Windows-only separator; replaced with a portable forward slash.
    """
    pet_photo = os.path.join(os.path.dirname(__file__), pet_photo)
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    status, result = pf.add_new_pet(auth_key, name, animal_type, age, pet_photo)
    assert status == 400
# 4 возраст питомца буквами
def test_add_new_pet_with_invalid_age(name='Кот', animal_type='Кот', age='один год', pet_photo='images/cat1.jpg'):
    """A non-numeric age string should be rejected (HTTP 400).

    Bug fix: 'images\\cat1.jpg' contained the invalid escape '\\c' and a
    Windows-only separator; replaced with a portable forward slash.
    """
    pet_photo = os.path.join(os.path.dirname(__file__), pet_photo)
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    status, result = pf.add_new_pet(auth_key, name, animal_type, age, pet_photo)
    assert status == 400
# 5 попробоват изменить питомца не из списка my_pets
# 5 попробовать изменить питомца не из списка my_pets
def test_update_pet_info_not_from_my_pets(filter='', name='Пёс', animal_type='Кошка', age=5):
    """Try to update a pet that does not belong to the user; expect 400.

    Bug fixes:
    - Renamed from ``test_successful_update_self_pet_info``: the original
      duplicated the name of an earlier test in this module, so pytest
      silently collected only this one and never ran the earlier test.
    - ``filter[0]['id']`` indexed the *filter string* itself (IndexError
      for the default ``''``); the intent -- per the comment -- is to pick
      a pet from the global list, i.e. ``result['pets'][0]['id']``.
    """
    _, auth_key = pf.get_api_key(valid_email, valid_password)
    status, result = pf.get_list_of_pets(auth_key, filter)
    if len(result['pets']) > 0:
        status, result = pf.update_pet_info(auth_key, result['pets'][0]['id'], name, animal_type, age)
        assert status == 400
    else:
        # fail loudly instead of silently passing when the list is empty
        raise Exception("There are no pets to update")
|
{"/tests/PetFriendsTest.py": ["/settings.py"]}
|
32,043
|
fxia22/lcp-physics
|
refs/heads/master
|
/lcp_physics/physics/__init__.py
|
"""Public API of ``lcp_physics.physics``.

Bug fix: ``__all__`` listed ``'engine'``, but the submodule imported above
is ``engines`` (see ``lcp_physics/physics/engines.py``); the wrong name
made ``from lcp_physics.physics import *`` raise AttributeError.
"""
import lcp_physics.physics.bodies
import lcp_physics.physics.collisions
import lcp_physics.physics.constraints
import lcp_physics.physics.engines
import lcp_physics.physics.forces
import lcp_physics.physics.utils
import lcp_physics.physics.world

from .utils import Params
from .bodies import Body, Circle, Rect, Hull
from .world import World, run_world
from .forces import gravity, ExternalForce
from .constraints import Joint, FixedJoint, XConstraint, YConstraint, RotConstraint, TotalConstraint

__all__ = ['bodies', 'collisions', 'constraints', 'engines', 'forces', 'utils',
           'world', 'Params', 'Body', 'Circle', 'Rect', 'Hull', 'World', 'run_world',
           'gravity', 'ExternalForce', 'Joint', 'FixedJoint', 'XConstraint',
           'YConstraint', 'RotConstraint', 'TotalConstraint']
|
{"/lcp_physics/physics/__init__.py": ["/lcp_physics/physics/engines.py", "/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/tests/test_batch.py": ["/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/lcp_physics/physics/engines.py": ["/lcp_physics/lcp/lcp.py"], "/lcp_physics/physics/world.py": ["/lcp_physics/physics/__init__.py"]}
|
32,044
|
fxia22/lcp-physics
|
refs/heads/master
|
/lcp_physics/lcp/lcp.py
|
from enum import Enum
import torch
from torch.autograd import Function
from .solvers import batch_pdipm as pdipm_b
from .util import bger, expandParam, extract_nBatch
class LCPSolvers(Enum):
    """Enumeration of the available LCP solver backends."""
    # Batched primal-dual interior-point method (currently the only one).
    PDIPM_BATCHED = 1
class LCPFunction(Function):
    """Differentiable solver for a batch of mixed Linear Complementarity
    Problems (LCPs), used by the physics engine for contact dynamics.

    NOTE(review): this is the *legacy* stateful ``torch.autograd.Function``
    API (instance ``__init__``, non-static forward/backward); modern
    PyTorch requires static methods with a ctx argument -- confirm the
    pinned torch version before upgrading.
    """

    def __init__(self, eps=1e-12, verbose=-1, notImprovedLim=3,
                 maxIter=10, solver=LCPSolvers.PDIPM_BATCHED):
        # eps: convergence tolerance for the interior-point solver
        # verbose: verbosity level passed through to pdipm_b (-1 = silent)
        # notImprovedLim: solver patience before declaring stagnation
        # maxIter: iteration budget for the interior-point loop
        # solver: backend selection from LCPSolvers
        super().__init__()
        self.eps = eps
        self.verbose = verbose
        self.notImprovedLim = notImprovedLim
        self.maxIter = maxIter
        self.solver = solver
        # KKT factorization caches, populated in forward()
        self.Q_LU = self.S_LU = self.R = None

    def forward(self, Q_, p_, G_, h_, A_, b_, F_):
        """Solve a batch of mixed LCPs.

        Inputs are expanded to a common batch size; Q is the quadratic
        term, p the linear term, (G, h, F) the inequality/complementarity
        block, and (A, b) the equality constraints. Returns the primal
        solution ``zhats``; dual variables (nus, lams) and slacks are kept
        on ``self`` for backward().
        """
        nBatch = extract_nBatch(Q_, p_, G_, h_, A_, b_)
        Q, _ = expandParam(Q_, nBatch, 3)
        p, _ = expandParam(p_, nBatch, 2)
        G, _ = expandParam(G_, nBatch, 3)
        h, _ = expandParam(h_, nBatch, 2)
        A, _ = expandParam(A_, nBatch, 3)
        b, _ = expandParam(b_, nBatch, 2)
        F, _ = expandParam(F_, nBatch, 3)

        _, nineq, nz = G.size()
        neq = A.size(1) if A.ndimension() > 0 else 0
        # the problem must be constrained in some way
        assert(neq > 0 or nineq > 0)
        self.neq, self.nineq, self.nz = neq, nineq, nz

        if self.solver == LCPSolvers.PDIPM_BATCHED:
            # pre-factor the KKT system once, then run the batched solver
            self.Q_LU, self.S_LU, self.R = pdipm_b.pre_factor_kkt(Q, G, F, A)
            zhats, self.nus, self.lams, self.slacks = pdipm_b.forward(
                Q, p, G, h, A, b, F, self.Q_LU, self.S_LU, self.R,
                self.eps, self.verbose, self.notImprovedLim,
                self.maxIter, solver=pdipm_b.KKTSolvers.LU_PARTIAL)
        else:
            assert False

        # self.verify_lcp(zhats, Q, G, A, F, p, h)
        self.save_for_backward(zhats, Q_, p_, G_, h_, A_, b_, F_)
        return zhats

    def backward(self, dl_dzhat):
        """Propagate gradients through the LCP solution via the implicit
        function theorem on the KKT conditions (differentiable-QP style).

        Returns gradients w.r.t. (Q, p, G, h, A, b, F) in that order.
        """
        zhats, Q, p, G, h, A, b, F = self.saved_tensors
        nBatch = extract_nBatch(Q, p, G, h, A, b)
        Q, Q_e = expandParam(Q, nBatch, 3)
        p, p_e = expandParam(p, nBatch, 2)
        G, G_e = expandParam(G, nBatch, 3)
        h, h_e = expandParam(h, nBatch, 2)
        A, A_e = expandParam(A, nBatch, 3)
        b, b_e = expandParam(b, nBatch, 2)
        F, F_e = expandParam(F, nBatch, 3)

        neq, nineq, nz = self.neq, self.nineq, self.nz

        # D = torch.diag((self.lams / self.slacks).squeeze(0)).unsqueeze(0)
        d = self.lams / self.slacks
        pdipm_b.factor_kkt(self.S_LU, self.R, d)
        # solve the (already factored) KKT system with the incoming grad
        dx, _, dlam, dnu = pdipm_b.solve_kkt(self.Q_LU, d, G, A, self.S_LU,
                                             dl_dzhat, torch.zeros(nBatch, nineq).type_as(G),
                                             torch.zeros(nBatch, nineq).type_as(G),
                                             torch.zeros(nBatch, neq).type_as(G))

        dps = dx
        dGs = (bger(dlam, zhats) + bger(self.lams, dx))
        if G_e:
            # input was unbatched: average the gradient over the batch
            dGs = dGs.mean(0).squeeze(0)
        dFs = (bger(dlam, self.lams) + bger(self.lams, dlam))
        # dFs = torch.ones(dFs.size()).double()
        if F_e:
            assert False  # TODO
        dhs = -dlam
        if h_e:
            dhs = dhs.mean(0).squeeze(0)
        if neq > 0:
            dAs = bger(dnu, zhats) + bger(self.nus, dx)
            dbs = -dnu
            if A_e:
                dAs = dAs.mean(0).squeeze(0)
            if b_e:
                dbs = dbs.mean(0).squeeze(0)
        else:
            dAs, dbs = None, None
        # symmetrize the Q gradient
        dQs = 0.5 * (bger(dx, zhats) + bger(zhats, dx))
        if Q_e:
            dQs = dQs.mean(0).squeeze(0)

        grads = (dQs, dps, dGs, dhs, dAs, dbs, dFs)
        return grads

    def verify_lcp(self, zhats, Q, G, A, F, p, h):
        """Debug helper: check complementarity and stationarity of the
        solution to within a fixed tolerance; prints on violation and
        returns True iff all conditions hold.
        """
        epsilon = 1e-7
        # complementarity: s >= 0, lam >= 0, s . lam == 0
        c1 = (self.slacks >= 0).all()
        c2 = (self.lams >= 0).all()
        c3 = (torch.abs(self.slacks * self.lams) < epsilon).all()
        conds = c1 and c2 and c3
        # stationarity / primal feasibility residuals
        l1 = Q.matmul(zhats.unsqueeze(2)) + G.transpose(1, 2).matmul(self.lams.unsqueeze(2)) \
            + p.unsqueeze(2)
        if A.dim() > 0:
            l1 += A.transpose(1, 2).matmul(self.nus.unsqueeze(2))
        # XXX Flipped signs for G*z. Why?
        l2 = -G.matmul(zhats.unsqueeze(2)) + F.matmul(self.lams.unsqueeze(2)) \
            + h.unsqueeze(2) - self.slacks.unsqueeze(2)
        l3 = A.matmul(zhats.unsqueeze(2)) if A.dim() > 0 else torch.Tensor([0])
        lcp = (torch.abs(l1) < epsilon).all() and (torch.abs(l2) < epsilon).all() \
            and (torch.abs(l3) < epsilon).all()
        if not conds:
            print('Complementarity conditions have imprecise solution.')
        if not lcp:
            print('LCP has imprecise solution.')
        return conds and lcp
|
{"/lcp_physics/physics/__init__.py": ["/lcp_physics/physics/engines.py", "/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/tests/test_batch.py": ["/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/lcp_physics/physics/engines.py": ["/lcp_physics/lcp/lcp.py"], "/lcp_physics/physics/world.py": ["/lcp_physics/physics/__init__.py"]}
|
32,045
|
fxia22/lcp-physics
|
refs/heads/master
|
/tests/test_batch.py
|
import unittest
from lcp_physics.physics.bodies import Circle, Rect
from lcp_physics.physics.constraints import TotalConstraint
from lcp_physics.physics.forces import ExternalForce, gravity
from lcp_physics.physics.utils import Params
from lcp_physics.physics.world import BatchWorld, run_world
TIME = 20
DT = Params.DEFAULT_DT
class TestBatch(unittest.TestCase):
    """Smoke test for the (currently disabled) BatchWorld simulation.

    The batched scenario is kept as commented-out scaffolding below so it
    can be revived once BatchWorld is supported again.
    """

    def setUp(self):
        """Default to headless runs; uncomment the pygame block to render."""
        # Run without displaying
        self.screen = None
        # Run with display
        # import pygame
        # pygame.init()
        # width, height = 1000, 600
        # self.screen = pygame.display.set_mode((width, height), pygame.DOUBLEBUF)
        # pygame.display.set_caption('2D Engine')
        # self.screen.set_alpha(None)

    def testBatch(self):
        """Placeholder: the three-world batch scenario is disabled."""
        pass  # No batch world for now
        # # World 1
        # bodies1 = []
        # joints1 = []
        #
        # r = Rect([500, 300], [900, 10])
        # bodies1.append(r)
        # joints1.append(TotalConstraint(r))
        #
        # c = Circle([100, 100], 30)
        # bodies1.append(c)
        # c.add_force(ExternalForce(gravity, multiplier=100))
        #
        # # World 2
        # bodies2 = []
        # joints2 = []
        #
        # r = Rect([500, 300], [900, 10])
        # bodies2.append(r)
        # joints2.append(TotalConstraint(r))
        #
        # c = Circle([100, 100], 30)
        # bodies2.append(c)
        # c.add_force(ExternalForce(gravity, multiplier=100))
        #
        # # World 3
        # bodies3 = []
        # joints3 = []
        #
        # r = Rect([500, 300], [900, 10])
        # bodies3.append(r)
        # joints3.append(TotalConstraint(r))
        #
        # c = Circle([25, 200], 30)
        # bodies3.append(c)
        # c.add_force(ExternalForce(gravity, multiplier=100))
        #
        # world = BatchWorld([bodies1, bodies2, bodies3], [joints1, joints2, joints3],
        #                    dt=DT)
        # run_world(world, run_time=TIME, screen=self.screen)
if __name__ == '__main__':
unittest.main()
|
{"/lcp_physics/physics/__init__.py": ["/lcp_physics/physics/engines.py", "/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/tests/test_batch.py": ["/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/lcp_physics/physics/engines.py": ["/lcp_physics/lcp/lcp.py"], "/lcp_physics/physics/world.py": ["/lcp_physics/physics/__init__.py"]}
|
32,046
|
fxia22/lcp-physics
|
refs/heads/master
|
/lcp_physics/physics/forces.py
|
from torch.autograd import Variable
from .utils import Params, wrap_variable
Tensor = Params.TENSOR_TYPE
def gravity(t):
    """Constant downward force direction, independent of time ``t``."""
    return ExternalForce.DOWN
def vert_impulse(t):
    """Downward force during the first 0.1s of simulation, zero after."""
    return ExternalForce.DOWN if t < 0.1 else ExternalForce.ZEROS
def hor_impulse(t):
    """Rightward force during the first 0.1s of simulation, zero after."""
    return ExternalForce.RIGHT if t < 0.1 else ExternalForce.ZEROS
def rot_impulse(t):
    """Rotational force during the first 0.1s of simulation, zero after."""
    return ExternalForce.ROT if t < 0.1 else ExternalForce.ZEROS
class ExternalForce:
    """A time-dependent generalized force applied to a body.

    NOTE(review): given the constants below, the 3-vector layout appears
    to be [rotational, horizontal, vertical] -- confirm against the body
    and engine conventions before relying on it.
    """
    # Pre-store basic forces
    DOWN = Variable(Tensor([0, 0, 1]))
    RIGHT = Variable(Tensor([0, 1, 0]))
    ROT = Variable(Tensor([1, 0, 0]))
    ZEROS = Variable(Tensor([0, 0, 0]))

    def __init__(self, force_func=gravity, multiplier=100.):
        # self.force(t) is what the engine queries each step: the base
        # force function scaled by the (autograd-wrapped) multiplier.
        self.multiplier = wrap_variable(multiplier)
        self.force = lambda t: force_func(t) * self.multiplier
|
{"/lcp_physics/physics/__init__.py": ["/lcp_physics/physics/engines.py", "/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/tests/test_batch.py": ["/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/lcp_physics/physics/engines.py": ["/lcp_physics/lcp/lcp.py"], "/lcp_physics/physics/world.py": ["/lcp_physics/physics/__init__.py"]}
|
32,047
|
fxia22/lcp-physics
|
refs/heads/master
|
/lcp_physics/physics/engines.py
|
"""
Author: Filipe de Avila Belbute Peres
Based on: M. B. Cline, Rigid body simulation with contact and constraints, 2002
"""
import torch
from torch.autograd import Variable
from scipy.sparse.csc import csc_matrix
from scipy.sparse.linalg.dsolve.linsolve import splu, spsolve
import numpy as np
from .utils import Params, binverse
from lcp_physics.lcp.lcp import LCPFunction
Tensor = Params.TENSOR_TYPE
class Engine:
    """Abstract interface for dynamics solvers: given a world state and a
    time step, compute the post-step generalized velocities."""

    def solve_dynamics(self, world, dt, stabilization=False):
        """Advance a single world by ``dt``. Subclasses must override."""
        raise NotImplementedError

    def batch_solve_dynamics(self, world, dt, stabilization=False):
        """Advance a batch of worlds by ``dt``. Subclasses must override."""
        raise NotImplementedError
class PdipmEngine(Engine):
    """Dynamics engine that solves contact dynamics as a mixed LCP with a
    batched primal-dual interior-point solver (see LCPFunction)."""

    def __init__(self):
        # lcp_solver is the Function *class*; an instance is created per solve
        self.lcp_solver = LCPFunction
        # cached inverse of the constraint-augmented mass matrix, valid
        # only while world.static_inverse holds
        self.cached_inverse = None

    def solve_dynamics(self, world, dt, stabilization=False):
        """Compute new generalized velocities for ``world`` after ``dt``.

        Without contacts this is a linear solve of the KKT system
        (Cline Eq. 2.41); with contacts it assembles and solves a mixed
        LCP (Cline 2.7.2). Optionally applies post-stabilization.
        """
        t = world.t
        # Get Jacobians
        Je = world.Je()
        Jc = None
        neq = Je.size(0) if Je.ndimension() > 0 else 0
        f = world.apply_forces(t)
        # momentum-like right-hand side: M v + dt * f
        u = torch.matmul(world.M(), world.get_v()) + dt * f
        if neq > 0:
            u = torch.cat([u, Variable(Tensor(neq).zero_())])
        if not world.collisions:
            # No contact constraints, no need to solve LCP
            if neq > 0:
                # KKT matrix [[M, -Je^T], [Je, 0]]
                P = torch.cat([torch.cat([world.M(), -Je.t()], dim=1),
                               torch.cat([Je, Variable(Tensor(neq, neq).zero_())],
                                         dim=1)])
            else:
                P = world.M()
            if self.cached_inverse is None:
                # try:
                inv = torch.inverse(P)
                if world.static_inverse:
                    # safe to reuse while joints are static
                    self.cached_inverse = inv
                # except RuntimeError:
                #     # XXX
                #     print('\nRegularizing singular matrix.\n')
                #     inv = torch.inverse(P + Variable(torch.eye(P.size(0),
                #         P.size(1)).type_as(P.data) * 1e-10))
            else:
                inv = self.cached_inverse
            x = torch.matmul(inv, u)  # Eq. 2.41
        else:
            # Solve Mixed LCP (Kline 2.7.2)
            # TODO Organize
            Jc = world.Jc()
            # relative normal velocities scaled by restitution
            v = torch.matmul(Jc, world.get_v()) * world.restitutions()
            # T* tensors are the unsqueezed (batch of one) versions
            TM = world.M().unsqueeze(0)
            if neq > 0:
                TJe = Je.unsqueeze(0)
                b = Variable(Tensor(Je.size(0)).unsqueeze(0).zero_())
            else:
                TJe = Variable(Tensor())
                b = Variable(None)
            TJc = Jc.unsqueeze(0)
            Tu = u[:world.M().size(0)].unsqueeze(0)
            Tv = v.unsqueeze(0)
            E = world.E()
            mu = world.mu()
            Jf = world.Jf()
            TJf = Jf.unsqueeze(0)
            TE = E.unsqueeze(0)
            Tmu = mu.unsqueeze(0)
            # inequality block stacks contact, friction and cone rows
            G = torch.cat([TJc, TJf,
                           Variable(Tensor(TJf.size(0), Tmu.size(1), TJf.size(2))
                                    .zero_())], dim=1)
            # complementarity coupling matrix (friction cone structure)
            F = Variable(Tensor(G.size(1), G.size(1)).zero_().unsqueeze(0))
            F[:, TJc.size(1):-TE.size(2), -TE.size(2):] = TE
            F[:, -Tmu.size(1):, :Tmu.size(2)] = Tmu
            F[:, -Tmu.size(1):, Tmu.size(2):Tmu.size(2) + TE.size(1)] = \
                -TE.transpose(1, 2)
            h = torch.cat([Tv,
                           Variable(Tensor(Tv.size(0), TJf.size(1) + Tmu.size(1))
                                    .zero_())], 1)
            # adjust precision depending on difficulty of step, with maxIter in [3, 20]
            # measured by number of iterations performed on current step (world.dt / dt)
            max_iter = max(int(20 / (world.dt / dt)), 3)
            x = -self.lcp_solver(maxIter=max_iter, verbose=-1)(TM, Tu, G, h, TJe, b, F)
        # first vec_len * n_bodies entries of x are the new velocities
        new_v = x[:world.vec_len * len(world.bodies)].squeeze(0)

        # Post-stabilization
        if stabilization:
            ge = torch.matmul(Je, new_v)
            gc = None
            if Jc is not None:
                gc = torch.matmul(Jc, new_v) + torch.matmul(Jc, new_v) * -world.restitutions()
            dp = self.post_stabilization(world.M(), Je, Jc, ge, gc)
            new_v = (new_v - dp).squeeze(0)
        return new_v

    def post_stabilization(self, M, Je, Jc, ge, gc):
        """Compute a velocity correction ``dp`` that cancels constraint
        drift (ge) and, when contacts are present, contact drift (gc)."""
        u = torch.cat([Variable(Tensor(Je.size(1)).zero_()), ge])
        if Jc is None:
            # equality constraints only: plain KKT solve
            neq = Je.size(0) if Je.ndimension() > 0 else 0
            if neq > 0:
                P = torch.cat([torch.cat([M, -Je.t()], dim=1),
                               torch.cat([Je, Variable(Tensor(neq, neq).zero_())],
                                         dim=1)])
            else:
                P = M
            if self.cached_inverse is None:
                inv = torch.inverse(P)
            else:
                inv = self.cached_inverse
            # try:
            x = torch.matmul(inv, u)
            # except RuntimeError:  # XXX
            #     print('\nRegularizing singular matrix in stabilization.\n')
            #     x = torch.matmul(torch.inverse(P + Variable(torch.eye(P.size(0), P.size(1)).type_as(P.data) * 1e-10)), u)
        else:
            # contacts present: solve the stabilization LCP (frictionless F=0)
            v = gc
            TM = M.unsqueeze(0)
            TJe = Je.unsqueeze(0)
            TJc = Jc.unsqueeze(0)
            Th = u[:M.size(0)].unsqueeze(0)
            Tb = u[M.size(0):].unsqueeze(0)
            Tv = v.unsqueeze(0)
            F = Variable(Tensor(TJc.size(1), TJc.size(1)).zero_().unsqueeze(0))
            x = self.lcp_solver()(TM, Th, TJc, Tv, TJe, Tb, F)
        # x = np.asarray(x).ravel()
        dp = x[:M.size(0)]
        return dp

    def batch_solve_dynamics(self, world, dt, stabilization=False):
        """Batched variant of solve_dynamics for a BatchWorld: worlds are
        grouped by number of active collisions and solved per group.

        NOTE(review): only groups with 0 or exactly 1 collision are
        handled (see the range(1, 2) loop / TODO below).
        """
        t = world.t
        f = world.apply_forces(t)
        u_ = torch.bmm(world.M(), world.get_v().unsqueeze(2)).squeeze(2) + dt * f
        # result buffer; rows are filled per collision-count group below
        x = Variable(world.get_v().data.new(world.get_v().size()).zero_())

        colls_idx = world.has_n_collisions(0)
        if colls_idx.any():
            x_idx = colls_idx.unsqueeze(1).expand(x.size(0), x.size(1))
            M = world.M(num_colls=0)
            batch_size = M.size(0)
            Je = world.Je(num_colls=0)
            neq = Je.size(1) if Je.dim() > 0 else 0
            u = u_[colls_idx.unsqueeze(1).expand(colls_idx.size(0), u_.size(1))].view(torch.sum(colls_idx), -1)
            # No contact constraints, no need to solve LCP
            A = M
            if neq > 0:
                u = torch.cat([u, Variable(Tensor(batch_size, neq).zero_())], 1)
                A = torch.cat([torch.cat([M, -Je.transpose(1, 2)], dim=2),
                               torch.cat([Je, Variable(Tensor(batch_size, neq, neq).zero_())],
                                         dim=2)], dim=1)
                try:
                    x[x_idx] = torch.bmm(binverse(A), u.unsqueeze(2)).squeeze(2)  # Eq. 2.41
                except RuntimeError:  # XXX
                    print('\nRegularizing singular matrix.\n')
                    # XXX Use expand below?
                    reg = Variable(torch.eye(A.size(1), A.size(2)).type_as(A.data) * 1e-7).repeat(A.size(0), 1, 1)
                    x[x_idx] = torch.bmm(binverse(A + reg), u.unsqueeze(2)).squeeze(2)
            else:
                # XXX Works only for immutable M matrices (true for circles and squares)
                x[x_idx] = torch.bmm(world.invM(num_colls=0), u.unsqueeze(2)).squeeze(2)

        # Solve Mixed LCP (Kline 2.7.2)
        # TODO Organize
        for i in range(1, 2):  # TODO Number of possible collisions
            colls_idx = world.has_n_collisions(i)
            if not colls_idx.any():
                continue
            x_idx = colls_idx.unsqueeze(1).expand(x.size(0), x.size(1))
            M = world.M(num_colls=i)
            batch_size = M.size(0)
            u = u_[colls_idx.unsqueeze(1).expand(colls_idx.size(0), u_.size(1))].view(torch.sum(colls_idx), -1)
            Je = world.Je(num_colls=i)
            neq = Je.size(1) if Je.dim() > 0 else 0
            Jc = world.Jc(num_colls=i)
            v = torch.bmm(Jc, (world.get_v(num_colls=i) * world.restitutions(num_colls=i)).unsqueeze(2)).squeeze(2)
            if neq > 0:
                b = Variable(Tensor(batch_size, Je.size(1)).zero_())
            else:
                Je = Variable(Tensor())
                b = Variable(None)
            E = world.E(num_colls=i)
            mu = world.mu(num_colls=i)
            Jf = world.Jf(num_colls=i)
            # same LCP assembly as the single-world path, batched
            G = torch.cat([Jc, Jf,
                           Variable(Tensor(Jf.size(0), mu.size(1), Jf.size(2))
                                    .zero_())], dim=1)
            F = Variable(Tensor(G.size(0), G.size(1), G.size(1)).zero_())
            F[:, Jc.size(1):-E.size(2), -E.size(2):] = E
            F[:, -mu.size(1):, :mu.size(2)] = mu
            F[:, -mu.size(1):, mu.size(2):mu.size(2) + E.size(1)] = \
                -E.transpose(1, 2)
            h = torch.cat([v,
                           Variable(Tensor(v.size(0), Jf.size(1) + mu.size(1))
                                    .zero_())], 1)
            # adjust precision depending on difficulty of step, with maxIter in [3, 20]
            # measured by number of iterations performed on current step (world.dt / dt)
            max_iter = max(int(20 / (world.dt / dt)), 3)
            x[x_idx] = -self.lcp_solver(maxIter=max_iter, verbose=-1)(M, u, G, h, Je, b, F)
        new_v = x[:, :world.vec_len * len(world.worlds[0].bodies)]

        # Post-stabilization
        if stabilization:
            raise NotImplementedError
        return new_v
|
{"/lcp_physics/physics/__init__.py": ["/lcp_physics/physics/engines.py", "/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/tests/test_batch.py": ["/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/lcp_physics/physics/engines.py": ["/lcp_physics/lcp/lcp.py"], "/lcp_physics/physics/world.py": ["/lcp_physics/physics/__init__.py"]}
|
32,048
|
fxia22/lcp-physics
|
refs/heads/master
|
/lcp_physics/physics/world.py
|
import time
from functools import lru_cache
import ode
import torch
from torch.autograd import Variable
from . import engines as engines_module
from . import collisions as collisions_module
from .utils import Indices, Params, cross_2d, get_instance
X, Y = Indices.X, Indices.Y
DIM = Params.DIM
TOL = 1e-12
Tensor = Params.TENSOR_TYPE
class World:
    """A 2D physics world: rigid bodies, joints/constraints, and a dynamics
    engine, stepped with adaptive sub-stepping to avoid interpenetration.

    NOTE(review): ``constraints=[]`` is a mutable default argument; it is
    only iterated (never mutated) here, so it is benign, but replacing it
    with ``None``/tuple would be safer against future edits.
    """

    def __init__(self, bodies, constraints=[], dt=Params.DEFAULT_DT, engine=Params.DEFAULT_ENGINE,
                 collision_callback=Params.DEFAULT_COLLISION, eps=Params.DEFAULT_EPSILON,
                 fric_dirs=Params.DEFAULT_FRIC_DIRS, post_stab=Params.POST_STABILIZATION):
        self.collisions_debug = None  # XXX

        # Load classes from string name defined in utils
        self.engine = get_instance(engines_module, engine)
        self.collision_callback = get_instance(collisions_module, collision_callback)

        self.t = 0
        self.dt = dt
        self.eps = eps
        self.fric_dirs = fric_dirs
        self.post_stab = post_stab

        self.bodies = bodies
        # length of one body's generalized velocity vector
        self.vec_len = len(self.bodies[0].v)

        # register body geometries with the ODE collision space,
        # tagging each geom with its body's index
        self.space = ode.HashSpace()
        for i, b in enumerate(bodies):
            b.geom.body = i
            self.space.add(b.geom)

        self.static_inverse = True
        self.num_constraints = 0
        self.joints = []
        for j in constraints:
            b1, b2 = j.body1, j.body2
            i1 = bodies.index(b1)
            i2 = bodies.index(b2) if b2 else None
            self.joints.append((j, i1, i2))
            self.num_constraints += j.num_constraints
            if not j.static:
                # a non-static joint invalidates inverse caching in the engine
                self.static_inverse = False

        # block-diagonal mass matrix assembled from the per-body M blocks
        M_size = bodies[0].M.size(0)
        self._M = Variable(Tensor(M_size * len(bodies), M_size * len(bodies)).zero_())
        # TODO Better way for diagonal block matrix?
        for i, b in enumerate(bodies):
            self._M[i * M_size:(i + 1) * M_size, i * M_size:(i + 1) * M_size] = b.M

        self.set_v(torch.cat([b.v for b in bodies]))

        self.collisions = None
        self.find_collisions()

    def step(self, fixed_dt=False):
        """Advance the world by self.dt; with fixed_dt, keep sub-stepping
        until exactly self.dt of simulated time has elapsed."""
        dt = self.dt
        if fixed_dt:
            end_t = self.t + self.dt
            while self.t < end_t:
                dt = end_t - self.t
                self.step_dt(dt)
        else:
            self.step_dt(dt)

    # @profile
    def step_dt(self, dt):
        """Attempt one step of size dt; on interpenetration, roll back the
        state and retry with half the step size until the step is clean."""
        start_v = self.v
        start_p = torch.cat([b.p for b in self.bodies])
        start_rot_joints = [(j[0].rot1, j[0].rot2) for j in self.joints]
        start_collisions = self.collisions
        # NOTE(review): c[0][3] is read as the penetration depth of a
        # collision record -- confirm against the collision callback.
        assert all([c[0][3].data[0] <= TOL for c in self.collisions]), \
            'Interpenetration at beginning of step'
        while True:
            new_v = self.engine.solve_dynamics(self, dt, self.post_stab)
            self.set_v(new_v)
            # try step with current dt
            for body in self.bodies:
                body.move(dt)
            for joint in self.joints:
                joint[0].move(dt)
            self.find_collisions()
            if all([c[0][3].data[0] <= TOL for c in self.collisions]):
                break
            else:
                dt /= 2
                # reset state to beginning of step
                # XXX Avoid clones?
                self.set_v(start_v.clone())
                self.set_p(start_p.clone())
                for j, c in zip(self.joints, start_rot_joints):
                    # XXX Clone necessary?
                    j[0].rot1 = c[0].clone()
                    j[0].rot2 = c[1].clone() if j[0].rot2 is not None else None
                    j[0].update_pos()
                self.collisions = start_collisions
        self.t += dt

    def get_v(self):
        """Return the concatenated generalized velocities of all bodies."""
        return self.v

    def set_v(self, new_v):
        """Set the world velocity vector and slice it back into each body."""
        self.v = new_v
        for i, b in enumerate(self.bodies):
            b.v = self.v[i * len(b.v):(i + 1) * len(b.v)]

    def set_p(self, new_p):
        """Slice the concatenated position vector back into each body."""
        for i, b in enumerate(self.bodies):
            b.set_p(new_p[i * self.vec_len:(i + 1) * self.vec_len])

    def apply_forces(self, t):
        """Concatenate the external forces of all bodies at time t."""
        return torch.cat([b.apply_forces(t) for b in self.bodies])

    def find_collisions(self):
        """Repopulate self.collisions via the ODE broadphase + callback."""
        self.collisions = []
        # ODE collision detection
        self.space.collide([self], self.collision_callback)

    def restitutions(self):
        """Per-collision restitution: the mean of the two bodies' values."""
        restitutions = Variable(Tensor(len(self.collisions)))
        for i, c in enumerate(self.collisions):
            r1 = self.bodies[c[1]].restitution
            r2 = self.bodies[c[2]].restitution
            restitutions[i] = (r1 + r2) / 2
            # restitutions[i] = math.sqrt(r1 * r2)
        return restitutions

    def M(self):
        """Block-diagonal mass matrix of the whole world."""
        return self._M

    def Je(self):
        """Equality (joint) constraint Jacobian, one row block per joint."""
        Je = Variable(Tensor(self.num_constraints,
                             self.vec_len * len(self.bodies)).zero_())
        row = 0
        for joint in self.joints:
            J1, J2 = joint[0].J()
            i1 = joint[1]
            i2 = joint[2]
            Je[row:row + J1.size(0),
               i1 * self.vec_len:(i1 + 1) * self.vec_len] = J1
            if J2 is not None:
                Je[row:row + J2.size(0),
                   i2 * self.vec_len:(i2 + 1) * self.vec_len] = J2
            row += J1.size(0)
        return Je

    def Jc(self):
        """Contact-normal Jacobian, one row per collision."""
        Jc = Variable(Tensor(len(self.collisions), self.vec_len * len(self.bodies)).zero_())
        for i, collision in enumerate(self.collisions):
            c = collision[0]  # c = (normal, collision_pt_1, collision_pt_2)
            i1 = collision[1]
            i2 = collision[2]
            J1 = torch.cat([cross_2d(c[1], c[0]).unsqueeze(1),
                            c[0].unsqueeze(0)], dim=1)
            # equal and opposite row for the second body
            J2 = -torch.cat([cross_2d(c[2], c[0]).unsqueeze(1),
                             c[0].unsqueeze(0)], dim=1)
            Jc[i, i1 * self.vec_len:(i1 + 1) * self.vec_len] = J1
            Jc[i, i2 * self.vec_len:(i2 + 1) * self.vec_len] = J2
        return Jc

    def Jf(self):
        """Friction Jacobian: fric_dirs tangential rows per collision."""
        Jf = Variable(Tensor(len(self.collisions) * self.fric_dirs,
                             self.vec_len * len(self.bodies)).zero_())
        for i, collision in enumerate(self.collisions):
            c = collision[0]  # c = (normal, collision_pt_1, collision_pt_2)
            # find orthogonal vector in 2D
            dir1 = torch.cross(torch.cat([c[0], Variable(Tensor(1).zero_())]),
                               Variable(Tensor([0, 0, 1])))[:DIM]
            dir2 = -dir1
            i1 = collision[1]  # body 1 index
            i2 = collision[2]  # body 2 index
            J1 = torch.cat([
                torch.cat([cross_2d(c[1], dir1).unsqueeze(1),
                           dir1.unsqueeze(0)], dim=1),
                torch.cat([cross_2d(c[1], dir2).unsqueeze(1),
                           dir2.unsqueeze(0)], dim=1),
            ], dim=0)
            J2 = torch.cat([
                torch.cat([cross_2d(c[2], dir1).unsqueeze(1),
                           dir1.unsqueeze(0)], dim=1),
                torch.cat([cross_2d(c[2], dir2).unsqueeze(1),
                           dir2.unsqueeze(0)], dim=1),
            ], dim=0)
            Jf[i * self.fric_dirs:(i + 1) * self.fric_dirs,
               i1 * self.vec_len:(i1 + 1) * self.vec_len] = J1
            # opposite sign for the second body (J2 is built positive here)
            Jf[i * self.fric_dirs:(i + 1) * self.fric_dirs,
               i2 * self.vec_len:(i2 + 1) * self.vec_len] = -J2
        return Jf

    def mu(self):
        """Diagonal matrix of per-collision friction coefficients."""
        return self._memoized_mu(*[(c[1], c[2]) for c in self.collisions])

    # @lru_cache()
    def _memoized_mu(self, *collisions):
        # collisions is argument so that lru_cache works
        mu = Variable(Tensor(len(self.collisions)).zero_())
        for i, collision in enumerate(self.collisions):
            i1 = collision[1]
            i2 = collision[2]
            # geometric mean of the two bodies' friction coefficients
            mu[i] = torch.sqrt(self.bodies[i1].fric_coeff * self.bodies[i2].fric_coeff)
        return torch.diag(mu)

    def E(self):
        """Friction-cone selection matrix for the current collisions."""
        return self._memoized_E(len(self.collisions))

    # @lru_cache()
    def _memoized_E(self, num_collisions):
        # block of ones mapping each collision to its fric_dirs rows
        n = self.fric_dirs * num_collisions
        E = Tensor(n, num_collisions).zero_()
        for i in range(num_collisions):
            E[i * self.fric_dirs: (i + 1) * self.fric_dirs, i] += 1
        return Variable(E)

    def save_state(self):
        """Not implemented; intended snapshot of (p, v, t), see draft below."""
        raise NotImplementedError
        # p = torch.cat([Variable(b.p.data) for b in self.bodies])
        # state_dict = {'p': p, 'v': Variable(self.v.data), 't': self.t}
        # return state_dict

    def load_state(self, state_dict):
        """Not implemented; intended restore from save_state, see draft below."""
        raise NotImplementedError
        # self.set_p(state_dict['p'])
        # self.set_v(state_dict['v'])
        # self.t = state_dict['t']
        # self._M.detach_()
        # self.restitutions.detach_()
        # import inspect
        # for b in self.bodies:
        #     for m in inspect.getmembers(b, lambda x: isinstance(x, Variable)):
        #         m[1].detach_()
        # for j in self.joints:
        #     for m in inspect.getmembers(j, lambda x: isinstance(x, Variable)):
        #         m[1].detach_()
        # self.find_collisions()

    def reset_engine(self):
        """Not implemented; intended to recreate the engine instance."""
        raise NotImplementedError
        # self.engine = self.engine.__class__()
class BatchWorld:
    """Lock-step driver for a batch of independent ``World`` simulations.

    Holds one ``World`` per (bodies, constraints) pair and exposes batched
    accessors (velocities, mass matrices, Jacobians, ...) that stack the
    per-world quantities along a leading batch dimension for the engine's
    ``batch_solve_dynamics``.

    NOTE(review): ``constraints=[]`` is a shared mutable default; it is only
    indexed below and never mutated, so it is currently harmless.
    """
    def __init__(self, bodies, constraints=[], dt=Params.DEFAULT_DT, engine=Params.DEFAULT_ENGINE,
                 collision_callback=Params.DEFAULT_COLLISION, eps=Params.DEFAULT_EPSILON,
                 fric_dirs=Params.DEFAULT_FRIC_DIRS, post_stab=Params.POST_STABILIZATION):
        # bodies / constraints are per-world lists: entry i builds world i
        self.t = 0.
        self.dt = dt
        self.engine = get_instance(engines_module, engine)
        self.post_stab = post_stab
        self.worlds = []
        for i in range(len(bodies)):
            w = World(bodies[i], constraints[i], dt=dt, engine=engine,
                      collision_callback=collision_callback, eps=eps,
                      fric_dirs=fric_dirs,
                      post_stab=post_stab)
            self.worlds.append(w)
        self.vec_len = self.worlds[0].vec_len
        # cached concatenated velocity tensor; invalidated via v_changed
        self._v = None
        self.v_changed = True
        # NOTE(review): has_collisions() returns a bool here, not a list of
        # collisions - confirm downstream users expect that.
        self.collisions = self.has_collisions()
        self._restitutions = torch.cat([w.restitutions().unsqueeze(0) for w in self.worlds], dim=0)
    def step(self):
        """Advance every sub-world by one step.

        If any world ends the trial step interpenetrated, all worlds are
        rolled back to the start-of-step state and the step is retried with
        a halved dt until every world is penetration-free.
        """
        dt = self.dt
        start_vs = self.get_v()
        self.v_changed = True
        start_ps = torch.cat([torch.cat([b.p for b in w.bodies]).unsqueeze(0) for w in self.worlds], dim=0)
        start_rot_joints = [[(j[0].rot1, j[0].rot2) for j in w.joints] for w in self.worlds]
        start_collisions = [w.collisions for w in self.worlds]
        for w in self.worlds:
            assert all([c[0][3].data[0] <= 0 for c in w.collisions]), \
                'Interpenetration at beginning of step'
        while True:
            self.collisions = self.has_collisions()
            new_v = self.engine.batch_solve_dynamics(self, dt, self.post_stab)
            self.set_v(new_v)
            # try step with current dt
            done = []
            for w in self.worlds:
                for body in w.bodies:
                    body.move(dt)
                for joint in w.joints:
                    joint[0].move(dt)
                w.find_collisions()
                # penetration depth c[0][3] must stay non-positive
                done.append(all([c[0][3].data[0] <= 0 for c in w.collisions]))
            if all(done):
                break
            else:
                dt /= 2
                # reset state to beginning of step
                # XXX Avoid clones?
                self.set_v(start_vs.clone())
                self.set_p(start_ps.clone())
                for i, w in enumerate(self.worlds):
                    for j, c in zip(w.joints, start_rot_joints[i]):
                        # XXX Clone necessary?
                        j[0].rot1 = c[0].clone()
                        j[0].rot2 = c[1].clone() if j[0].rot2 is not None else None
                        j[0].update_pos()
                    w.collisions = start_collisions[i]
        self.t += dt
        for w in self.worlds:
            w.t += dt
    def get_v(self, num_colls=None):
        """Batched velocities; optionally only worlds with num_colls collisions."""
        if self.v_changed:
            self._v = torch.cat([w.v.unsqueeze(0) for w in self.worlds], dim=0)
            self.v_changed = False
        # TODO Optimize / organize
        if num_colls is not None:
            v = torch.cat([w.v.unsqueeze(0) for w in self.worlds if len(w.collisions) == num_colls], dim=0)
            return v
        return self._v
    def set_v(self, new_v):
        # scatter row i of the batched velocity back into world i
        for i, w in enumerate(self.worlds):
            w.set_v(new_v[i])
    def restitutions(self, num_colls=None):
        # TODO Organize / consolidate on other class
        if num_colls is not None:
            r = torch.cat([w.restitutions().unsqueeze(0) for w in self.worlds if len(w.collisions) == num_colls], dim=0)
            return r
        else:
            return self._restitutions
    def set_p(self, new_p):
        # scatter row i of the batched positions back into world i
        for i, w in enumerate(self.worlds):
            w.set_p(new_p[i])
    def has_collisions(self):
        """True if any sub-world currently has at least one collision."""
        return any([w.collisions for w in self.worlds])
    def has_n_collisions(self, num_colls):
        """Byte mask over worlds having exactly *num_colls* collisions."""
        ret = torch.ByteTensor([len(w.collisions) == num_colls for w in self.worlds])
        if self.worlds[0]._M.is_cuda:
            ret = ret.cuda()
        return ret
    def apply_forces(self, t):
        """Stack each world's concatenated body forces into a batch tensor."""
        forces = []
        for w in self.worlds:
            forces.append(torch.cat([b.apply_forces(t) for b in w.bodies]).unsqueeze(0))
        return torch.cat(forces, dim=0)
    def find_collisions(self):
        # NOTE(review): self.space / self.collision_callback are never set on
        # BatchWorld (only on World); calling this would raise AttributeError.
        self.collisions = []
        # ODE collision detection
        self.space.collide([self], self.collision_callback)
    # def gather_batch(self, func):
    #     gather = []
    #     for w in self.worlds:
    #         gather.append(func().unsqueeze(0))
    #     return torch.cat(gather, dim=0)
    def M(self, num_colls=None):
        """Batched mass matrices of the (optionally filtered) worlds."""
        Ms = []
        for w in self.worlds:
            if num_colls is None or len(w.collisions) == num_colls:
                Ms.append(w.M().unsqueeze(0))
        M = torch.cat(Ms, dim=0)
        return M
    def invM(self, num_colls=None):
        """Batched inverse mass matrices of the (optionally filtered) worlds."""
        invMs = []
        for w in self.worlds:
            if num_colls is None or len(w.collisions) == num_colls:
                invMs.append(w.invM().unsqueeze(0))
        invM = torch.cat(invMs, dim=0)
        return invM
    def Je(self, num_colls=None):
        """Batched equality (joint) Jacobians; empty Variable when joint-free."""
        jes = []
        for w in self.worlds:
            if num_colls is None or len(w.collisions) == num_colls:
                tmp = w.Je()
                tmp = tmp.unsqueeze(0) if tmp.dim() > 0 else tmp
                jes.append(tmp)
        # NOTE(review): jes[0] raises IndexError if the filter matches no world
        if jes[0].dim() > 0:
            Je = torch.cat(jes, dim=0)
        else:
            Je = Variable(Tensor([]))
        return Je
    def Jc(self, num_colls=None):
        """Batched contact Jacobians for worlds with *num_colls* collisions.

        NOTE(review): unlike the sibling accessors there is no
        ``num_colls is None`` branch here - confirm callers always pass it.
        """
        # max_collisions = max([len(w.collisions) for w in self.worlds])
        jcs = []
        for w in self.worlds:
            if len(w.collisions) == num_colls:
                jcs.append(w.Jc().unsqueeze(0))
            # else:
            #     jcs.append(Variable(w._M.data.new(1, max_collisions,
            #                                       self.vec_len * len(w.bodies)).zero_()))
        Jc = torch.cat(jcs, dim=0)
        return Jc
    def Jf(self, num_colls=None):
        """Batched friction Jacobians of the (optionally filtered) worlds."""
        # max_collisions = max([len(w.collisions) for w in self.worlds])
        jfs = []
        for w in self.worlds:
            if num_colls is None or len(w.collisions) == num_colls:
                jfs.append(w.Jf().unsqueeze(0))
            # else:
            #     jfs.append(Variable(w._M.data.new(1, 2 * max_collisions,
            #                                       self.vec_len * len(w.bodies)).zero_()))
        Jf = torch.cat(jfs, dim=0)
        return Jf
    def mu(self, num_colls=None):
        """Batched friction-coefficient matrices of the filtered worlds."""
        # max_collisions = max([len(w.collisions) for w in self.worlds])
        mus = []
        for w in self.worlds:
            if num_colls is None or len(w.collisions) == num_colls:
                mus.append(w.mu().unsqueeze(0))
            # else:
            #     mus.append(Variable(w._M.data.new(1, max_collisions,
            #                                       max_collisions).zero_()))
        mu = torch.cat(mus, dim=0)
        return mu
    def E(self, num_colls=None):
        """Batched friction-direction selector matrices of the filtered worlds."""
        # max_collisions = max([len(w.collisions) for w in self.worlds])
        Es = []
        for w in self.worlds:
            if num_colls is None or len(w.collisions) == num_colls:
                Es.append(w.E().unsqueeze(0))
            # else:
            #     Es.append(Variable(w._M.data.new(1, 2 * max_collisions,
            #                                      max_collisions).zero_()))
        E = torch.cat(Es, dim=0)
        return E
    def save_state(self):
        """State snapshots are not supported in batched mode."""
        raise NotImplementedError
    def load_state(self, state_dict):
        """State restore is not supported in batched mode."""
        raise NotImplementedError
    def reset_engine(self):
        """Engine reset is not supported in batched mode."""
        raise NotImplementedError
def run_world(world, dt=Params.DEFAULT_DT, run_time=10,
              print_time=True, screen=None, recorder=None):
    """Helper function to run a simulation forward once a world is created.

    Steps *world* until ``world.t`` reaches *run_time*.  When *screen* is a
    pygame surface, bodies and joints are drawn each animation frame; when
    *recorder* is given, frames are handed to it instead of being flipped
    to the display.  Rendering is disabled for batched worlds.
    """
    # If in batched mode don't display simulation
    if hasattr(world, 'worlds'):
        screen = None
    if screen is not None:
        import pygame
        background = pygame.Surface(screen.get_size())
        background = background.convert()
        background.fill((255, 255, 255))
    animation_dt = dt
    elapsed_time = 0.
    prev_frame_time = -animation_dt
    start_time = time.time()
    while world.t < run_time:
        world.step()
        if screen is not None:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    return
            # redraw only when a full animation frame has elapsed (or recording)
            if elapsed_time - prev_frame_time >= animation_dt or recorder:
                prev_frame_time = elapsed_time
                screen.blit(background, (0, 0))
                update_list = []
                for body in world.bodies:
                    update_list += body.draw(screen)
                for joint in world.joints:
                    update_list += joint[0].draw(screen)
                # Visualize collision points and normal for debug
                # (Uncomment collisions_debug line in collision handler)
                # if world.collisions_debug:
                #     for c in world.collisions_debug:
                #         (normal, p1, p2, penetration), b1, b2 = c
                #         b1_pos = world.bodies[b1].pos
                #         b2_pos = world.bodies[b2].pos
                #         p1 = p1 + b1_pos
                #         p2 = p2 + b2_pos
                #         pygame.draw.circle(screen, (0, 255, 0), p1.data.numpy().astype(int), 5)
                #         pygame.draw.circle(screen, (0, 0, 255), p2.data.numpy().astype(int), 5)
                #         pygame.draw.line(screen, (0, 255, 0), p1.data.numpy().astype(int),
                #                          (p1.data.numpy() + normal.data.numpy() * 100).astype(int), 3)
                if not recorder:
                    # Don't refresh screen if recording
                    pygame.display.update(update_list)
                    pygame.display.flip()  # XXX
                else:
                    recorder.record(world.t)
            elapsed_time = time.time() - start_time
            if not recorder:
                # Adjust frame rate dynamically to keep real time
                wait_time = world.t - elapsed_time
                if wait_time >= 0 and not recorder:
                    wait_time += animation_dt  # XXX
                    time.sleep(max(wait_time - animation_dt, 0))
                # animation_dt -= 0.005 * wait_time
                # elif wait_time < 0:
                #     animation_dt += 0.005 * -wait_time
                # elapsed_time = time.time() - start_time
        elapsed_time = time.time() - start_time
        if print_time:
            # carriage return keeps progress on one console line
            print('\r ', '{} / {} {} '.format(int(world.t), int(elapsed_time),
                                              1 / animation_dt), end='')
|
{"/lcp_physics/physics/__init__.py": ["/lcp_physics/physics/engines.py", "/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/tests/test_batch.py": ["/lcp_physics/physics/forces.py", "/lcp_physics/physics/world.py"], "/lcp_physics/physics/engines.py": ["/lcp_physics/lcp/lcp.py"], "/lcp_physics/physics/world.py": ["/lcp_physics/physics/__init__.py"]}
|
32,060
|
pombreda/Kea
|
refs/heads/master
|
/kea/plugin/register.py
|
# tools to help register & identify commands
import logging
import os
import pprint
import subprocess as sp
import sys
from jinja2 import Template
import yaml
import leip
from leip import set_local_config
from kea.utils import find_executable, register_executable
from kea.utils import get_tool_conf, create_kea_link
lg = logging.getLogger(__name__)
@leip.arg('-V', '--version', help='version number')
@leip.arg('-a', '--appname', help='application name')
@leip.command
def show(app, args):
    """Dump the configuration of one tool (minus version records) as YAML."""
    lg.info("show conf: %s", args.appname)
    tool_conf = get_tool_conf(app, args.appname, args.version)
    # the per-version records are noisy - drop them from the dump
    del tool_conf['versions']
    yaml.dump(tool_conf, sys.stdout, default_flow_style=False)
@leip.hook('post_argparse')
def run_register_tool_commands(app):
    """After argument parsing: honor --list_versions by printing and exiting."""
    if not app.kea_args.list_versions:
        return
    print_tool_versions(app, app.conf['appname'])
    exit(0)
@leip.arg('-v', '--version', help='version to set default')
@leip.arg('-a', '--appname', help='application name')
@leip.command
def set_default(app, args):
    """
    Set the default version for a tool and print its details.
    """
    tkey = args.version
    tdata = app.conf['app.{}.version.{}'.format(args.appname, args.version)]
    print "set default:"
    print '{}: {}'.format(tkey, tdata['executable'])
    tv = tdata['version']
    # wrap long version strings: 80 chars on the labeled first line,
    # 86 chars on the indented continuation lines
    if len(tv) < 80:
        print '    version: {}'.format(tv)
    else:
        i = 0
        while tv:
            if i == 0:
                print '    version: {}'.format(tv[:80])
                tv = tv[80:]
            else:
                print '       {}'.format(tv[:86])
                tv = tv[86:]
            i += 1
    # persist the choice in the user's local leip configuration
    set_local_config(app, 'app.{}.default_version'.format(args.appname), tkey)
def print_tool_versions(app, appname):
    """Print every registered version of *appname*, marking the default."""
    tool_version_data = app.conf['app.{}.versions'.format(appname)]
    def_version = app.conf['app.{}.default_version'.format(appname)]
    for tkey in tool_version_data:
        tdata = app.conf['app.{}.versions.{}'.format(appname, tkey)]
        print '{}: {}'.format(tkey, tdata['executable'])
        tv = tdata['version']
        # wrap long version strings, same layout as in set_default
        if len(tv) < 80:
            print '    version: {}'.format(tdata['version'])
        else:
            i = 0
            while tv:
                if i == 0:
                    print '    version: {}'.format(tv[:80])
                    tv = tv[80:]
                else:
                    print '       {}'.format(tv[:86])
                    tv = tv[86:]
                i += 1
        if tkey == def_version:
            print("      default")
@leip.arg('-a', '--appname', help='application name')
@leip.command
def list(app, args):
    """
    List tool versions.

    With -a/--appname, list the registered versions of that tool;
    otherwise list every tool that has registered versions.
    (Named ``list`` for the CLI; it deliberately shadows the builtin
    at module level.)
    """
    # idiom fix: was `if not args.appname is None`
    if args.appname is not None:
        print_tool_versions(app, args.appname)
        return
    for tool in app.conf['app']:
        if 'versions' in app.conf['app.{}'.format(tool)]:
            print(tool)
@leip.arg('-a', '--appname', help='application name')
@leip.command
def create_execs(app, args):
    """
    Create Kea executables (links) for a registered tool.
    """
    # NOTE(review): when -a/--appname is omitted the condition below never
    # matches, so the command silently does nothing - confirm whether
    # "create links for all tools" was intended instead.
    for tool in app.conf['app']:
        if args.appname and tool == args.appname:
            if 'versions' in app.conf['app.{}'.format(tool)]:
                print(tool)
                create_kea_link(app, tool)
@leip.flag('-d', '--set_default', help='set this executable as default')
@leip.arg('-V', '--version', help='version number')
#@leip.arg('-a', '--appname', help='application name')
@leip.arg('appname')
@leip.command
def add(app, args):
    """Discover executables for *appname* on the PATH and register them.

    With exactly one executable and an explicit -V, register directly.
    Otherwise the tool's configured ``version_command`` is rendered (jinja2)
    and run for each executable to discover its version string.
    """
    execname = args.appname
    if '/' in execname:
        execname = execname.split('/')[-1]
    lg.debug("register %s", execname)
    execs = []
    for f in find_executable(args.appname):
        execs.append(f)
    no_execs = len(execs)
    # make sure there is a link in the kea bin path
    keabin = os.path.expanduser(app.conf['bin_path'])
    if not os.path.exists(keabin):
        os.makedirs(keabin)
    if no_execs == 1 and args.version:
        # single hit with an explicit version: no discovery needed
        register_executable(app, execname, execs[0], args.version,
                            args.set_default)
        return
    if no_execs > 1 and args.version:
        lg.error("multiple executables found & have only one version")
        for e in execs:
            lg.error(" - %s", e)
        exit(-1)
    # bug fix: was `args.is_default`, which does not exist - the flag
    # defined above is -d/--set_default
    if no_execs > 1 and args.set_default:
        lg.error("multiple executables found & cannot set them all as default")
        for e in execs:
            lg.error(" - %s", e)
        exit(-1)
    toolconf = get_tool_conf(app, execname, version=None)
    if 'version_command' not in toolconf:
        lg.error("No information on how to retrieve tool version")
        lg.error("Please specify using:")
        lg.error("   kea conf set app.%s.version_command '<command>'",
                 execname)
        exit(-1)
    version_command = toolconf['version_command']
    lg.debug("version command: %s", version_command)
    vc_template = Template(version_command)
    lg.info("version command: %s", version_command)
    for ex in execs:
        # render the discovery command for this executable and capture stdout
        cmd = vc_template.render(executable=ex)
        P = sp.Popen(cmd, shell=True, stdout=sp.PIPE)
        o, e = P.communicate()
        version = o.strip()
        register_executable(app, execname, ex, version,
                            args.set_default)
        #lg.warning("Could not register (yet?)")
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,061
|
pombreda/Kea
|
refs/heads/master
|
/kea/plugin/pbs_executor.py
|
import copy
import logging
import os
import subprocess as sp
from uuid import uuid4
from jinja2 import Template
import arrow
import leip
from kea.executor import BasicExecutor, get_deferred_cl
lg = logging.getLogger(__name__)
# Jinja2 template for the header of a generated PBS submission script.
# PbsExecutor appends one backgrounded line per buffered command plus a
# trailing "wait", then submits the file with qsub.
PBS_SUBMIT_SCRIPT_HEADER = """#!/bin/bash
#PBS -N {{appname}}.{{uuid}}
#PBS -e {{ cwd }}/pbs/{{appname}}.{{uuid}}.$PBS_JOBID.err
#PBS -o {{ cwd }}/pbs/{{appname}}.{{uuid}}.$PBS_JOBID.out
#PBS -l nodes={{nodes}}:ppn={{ppn}}
{%- if mem %}
#PBS -l mem={{mem}}
{%- endif %}
{%- if account %}
#PBS -A {{ account }}{% endif %}
{%- if walltime %}
#PBS -l walltime={{ walltime }}{% endif %}
set -v
{%- if virtual_env %}
#load the same virtual env as was active during submission
source "{{ virtual_env }}/bin/activate"
{% endif %}
#make sure we go to the working directory
cd {{ cwd }}
# start actual command
"""
class PbsExecutor(BasicExecutor):
    """Executor that batches commands into PBS jobs and submits via qsub.

    Buffers up to ``cl_per_job`` command lines, writes them as backgrounded
    lines into a generated submission script, and fires the script off with
    ``qsub`` (or just prints it with --pbs_dry_run).
    """
    def __init__(self, app):
        super(PbsExecutor, self).__init__(app)
        # scripts and job stdout/stderr land in ./pbs
        if not os.path.exists('./pbs'):
            os.makedirs('./pbs')
        self.buffer = []
        self.cl_per_job = self.app.kea_args.threads
        if self.cl_per_job == -1:
            self.cl_per_job = 1
        self.batch = 0
        self.clno = 0
    def submit_to_pbs(self):
        """Write the buffered commands to a PBS script and submit it."""
        uuid = str(uuid4())[:8]
        #write pbs script
        pbs_script = os.path.join(
            'pbs',
            '{}.{}.pbs'.format(self.app.conf['appname'], uuid))
        template = Template(PBS_SUBMIT_SCRIPT_HEADER)
        data = copy.copy(self.app.conf['pbs'])
        data['appname'] = self.app.conf['appname']
        data['cwd'] = os.getcwd()
        data['uuid'] = uuid
        #virtual env?
        data['virtual_env'] = os.environ.get('VIRTUAL_ENV')
        lg.debug("submit to pbs with uuid %s", uuid)
        for info in self.buffer:
            #for logging.
            info['mode'] = 'asynchronous'
            info['submitted'] = arrow.now()
            info['pbs_uuid'] = uuid
            info['pbs_script_file'] = pbs_script
        # node/ppn resolution: CLI flag > configured value > fallback
        if self.app.kea_args.pbs_nodes is not None:
            data['nodes'] = self.app.kea_args.pbs_nodes
        elif not data['nodes']:
            data['nodes'] = 1
        if self.app.kea_args.pbs_ppn:
            data['ppn'] = self.app.kea_args.pbs_ppn
        elif not data['ppn']:
            data['ppn'] = self.cl_per_job
        with open(pbs_script, 'w') as F:
            # bug fix: removed stray bare expression `data['name']` which did
            # nothing except raise KeyError when 'name' was not configured
            F.write(template.render(**data))
            F.write("\n")
            for info in self.buffer:
                # run each buffered command in the background, then wait
                F.write("( " + " ".join(get_deferred_cl(info)))
                F.write(" ) & \n")
            F.write("wait\n")
            F.write('echo "done"\n')
        self.clno += 1
        #fire & forget the pbs job
        pbs_cl = ['qsub', pbs_script]
        if self.app.kea_args.pbs_dry_run:
            # print() form parses under both Python 2 and 3
            print(" ".join(pbs_cl))
        else:
            P = sp.Popen(pbs_cl, stdout=sp.PIPE)
            o, e = P.communicate()
            # qsub prints "<jobid>.<server>"; keep only the numeric job id
            pid = o.strip().split('.')[0]
            for info in self.buffer:
                info['pbs_jobid'] = pid
            lg.warning("Starting job: %s with pbs pid %s", pbs_script, pid)
        self.buffer = []
        self.batch += 1
    def fire(self, info):
        """Buffer one command; flush to PBS when the buffer is full."""
        self.buffer.append(info)
        if len(self.buffer) >= self.cl_per_job:
            lg.info("submitting pbs job. No commands: %d",
                    len(self.buffer))
            self.submit_to_pbs()
    def finish(self):
        """Flush any remaining buffered commands."""
        if len(self.buffer) > 0:
            lg.info("submitting pbs job. No commands: %d", len(self.buffer))
            self.submit_to_pbs()
@leip.hook('pre_argparse')
def prep_sge_exec(app):
    """Register the 'pbs' executor and its command-line options.

    NOTE(review): the function name says "sge" but everything it registers
    is PBS - presumably a copy-paste leftover; the name is part of the hook
    interface, so it is left unchanged.
    """
    app.executors['pbs'] = PbsExecutor
    # these flags take one value each when harvested from a mixed kea cl
    for a in '--pbs_nodes --pbs_ppn --pbs_account'.split():
        app.kea_arg_harvest_extra[a] = 1
    app.kea_argparse.add_argument('--pbs_nodes',
                                  help='No nodes requested (default=jobs '
                                  + 'submitted)', type=int)
    app.kea_argparse.add_argument('--pbs_ppn',
                                  help='No ppn requested (default=cl per '
                                  + 'job)', type=int)
    app.kea_argparse.add_argument('--pbs_account',
                                  help='Account requested (default none)')
    app.kea_argparse.add_argument('--pbs_dry_run',
                                  action='store_true',
                                  help='create script, do not submit)')
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,062
|
pombreda/Kea
|
refs/heads/master
|
/kea/files.py
|
import logging
import os
from mad2.recrender import recrender
import kea.mad
from kea.utils import message
lg = logging.getLogger(__name__)
#lg.setLevel(logging.DEBUG)
def register_file(info, name, category, filename):
    """Record *filename* in the run info under *name* with its category.

    Loads (or creates) the mad annotation for the file and announces the
    registration to the user.
    """
    madfile = kea.mad.get_madfile(filename)
    message('info', 'registered {}/{}: {}', category, name, filename)
    entry = info['files'][name]
    entry['filename'] = filename
    entry['madfile'] = madfile
    entry['category'] = category
def set_input(info, filename):
    """Register *filename* as the run's input file."""
    lg.debug("set %s as input file", filename)
    entry = info['files']['input']
    entry['filename'] = filename
    entry['madfile'] = kea.mad.get_madfile(filename)
    entry['category'] = 'input'
def set_output(info, filename):
    """Register *filename* as the run's output file."""
    lg.debug("set %s as output file", filename)
    entry = info['files']['output']
    entry['filename'] = filename
    entry['madfile'] = kea.mad.get_madfile(filename)
    entry['category'] = 'output'
def assign_on_position(info, name, category, pos):
    """Assign the command-line token at index *pos* as file *name*.

    Bug fix: the index was hard-coded to ``info['cl'][1]``, silently
    ignoring the *pos* argument.
    """
    filename = info['cl'][pos]
    madfile = kea.mad.get_madfile(filename)
    info['files'][name]['madfile'] = madfile
    info['files'][name]['category'] = category
def assign_filename(info, name, category, filename):
    """Attach the mad annotation for *filename* to file slot *name*.

    Unlike ``register_file`` this does not store the filename itself.
    """
    entry = info['files'][name]
    entry['madfile'] = kea.mad.get_madfile(filename)
    entry['category'] = category
def flag_find(lst, flg):
    """Return the command-line token that directly follows flag *flg*.

    Returns None when the flag is falsy, absent, or is the last token
    (no value can follow it).
    """
    if not flg or flg not in lst:
        lg.debug("Cannot assign find file with flag %s", flg)
        return None
    p = lst.index(flg)
    if p + 1 >= len(lst):
        lg.warning("Cannot assign find file with flag %s (cl too short)", flg)
        return None
    return lst[p + 1]
def flag_find_list(lst, flg):
    """Return the tokens following every occurrence of flag *flg* in *lst*.

    Bug fix: the miss path returned ``set()`` while the hit path returned a
    ``list`` - callers now always get a list.  The per-element debug line
    inside the loop was dropped as pure noise.
    """
    if not flg or flg not in lst:
        lg.debug("Cannot assign find file with flag %s", flg)
        return []
    hits = []
    for i, token in enumerate(lst[:-1]):
        if token == flg:
            hits.append(lst[i + 1])
    return hits
#
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,063
|
pombreda/Kea
|
refs/heads/master
|
/kea/plugin/provenance.py
|
import sys
import logging
import os
import pkg_resources
import shutil
import subprocess as sp
import arrow
import leip
from mad2.recrender import recrender
import kea.mad
lg = logging.getLogger(__name__)
#lg.setLevel(logging.DEBUG)
def get_last_provenance_data(madfile):
    """Return the most recent provenance record of *madfile*, or None.

    Provenance records are keyed by timestamp strings, so the
    lexicographically largest key is the newest entry.
    """
    if 'provenance' not in madfile:
        return None
    lastkey = sorted(madfile['provenance'].keys())[-1]
    return madfile['provenance'][lastkey]
@leip.hook('post_fire', 5)
def check_output(app, info):
    """
    In the case of a non-zero return code - mark the output files

    Each existing output file is renamed to ``<name>.kea_error`` so a
    failed run never masquerades as a successful one, and the run info
    is updated to point at the renamed file.
    """
    rc = info['returncode']
    if rc == 0:
        return
    for fn, fi in info['files'].items():
        if fi['category'] != 'output':
            continue
        filename = fi['filename']
        if os.path.exists(fi['filename']):
            move_to = os.path.join(
                os.path.dirname(filename),
                os.path.basename(filename) + '.kea_error')
            lg.warning("non zero RC - moving %s to %s", filename, move_to)
            shutil.move(filename, move_to)
            #change file data for this run
            fi['filename'] = move_to
            fi['madfile'] = kea.mad.get_madfile(move_to)
@leip.hook('pre_fire')
def check_run(app, info):
    """Decide whether this command actually needs to run.

    Compares each output file's recorded provenance (input filenames and
    sha1sums) against the current run.  When every output exists and its
    provenance matches, the run is flagged as a rerun via
    ``info['skip'] = True``.  Any missing file or provenance mismatch
    means: run.
    """
    lg.debug("Check if we need to run this command")
    no_files_notok = 0
    no_outputfiles_seen = 0
    output_files = {}
    all_filenames = set()
    #first, find all outputfiles
    #also - if an outputfile does not exists - decide to run right away
    #also - if an inputfile is not present - decide to run also
    #  it is not up to Kea to decide not to run in the case of an
    #  error - the tool may complain
    for name in info['files']:
        finf = info['files'][name]
        all_filenames.add(name)
        if not os.path.exists(finf['filename']):
            if finf['category'] == 'output':
                #output file does not exist - continue
                lg.debug("Output file %s does not exist - run", name)
                return
            else:
                #non output file - should have been present
                lg.warning("Cannot check provenance")
                # bug fix: was `lw.warning` - NameError at runtime
                lg.warning("%s/%s file: %s does not exist",
                           finf['category'], name, finf['filename'])
                return
        if finf['category'] == 'output':
            lg.debug("found output: %s", name)
            no_outputfiles_seen += 1
            output_files[name] = finf
    for output_name, output_finf in output_files.items():
        #check if the input/outputfile structure is consistent
        #find output file - check if inputs have changed
        # bug fix: debug line referenced stale `finf` from the loop above
        lg.debug("check provenance of: %s", output_finf['filename'])
        out_prov = get_last_provenance_data(output_finf['madfile'])
        if not out_prov:
            lg.debug("no provenance data for output file %s", output_name)
            #no provenance data for this outputfile - so - run
            return
        if set(out_prov['derived_from'].keys()) != all_filenames:
            #provenance recorded filenames do not match this run's filenames
            #hence - we shall run the tool:
            lg.debug("provenance data of %s does not match current run",
                     output_name)
            return
        #print(out_prov.pretty())
        for fn in out_prov.get('derived_from', []):
            lg.debug(" - prov check %s", fn)
            out_prov_file = out_prov['derived_from'][fn]
            fn_finf = info['files'].get(fn)
            current_sha1 = fn_finf['madfile']['sha1sum']
            lg.debug(' - current sha1 : %s', current_sha1)
            lg.debug(' - prov recorded sha1 : %s', out_prov_file['sha1sum'])
            if out_prov_file['sha1sum'] != current_sha1:
                lg.debug(" - sha1sum mismatch!")
                no_files_notok += 1
    if no_outputfiles_seen > 0 and no_files_notok == 0:
        lg.warning("Skipping - provenance data indicates that this is a rerun")
        info['skip'] = True
def annotate(app, info, fname, fdata):
    """Write a provenance record for one output file into its madfile.

    Records tool identity, run timings, command lines, all files involved
    in the run (with sha1sums), and copies keywords marked ``propagate``
    from each input file's annotation onto the output file.
    """
    madapp = kea.mad.get_madapp()
    lg.debug("annotating '%s' output file: '%s'",
             fname, fdata['filename'])
    maf = fdata['madfile']
    maf.load()  # make sure we're dealing with current data
    # provenance records are keyed by the (second-resolution) stop timestamp
    stamp = str(info['stop']).split('.')[0]
    prod = maf.mad['provenance.%s' % stamp]
    prod['tool_name'] = info['app_name']
    prod['tool_path'] = info['executable']
    prod['tool_version'] = info['app_version']
    prod['username'] = maf['username']
    prod['userid'] = maf['userid']
    prod['host'] = maf['host']
    prod['started_at_time'] = str(info['start'])
    prod['stopped_at_time'] = str(info['stop'])
    prod['runtime'] = str(info['stop'] - info['start'])
    prod['command_line'] = " ".join(info['cl'])
    prod['working_directory'] = info['cwd']
    prod['kea_command_line'] = info['full_cl']
    prod['kea_executable'] = info['kea_executable']
    prod['kea_version'] = pkg_resources.get_distribution("kea").version
    for fn in info['files']:
        fd = info['files'][fn]
        fdmaf = fd['madfile']
        #store all recorded files
        derf = prod['derived_from'][fn]
        derf['filename'] = fdmaf['fullpath']
        derf['category'] = fd['category']
        derf['kea_name'] = fn
        derf['sha1sum'] = fdmaf['sha1sum']
        derf['host'] = fdmaf['host']
        #heritable data
        if not fd['category'] == 'input':
            continue
        # copy keywords flagged as heritable from input to output annotation
        for k in fdmaf.mad:
            kinfo = madapp.conf['keywords'][k]
            if not kinfo.get('propagate', False):
                continue
            maf[k] = fdmaf.mad[k]
    maf.save()
@leip.hook('post_fire',100)
def add_provenance(app, info):
    """After a run: write provenance records to every output file.

    (Removed the unused local ``rc = info['returncode']`` - this hook
    annotates regardless of return code; failed outputs have already been
    renamed by ``check_output``.)
    """
    lg.debug('Adding provenance data')
    for fname in info['files']:
        lg.debug('   for file: %s', fname)
        fdata = info['files'][fname]
        if fdata['category'] != 'output':
            continue
        annotate(app, info, fname, fdata)
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,064
|
pombreda/Kea
|
refs/heads/master
|
/kea/plugin/logger.py
|
from collections import OrderedDict
import logging
import arrow
from mad2 import madfile
import leip
from lockfile import FileLock
lg = logging.getLogger(__name__)
def to_str(s):
    """Render a value for the log; MadFiles show their path (+sha1 if known)."""
    if not isinstance(s, madfile.MadFile):
        return str(s)
    if 'sha1sum' in s:
        return '{} (sha1: {})'.format(s['inputfile'], s['sha1sum'])
    return '{}'.format(s['inputfile'])
@leip.hook('post_run')
def log_cl(app, all_info):
    """Append a human-readable record of each executed command to ./kea.log.

    Logging is best-effort: any failure is reported as a warning and never
    breaks the run itself.
    """
    try:
        with FileLock('kea.log'):
            # (the enumerate index was unused and shadowed by the inner loop)
            for info in all_info:
                with open('kea.log', 'a') as F:
                    F.write("-" * 80 + "\n")
                    for key in info:
                        val = info[key]
                        if val is None:
                            #do not print any key/vals where the value
                            #is None
                            continue
                        F.write("{}: ".format(key))
                        if key == 'cl':
                            F.write(" ".join(val) + "\n")
                        elif key == 'files':
                            F.write("\n")
                            for fi in val:
                                fim = val[fi]['madfile']
                                fic = val[fi]['category']
                                F.write(" - %s:\n" % fi)
                                F.write("    path: %s\n" % fim['fullpath'])
                                F.write("    sha1sum: %s\n" % fim['sha1sum'])
                                F.write("    category: %s\n" % fic)
                        elif isinstance(val, list):
                            F.write("\n")
                            for lv in val:
                                F.write(' - {}\n'.format(to_str(lv)))
                        else:
                            F.write(" {}\n".format(to_str(val)))
    except Exception:
        # bug fix: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; still deliberately broad (best-effort logging)
        lg.warning("Cannot write to Kea log file")
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,065
|
pombreda/Kea
|
refs/heads/master
|
/kea/plugin/memory.py
|
import sys
import os
import subprocess as sp
from mad2 import ui
import leip
@leip.hook('pre_argparse')
def main_arg_define(app):
    """Register the global -R/--remember flag on the kea argument parser."""
    app.kea_argparse.add_argument(
        '-R', '--remember', action='store_true',
        help='Save command line to this folder '
             'in a file called "kea.sh"')
@leip.hook('post_fire')
def memory_store_cl(app, info):
    """
    Store command line in history

    Normalizes the executed command line (absolute executable path, +-R
    markers stripped) and appends it to both the per-tool and the global
    history files.  With -R/--remember it is also written to ./kea.sh.
    """
    if app.kea_args.is_iteration:
        #do not store cl when this is an iteration
        return
    histdir = os.path.join(os.path.expanduser('~'),
                           '.config', 'kea', 'history')
    fullcl = info['full_cl'].strip()
    if info.get('iteration', 0) > 0:
        #only do one (first) iteration since this function will
        #be the same for all iteration
        return
    #expand executable
    ls = fullcl.split()
    lsx, lsr = ls[0], ls[1:]
    lsx = os.path.abspath(lsx)
    #remove +-R (if it is there)
    while '+-R' in lsr:
        lsr.remove('+-R')
    fullcl = (lsx + " " + " ".join(lsr)).strip()
    # record under the tool's own history and the shared one
    _store_to_histfile(fullcl, histdir, info['app_name'])
    _store_to_histfile(fullcl, histdir, '__all__')
    if app.kea_args.remember:
        with open('kea.sh', 'w') as F:
            F.write(fullcl + "\n")
def _store_to_histfile(cl, histdir, histfilename):
try:
if not os.path.exists(histdir):
os.makedirs(histdir)
except OSError, IOError:
#cannot create history dir - do not store
return
histfile = os.path.join(histdir, histfilename)
if os.path.exists(histfile):
histsize = os.stat(histfile).st_size
hist_exists = True
else:
histsize = 0
hist_exists = False
with open(histfile, 'a+') as F:
if hist_exists:
if histsize > 3333:
F.seek(-3000, 2)
else:
F.seek(0)
last = F.read().rstrip().rsplit("\n", 1)[-1]
last = last.strip()
if last and last == cl:
return
F.write(cl + "\n")
@leip.arg('-a', '--appname', help='application name')
@leip.commandName('!')
def memory_history(app, args):
    """Interactively pick a command from the history and re-run it."""
    appname = '__all__' if args.appname is None else args.appname
    chosen = ui.askUser(appname, 'kea', default='__last__',
                        prompt='')
    # propagate the shell's return code as our own exit status
    exit(sp.call(chosen, shell=True))
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,066
|
pombreda/Kea
|
refs/heads/master
|
/kea/executor.py
|
import copy
import logging
from multiprocessing.dummy import Pool as ThreadPool
import os
import signal
import subprocess as sp
import sys
import arrow
lg = logging.getLogger(__name__)
def get_deferred_cl(info):
    """Build the command line that re-invokes Kea itself for a deferred run.

    Prefixes the original command with the kea executable and re-attaches
    stdout/stderr redirection as kea-prefixed arguments.
    """
    prefix = info['kea_arg_prefix']
    deferred = [info['kea_executable']] + info['cl']
    if info['stdout_file']:
        deferred += ['{}-o'.format(prefix), info['stdout_file']]
    if info['stderr_file']:
        deferred += ['{}-e'.format(prefix), info['stderr_file']]
    return deferred
def simple_runner(info, defer_run=False):
    """
    Defer run executes the run with the current executor, but with
    the Kea executable so that all kea related functionality is
    executed in the second stage.

    In the normal (non-deferred) path the command is run synchronously
    with optional stdout/stderr capture to files, and timing/return-code
    bookkeeping is written back into *info*.
    """
    stdout_handle = None  # Unless redefined - do not capture stdout
    stderr_handle = None  # Unless redefined - do not capture stderr
    # NOTE(review): `kap` is unused here (only get_deferred_cl needs it)
    kap = info['kea_arg_prefix']
    if defer_run:
        cl = get_deferred_cl(info)
    else:
        cl = [info['executable']] + info['cl']
        if info['stdout_file']:
            stdout_handle = open(info['stdout_file'], 'w')
        if info['stderr_file']:
            stderr_handle = open(info['stderr_file'], 'w')
    lg.debug("  cl: %s", cl)
    info['start'] = arrow.now()
    if defer_run:
        # fire & forget - the deferred kea invocation does its own bookkeeping
        P = sp.Popen(cl)
        info['pid'] = P.pid
        info['submitted'] = arrow.now()
        info['status'] = 'deferred'
    else:
        def preexec():  # Don't forward signals.
            os.setpgrp()
        P = sp.Popen(cl, stdout=stdout_handle, stderr=stderr_handle,
                     preexec_fn = preexec)
        info['pid'] = P.pid
        P.communicate()
        info['stop'] = arrow.now()
    # NOTE(review): 'stop' is assigned twice on the synchronous path (once
    # above, once here); the one below also covers the deferred path -
    # presumably the inner assignment is a leftover; confirm before removing.
    info['stop'] = arrow.now()
    info['runtime'] = info['stop'] - info['start']
    # NOTE(review): on the deferred path P.returncode is None here, so
    # status becomes 'error' - confirm intended.
    info['returncode'] = P.returncode
    if info['returncode'] == 0:
        info['status'] = 'success'
    else:
        info['status'] = 'error'
class BasicExecutor(object):
    """Default executor: runs commands synchronously or via a thread pool.

    With ``threads < 2`` each command is run inline; otherwise commands are
    dispatched onto a ``multiprocessing.dummy`` thread pool.  A SIGINT
    handler allows graceful interruption (and a forced exit on the second
    Ctrl+C).
    """
    def __init__(self, app):
        lg.debug("Starting executor")
        self.interrupted = False
        self.app = app
        self.threads = self.app.kea_args.threads
        if self.threads < 2:
            self.simple = True
        else:
            self.simple = False
            self.pool = ThreadPool(self.threads)
            lg.debug("using a threadpool with %d threads", self.threads)
    def fire(self, info):
        """Execute one command (inline or on the pool)."""
        lg.debug("start execution")
        if self.interrupted:
            #refuse execution after an interrupt was caught
            info['returncode'] = 2
            info['status'] = 'not_executed'
            return
        # NOTE(review): the handler closes over this call's `info`, and each
        # fire() re-registers it - only the most recent pid receives SIGINT.
        def sigint_handler(sgn, frame):
            #capture sigint
            #send sigint
            if self.interrupted:
                lg.warning('Captured Ctrl+C twice - exit now')
                sys.exit(-1)
            if self.simple:
                self.interrupted = True
                info['status'] = 'interrupted'
                lg.warning('Captured Ctrl+C - quitting')
                lg.warning('Sending SIGINT to %d', info['pid'])
                os.kill(info['pid'], signal.SIGINT)
                #os.kill(info['pid'], signal.SIGKILL)
        # weirdly enough, this following line makes the less pager
        # behave normally - i.e. this program quits when less quits
        signal.signal(signal.SIGPIPE, lambda s,f: None)
        # capture sigint as well
        signal.signal(signal.SIGINT, sigint_handler)
        if self.simple:
            simple_runner(info)
        else:
            self.pool.apply_async(simple_runner, [info,], {'defer_run': False})
    def finish(self):
        """Wait for outstanding pool work and restore SIGPIPE handling."""
        if not self.simple:
            lg.debug('waiting for the threads to finish')
            self.pool.close()
            self.pool.join()
            lg.debug('finished waiting for threads to finish')
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
class DummyExecutor(BasicExecutor):
    """Dry-run executor: prints the command line instead of running it."""
    def fire(self, info):
        """Log and print the would-be command; nothing is executed."""
        lg.debug("start dummy execution")
        cl = [info['executable']] + copy.copy(info['cl'])
        # mimic shell redirection in the printed command
        if info['stdout_file']:
            cl.extend(['>', info['stdout_file']])
        if info['stderr_file']:
            cl.extend(['2>', info['stderr_file']])
        lg.debug("  cl: %s", cl)
        # print() form parses under both Python 2 and 3 (was `print x`)
        print(" ".join(cl))
        info['mode'] = 'synchronous'
# Registry of built-in executors, keyed by the name selectable on the CLI;
# plugins (e.g. the PBS executor) add their own entries via hooks.
executors = {
    'simple': BasicExecutor,
    # 'bg': BgExecutor,
    'dummy': DummyExecutor,
}
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,067
|
pombreda/Kea
|
refs/heads/master
|
/kea/plugin/cl_render.py
|
import copy
import logging
import leip
from mad2.recrender import recrender
lg = logging.getLogger(__name__)
#lg.setLevel(logging.DEBUG)
@leip.hook('pre_fire', 1000)
def hook_pre_run(app, info):
    """Render jinja-style templates embedded in the command line in place.

    Builds a render context that exposes each file's madfile under
    ``f.<name>`` and substitutes any argument containing ``{{`` or ``{%``.
    """
    rinf = copy.copy(info)
    rinf['f'] = {}
    for fn in info['files']:
        rinf['f'][fn] = rinf['files'][fn]['madfile']
    # idiom fix: enumerate instead of range(len(...))
    for i, v in enumerate(info['cl']):
        if ('{{' in v) or ('{%' in v):
            lg.debug("rendering: %s", v)
            info['cl'][i] = recrender(v, rinf)
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,068
|
pombreda/Kea
|
refs/heads/master
|
/kea/cl_generator.py
|
"""
Map arguments
input:
{name~*}
the * is treated as a glob and can be part of a filename
name is used to expand the capture part later.
{name=[1-5]}
name is replaced by the range of
{name=a,b,c,d}
"""
from collections import OrderedDict
import copy
import glob
import logging
import itertools
import re
import sys
import fantail
lg = logging.getLogger(__name__)
#plg.setLevel(logging.DEBUG)
RE_FIND_MAPINPUT = re.compile(
r'(?<![{\\]){([a-zA-Z_][a-zA-Z0-9_]*)?([\~\=]?)([^}]+)?}(?![\\}])')
def very_basic_command_line_generator(app):
    """
    Most basic command line generator possible

    Yields exactly one command line: the raw process arguments
    (everything after the script name).
    """
    yield sys.argv[1:]
def map_range_expand(map_info, cl):
    """Convert a map range pattern to a list of string items.

    Supported forms (stop values are *inclusive* - the code adds 1 to
    the stop before calling range()):

      - ``a,b,c``  -> ``['a', 'b', 'c']``
      - ``5:10``   -> ``['5', '6', ..., '10']``
      - ``5:10:2`` -> ``['5', '7', '9']``

    The result is later merged with the command-line item in which the
    range was embedded, e.g. ``test.{a=5:9:2}.in`` becomes
    ``['test.5.in', 'test.7.in', 'test.9.in']``.

    Exits the program when the pattern cannot be parsed.
    """
    pattern = map_info['pattern']
    mappat_range_3 = re.match(r'([0-9]+):([0-9]+):([0-9]+)', pattern)
    mappat_range_2 = re.match(r'([0-9]+):([0-9]+)', pattern)

    if mappat_range_3:
        start, stop, step = mappat_range_3.groups()
        lg.debug("expanding numerical range: %s %s %s", start, stop, step)
        # list comprehension (not map) so the return type is a list on
        # both Python 2 and 3, consistent with the comma branch below
        map_items = [str(x)
                     for x in range(int(start), int(stop) + 1, int(step))]
    elif mappat_range_2:
        start, stop = mappat_range_2.groups()
        map_items = [str(x) for x in range(int(start), int(stop) + 1)]
    elif ',' in pattern:
        map_items = [x.strip() for x in pattern.split(',')]
    else:
        lg.critical("Can not parse range: %s", pattern)
        exit(-1)
    return map_items
def _unescape(x):
if r'\{' in x:
x = x.replace(r'\{', '{')
if r'\}' in x:
x = x.replace(r'\}', '}')
return x
def map_glob_expand(map_info, cl):
    """Expand a '~' (glob) map definition into a sorted list of items.

    The map pattern is substituted into the original argument to form a
    filesystem glob; the fixed prefix/suffix surrounding the map
    definition is then stripped from every hit, leaving only the part
    the map matched.  Exits the program when the glob matches nothing.
    """
    globpattern = RE_FIND_MAPINPUT.sub(map_info['pattern'], map_info['arg'])
    hits = glob.glob(globpattern)
    if not hits:
        lg.critical("No hits found for pattern: %s", globpattern)
        exit(-1)

    head = map_info['start']
    tail = map_info['tail']
    #print hits, head, tail, hits[0][head:]
    if head == 0 and tail == 0:
        return sorted(hits)
    if tail == 0:
        return sorted(h[head:] for h in hits)
    return sorted(h[head:-tail] for h in hits)
def map_iter(map_info):
    """Yield one shallow copy of *map_info* per expanded item.

    Each copy carries the current value under the 'item' key; yields
    nothing when no items were expanded.
    """
    for current in map_info.get('items', []):
        info_copy = copy.copy(map_info)
        info_copy['item'] = current
        yield info_copy
def apply_map_info_to_cl(newcl, map_info):
    """Substitute the current map item into every matching cl element.

    Each element is first checked against the full map-definition
    pattern ('re_from', e.g. ``{name~*}``) and, failing that, against
    the bare back-reference pattern ('re_replace', ``{name}``).  The
    list is modified in place and also returned.
    """
    item = map_info['item']
    # the regexes do not vary per element - look them up once instead
    # of on every loop iteration
    re_replace = map_info['re_replace']
    re_from = map_info['re_from']
    for i, arg in enumerate(newcl):
        if re_from.search(arg):
            newcl[i] = re_from.sub(item, arg)
        elif re_replace.search(arg):
            newcl[i] = re_replace.sub(item, arg)
    return newcl
def basic_command_line_generator(app):
    """Command line generator that expands globs & ranges.

    Scans sys.argv for map definitions ({name~glob} / {name=range}).
    When none are present, a single job info is yielded.  Otherwise one
    info is yielded per element of the cartesian product of all
    expanded definitions, with the current items substituted into the
    command line and into the stdin/stdout/stderr file names.  At most
    ``maxjobs`` jobs are yielded.
    """
    info = fantail.Fantail()

    stdin_file = app.kea_args.stdin
    stdout_file = app.kea_args.stdout
    stderr_file = app.kea_args.stderr

    #cl = [app.conf['executable']] + sys.argv[1:]
    cl = sys.argv[1:]
    max_no_jobs = app.kea_args.maxjobs

    #check if there are map arguments in here
    mapins = []
    replacements = 0

    ## find all map definitions
    for i, arg in enumerate(cl):
        fipa = RE_FIND_MAPINPUT.search(arg)
        if not fipa:
            continue
        if fipa.groups()[2] is None:
            # bare {name} back-reference - not a definition
            replacements += 1
            continue
        mapins.append(i)

    # no map definitions found - then simply return the cl & execute
    if len(mapins) == 0:
        info['cl'] = [_unescape(a) for a in cl]
        info['iteration'] = 0
        info['stdin_file'] = stdin_file
        info['stdout_file'] = stdout_file
        info['stderr_file'] = stderr_file
        yield info
        return

    #define iterators for each of the definitions
    mapiters = []
    for arg_pos in mapins:
        map_info = {}
        map_info['pos'] = arg_pos
        map_info['arg'] = cl[arg_pos]
        map_info['re_search'] = RE_FIND_MAPINPUT.search(cl[arg_pos])
        name, operator, pattern = map_info['re_search'].groups()

        if not operator:
            # BUG FIX: the operator regex group matches an empty string
            # (never None) when absent, so the previous `operator is
            # None` test could never fire and bare {5:10}-style
            # definitions silently expanded to nothing.
            if ':' in pattern:
                operator = '='
            else:
                operator = '~'  #default to file type glob
        if name is None:
            name = ""

        lg.debug("cl expand 1")
        lg.debug(" - name : %s", name)
        lg.debug(" - operator : %s", operator)
        lg.debug(" - pattern : %s", pattern)

        map_info['name'] = name
        map_info['operator'] = operator
        map_info['pattern'] = pattern

        # 're_from' matches the full definition ({name~...} / {name=...}),
        # 're_replace' the bare {name} back-reference
        re_from_re = r'({' + map_info['name'] + r'[\~\=]?[^}]*})'
        map_info['re_from'] = \
            re.compile(re_from_re)
        map_info['re_replace'] = \
            re.compile(r'({' + map_info['name'] + r'})')

        # fixed prefix/suffix lengths around the definition in this arg
        map_info['start'] = map_info['re_search'].start()
        map_info['tail'] = len(cl[arg_pos]) - map_info['re_search'].end()

        if map_info['operator'] == '~':
            map_info['items'] = map_glob_expand(map_info, cl)
        elif map_info['operator'] == '=':
            map_info['items'] = map_range_expand(map_info, cl)

        mapiters.append(map_iter(map_info))

    for i, map_info_set in enumerate(itertools.product(*mapiters)):
        if i >= max_no_jobs:
            break

        newcl = copy.copy(cl)
        newinfo = copy.copy(info)
        newstdin = stdin_file
        newstdout = stdout_file
        newstderr = stderr_file

        for map_info in map_info_set:
            newcl = apply_map_info_to_cl(newcl, map_info)
            if newstdin is not None:
                newstdin = apply_map_info_to_cl([newstdin], map_info)[0]
            if newstdout is not None:
                newstdout = apply_map_info_to_cl([newstdout], map_info)[0]
            if newstderr is not None:
                newstderr = apply_map_info_to_cl([newstderr], map_info)[0]

        # BUG FIX: unescape the substituted command line (newcl), not the
        # original cl, which discarded every map substitution just applied.
        newinfo['cl'] = [_unescape(a) for a in newcl]
        newinfo['iteration'] = i
        newinfo['stdin_file'] = newstdin
        newinfo['stdout_file'] = newstdout
        newinfo['stderr_file'] = newstderr
        yield newinfo
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,069
|
pombreda/Kea
|
refs/heads/master
|
/kea/mad.py
|
import leip
import logging
import mad2.util
lg = logging.getLogger(__name__)
# Module-level cache for the shared mad2 Leip application instance.
MADAPP = None
def get_madapp():
    """Return the shared mad2 Leip application, creating it on first use."""
    global MADAPP
    if MADAPP is None:
        app = leip.app('mad2')
        MADAPP = app
    return MADAPP
def get_madfile(filename):
    """Return the mad2 madfile object for *filename* (via the cached app)."""
    return mad2.util.get_mad_file(get_madapp(), filename)
def finish():
    """Run the mad2 application's 'finish' hook."""
    lg.debug("running mad finish hook")
    mapp = get_madapp()
    mapp.run_hook('finish')
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,070
|
pombreda/Kea
|
refs/heads/master
|
/kea/__init__.py
|
# kea runner
import argparse
import copy
from collections import OrderedDict
import logging
import os
import subprocess as sp
import sys
import leip
import fantail
import mad2.util as mad2util
import kea.mad
from kea.utils import get_tool_conf
from kea.plugin.register import print_tool_versions
from kea.cl_generator import basic_command_line_generator
from kea.executor import executors
lg = logging.getLogger(__name__)
#lg.setLevel(logging.DEBUG)
conf = leip.get_config('kea')
#leiplog = logging.getLogger('leip')
#leiplog.setLevel(logging.DEBUG)
class Kea(leip.app):
    """Leip application implementing the Kea tool wrapper.

    Determines the wrapped tool's name (from argv[0] or the first
    positional argument), stacks a tool-specific configuration layer on
    top of the global 'kea' config, and installs the Kea hook order and
    the Kea-specific argument parser.
    """

    def __init__(self, *args, **kwargs):
        if len(args) == 0:
            name = os.path.split(sys.argv[0])[1]
            # BUG FIX: was `name[:3] == 'kea_'` - a 3-character slice
            # compared to a 4-character string is never equal, so the
            # 'kea_' prefix was never stripped (and `name[3:]` would
            # have left the underscore in place anyway).
            if name.startswith('kea_'):
                name = name[4:]
        else:
            name = args[0]

        # Call Leip - we do not need the Leip argparser:
        super(Kea, self).__init__('kea', disable_commands=True)

        # replace the config by a stack so we can backfill
        self.conf = fantail.Fanstack([self.conf, fantail.Fantail()])

        # hack - if kea verbose is set - do that early:
        verbose_flag = self.conf['arg_prefix'] + '-v'
        if verbose_flag in sys.argv:
            lg.setLevel(logging.DEBUG)

        #default executors
        self.executors = executors

        lg.debug("start kea initialization")

        # different hooks!
        self.hook_order = [
            'pre_argparse',
            'argparse',
            'post_argparse',
            'prepare',
            'pre_run',
            'run',
            'finish']

        # for the kea argparse (starting with app.conf.arg_prefix)
        # default prefix = '---'
        # need to define how many arguments are taken from the command
        # line for each flag - the rest is handled by argparse
        self.kea_arg_harvest_extra = {}
        self.kea_argparse = argparse.ArgumentParser(
            prog='(kea){}'.format(name),
            description='Kea wrapper for: {}'.format(name),
            epilog='NOTE: Prefix all Kea arguments with: "' +
                   self.conf['arg_prefix'] + '"')

        self._madapp = None  # hold the Leip Mad Application

        self.conf['appname'] = name
        self.conf['kea_executable'] = sys.argv[0]
        self.discover(globals())

    @property
    def madapp(self):
        """Lazily created mad2 Leip application (commands disabled)."""
        if self._madapp is not None:
            return self._madapp
        self._madapp = leip.app('mad2', disable_commands=True)
        return self._madapp

    def get_madfile(self, filename):
        """Return the mad2 madfile object for *filename*."""
        return mad2util.get_mad_file(self.madapp, filename)
@leip.hook('pre_argparse')
def main_arg_define(app):
    """Declare Kea's own command-line flags.

    Flags that take a value are also registered in
    app.kea_arg_harvest_extra so the argv splitter (kea_argparse) knows
    to harvest one extra token for each of them.
    """
    # each of these flags consumes one following token from the command line
    for a in ('-V --version -j --threads -x --executor -o --stdout ' +
              '-e --stderr -n --maxjobs').split():
        app.kea_arg_harvest_extra[a] = 1
    app.kea_argparse.add_argument('-V', '--version', default='default',
                                  help='version number to use')
    app.kea_argparse.add_argument('-L', '--list_versions', action='store_true',
                                  help='list all versions of this tool & exit')
    app.kea_argparse.add_argument('-v', '--verbose', action='store_true')
    app.kea_argparse.add_argument('-E', '--command_echo', action='store_true',
                                  help='echo Kea commands to stdout')
    app.kea_argparse.add_argument('-j', '--threads', type=int, default=-1,
                                  help='kea threads to use (if applicable)')
    app.kea_argparse.add_argument('-n', '--maxjobs', type=int, default=1e12,
                                  help='max no jobs to execute')
    app.kea_argparse.add_argument('-x', '--executor', help='executor to use')
    app.kea_argparse.add_argument('-o', '--stdout', help='save stdout to')
    app.kea_argparse.add_argument('-e', '--stderr', help='save stderr to')
    #this flag is added to mark a run as being an iteration of another kea
    #run - we need this to, for example, prevent extensive logging.
    app.kea_argparse.add_argument('--is_iteration', help=argparse.SUPPRESS,
                                  action='store_true')
@leip.hook('argparse')
def kea_argparse(app):
    """
    Separate Kea arguments from tool arguments & feed the kea arguments
    to argparse.

    Tokens starting with the configured prefix (default '---') are Kea
    flags; flags registered in kea_arg_harvest_extra also pull their
    value token(s).  All remaining tokens are written back to sys.argv
    for the wrapped tool.
    """
    app.original_args = copy.copy(sys.argv)
    prefix = app.conf['arg_prefix']
    prelen = len(prefix)
    new_sysargv = []
    kea_argv = []
    i = 0
    while i < len(sys.argv):
        a = sys.argv[i]
        if a.startswith(prefix + '-'):
            # strip the prefix; keep the bare flag for the Kea parser
            flag = a[prelen:]
            kea_argv.append(flag)
            harvest_no = app.kea_arg_harvest_extra.get(flag, 0)
            if harvest_no > 0:
                # this flag consumes the next harvest_no token(s) too
                harvest = sys.argv[i + 1:i + 1 + harvest_no]
                kea_argv.extend(harvest)
                i += harvest_no
        else:
            new_sysargv.append(a)
        i += 1
    app.kea_clargs = kea_argv
    app.kea_args = app.kea_argparse.parse_args(kea_argv)
    lg.debug("kea args: {}".format(" ".join(kea_argv)))
    lg.debug("com args: {}".format(" ".join(new_sysargv)))
    lg.debug("kea argparse: {}".format(str(app.kea_args)))
    sys.argv = new_sysargv
@leip.hook('post_argparse')
def main_arg_process(app):
    """Apply the parsed Kea arguments to logging and configuration."""
    parsed = app.kea_args
    if parsed.verbose:
        lg.setLevel(logging.DEBUG)
    if parsed.command_echo:
        app.conf['command_echo'] = True
    app.conf['threads'] = parsed.threads
@leip.hook('prepare', 10)
def prepare_config(app):
    """Resolve the tool/version/subcommand configuration and place it
    in the backfill layer of the config stack."""
    # see if there is a candidate subcommand - i.e. the first
    # argument to the executable not starting with a '-' or with
    # the arg_prefix
    candidate_subcommand = None
    for a in sys.argv[1:]:
        # NOTE(review): a[:1] is a single character, so comparing it to
        # a multi-character arg_prefix (default '---') can never match -
        # confirm whether a.startswith(...) was intended.
        if a[:1] == '-' or a[:1] == app.conf['arg_prefix']:
            continue
        candidate_subcommand = a
        break
    version = app.kea_args.version
    if version is None:
        lg.debug("Prepping tool conf: %s (default version)",
                 app.conf['appname'])
    else:
        lg.debug("Prepping tool conf: %s %s",
                 app.conf['appname'], version)
    conf = get_tool_conf(app, app.conf['appname'], app.kea_args.version,
                         candidate_subcommand)
    app.conf.stack[1] = conf
    lg.debug("Loaded config: %s", app.conf['appname'])
@leip.hook('run')
def run_kea(app):
    """Main run hook: generate command lines and fire them.

    Iterates the expanded command lines from
    basic_command_line_generator, enriches each job info, runs the
    pre_fire/post_fire hooks around the selected executor, then runs
    the executor's finish step and the post_run hook.
    """
    lg.debug("Start Kea run")
    executor_name = 'simple'
    if app.kea_args.executor:
        executor_name = app.kea_args.executor
    lg.info("loading executor %s", executor_name)
    executor = app.executors[executor_name](app)
    all_info = []
    for info in basic_command_line_generator(app):
        info['executable'] = app.conf['executable']
        info['kea_executable'] = app.conf['kea_executable']
        info['kea_arg_prefix'] = app.conf['arg_prefix']
        info['app_name'] = app.conf['appname']
        info['app_version'] = app.conf.get('version', 'unknown')
        all_info.append(info)
        info['executor'] = executor_name
        cl = info['cl']
        lg.debug("command line arguments: %s", " ".join(cl))
        if app.conf.get('command_echo') and not \
                executor.interrupted:
            # parenthesized print works on both Python 2 and 3
            # (was a Py2-only print statement)
            print(info['executable'] + " " + " ".join(cl))
        info['kea_args'] = " ".join(app.kea_clargs)
        info['cwd'] = os.getcwd()
        info['full_cl'] = " ".join(app.original_args)
        app.run_hook('pre_fire', info)
        if info.get('skip'):
            lg.debug("Skipping firing")
            continue
        executor.fire(info)
        app.run_hook('post_fire', info)
    executor.finish()
    app.run_hook('post_run', all_info)
@leip.hook('finish')
def kea_finish(app):
    """Kea 'finish' hook: delegate to the mad2 finish hook."""
    kea.mad.finish()
@leip.hook('prepare')
def find_executable(app):
    """Locate the wrapped tool's real executable via `which -a`,
    skipping the entry that is this wrapper itself."""
    lg.debug("find executable location")
    if 'executable' in app.conf:
        return
    this = sys.argv[0]
    P = sp.Popen(['which', '-a', app.conf['appname']], stdout=sp.PIPE)
    out, err = P.communicate()
    # NOTE(review): no break after a hit, so the *last* non-wrapper
    # match wins; PATH order would normally prefer the first - confirm
    # this is intended.  samefile also raises if a listed path vanished.
    for line in out.strip().split("\n"):
        if os.path.samefile(line, this):
            # this is the called executable wrapper - ignore
            continue
        else:
            app.conf['executable'] = line.strip()
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,071
|
pombreda/Kea
|
refs/heads/master
|
/kea/utils.py
|
import copy
import logging
import os
import subprocess as sp
import sys
import textwrap
from fantail import Fantail
from termcolor import cprint
from leip import set_local_config
lg = logging.getLogger(__name__)
def message(cat, message, *args):
    """Print a colored, word-wrapped 'Kea/<category>' console message.

    *cat* selects the color ('er*'->red, 'wa*'->yellow, 'in*'->green,
    anything else blue); 'in*' messages are suppressed unless the
    module logger is below WARNING.  Extra *args are interpolated with
    str.format.
    """
    if len(args) > 0:
        message = message.format(*args)

    current_loglevel = logging.getLogger(__name__).getEffectiveLevel()
    if current_loglevel >= logging.WARNING and \
            cat.lower()[:2] == 'in':
        return

    # collapse internal whitespace so textwrap can re-flow cleanly
    message = " ".join(message.split())

    color = {'er': 'red',
             'wa': 'yellow',
             'in': 'green',
             }.get(cat.lower()[:2], 'blue')

    cprint('Kea', 'cyan', end="/")
    cprint(cat, color, end=': ')
    for line in textwrap.wrap(message):
        # parenthesized print works on both Python 2 and 3
        # (was a Py2-only print statement)
        print(" " + line)
def get_tool_conf(app, name, version='default', subcommand=None):
    """Assemble the effective configuration for tool *name*.

    Layering order (later wins): the 'default' group, the tool's own
    group (if any), the tool config itself, the selected version's
    config, and finally the subcommand's config.  Returns an empty
    Fantail when the tool is unknown.
    """
    data = copy.copy(app.conf['group.default'])

    if name not in app.conf['app']:
        return Fantail()

    tool_data = copy.copy(app.conf['app.{}'.format(name)])

    group = tool_data.get('group')
    if group is not None:
        group_data = app.conf['group.{}'.format(group)]
        if group_data:
            data.update(group_data)

    data.update(tool_data)

    # BUG FIX: was `version is 'default'` - an identity comparison
    # against a string literal that only works by accident of CPython
    # interning; use equality.
    if version == 'default':
        version = tool_data.get('default_version', None)

    if (version is not None) and (version not in tool_data['versions']):
        # NOTE(review): `candidates` is computed but never used -
        # partial version matching appears unfinished.
        candidates = []
        for v in tool_data['versions']:
            fullv = tool_data['versions'][v]['version']
            if v in fullv:
                candidates.append(v)

    if version is not None:
        version_data = tool_data['versions.{}'.format(version)]
        data.update(version_data)
        data['version_key'] = version

    if ('subcommand' not in data) or \
            (subcommand is None) or \
            (subcommand not in data['subcommand']):
        return data

    subcommand_data = data['subcommand.{}'.format(subcommand)]
    data.update(subcommand_data)
    return data
def is_kea(fname):
    """Heuristically decide whether *fname* is a Kea wrapper script.

    Inspects the first 1000 bytes: must be a python shebang script and
    either reference the Kea entry point or import Kea.
    """
    with open(fname) as handle:
        start = handle.read(1000)
        first_line = start.strip().split("\n")[0]
        if not first_line.startswith('#!'):
            lg.debug(" - not a shell script - not kea")
            return False
        if 'python' not in first_line:
            lg.debug(" - not a python script - not kea")
            return False
        if 'load_entry_point' in start and 'Kea==' in start:
            lg.debug(" - looks like a link to the kea entry point script - kea")
            return True
        if 'import Kea' in start or 'from Kea import' in start:
            lg.debug(" - looks like custom Kea script - kea")
            return True
        lg.debug(" - does not look like a kea script")
        return False
def find_executable(name):
    """Yield absolute paths of real (non-Kea) executables named *name*.

    A direct path to an executable file is yielded as-is; otherwise
    `which -a` is consulted and Kea wrapper scripts are filtered out
    via is_kea().  Raises IOError when *name* contains a '/' but is not
    an executable file.
    """
    # check if this is a single executable:
    if os.path.isfile(name) and os.access(name, os.X_OK):
        executable = name
        name = os.path.basename(executable)
        yield os.path.abspath(executable)
    else:
        # no? try to use the 'which' tool
        # no '/' allowed anymore
        if '/' in name:
            raise IOError(name)
        P = sp.Popen(['which', '-a', name], stdout=sp.PIPE)
        out, err = P.communicate()
        for line in out.strip().split("\n"):
            lg.debug("check %s", line)
            if not is_kea(line):
                lg.debug("%s is not a kea file", line)
                yield os.path.abspath(line)
def create_kea_link(app, name):
    """(Re)create a symlink named *name* in the configured bin path,
    pointing at the currently running Kea executable."""
    linkpath = os.path.expanduser(
        os.path.join(app.conf['bin_path'], name))
    lg.debug("checking: %s", linkpath)
    if os.path.lexists(linkpath):
        # remove a stale link (or file) before re-creating it
        lg.debug("path exists: %s", linkpath)
        os.unlink(linkpath)
    keapath = sys.argv[0]
    lg.info("creating link from %s", linkpath)
    lg.info(" to: %s", keapath)
    os.symlink(keapath, linkpath)
def register_executable(app, name, executable, version, is_default=None):
    """
    Register an executable as a version of tool *name*.

    Reuses the existing key when the executable is already registered;
    otherwise the first free single-character key is taken.  When
    *is_default* is None it is inferred: the first registered version
    of a tool becomes the default.
    """
    allversions = list('abcdefghijklmnopqrstuvwxyz123456789')
    is_first_version = True
    version_key = 'a'

    versions_key = 'app.{}.versions'.format(name)
    # `in` instead of the Py2-only dict.has_key()
    if versions_key in app.conf:
        is_first_version = False
        for k in app.conf[versions_key]:
            vinf = app.conf['app.{}.versions.{}'
                            .format(name, k)]
            if vinf['executable'] == executable:
                lg.warning("Executable is already registered - overwriting")
                version_key = k
                break
            # registered - we do not want to use this key
            allversions.remove(k)
        else:
            # BUG FIX: picking a fresh key only when no existing entry
            # matched; previously this assignment ran unconditionally,
            # discarding the reused key found above.
            version_key = allversions[0]

    if is_default is None:
        # BUG FIX: was `is_default == False`, which never matched the
        # default argument (None), so the auto-default logic never ran.
        if is_first_version:
            lg.debug("First version of %s - setting to default", name)
            is_default = True
        else:
            lg.debug("Other version of %s present - not setting default", name)
            is_default = False

    lg.warning("register %s - %s - %s - %s", name, executable,
               version_key, version)

    if is_default:
        lg.warning("Set version %s as default", version_key)
        set_local_config(app, 'app.{}.default_version'.format(name),
                         version_key)

    basekey = 'app.{}.versions.{}'.format(name, version_key)
    lg.debug("register to: %s", basekey)
    set_local_config(app, '{}.executable'.format(basekey), executable)
    set_local_config(app, '{}.version'.format(basekey), version)
    create_kea_link(app, name)
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,072
|
pombreda/Kea
|
refs/heads/master
|
/kea/plugin/filefind.py
|
import copy
import logging
import sys
import leip
from mad2.recrender import recrender
import kea.files
lg = logging.getLogger(__name__)
lg.setLevel(logging.DEBUG)
MADAPP = None
@leip.hook('pre_fire')
def hook_pre_run(app, info):
    """Resolve the files involved in this run from the 'filefind' config.

    Each config entry may locate its file positionally ('position'),
    by command-line flag ('flag'), via a redirected stream ('pipe'),
    and/or by rendering a template ('render').  Found files are
    registered on *info* with a category (input/output/used).
    """
    ffc = app.conf.get('filefind')
    if not ffc:
        return
    #determine category
    def get_category(finf):
        # NOTE: reads `name` from the enclosing loops below - only
        # valid while those loops are iterating.
        if 'category' in finf:
            return finf['category']
        elif name.startswith('input'):
            return 'input'
        elif name.startswith('output'):
            return 'output'
        else:
            return 'used'
    processed = []
    #find all files - except of type render
    for name in ffc:
        finf = ffc[name]
        if 'position' in finf:
            # file name taken from a fixed command-line position
            pos = finf['position']
            if len(info['cl']) <= pos:
                lg.info("Cannot assign file %s - cl too short", name)
                continue
            filename = info['cl'][pos]
        elif 'flag' in finf:
            # file name follows a command-line flag
            filename = kea.files.flag_find(info['cl'], finf['flag'])
        elif 'pipe' in finf:
            # file is a redirected stream (stdin/stdout/stderr)
            pipe = finf['pipe']
            lg.debug("output is send to %s", pipe)
            filename = info.get('{}_file'.format(pipe))
            if filename is None:
                lg.warning("Tool output is send to {}".format(pipe))
                lg.warning("Cannot capture provenance data")
                lg.warning("maybe use: {0}-e / {0}-o".format(
                    app.conf['arg_prefix']))
                continue
        else:
            continue
        if 'render' in finf:
            # derive the final name by rendering a template around the
            # file found above
            template = finf['render']
            filename = recrender(template, {'this' : filename})
        processed.append(name)
        if not filename is None:
            kea.files.register_file(
                info, name, get_category(finf), filename)
    #find all files with only a render field
    for name in ffc:
        finf = ffc[name]
        if name in processed: continue
        if not 'render' in finf: continue
        template = finf['render']
        filename = recrender(template, info)
        if '{' in filename:
            # unresolved placeholders remain - cannot register
            lg.warning("Cannot render file %s - '%s'", name, template)
            continue
        kea.files.register_file(
            info, name, get_category(finf), filename)
@leip.hook('post_fire', 1)
def check_sha1sum(app, info):
    """After a run, (re)compute the sha1 of every registered file."""
    if 'files' not in info:
        return
    # hoisted out of the loop: the import is loop-invariant and was
    # previously re-executed for every file
    from mad2.hash import get_sha1sum_mad
    for f in info['files']:
        mf = info['files'][f]['madfile']
        get_sha1sum_mad(mf)
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,073
|
pombreda/Kea
|
refs/heads/master
|
/kea/cli.py
|
import os
import copy
import sys
import leip
from kea import Kea
def dispatch():
    """
    Run the MadMax app.

    Builds the appropriate application - the plain Leip 'kea' app when
    invoked directly (or via a '++subcommand'), otherwise a Kea tool
    wrapper - and then runs it.
    """
    kea_conf = leip.get_config('kea')
    prefix = kea_conf['arg_prefix']
    thisapp = os.path.basename(sys.argv[0])
    if thisapp == 'kea':
        #calling the kea tool directly:
        app = leip.app(name='kea')
    else:
        #calling a tool that links to kea - Kea wrapper mode:
        # if prefix = '+', the first argument starts with '++'
        if len(sys.argv) > 1 and sys.argv[1][:2] == prefix + prefix:
            cmd = sys.argv[1][2:]
            #replace sys.argv &
            sys.argv = ['kea', cmd, '-a', thisapp] + sys.argv[2:]
            app = leip.app(name='kea')
        else:
            app = Kea()
    # BUG FIX: run only after the app has been constructed - app.run()
    # previously appeared before `app` was bound, raising NameError.
    app.run()
|
{"/kea/files.py": ["/kea/mad.py", "/kea/utils.py"], "/kea/plugin/provenance.py": ["/kea/mad.py"], "/kea/plugin/filefind.py": ["/kea/files.py"], "/kea/cli.py": ["/kea/__init__.py"]}
|
32,084
|
devshree07/FoodFrenzy
|
refs/heads/master
|
/foodspark/apps.py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class FoodsparkConfig(AppConfig):
    """Django application configuration for the 'foodspark' app."""
    name = 'foodspark'
|
{"/foodspark/admin.py": ["/foodspark/models.py"], "/foodspark/views.py": ["/foodspark/models.py"], "/foodspark/urls.py": ["/foodspark/views.py"]}
|
32,085
|
devshree07/FoodFrenzy
|
refs/heads/master
|
/foodspark/admin.py
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose the core foodspark models in the Django admin interface.
admin.site.register(Customer)
admin.site.register(Restaurant)
admin.site.register(FoodItem)
admin.site.register(Order)
admin.site.register(Cart)
{"/foodspark/admin.py": ["/foodspark/models.py"], "/foodspark/views.py": ["/foodspark/models.py"], "/foodspark/urls.py": ["/foodspark/views.py"]}
|
32,086
|
devshree07/FoodFrenzy
|
refs/heads/master
|
/foodspark/templates/foodspark/location.py
|
import requests

# NOTE(security): this API key is committed to source control - it
# should be revoked and supplied via configuration/environment instead.
api_key = 'AIzaSyDqJGSCO-ZKrbtBQcLflxwqG3zPcP-B2vY'

# BUG FIX: the previous URL pointed at the Maps *JavaScript* loader
# (maps/api/js), which returns a script, not JSON - so
# response.json()['results'] could never work.  Geocoding requests must
# go to the Geocoding web service and pass the key as a parameter.
geo_url = "https://maps.googleapis.com/maps/api/geocode/json"

my_address = {'address': 'Ahmedabad,Gujarat, India',
              'language': 'en',
              'key': api_key}

response = requests.get(geo_url, params=my_address)
results = response.json()['results']
print(results)

my_geo = results[0]['geometry']['location']
print("Longitude:", my_geo['lng'], "\n", "Latitude:", my_geo['lat'])
|
{"/foodspark/admin.py": ["/foodspark/models.py"], "/foodspark/views.py": ["/foodspark/models.py"], "/foodspark/urls.py": ["/foodspark/views.py"]}
|
32,087
|
devshree07/FoodFrenzy
|
refs/heads/master
|
/foodspark/views.py
|
from django.shortcuts import get_object_or_404,render, redirect
from django.http import HttpResponse
from .models import *
import json
from django.views.decorators import csrf
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib import messages
from django.core.exceptions import *
import datetime
from django.shortcuts import render
from django.contrib.auth import authenticate, login as auth_login
from django.conf import settings
from .models import Transaction
from .paytm import generate_checksum, verify_checksum
from django.views.decorators.csrf import csrf_exempt
from django.utils.datastructures import MultiValueDictKeyError
from geopy.distance import geodesic
from geopy.geocoders import Nominatim
import numpy as np
import math, random
import smtplib
import statistics
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
def initiate_payment(request):
    """Start a Paytm payment for the logged-in customer.

    GET: render the payment page with the amount stored in Temp.
    POST: re-authenticate the customer, create a Transaction, compute
    the Paytm checksum, and render the auto-submitting redirect page.
    """
    if request.method == "GET":
        user1=Customer.objects.get(email=request.session['id'])
        t = Temp.objects.get(custid_id=user1.email)
        amount=t.amount
        return render(request,'foodspark/pay.html',{'amount':amount})
    # NOTE(review): the bare except blocks below swallow *all* errors
    # (not just bad credentials), and `email` is only bound on the
    # customer POST path - Transaction.objects.create below would raise
    # NameError otherwise.
    try:
        if 'id' in request.session.keys():
            if request.session['type'] == 'customer':
                email = request.POST['username']
                password = request.POST['password']
                '''
                print('hi')
                print(amount)
                print("hi")'''
                try:
                    customer = Customer.objects.get(email=email)
                    if customer.check_password(password):
                        request.session['id'] = email
                        request.session['type'] = 'customer'
                        #return redirect('/')
                    else:
                        messages.error(request,'Password Incorrect')
                        return render(request, 'foodspark/pay.html', context={'error': 'Wrong Account Details or amount'})
                except:
                    return redirect('/')
        '''
        user = authenticate(request, username=email, password=password)
        if user is None:
            raise ValueError
        auth_login(request=request, user=user)'''
    except:
        return render(request, 'foodspark/pay.html', context={'error': 'Wrong Account Details or amount'})
    user1=Customer.objects.get(email=request.session['id'])
    t = Temp.objects.get(custid_id=user1.email)
    amount=t.amount
    transaction = Transaction.objects.create(made_by=email, amount=amount)
    transaction.save()
    merchant_key = settings.PAYTM_SECRET_KEY
    # parameters required by the Paytm checksum/redirect protocol
    params = (
        ('MID', settings.PAYTM_MERCHANT_ID),
        ('ORDER_ID', str(transaction.order_id)),
        ('CUST_ID', str(transaction.made_by)),
        ('TXN_AMOUNT', str(transaction.amount)),
        ('CHANNEL_ID', settings.PAYTM_CHANNEL_ID),
        ('WEBSITE', settings.PAYTM_WEBSITE),
        # ('EMAIL', request.user.email),
        # ('MOBILE_N0', '9911223388'),
        ('INDUSTRY_TYPE_ID', settings.PAYTM_INDUSTRY_TYPE_ID),
        ('CALLBACK_URL', 'http://127.0.0.1:8000/callback/'),
        # ('PAYMENT_MODE_ONLY', 'NO'),
    )
    paytm_params = dict(params)
    checksum = generate_checksum(paytm_params, merchant_key)
    transaction.checksum = checksum
    transaction.save()
    paytm_params['CHECKSUMHASH'] = checksum
    print('SENT: ', checksum)
    return render(request, 'foodspark/redirect.html', context=paytm_params)
@csrf_exempt
def callback(request):
    """Paytm payment callback: verify the checksum and report the
    transaction status on the callback page."""
    if request.method == 'POST':
        form = request.POST
        response_dict = {}
        for i in form.keys():
            response_dict[i] = form[i]
            if i == 'CHECKSUMHASH':
                checksum = form[i]
        merchant_key = settings.PAYTM_SECRET_KEY
        # NOTE(review): `checksum` is unbound if the POST lacks a
        # CHECKSUMHASH field - this would raise NameError.  Non-POST
        # requests fall through and return None.
        verify = verify_checksum(response_dict, merchant_key, checksum)
        if verify:
            if response_dict['RESPCODE'] == '01':
                print('order successful')
            else:
                print('order was not successful because' + response_dict['RESPMSG'])
        return render(request, 'foodspark/callback.html', {'response': response_dict})
def home(request):
    """Render the role-specific home page.

    customer -> top-5 food items + restaurant list; deliveryboy ->
    all undelivered orders; restaurant -> its own undelivered orders.
    Anonymous users get the login page.
    """
    if 'id' in request.session.keys():
        if request.session['type'] == 'customer':
            foodlist = FoodItem.objects.all().order_by('-ordercount')[:5]
            restaurants = Restaurant.objects.order_by('name')
            context = {
                'customer':Customer.objects.get(email=request.session['id']),
                'restaurants' : restaurants,
                'foodlist' : foodlist,
                'count' : 1
            }
            return render(request,'foodspark/userhome.html',context)
        elif request.session['type'] == 'deliveryboy':
            query = Order.objects.order_by('-pk').all()
            # dic maps each pending order to its {fooditem: qty} dict
            dic = {}
            customer = {}
            for x in query:
                dic2 = {}
                if(x.deliverystatus == 'd'):
                    continue
                x.calamount()
                for i,j in zip(x.getfooditems(),x.getqty()):
                    dic2[i] = j
                dic[x] = dic2
                customer[x] = x.customer
            print("Orders")
            print(dic)
            context = {
                'foods' : dic,
                'deliveryboy' : DeliveryBoy.objects.get(email=request.session['id']),
                'customer': customer
            }
            return render(request,'foodspark/dbhome.html',context)
        elif request.session['type'] == 'restaurant':
            restaurant = Restaurant.objects.get(email=request.session['id'])
            query = Order.objects.order_by('-pk').all()
            dic = {}
            customer = {}
            for x in query:
                # only this restaurant's pending orders
                if x.restaurant_id == restaurant.email:
                    dic2 = {}
                    if(x.deliverystatus == 'd'):
                        continue
                    x.calamount()
                    for i,j in zip(x.getfooditems(),x.getqty()):
                        dic2[i] = j
                    dic[x] = dic2
                    customer[x] = x.customer
            context = {
                'foods' : dic,
                'customer' : customer,
                'restaurant' : restaurant,
            }
            return render(request,'foodspark/resthome.html',context)
        # NOTE(review): an unrecognised session type falls through and
        # returns None, which Django rejects - confirm whether a
        # login render/redirect is intended here.
    else:
        return render(request,"foodspark/login.html")
@ensure_csrf_cookie
def login(request):
    """Authenticate as customer, restaurant or delivery boy (tried in
    that order) and store the identity in the session."""
    print("hello")
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        # NOTE(review): the bare except blocks below also hide
        # unexpected errors, not just "no such account".
        try:
            customer = Customer.objects.get(email=email)
            if customer.check_password(password):
                request.session['id'] = email
                request.session['type'] = 'customer'
                return redirect('/')
            else:
                messages.error(request,'Password Incorrect')
                return redirect('/')
        except:
            try:
                restaurant = get_object_or_404(Restaurant, email=email)
                if restaurant.check_password(password):
                    request.session['id'] = email
                    request.session['type'] = 'restaurant'
                    return redirect('/')
                else:
                    messages.error(request,'Password Incorrect')
                    return redirect('/')
            except:
                try:
                    deliveryboy = DeliveryBoy.objects.get(email=email)
                    if deliveryboy.check_password(password):
                        request.session['id'] = email
                        request.session['type'] = 'deliveryboy'
                        return redirect('/')
                    else:
                        messages.error(request,'Password Incorrect')
                        return redirect('/')
                except:
                    return redirect('/')
    elif request.method == 'GET':
        return render(request,'foodspark/login.html')
def signup(request):
    """Create a Customer / Restaurant / DeliveryBoy account.

    Geocodes the supplied address (stored for customers and
    restaurants) and logs the new account into the session.
    """
    if request.method == 'POST':
        email = request.POST.get('email')
        name = request.POST.get('name')
        phone = request.POST.get('phone')
        password = request.POST.get('password')
        address = request.POST.get('address')
        usertype = request.POST.get('usertype')
        # NOTE(review): Nominatim() without a user_agent is deprecated,
        # and geocode() may return None for an unknown address, making
        # loca.address raise AttributeError.
        geolocator = Nominatim()
        loca = geolocator.geocode(address)
        print(loca.address)
        if usertype == 'Customer':
            user = Customer(name = name, email = email, phone = phone, address = address)
            user.set_password(user.make_password(password))
            user.save()
            request.session['id'] = email
            request.session['type'] = 'customer'
            #customer = Customer.objects.get(email=request.session['id'])
            # store the geocoded (lat, lon) for the customer
            l1=(loca.latitude,loca.longitude)
            t=Locuser()
            t.custid = email
            t.address_user = l1
            t.save()
        elif usertype == 'Restaurant':
            rest = Restaurant.objects.all()
            test=0
            # reject duplicate restaurant names
            for x in rest:
                if(x.name == name):
                    messages.error(request,"Restaurant with this name already exists")
                    test=1
                    return render(request,'foodspark/login.html')
            if(test==0):
                user = Restaurant(name= name, email = email, phone = phone, address = address)
                user.set_password(user.make_password(password))
                user.save()
                request.session['id'] = email
                request.session['type'] = 'restaurant'
                l2=(loca.latitude,loca.longitude)
                t=Locrest()
                t.restid=email
                t.address_rest=l2
                t.save()
        elif usertype == 'DeliveryBoy':
            user = DeliveryBoy(name= name, email = email, phone = phone, address = address)
            user.set_password(user.make_password(password))
            user.save()
            request.session['id'] = email
            request.session['type'] = 'deliveryboy'
        return redirect('/')
    if request.method == 'GET':
        return render(request,'foodspark/login.html')
def logout(request):
    """Drop the session identity (if present) and show the login page."""
    try:
        del request.session['id']
        del request.session['type']
        request.session.modified = True
    except KeyError:
        pass
    return render(request, 'foodspark/login.html')
def editDetails(request):
    """Update profile details for the logged-in customer, delivery boy
    or restaurant; empty POST fields leave the stored value unchanged.
    Customer address changes are re-geocoded into Locuser."""
    if request.method == 'POST':
        if request.session['type'] == 'customer':
            customer = Customer.objects.get(email=request.session['id'])
            context = {
                'customer':customer,
            }
            name = request.POST.get('name')
            phone = request.POST.get('phone')
            address = request.POST.get('address')
            city = request.POST.get('city')
            password = request.POST.get('password')
            print("ADDRESS")
            print(address)
            if name!="":
                customer.name = name
            if address!="":
                customer.address = address
                # re-geocode the new address and update the stored
                # (lat, lon); see NOTE on Nominatim usage in signup()
                geolocator = Nominatim()
                loca = geolocator.geocode(address)
                print(loca.address)
                l1=(loca.latitude,loca.longitude)
                t=Locuser.objects.get(custid=request.session['id'])
                t.address_user = l1
                t.save()
            if city!="":
                customer.city = city
            if phone!="":
                customer.phone = phone
            if password!="":
                customer.set_password(customer.make_password(password))
            customer.save()
            messages.success(request,'Successfully saved :)')
            return render(request,'foodspark/userdetails.html',context)
        elif request.session['type'] == 'deliveryboy':
            deliveryboy = DeliveryBoy.objects.get(email=request.session['id'])
            context = {
                'deliveryboy':deliveryboy,
            }
            name = request.POST.get('name')
            phone = request.POST.get('phone')
            address = request.POST.get('address')
            city = request.POST.get('city')
            if name!="":
                deliveryboy.name = name
            if address!="":
                deliveryboy.address = address
            if city!="":
                deliveryboy.city = city
            if phone!="":
                deliveryboy.phone = phone
            deliveryboy.save()
            messages.success(request,'Successfully saved :)')
            return render(request,'foodspark/dbdetails.html',context)
        elif request.session['type'] == 'restaurant':
            restaurant = Restaurant.objects.get(email=request.session['id'])
            context = {
                'restaurant' : restaurant,
            }
            name = request.POST.get('name')
            phone = request.POST.get('phone')
            address = request.POST.get('address')
            res_type = request.POST.get('res_type')
            cuisine = request.POST.get('cuisine')
            city = request.POST.get('city')
            if phone!="":
                restaurant.phone = phone
            if address!="":
                restaurant.address = address
            if name!="":
                restaurant.name = name
            # restaurant.res_type = res_type
            if cuisine!="":
                restaurant.cuisine =cuisine
            if city!="":
                restaurant.city = city
            restaurant.save()
            messages.success(request,'Successfully saved :)')
            return render(request,'foodspark/restdetails.html',context)
    elif request.method == 'GET':
        return render(request,'foodspark/details.html')
def changePassword(request):
    """Change the logged-in account's password after verifying the old one.

    POST: checks ``oldPassword`` against the stored hash; on success stores
    the hash of ``newPassword`` (via the model's make_password) and saves.
    GET: just renders the change-password form.
    """
    if request.method == "POST":
        # All three account models expose the same check/make/set_password API,
        # so a single code path driven by the session's account type suffices.
        account_models = {
            'customer': Customer,
            'deliveryboy': DeliveryBoy,
            'restaurant': Restaurant,
        }
        model = account_models.get(request.session['type'])
        if model is not None:
            account = model.objects.get(email=request.session['id'])
            old_pw = request.POST.get('oldPassword')
            new_pw = request.POST.get('newPassword')
            if account.check_password(old_pw):
                account.set_password(account.make_password(new_pw))
                messages.success(request,"Password Successfully Changed")
                account.save()
            else:
                messages.error(request,"Old password is incorrect")
            return render(request,'foodspark/changePassword.html')
    elif request.method == 'GET':
        return render(request,'foodspark/changePassword.html')
def _rests_serving(foods):
    """Distinct restaurants owning any of the given food items, first-seen order."""
    rests = []
    for f in foods:
        if f.resid not in rests:
            rests.append(f.resid)
    return rests

def search(request):
    """Search restaurants by name, cuisine, food item, city, or all of these.

    Reads ``search`` (the query string) and ``search_param`` (the category)
    from POST and renders the user home page with the matching restaurants.
    """
    searchkey = request.POST.get('search')
    searchtype = request.POST.get('search_param')
    # BUG FIX: the original left `restaurants` undefined for an unrecognised
    # search_param, raising NameError at the context build below.
    restaurants = []
    if searchtype == 'Restaurant':
        restaurants = Restaurant.objects.filter(name__contains=searchkey)
    elif searchtype == 'Cuisine':
        restaurants = _rests_serving(FoodItem.objects.filter(cuisine__contains=searchkey))
    elif searchtype == 'Food':
        restaurants = _rests_serving(FoodItem.objects.filter(name__contains=searchkey))
    elif searchtype == 'City':
        restaurants = Restaurant.objects.filter(city__contains=searchkey)
    elif searchtype == 'All':
        # Union of the four searches, preserving the original accumulation
        # order: name matches, then food-name owners, then cuisine owners,
        # then city matches.
        restaurants = list(Restaurant.objects.filter(name__contains=searchkey))
        for food in FoodItem.objects.filter(name__contains=searchkey):
            if food.resid not in restaurants:
                restaurants.append(food.resid)
        for food in FoodItem.objects.filter(cuisine__contains=searchkey):
            if food.resid not in restaurants:
                restaurants.append(food.resid)
        for rest in Restaurant.objects.filter(city__contains=searchkey):
            if rest not in restaurants:
                restaurants.append(rest)
    context = {
        'customer':Customer.objects.get(email=request.session['id']),
        'rest' : restaurants,
        'searchkey' : searchkey,
        'count' : 0
    }
    return render(request,'foodspark/userhome.html',context)
def restaurantOrderHistory(request):
    """Render the logged-in restaurant's non-pending orders.

    Builds ``dic`` mapping each Order to a {FoodItem: qty-string} dict and
    ``customer`` mapping each Order to its Customer, newest orders first.
    """
    restaurant = Restaurant.objects.get(email=request.session['id'])
    # All orders, newest first; filtered to this restaurant in the loop below.
    query = Order.objects.order_by('-pk').all()
    dic = {}
    customer = {}
    for x in query:
        if x.restaurant_id == restaurant.email:
            dic2 = {}
            # Pending orders are excluded from the history view.
            if(x.deliverystatus == 'p'):
                continue
            # Recomputes x.amount in memory (calamount does not save).
            x.calamount()
            for i,j in zip(x.getfooditems(),x.getqty()):
                dic2[i] = j
            dic[x] = dic2
            customer[x] = x.customer
    context = {
        'foods' : dic,
        'customer' : customer,
        'restaurant' : restaurant,
    }
    return render(request,'foodspark/resthistory.html',context)
def amt(request,amount):
    """Record the pending cart total for the current customer in Temp.

    Any previous Temp row for this customer is discarded first, so at most
    one total exists per customer at a time.
    """
    current = Customer.objects.get(email=request.session['id'])
    Temp.objects.filter(custid_id=current.email).delete()
    Temp(custid_id=current.email, amount=amount).save()
def ratings(request):
    """Show all ratings/reviews left for the logged-in restaurant.

    Builds ``customer``: index -> ["Name: ...", "Rating: ...", "Review: ..."]
    in queryset order, matching what the template iterates over.
    """
    restaurant = Restaurant.objects.get(email=request.session['id'])
    all_ratings = Ratings.objects.filter(restid=restaurant.email)
    customer = {}
    # Cleanup: dropped the unused dic1/dic2/dic3/list1 locals and the
    # commented-out accumulation code from the original.
    for i, entry in enumerate(all_ratings):
        reviewer = Customer.objects.get(email=entry.custid_id)
        customer.setdefault(i, []).append('Name: ' + reviewer.name)
        customer.setdefault(i, []).append('Rating: ' + str(entry.rating))
        customer.setdefault(i, []).append('Review: ' + str(entry.review))
    context = {
        'customer' : customer,
        'restaurant' : restaurant
    }
    return render(request,'foodspark/ratings.html',context)
def restprofile(request):
    """Render the logged-in restaurant's own profile with its menu
    grouped by cuisine ({cuisine: [FoodItem, ...]})."""
    restaurant = Restaurant.objects.get(email=request.session['id'])
    menu = {}
    for item in FoodItem.objects.all():
        if item.resid == restaurant:
            menu.setdefault(item.cuisine, []).append(item)
    context = {
        'restaurant' : restaurant,
        'menu' : menu
    }
    return render(request,'foodspark/restprofile.html',context)
def restview(request,restname):
    """Customer-facing restaurant page: menu by cuisine, top-5 recommended
    items, distance from the customer, and mean rating.

    Redirects to '/' when not logged in; returns a plain HttpResponse when
    no restaurant (or location row) matches.
    """
    if 'id' in request.session.keys():
        try:
            customer = Customer.objects.get(email=request.session['id'])
            print(customer.email)
            restaurant =Restaurant.objects.get(name=restname)
            print(restaurant.name)
            foodall = FoodItem.objects.all()
            # Top 5 most-ordered items site-wide; filtered to this restaurant below.
            recall = FoodItem.objects.all().order_by('-ordercount')[:5]
            add1=Locuser.objects.get(custid=request.session['id'])
            # SECURITY NOTE(review): eval() on a DB-stored string; the stored
            # value is presumably a "(lat, lon)" tuple repr — safer to parse
            # explicitly (ast.literal_eval). TODO confirm and replace.
            add_user=eval(add1.address_user)
            add2=Locrest.objects.get(restid=restaurant.email)
            add_rest=eval(add2.address_rest)
            distance=geodesic(add_user,add_rest).kilometers
            print(distance)
            speed=30
            # NOTE(review): eta (hours at 30 km/h) is computed but never added
            # to the context — dead unless a later change uses it.
            eta=str(float(distance/speed))
            ratings=Ratings.objects.filter(restid_id=restaurant.email)
            count=0
            sum1=0
            for x in ratings:
                sum1=sum1+x.rating
                count=count+1
            if count==0:
                mean_ratings='NA'
            else:
                mean_ratings=str(float(sum1/count))
            # Menu grouped by cuisine for this restaurant only.
            fooditems = {}
            for x in foodall:
                if x.resid.email == restaurant.email:
                    try:
                        fooditems[x.cuisine].append(x)
                    except KeyError:
                        fooditems[x.cuisine] = [x]
            recitems = []
            for x in recall:
                if x.resid.email == restaurant.email:
                    # NOTE: list.append never raises KeyError, so this
                    # except branch is unreachable.
                    try:
                        recitems.append(x.name)
                    except KeyError:
                        recitems = [x.name]
            print(recitems)
            context = {
                'customer' : customer,
                'restaurant': restaurant,
                'fooditems' : fooditems,
                'recitems' : recitems,
                'distance' : round(distance,2),
                'mean_ratings' : mean_ratings
            }
            return render(request,'foodspark/restview.html',context)
        except ObjectDoesNotExist:
            return HttpResponse("Sorry no restaurant with this name")
    else:
        return redirect('/')
def cart(request):
    """GET: show the current customer's cart grouped by restaurant.

    POST: convert the customer's cart rows into one Order (plus a pending
    DeliveryItem) per restaurant, emptying the cart.
    """
    if 'id' in request.session.keys():
        if request.method == 'GET':
            customer = Customer.objects.get(email=request.session['id'])
            cart = {}
            amount = 0
            for x in Cart.objects.all():
                if x.customer.email == customer.email:
                    amount = amount + x.fooditem.price * x.foodqty
                    # Group cart rows by owning restaurant.
                    try:
                        cart[x.fooditem.resid].append(x)
                    except KeyError:
                        cart[x.fooditem.resid] = [x]
            if not cart:
                messages.info(request,"Your cart is currently empty")
            context = {
                'customer': customer,
                'cart' : cart,
                'amount' : amount
            }
            return render(request,"foodspark/ordercart.html",context)
        elif request.method == 'POST':
            customer = Customer.objects.get(email=request.session['id'])
            # Per-restaurant comma-separated food pks / quantities.
            orders = {}
            ordersqty = {}
            for q in Cart.objects.all():
                # Hoisted: the original re-queried Customer on every iteration.
                if q.customer == customer:
                    try:
                        orders[q.fooditem.resid] = orders[q.fooditem.resid] + ',' + str(q.fooditem.pk)
                    except KeyError:
                        orders[q.fooditem.resid] = str(q.fooditem.pk)
                    try:
                        ordersqty[q.fooditem.resid] = ordersqty[q.fooditem.resid] + ',' + str(q.foodqty)
                    except KeyError:
                        ordersqty[q.fooditem.resid] = str(q.foodqty)
                    q.delete()
            # BUG FIX: the original zipped the two dicts' key iterators, which
            # only works by accident of shared insertion order; index both by
            # the same restaurant key instead.
            for resid in orders:
                o = Order(customer=customer,restaurant=resid,foodlist=orders[resid],foodqty=ordersqty[resid],ordertime=datetime.datetime.now(),deliverystatus='p')
                o.calamount()
                o.save()
                deli = DeliveryItem(deliverystatus='p', deliveryboy_id="", order_id_id=o.pk)
                deli.save()
            messages.success(request,"Payment Successfull :)")
            context = {
                'customer': customer
            }
            # BUG FIX: the original built this context but rendered without it.
            return render(request,"foodspark/ordercart.html",context)
    else:
        return render(request,"foodspark/login.html")
def details(request):
    """Render the account-details page matching the session's account type."""
    if 'id' in request.session.keys():
        # type -> (model, context key, template)
        dispatch = {
            'customer': (Customer, 'customer', 'foodspark/userdetails.html'),
            'restaurant': (Restaurant, 'restaurant', 'foodspark/restdetails.html'),
            'deliveryboy': (DeliveryBoy, 'deliveryboy', 'foodspark/dbdetails.html'),
        }
        entry = dispatch.get(request.session['type'])
        if entry is not None:
            model, key, template = entry
            return render(request, template, {key: model.objects.get(email=request.session['id'])})
    else:
        return render(request,"foodspark/login.html")
def history(request):
    """Customer order history split into pending ('p'/'a'/'o'), delivered
    ('d'), and rejected ('r') orders, with an ETA (minutes) per pending order.
    """
    if 'id' in request.session.keys():
        customer = Customer.objects.get(email=request.session['id'])
        query = Order.objects.order_by('-pk').all()
        # Order -> Restaurant and Order -> {FoodItem: qty} maps per status bucket.
        pending_rest = {}
        pending_items = {}
        reject_rest = {}
        reject_items = {}
        history_rest = {}
        history_items = {}
        eta={}
        #print(distance)
        for x in query:
            if x.customer == customer:
                if(x.deliverystatus == 'p' or x.deliverystatus == 'a' or x.deliverystatus == 'o'):
                    print("1")
                    dic2 = {}
                    x.calamount()
                    for i,j in zip(x.getfooditems(),x.getqty()):
                        dic2[i] = j
                    pending_items[x] = dic2
                    pending_rest[x] = x.restaurant
                    # NOTE(review): Locuser lookup is loop-invariant and could
                    # be hoisted; eval() on DB-stored coordinates — presumably
                    # a "(lat, lon)" repr — TODO confirm / use literal_eval.
                    add1=Locuser.objects.get(custid=request.session['id'])
                    add_user=eval(add1.address_user)
                    add2=Locrest.objects.get(restid=x.restaurant_id)
                    add_rest=eval(add2.address_rest)
                    distance=geodesic(add_user,add_rest).kilometers
                    # ETA in minutes assuming a fixed 30 km/h travel speed.
                    speed=30
                    eta[x]=str(round(float(distance/speed)*60,2))
                    print(eta)
                if(x.deliverystatus == 'd'):
                    dic2 = {}
                    x.calamount()
                    for i,j in zip(x.getfooditems(),x.getqty()):
                        dic2[i] = j
                    history_items[x] = dic2
                    history_rest[x] = x.restaurant
                if(x.deliverystatus == 'r'):
                    dic3 = {}
                    x.calamount()
                    for i,j in zip(x.getfooditems(),x.getqty()):
                        dic3[i] = j
                    reject_items[x] = dic3
                    reject_rest[x] = x.restaurant
        context = {
            'customer' : customer,
            'pending_items' : pending_items,
            'pending_rest' : pending_rest,
            'history_items' : history_items,
            'history_rest' : history_rest,
            'reject_items' : reject_items,
            'reject_rest' : reject_rest,
            'eta' : eta
        }
        return render(request,"foodspark/userhistory.html",context)
    else:
        return render(request,"foodspark/login.html")
def dbhistory(request):
    """Delivery-boy history: orders assigned to the logged-in boy that are
    past the pending/accepted stages ('o' or 'd')."""
    boy = DeliveryBoy.objects.get(email=request.session['id'])
    delivery=DeliveryItem.objects.filter(deliveryboy_id=boy.email)
    #query = Order.objects.order_by('-pk').all()
    print("DELIVERY")
    print(delivery)
    dic = {}
    customer = {}
    # NOTE(review): loop variable is a DeliveryItem, not a DeliveryBoy —
    # the name is misleading.
    for deliveryboy in delivery:
        print(deliveryboy.order_id_id)
        query1 = Order.objects.filter(id=deliveryboy.order_id_id)
        dic2 = {}
        for x in query1:
            dic2={}
            # Skip orders not yet on the way / delivered.
            if(x.deliverystatus == 'p' or x.deliverystatus == 'a'):
                continue
            x.calamount()
            for i,j in zip(x.getfooditems(),x.getqty()):
                dic2[i] = j
            dic[x] = dic2
            customer[x] = x.customer
    context = {
        'foods' : dic,
        'customer' : customer,
        'deliveryboy' : boy,
    }
    return render(request,"foodspark/dbhistory.html",context)
def recommendedRests():
    """Placeholder for a future restaurant-recommendation feature; no-op."""
    return None
def saveToCart(request):
    """Replace the current customer's cart with the quantities posted from a
    restaurant page, record the total via amt(), bump each item's ordercount,
    and render the cart page.
    """
    if 'id' in request.session.keys():
        customer = Customer.objects.get(email=request.session['id'])
        # BUG FIX: the original called Cart.objects.all().delete(), wiping
        # EVERY customer's cart; only this customer's rows should be replaced.
        Cart.objects.filter(customer=customer).delete()
        for x in FoodItem.objects.all():
            key = 'food' + str(x.pk)
            if key in request.POST.keys():
                if int(request.POST[key]) > 0:
                    Cart(customer=customer, fooditem=x, foodqty=request.POST[key]).save()
        cart = {}
        amount = 0
        for item in Cart.objects.filter(customer=customer):
            amount = amount + item.fooditem.price * item.foodqty
            # Group by owning restaurant for the template.
            try:
                cart[item.fooditem.resid].append(item)
            except KeyError:
                cart[item.fooditem.resid] = [item]
        if not cart:
            messages.info(request,"Your cart is currently empty")
        # Persist the pending total for the payment step.
        amt(request,amount)
        context = {
            'customer': customer,
            'cart' : cart,
            'amount' : amount
        }
        # Count each cart line toward the item's popularity.
        for rest, items in cart.items():
            for z in items:
                z.fooditem.ordercount = z.fooditem.ordercount + z.foodqty
                z.fooditem.save()
        return render(request,"foodspark/ordercart.html",context)
    else:
        return render(request,"foodspark/login.html")
def delivered(request):
    """Delivery boy marks an order and its DeliveryItem as delivered ('d')."""
    print("ORDER ID")
    if 'id' in request.session.keys() and request.session['type'] == 'deliveryboy':
        order = Order.objects.get(pk=request.POST['orderid1'])
        print("ORDER ID")
        print(order.id)
        tracking = DeliveryItem.objects.get(order_id_id=order.pk)
        for record in (order, tracking):
            record.deliverystatus = 'd'
            record.save()
        return redirect('/')
    else:
        return render(request,"foodspark/login.html")
def accepted(request):
    """Restaurant accepts an order: both Order and DeliveryItem move to 'a'."""
    if 'id' in request.session.keys() and request.session['type'] == 'restaurant':
        order = Order.objects.get(pk = request.POST['orderid'])
        print(order.pk)
        tracking = DeliveryItem.objects.get(order_id_id=order.pk)
        for record in (order, tracking):
            record.deliverystatus = 'a'
            record.save()
        return redirect('/')
    else:
        return render(request,"foodspark/login.html")
def declined(request):
    """Restaurant rejects an order: Order.deliverystatus set to 'r'."""
    if 'id' in request.session.keys() and request.session['type'] == 'restaurant':
        order = Order.objects.get(pk = request.POST['orderid'])
        print(order.pk)
        order.deliverystatus = 'r'
        order.save()
        # NOTE(review): unlike accepted(), the DeliveryItem is deliberately
        # left untouched (the update below is disabled) — confirm whether the
        # tracking row should also be marked 'r'.
        '''di = DeliveryItem.objects.get(order_id_id = order.pk)
        di.deliverystatus = 'r'
        di.save()'''
        return redirect('/')
    else:
        return render(request,"foodspark/login.html")
def decide(request):
    """Dispatch a restaurant's accept/decline form submission to accepted()
    or declined() based on which button was pressed."""
    if not request.POST:
        return render(request,"foodspark/userhome.html")
    if '_accept' in request.POST:
        accepted(request)
    elif '_decline' in request.POST:
        declined(request)
    return redirect('/')
def onway(request):
    """Advance an order's status (accepted -> on-way, otherwise -> delivered)
    on both the Order and its DeliveryItem, assigning the delivery boy.
    """
    if 'id' in request.session.keys() and request.session['type'] == 'deliveryboy':
        order = Order.objects.get(pk = request.POST.get('orderid',False))
        order.deliverystatus = 'o' if order.deliverystatus == 'a' else 'd'
        order.save()
        deliveryboy = DeliveryBoy.objects.get(email=request.session['id'])
        di = DeliveryItem.objects.get(order_id_id = order.pk)
        di.deliverystatus = 'o' if di.deliverystatus == 'a' else 'd'
        di.deliveryboy_id = deliveryboy.email
        # BUG FIX: the original called di.save() twice in a row.
        di.save()
        return redirect('/')
    else:
        return render(request,"foodspark/login.html")
def addfooditem(request):
    """Restaurant adds a new food item (always starter course) from the
    posted name/cuisine/price fields."""
    if 'id' in request.session.keys() and request.session['type'] == 'restaurant':
        owner = Restaurant.objects.get(email=request.session['id'])
        new_item = FoodItem(
            resid=owner,
            name=request.POST['name'],
            cuisine=request.POST['cuisine'],
            price=request.POST['price'],
            course='s',
            availability_time=datetime.datetime.now(),
        )
        new_item.save()
        return redirect('/restprofile/')
    else:
        return render(request,"foodspark/login.html")
def removefooditem(request):
    """Restaurant deletes a food item by primary key.

    Cleanup: the original fetched the Restaurant into an unused local.
    SECURITY NOTE(review): there is no check that the item belongs to the
    logged-in restaurant — any restaurant session can delete any pk. TODO.
    """
    if 'id' in request.session.keys() and request.session['type'] == 'restaurant':
        FoodItem.objects.get(pk=request.POST['foodid']).delete()
        return redirect('/restprofile/')
    else:
        return render(request,"foodspark/login.html")
def about(request):
    """Render the about page, with the logged-in account in context when a
    session exists (any type not 'restaurant'/'customer' is a delivery boy)."""
    if 'id' in request.session.keys():
        lookup = {'restaurant': Restaurant, 'customer': Customer}
        model = lookup.get(request.session['type'], DeliveryBoy)
        account = model.objects.get(email=request.session['id'])
        return render(request,"foodspark/about.html",{'user': account})
    else:
        return render(request,"foodspark/about.html")
def acceptDelivery(request):
    """Assign the logged-in delivery boy to an order's DeliveryItem.

    BUG FIX: the original assigned to DeliveryItem *class* attributes and
    called the unbound DeliveryItem.save(), which raises TypeError; work on
    a model instance instead (creating the row if it does not exist yet).
    """
    if 'id' in request.session.keys() and request.session['type'] == 'deliveryboy':
        deliveryboy = DeliveryBoy.objects.get(email=request.session['id'])
        order_id = Order.objects.get(pk = request.POST['orderid'])
        di, _created = DeliveryItem.objects.get_or_create(order_id=order_id)
        di.deliveryboy_id = deliveryboy.email
        di.save()
        context = {
            'deliveryboy': deliveryboy,
            'order_id' : order_id,
        }
        return render(request,"foodspark/dbdetails.html",context)
    else:
        return render(request,"foodspark/login.html")
def restrating(request,restname):
    """Create or update the logged-in customer's rating/review for a
    restaurant (one Ratings row per customer/restaurant pair), then redirect
    home. Non-customers get the login page.
    """
    if 'id' in request.session.keys() and request.session['type'] == 'customer':
        custid = Customer.objects.get(email=request.session['id'])
        restaurant = Restaurant.objects.get(name=restname)
        existing = Ratings.objects.filter(restid_id=restaurant.email, custid_id=custid.email)
        # Idiom fix: .exists() issues a cheap EXISTS query instead of
        # len(queryset) materialising every row; debug prints removed.
        if not existing.exists():
            Ratings(restid_id=restaurant.email, custid_id=custid.email,
                    rating=request.POST.get('rating'),
                    review=request.POST.get('review')).save()
        else:
            entry = Ratings.objects.get(restid_id=restaurant.email, custid_id=custid.email)
            entry.rating = request.POST.get('rating')
            entry.review = request.POST.get('review')
            entry.save()
        return redirect('/')
    else:
        return render(request,"foodspark/login.html")
def deleteAccount(request):
    """Delete the logged-in account (of whichever type) and show the login page.

    BUG FIX: the original's `redirect(request, "foodspark/login.html")` is an
    invalid call (redirect takes a URL, not a request), and its if/else
    pairing only rendered the login page by accident for customers; the
    chain below handles all three types uniformly.
    """
    if 'id' in request.session.keys():
        kind = request.session['type']
        if kind == 'customer':
            Customer.objects.get(email=request.session['id']).delete()
        elif kind == 'restaurant':
            Restaurant.objects.get(email=request.session['id']).delete()
        elif kind == 'deliveryboy':
            DeliveryBoy.objects.get(email=request.session['id']).delete()
        return render(request,"foodspark/login.html")
    else:
        return render(request,"foodspark/login.html")
def email(request):
    """Render the forgot-password e-mail entry page."""
    return render(request, "foodspark/email.html")
def otp_sent(request):
    """Reset a customer's password to a random 4-digit code and e-mail it.

    BUG FIX: the original wrapped everything in a bare ``except:`` so any
    SMTP failure was misreported as "no user with this email"; only the
    customer lookup is guarded now, and the SMTP session is always closed.
    """
    if request.method == 'POST':
        email = request.POST.get('email')
        try:
            customer = Customer.objects.get(email=email)
        except ObjectDoesNotExist:
            return HttpResponse("Sorry no user with this email")
        # SECURITY NOTE(review): 4 digits from `random` is guessable; a
        # longer token from the `secrets` module would be safer. TODO.
        digits = "0123456789"
        OTP = "".join(random.choice(digits) for _ in range(4))
        customer.set_password(customer.make_password(OTP))
        customer.save()
        msg = MIMEMultipart()
        msg['Subject'] = "This is your new password"
        msg.attach(MIMEText(str(OTP), 'plain'))
        # SECURITY NOTE(review): SMTP credentials are hard-coded in source;
        # move them to settings / environment variables.
        s = smtplib.SMTP('smtp.gmail.com', 587)
        try:
            s.starttls()
            s.login("foodfrenzy18@gmail.com", "cjqzmzhdiuhiescg")
            s.sendmail("foodfrenzy18@gmail.com", email, str(msg))
        finally:
            s.quit()
        return render(request,"foodspark/login.html")
|
{"/foodspark/admin.py": ["/foodspark/models.py"], "/foodspark/views.py": ["/foodspark/models.py"], "/foodspark/urls.py": ["/foodspark/views.py"]}
|
32,088
|
devshree07/FoodFrenzy
|
refs/heads/master
|
/foodspark/urls.py
|
"""URL routing for the foodspark app."""
from django.conf.urls import url
from django.urls import path
from .views import initiate_payment, callback
from . import views
# NOTE(review): several distinct patterns share name='home' and name='cart',
# so {% url %} reverses only to the last one registered; 'dbdetails' also
# routes to views.details, same as 'details'.
urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^login/?$', views.login, name='login'),
    url(r'^logout/?$', views.logout, name='logout'),
    url(r'^home/?$',views.home, name='home'),
    url(r'^dbhome/?$',views.home, name='home'),
    url(r'^signup/?$', views.signup, name='signup'),
    url(r'^search/?$',views.search,name='search'),
    url(r'^details/?$',views.details,name='details'),
    url(r'^dbdetails/?$',views.details,name='dbdetails'),
    url(r'^savedetails/?$',views.editDetails,name='editDetails'),
    url(r'^addtocart/cart/?$',views.cart,name='cart'),
    url(r'^cart/?$',views.cart,name='cart'),
    url(r'^history/?$',views.history,name='history'),
    url(r'^addrating/(?P<restname>[a-zA-Z0-9\s]+)/?$',views.restrating,name='restrating'),
    url(r'^dbhistory/?$',views.dbhistory,name='dbhistory'),
    url(r'^addtocart/?$',views.saveToCart,name='saveToCart'),
    url(r'^ratings/?$',views.ratings,name='ratings'),
    url(r'^restprofile/?$',views.restprofile,name='restprofile'),
    url(r'^resthistory/?$',views.restaurantOrderHistory,name='resthistory'),
    url(r'^delivered/?$',views.delivered,name='delivered'),
    url(r'^accepted/?$',views.accepted,name='accepted'),
    url(r'^declined/?$',views.declined,name='declined'),
    url(r'^decide/?$',views.decide,name='decide'),
    url(r'^onway/?$',views.onway,name='onway'),
    url(r'^addfooditem/?$',views.addfooditem,name='addfooditem'),
    url(r'^removefooditem/?$',views.removefooditem,name='removefooditem'),
    url(r'^callback/?$', views.callback, name='callback'),
    url(r'^addtocart/pay/?$', views.initiate_payment, name='pay'),
    # url(r'^makepaymenet/?$'.views.makepaymenet,name='makepaymenet'),
    url(r'^restaurant/(?P<restname>[a-zA-Z0-9\s]+)/?$',views.restview,name='restview'),
    url(r'^about/?$',views.about,name='about'),
    url(r'^deleteAccount/?$',views.deleteAccount,name='deleteAccount'),
    url(r'^forget_password/?$',views.otp_sent,name='forget_password'),
    url(r'^email/?$',views.email,name='email'),
]
|
{"/foodspark/admin.py": ["/foodspark/models.py"], "/foodspark/views.py": ["/foodspark/models.py"], "/foodspark/urls.py": ["/foodspark/views.py"]}
|
32,089
|
devshree07/FoodFrenzy
|
refs/heads/master
|
/foodspark/models.py
|
from __future__ import unicode_literals
from django.db import models
from django.core.validators import RegexValidator
import hashlib
from django.contrib.auth import get_user_model
from django.core.validators import *
from django.core.exceptions import ValidationError
import datetime
# default order time bug hai
User = get_user_model()
class Transaction(models.Model):
    """A payment transaction record (Paytm-style order id + checksum)."""
    #made_by = models.ForeignKey(Customer, related_name='transactions', on_delete=models.CASCADE)
    made_by = models.EmailField(max_length=100)
    made_on = models.DateTimeField(auto_now_add=True)
    amount = models.IntegerField()
    order_id = models.CharField(unique=True, max_length=100, null=True, blank=True)
    checksum = models.CharField(max_length=10000, null=True, blank=True)
    def save(self, *args, **kwargs):
        # NOTE(review): on the first save self.id is still None, so order_id
        # is only assigned when an existing row is saved again — presumably
        # the caller saves twice; confirm.
        if self.order_id is None and self.made_on and self.id:
            self.order_id = self.made_on.strftime('PAY2ME%Y%m%dODR') + str(self.id)
        return super().save(*args, **kwargs)
class Restaurant(models.Model):
    """A restaurant account, keyed by e-mail; password stored as an MD5 hex
    digest (see make_password)."""
    email = models.EmailField(primary_key = True)
    password = models.CharField(max_length=100)
    name = models.CharField(max_length=200)
    address = models.TextField()
    RES_TYPE = (
        ('B','Bar'),
        ('R','Restaurant'),
        ('C','Cafe')
    )
    res_type = models.CharField(max_length=1,choices = RES_TYPE,default = 'R')
    cuisine = models.CharField(null = True, max_length=100)
    # RATING = (
    # ('1','1'),
    # ('2','2'),
    # ('3','3'),
    # ('4','4'),
    # ('5','5')
    # )
    # rating = models.CharField(null = True,max_length=1,choices = RATING)
    # countrating = models.IntegerField(default = 0)
    city = models.CharField(max_length = 100,null = True)
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.") #############look into regex
    phone = models.CharField(validators=[phone_regex],max_length=15,blank = True)
    #image = models.ImageField(default = '/home/projjal/Projects/Foodspark/foodspark/static/img')
    imgurl = models.CharField(max_length=1000,null=True)
    ############################################################
    def make_password(self ,password):
        # SECURITY NOTE(review): unsalted MD5 is not a safe password hash;
        # Django's auth hashers should be used instead.
        assert password
        password = password.encode('UTF-8')
        hashedpassword = hashlib.md5(password).hexdigest()
        return hashedpassword
    def check_password(self, password):
        # Compare the MD5 digest of the candidate against the stored hash.
        assert password
        password = password.encode('UTF-8')
        hashed = hashlib.md5(password).hexdigest()
        return self.password == hashed
    def set_password(self, password):
        # Stores the value verbatim; callers pass make_password(...) output.
        self.password = password
class Customer(models.Model):
    """A customer account, keyed by e-mail; same MD5 password scheme as
    Restaurant/DeliveryBoy."""
    # userid = models.CharField(primary_key = True,max_length =50)
    password = models.CharField(max_length=100)
    name = models.CharField(max_length=200)
    address = models.TextField()
    city = models.CharField(max_length = 100)
    email = models.EmailField(primary_key = True)
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.") #############look into regex
    phone = models.CharField(validators=[phone_regex],max_length=15,blank = True)
    def make_password(self ,password):
        # SECURITY NOTE(review): unsalted MD5 — see Restaurant.make_password.
        assert password
        password = password.encode('UTF-8')
        hashedpassword = hashlib.md5(password).hexdigest()
        return hashedpassword
    def check_password(self, password):
        assert password
        password = password.encode('UTF-8')
        hashed = hashlib.md5(password).hexdigest()
        return self.password == hashed
    def set_password(self, password):
        # Stores the value verbatim; callers pass make_password(...) output.
        self.password = password
class DeliveryBoy(models.Model):
    """A delivery-boy account, keyed by e-mail; same MD5 password scheme as
    Restaurant/Customer."""
    # userid = models.CharField(primary_key = True,max_length =50)
    password = models.CharField(max_length=100)
    name = models.CharField(max_length=200)
    address = models.TextField()
    city = models.CharField(max_length = 100)
    email = models.EmailField(primary_key = True)
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.") #############look into regex
    phone = models.CharField(validators=[phone_regex],max_length=15,blank = True)
    def make_password(self ,password):
        # SECURITY NOTE(review): unsalted MD5 — see Restaurant.make_password.
        assert password
        password = password.encode('UTF-8')
        hashedpassword = hashlib.md5(password).hexdigest()
        return hashedpassword
    def check_password(self, password):
        assert password
        password = password.encode('UTF-8')
        hashed = hashlib.md5(password).hexdigest()
        return self.password == hashed
    def set_password(self, password):
        # Stores the value verbatim; callers pass make_password(...) output.
        self.password = password
class FoodItem(models.Model):
    """A dish on a restaurant's menu; ordercount tracks popularity for the
    recommended-items list."""
    resid = models.ForeignKey(Restaurant,on_delete=models.CASCADE)
    name = models.CharField(max_length=500)
    cuisine = models.CharField(max_length=100)
    COURSE = (
        ('s','Starter'),
        ('m','Main Course'),
        ('d','Desert')
    )
    course = models.CharField(max_length=1,choices=COURSE)
    price = models.IntegerField()
    availability_time = models.TimeField()
    ordercount = models.IntegerField(default = 0)
    # image = models.ImageField(null = True) ###########################################################
    # group = models.ForeignKey()
class Order(models.Model):
    """One order per customer/restaurant pair; food pks and quantities are
    stored as parallel comma-separated strings (foodlist/foodqty)."""
    customer = models.ForeignKey(Customer,on_delete=models.CASCADE)
    restaurant = models.ForeignKey(Restaurant,on_delete=models.CASCADE)
    foodlist = models.CharField(max_length = 500,validators=[validate_comma_separated_integer_list],null=True)
    foodqty = models.CharField(max_length = 500,validators=[validate_comma_separated_integer_list],null=True)
    amount = models.IntegerField(default = 0)
    ordertime = models.TimeField()
    orderdate = models.DateField(auto_now_add=True)
    # NOTE(review): views also assign 'a' (accepted), 'o' (on way) and
    # 'r' (rejected), which are missing from these choices — choices are not
    # enforced at the DB level, so it works, but forms/admin will flag them.
    DSTATUS = (
        ('p','Pending'),
        ('d','Delivered')
    )
    deliverystatus = models.CharField(max_length=1,choices=DSTATUS,default = 'p')
    def calamount(self):
        # Recompute amount from current FoodItem prices; does NOT save().
        self.amount = 0
        myl = self.foodlist.split(",")
        qty = self.foodqty.split(",")
        for x,y in zip(myl,qty):
            fitem = FoodItem.objects.get(pk=int(x))
            self.amount = self.amount + fitem.price*int(y)
    def getfooditems(self):
        # FoodItem instances referenced by foodlist, in stored order.
        myl = self.foodlist.split(",")
        items = []
        for x in myl:
            items.append(FoodItem.objects.get(pk=int(x)))
        return items
    def getqty(self):
        # Quantities as a list of strings, parallel to getfooditems().
        myl = self.foodqty.split(",")
        return myl
class Cart(models.Model):
    """A single cart line: one food item and its quantity for a customer."""
    customer = models.ForeignKey(Customer,on_delete=models.CASCADE)
    fooditem = models.ForeignKey(FoodItem,on_delete=models.CASCADE)
    foodqty = models.IntegerField()
class DeliveryItem(models.Model):
    """Delivery tracking row for an Order; deliveryboy_id holds the assigned
    boy's e-mail (plain field, not a FK)."""
    deliveryboy_id = models.EmailField(null=True,default='')
    order_id = models.ForeignKey(Order,on_delete=models.CASCADE)
    # NOTE(review): views also assign 'a' and 'o' here — same choices gap
    # as Order.DSTATUS.
    DSTATUS = (
        ('p','Pending'),
        ('d','Delivered')
    )
    deliverystatus = models.CharField(max_length=1,choices=DSTATUS,default = 'p')
    #deliverystatus = models.ForeignKey(Order,on_delete=models.CASCADE)
class Ratings(models.Model):
    """A customer's rating and free-text review of a restaurant."""
    restid = models.ForeignKey(Restaurant,on_delete=models.CASCADE)
    custid = models.ForeignKey(Customer,on_delete=models.CASCADE)
    rating = models.IntegerField(null=True)
    review = models.CharField(max_length=1000,null=True)
    #deliverystatus = models.ForeignKey(Order,on_delete=models.CASCADE)
class Temp(models.Model):
    """Scratch row holding a customer's pending cart total for the payment step."""
    custid=models.ForeignKey(Customer,on_delete=models.CASCADE)
    amount=models.IntegerField(null=True)
class Locuser(models.Model):
    """Geocoded customer location; address_user stores the coordinate pair as
    a string (views eval() it back — presumably a "(lat, lon)" repr)."""
    #custid=models.ForeignKey(Customer,on_delete=models.CASCADE)
    custid=models.EmailField()
    address_user=models.CharField(max_length=1000,null=True)
class Locrest(models.Model):
    """Geocoded restaurant location; same string-encoded coordinates as Locuser."""
    restid=models.EmailField()
    address_rest=models.CharField(max_length=1000,null=True)
|
{"/foodspark/admin.py": ["/foodspark/models.py"], "/foodspark/views.py": ["/foodspark/models.py"], "/foodspark/urls.py": ["/foodspark/views.py"]}
|
32,090
|
Aadil-Rashid/shopping_cart
|
refs/heads/master
|
/shop/admin.py
|
from django.contrib import admin
from . models import Product, Order
# Register your models here.
# Branding for the Django admin site.
admin.site.site_header = "E-commerce site"
admin.site.site_title = "Dilbis Shoping Cart"
admin.site.index_title = "Dilbis Shopping"
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for Product: list columns, category search, an
    action to reset categories, and inline-editable price/category."""
    list_display = ("title", "price", "discount_price", "category") # displays these products in admin/product page...
    search_fields = ('category',) # search accoding to category in admin/product page...
    # adding acitons to the admin/product page...
    def change_category(self, request, queryset):
        # Bulk action: reset the selected products' category to "dilbis".
        queryset.update(category="dilbis")
    change_category.short_description = 'Default Category'
    actions = ('change_category',)
    # Displaying certain items to admin/products/name_product...
    # fields=('title', 'price',)
    # Making list item editable
    list_editable = ('price', 'category',)
admin.site.register(Product, ProductAdmin)
admin.site.register(Order)
|
{"/shop/admin.py": ["/shop/models.py"]}
|
32,091
|
Aadil-Rashid/shopping_cart
|
refs/heads/master
|
/shop/migrations/0003_order_total.py
|
# Generated by Django 3.0.8 on 2020-09-03 10:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds Order.total (int). Do not hand-edit
    applied migrations; create a new one for schema changes."""
    dependencies = [
        ('shop', '0002_order'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='total',
            # One-off default so existing rows get a value during migration.
            field=models.IntegerField(default='1'),
            preserve_default=False,
        ),
    ]
|
{"/shop/admin.py": ["/shop/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.